777 files changed, 19971 insertions, 7665 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index c17cd4bb2290..1b777b960492 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX | |||
@@ -328,8 +328,6 @@ sysrq.txt | |||
328 | - info on the magic SysRq key. | 328 | - info on the magic SysRq key. |
329 | telephony/ | 329 | telephony/ |
330 | - directory with info on telephony (e.g. voice over IP) support. | 330 | - directory with info on telephony (e.g. voice over IP) support. |
331 | uml/ | ||
332 | - directory with information about User Mode Linux. | ||
333 | unicode.txt | 331 | unicode.txt |
334 | - info on the Unicode character/font mapping used in Linux. | 332 | - info on the Unicode character/font mapping used in Linux. |
335 | unshare.txt | 333 | unshare.txt |
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power index 194ca446ac28..b464d12761ba 100644 --- a/Documentation/ABI/testing/sysfs-power +++ b/Documentation/ABI/testing/sysfs-power | |||
@@ -158,3 +158,17 @@ Description: | |||
158 | successful, will make the kernel abort a subsequent transition | 158 | successful, will make the kernel abort a subsequent transition |
159 | to a sleep state if any wakeup events are reported after the | 159 | to a sleep state if any wakeup events are reported after the |
160 | write has returned. | 160 | write has returned. |
161 | |||
162 | What: /sys/power/reserved_size | ||
163 | Date: May 2011 | ||
164 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
165 | Description: | ||
166 | The /sys/power/reserved_size file allows user space to control | ||
167 | the amount of memory reserved for allocations made by device | ||
168 | drivers during the "device freeze" stage of hibernation. It can | ||
169 | be written a string representing a non-negative integer that | ||
170 | will be used as the amount of memory to reserve for allocations | ||
171 | made by device drivers' "freeze" callbacks, in bytes. | ||
172 | |||
173 | Reading from this file will display the current value, which is | ||
174 | set to 1 MB by default. | ||
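
The reserved_size attribute documented above takes a plain byte count. As a rough user-space illustration (not part of the patch; the 2 MB value and the error handling are arbitrary), the reserve could be raised like this:

    #include <stdio.h>

    int main(void)
    {
            /* write a non-negative byte count; the kernel default is 1 MB */
            FILE *f = fopen("/sys/power/reserved_size", "w");

            if (!f)
                    return 1;
            fprintf(f, "%u\n", 2 * 1024 * 1024);    /* reserve 2 MB */
            return fclose(f) ? 1 : 0;
    }
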
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl index fb10fd08c05c..b3422341d65c 100644 --- a/Documentation/DocBook/genericirq.tmpl +++ b/Documentation/DocBook/genericirq.tmpl | |||
@@ -191,8 +191,8 @@ | |||
191 | <para> | 191 | <para> |
192 | Whenever an interrupt triggers, the lowlevel arch code calls into | 192 | Whenever an interrupt triggers, the lowlevel arch code calls into |
193 | the generic interrupt code by calling desc->handle_irq(). | 193 | the generic interrupt code by calling desc->handle_irq(). |
194 | This highlevel IRQ handling function only uses desc->chip primitives | 194 | This highlevel IRQ handling function only uses desc->irq_data.chip |
195 | referenced by the assigned chip descriptor structure. | 195 | primitives referenced by the assigned chip descriptor structure. |
196 | </para> | 196 | </para> |
197 | </sect1> | 197 | </sect1> |
198 | <sect1 id="Highlevel_Driver_API"> | 198 | <sect1 id="Highlevel_Driver_API"> |
@@ -206,11 +206,11 @@ | |||
206 | <listitem><para>enable_irq()</para></listitem> | 206 | <listitem><para>enable_irq()</para></listitem> |
207 | <listitem><para>disable_irq_nosync() (SMP only)</para></listitem> | 207 | <listitem><para>disable_irq_nosync() (SMP only)</para></listitem> |
208 | <listitem><para>synchronize_irq() (SMP only)</para></listitem> | 208 | <listitem><para>synchronize_irq() (SMP only)</para></listitem> |
209 | <listitem><para>set_irq_type()</para></listitem> | 209 | <listitem><para>irq_set_irq_type()</para></listitem> |
210 | <listitem><para>set_irq_wake()</para></listitem> | 210 | <listitem><para>irq_set_irq_wake()</para></listitem> |
211 | <listitem><para>set_irq_data()</para></listitem> | 211 | <listitem><para>irq_set_handler_data()</para></listitem> |
212 | <listitem><para>set_irq_chip()</para></listitem> | 212 | <listitem><para>irq_set_chip()</para></listitem> |
213 | <listitem><para>set_irq_chip_data()</para></listitem> | 213 | <listitem><para>irq_set_chip_data()</para></listitem> |
214 | </itemizedlist> | 214 | </itemizedlist> |
215 | See the autogenerated function documentation for details. | 215 | See the autogenerated function documentation for details. |
216 | </para> | 216 | </para> |
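
The list above reflects the rename of the old set_irq_*() helpers to irq_set_*(). A hedged sketch of a setup path using the new names follows; the chip pointer, private data and trigger type are illustrative, not taken from the patch:

    #include <linux/irq.h>

    static void example_wire_up_irq(unsigned int irq, struct irq_chip *chip,
                                    void *priv)
    {
            irq_set_chip(irq, chip);                     /* was set_irq_chip()      */
            irq_set_chip_data(irq, priv);                /* was set_irq_chip_data() */
            irq_set_handler_data(irq, priv);             /* was set_irq_data()      */
            irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); /* was set_irq_type()      */
            irq_set_irq_wake(irq, 1);                    /* was set_irq_wake()      */
    }
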
@@ -225,6 +225,8 @@ | |||
225 | <listitem><para>handle_fasteoi_irq</para></listitem> | 225 | <listitem><para>handle_fasteoi_irq</para></listitem> |
226 | <listitem><para>handle_simple_irq</para></listitem> | 226 | <listitem><para>handle_simple_irq</para></listitem> |
227 | <listitem><para>handle_percpu_irq</para></listitem> | 227 | <listitem><para>handle_percpu_irq</para></listitem> |
228 | <listitem><para>handle_edge_eoi_irq</para></listitem> | ||
229 | <listitem><para>handle_bad_irq</para></listitem> | ||
228 | </itemizedlist> | 230 | </itemizedlist> |
229 | The interrupt flow handlers (either predefined or architecture | 231 | The interrupt flow handlers (either predefined or architecture |
230 | specific) are assigned to specific interrupts by the architecture | 232 | specific) are assigned to specific interrupts by the architecture |
@@ -241,13 +243,13 @@ | |||
241 | <programlisting> | 243 | <programlisting> |
242 | default_enable(struct irq_data *data) | 244 | default_enable(struct irq_data *data) |
243 | { | 245 | { |
244 | desc->chip->irq_unmask(data); | 246 | desc->irq_data.chip->irq_unmask(data); |
245 | } | 247 | } |
246 | 248 | ||
247 | default_disable(struct irq_data *data) | 249 | default_disable(struct irq_data *data) |
248 | { | 250 | { |
249 | if (!delay_disable(data)) | 251 | if (!delay_disable(data)) |
250 | desc->chip->irq_mask(data); | 252 | desc->irq_data.chip->irq_mask(data); |
251 | } | 253 | } |
252 | 254 | ||
253 | default_ack(struct irq_data *data) | 255 | default_ack(struct irq_data *data) |
@@ -284,9 +286,9 @@ noop(struct irq_data *data)) | |||
284 | <para> | 286 | <para> |
285 | The following control flow is implemented (simplified excerpt): | 287 | The following control flow is implemented (simplified excerpt): |
286 | <programlisting> | 288 | <programlisting> |
287 | desc->chip->irq_mask(); | 289 | desc->irq_data.chip->irq_mask_ack(); |
288 | handle_IRQ_event(desc->action); | 290 | handle_irq_event(desc->action); |
289 | desc->chip->irq_unmask(); | 291 | desc->irq_data.chip->irq_unmask(); |
290 | </programlisting> | 292 | </programlisting> |
291 | </para> | 293 | </para> |
292 | </sect3> | 294 | </sect3> |
@@ -300,8 +302,8 @@ desc->chip->irq_unmask(); | |||
300 | <para> | 302 | <para> |
301 | The following control flow is implemented (simplified excerpt): | 303 | The following control flow is implemented (simplified excerpt): |
302 | <programlisting> | 304 | <programlisting> |
303 | handle_IRQ_event(desc->action); | 305 | handle_irq_event(desc->action); |
304 | desc->chip->irq_eoi(); | 306 | desc->irq_data.chip->irq_eoi(); |
305 | </programlisting> | 307 | </programlisting> |
306 | </para> | 308 | </para> |
307 | </sect3> | 309 | </sect3> |
@@ -315,17 +317,17 @@ desc->chip->irq_eoi(); | |||
315 | The following control flow is implemented (simplified excerpt): | 317 | The following control flow is implemented (simplified excerpt): |
316 | <programlisting> | 318 | <programlisting> |
317 | if (desc->status & running) { | 319 | if (desc->status & running) { |
318 | desc->chip->irq_mask(); | 320 | desc->irq_data.chip->irq_mask_ack(); |
319 | desc->status |= pending | masked; | 321 | desc->status |= pending | masked; |
320 | return; | 322 | return; |
321 | } | 323 | } |
322 | desc->chip->irq_ack(); | 324 | desc->irq_data.chip->irq_ack(); |
323 | desc->status |= running; | 325 | desc->status |= running; |
324 | do { | 326 | do { |
325 | if (desc->status & masked) | 327 | if (desc->status & masked) |
326 | desc->chip->irq_unmask(); | 328 | desc->irq_data.chip->irq_unmask(); |
327 | desc->status &= ~pending; | 329 | desc->status &= ~pending; |
328 | handle_IRQ_event(desc->action); | 330 | handle_irq_event(desc->action); |
329 | } while (status & pending); | 331 | } while (status & pending); |
330 | desc->status &= ~running; | 332 | desc->status &= ~running; |
331 | </programlisting> | 333 | </programlisting> |
@@ -344,7 +346,7 @@ desc->status &= ~running; | |||
344 | <para> | 346 | <para> |
345 | The following control flow is implemented (simplified excerpt): | 347 | The following control flow is implemented (simplified excerpt): |
346 | <programlisting> | 348 | <programlisting> |
347 | handle_IRQ_event(desc->action); | 349 | handle_irq_event(desc->action); |
348 | </programlisting> | 350 | </programlisting> |
349 | </para> | 351 | </para> |
350 | </sect3> | 352 | </sect3> |
@@ -362,12 +364,29 @@ handle_IRQ_event(desc->action); | |||
362 | <para> | 364 | <para> |
363 | The following control flow is implemented (simplified excerpt): | 365 | The following control flow is implemented (simplified excerpt): |
364 | <programlisting> | 366 | <programlisting> |
365 | handle_IRQ_event(desc->action); | 367 | if (desc->irq_data.chip->irq_ack) |
366 | if (desc->chip->irq_eoi) | 368 | desc->irq_data.chip->irq_ack(); |
367 | desc->chip->irq_eoi(); | 369 | handle_irq_event(desc->action); |
370 | if (desc->irq_data.chip->irq_eoi) | ||
371 | desc->irq_data.chip->irq_eoi(); | ||
368 | </programlisting> | 372 | </programlisting> |
369 | </para> | 373 | </para> |
370 | </sect3> | 374 | </sect3> |
375 | <sect3 id="EOI_Edge_IRQ_flow_handler"> | ||
376 | <title>EOI Edge IRQ flow handler</title> | ||
377 | <para> | ||
378 | handle_edge_eoi_irq provides an abomination of the edge | ||
379 | handler which is solely used to tame a badly wrecked | ||
380 | irq controller on powerpc/cell. | ||
381 | </para> | ||
382 | </sect3> | ||
383 | <sect3 id="BAD_IRQ_flow_handler"> | ||
384 | <title>Bad IRQ flow handler</title> | ||
385 | <para> | ||
386 | handle_bad_irq is used for spurious interrupts which | ||
387 | have no real handler assigned. | ||
388 | </para> | ||
389 | </sect3> | ||
371 | </sect2> | 390 | </sect2> |
372 | <sect2 id="Quirks_and_optimizations"> | 391 | <sect2 id="Quirks_and_optimizations"> |
373 | <title>Quirks and optimizations</title> | 392 | <title>Quirks and optimizations</title> |
@@ -410,6 +429,7 @@ if (desc->chip->irq_eoi) | |||
410 | <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem> | 429 | <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem> |
411 | <listitem><para>irq_mask()</para></listitem> | 430 | <listitem><para>irq_mask()</para></listitem> |
412 | <listitem><para>irq_unmask()</para></listitem> | 431 | <listitem><para>irq_unmask()</para></listitem> |
432 | <listitem><para>irq_eoi() - Optional, required for eoi flow handlers</para></listitem> | ||
413 | <listitem><para>irq_retrigger() - Optional</para></listitem> | 433 | <listitem><para>irq_retrigger() - Optional</para></listitem> |
414 | <listitem><para>irq_set_type() - Optional</para></listitem> | 434 | <listitem><para>irq_set_type() - Optional</para></listitem> |
415 | <listitem><para>irq_set_wake() - Optional</para></listitem> | 435 | <listitem><para>irq_set_wake() - Optional</para></listitem> |
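
To make the chip-level callbacks listed above concrete, here is a minimal hedged sketch of an irq_chip; the register accessor my_intc_writel() and the MY_* offsets stand in for whatever the real controller provides:

    #include <linux/irq.h>

    /* placeholder register interface for an imaginary controller */
    #define MY_MASK   0x00
    #define MY_UNMASK 0x04
    #define MY_ACK    0x08
    static void my_intc_writel(unsigned long val, unsigned int reg) { }

    static void my_irq_mask(struct irq_data *d)
    {
            my_intc_writel(1UL << d->irq, MY_MASK);     /* mask the line */
    }

    static void my_irq_unmask(struct irq_data *d)
    {
            my_intc_writel(1UL << d->irq, MY_UNMASK);   /* unmask the line */
    }

    static void my_irq_ack(struct irq_data *d)
    {
            my_intc_writel(1UL << d->irq, MY_ACK);      /* acknowledge it */
    }

    static struct irq_chip my_irq_chip = {
            .name       = "my-intc",
            .irq_ack    = my_irq_ack,
            .irq_mask   = my_irq_mask,
            .irq_unmask = my_irq_unmask,
    };
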
@@ -424,32 +444,24 @@ if (desc->chip->irq_eoi) | |||
424 | <chapter id="doirq"> | 444 | <chapter id="doirq"> |
425 | <title>__do_IRQ entry point</title> | 445 | <title>__do_IRQ entry point</title> |
426 | <para> | 446 | <para> |
427 | The original implementation __do_IRQ() is an alternative entry | 447 | The original implementation __do_IRQ() was an alternative entry |
428 | point for all types of interrupts. | 448 | point for all types of interrupts. It no longer exists. |
429 | </para> | 449 | </para> |
430 | <para> | 450 | <para> |
431 | This handler turned out to be not suitable for all | 451 | This handler turned out to be not suitable for all |
432 | interrupt hardware and was therefore reimplemented with split | 452 | interrupt hardware and was therefore reimplemented with split |
433 | functionality for egde/level/simple/percpu interrupts. This is not | 453 | functionality for edge/level/simple/percpu interrupts. This is not |
434 | only a functional optimization. It also shortens code paths for | 454 | only a functional optimization. It also shortens code paths for |
435 | interrupts. | 455 | interrupts. |
436 | </para> | 456 | </para> |
437 | <para> | ||
438 | To make use of the split implementation, replace the call to | ||
439 | __do_IRQ by a call to desc->handle_irq() and associate | ||
440 | the appropriate handler function to desc->handle_irq(). | ||
441 | In most cases the generic handler implementations should | ||
442 | be sufficient. | ||
443 | </para> | ||
444 | </chapter> | 457 | </chapter> |
445 | 458 | ||
446 | <chapter id="locking"> | 459 | <chapter id="locking"> |
447 | <title>Locking on SMP</title> | 460 | <title>Locking on SMP</title> |
448 | <para> | 461 | <para> |
449 | The locking of chip registers is up to the architecture that | 462 | The locking of chip registers is up to the architecture that |
450 | defines the chip primitives. There is a chip->lock field that can be used | 463 | defines the chip primitives. The per-irq structure is |
451 | for serialization, but the generic layer does not touch it. The per-irq | 464 | protected via desc->lock, by the generic layer. |
452 | structure is protected via desc->lock, by the generic layer. | ||
453 | </para> | 465 | </para> |
454 | </chapter> | 466 | </chapter> |
455 | <chapter id="structs"> | 467 | <chapter id="structs"> |
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 492e81df2968..f6a24e8aa11e 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
@@ -460,14 +460,6 @@ Who: Thomas Gleixner <tglx@linutronix.de> | |||
460 | 460 | ||
461 | ---------------------------- | 461 | ---------------------------- |
462 | 462 | ||
463 | What: The acpi_sleep=s4_nonvs command line option | ||
464 | When: 2.6.37 | ||
465 | Files: arch/x86/kernel/acpi/sleep.c | ||
466 | Why: superseded by acpi_sleep=nonvs | ||
467 | Who: Rafael J. Wysocki <rjw@sisk.pl> | ||
468 | |||
469 | ---------------------------- | ||
470 | |||
471 | What: PCI DMA unmap state API | 463 | What: PCI DMA unmap state API |
472 | When: August 2012 | 464 | When: August 2012 |
473 | Why: PCI DMA unmap state API (include/linux/pci-dma.h) was replaced | 465 | Why: PCI DMA unmap state API (include/linux/pci-dma.h) was replaced |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index cc85a9278190..259037b873b7 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -245,7 +245,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
245 | 245 | ||
246 | acpi_sleep= [HW,ACPI] Sleep options | 246 | acpi_sleep= [HW,ACPI] Sleep options |
247 | Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, | 247 | Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, |
248 | old_ordering, s4_nonvs, sci_force_enable } | 248 | old_ordering, nonvs, sci_force_enable } |
249 | See Documentation/power/video.txt for information on | 249 | See Documentation/power/video.txt for information on |
250 | s3_bios and s3_mode. | 250 | s3_bios and s3_mode. |
251 | s3_beep is for debugging; it makes the PC's speaker beep | 251 | s3_beep is for debugging; it makes the PC's speaker beep |
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt index 1971bcf48a60..88880839ece4 100644 --- a/Documentation/power/devices.txt +++ b/Documentation/power/devices.txt | |||
@@ -279,11 +279,15 @@ When the system goes into the standby or memory sleep state, the phases are: | |||
279 | time.) Unlike the other suspend-related phases, during the prepare | 279 | time.) Unlike the other suspend-related phases, during the prepare |
280 | phase the device tree is traversed top-down. | 280 | phase the device tree is traversed top-down. |
281 | 281 | ||
282 | The prepare phase uses only a bus callback. After the callback method | 282 | In addition to that, if device drivers need to allocate additional |
283 | returns, no new children may be registered below the device. The method | 283 | memory to be able to handle device suspend correctly, that should be |
284 | may also prepare the device or driver in some way for the upcoming | 284 | done in the prepare phase. |
285 | system power transition, but it should not put the device into a | 285 | |
286 | low-power state. | 286 | After the prepare callback method returns, no new children may be |
287 | registered below the device. The method may also prepare the device or | ||
288 | driver in some way for the upcoming system power transition (for | ||
289 | example, by allocating additional memory required for this purpose), but | ||
290 | it should not put the device into a low-power state. | ||
287 | 291 | ||
288 | 2. The suspend methods should quiesce the device to stop it from performing | 292 | 2. The suspend methods should quiesce the device to stop it from performing |
289 | I/O. They also may save the device registers and put it into the | 293 | I/O. They also may save the device registers and put it into the |
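
The added paragraphs recommend doing suspend-related memory allocations in the prepare phase. Below is a hedged sketch of a driver following that advice; the context structure, buffer size and drvdata handling are illustrative only:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/pm.h>
    #include <linux/slab.h>

    struct my_ctx {
            void *suspend_buf;
    };

    static int my_prepare(struct device *dev)
    {
            struct my_ctx *ctx = dev_get_drvdata(dev);

            /* allocate up front; do not put the device in a low-power state here */
            ctx->suspend_buf = kmalloc(64 * 1024, GFP_KERNEL);
            return ctx->suspend_buf ? 0 : -ENOMEM;
    }

    static void my_complete(struct device *dev)
    {
            struct my_ctx *ctx = dev_get_drvdata(dev);

            kfree(ctx->suspend_buf);
            ctx->suspend_buf = NULL;
    }

    static const struct dev_pm_ops my_pm_ops = {
            .prepare  = my_prepare,
            .complete = my_complete,
            /* .suspend/.resume quiesce and reactivate the device as before */
    };
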
diff --git a/Documentation/power/notifiers.txt b/Documentation/power/notifiers.txt index cf980709122a..c2a4a346c0d9 100644 --- a/Documentation/power/notifiers.txt +++ b/Documentation/power/notifiers.txt | |||
@@ -1,46 +1,41 @@ | |||
1 | Suspend notifiers | 1 | Suspend notifiers |
2 | (C) 2007 Rafael J. Wysocki <rjw@sisk.pl>, GPL | 2 | (C) 2007-2011 Rafael J. Wysocki <rjw@sisk.pl>, GPL |
3 | 3 | ||
4 | There are some operations that device drivers may want to carry out in their | 4 | There are some operations that subsystems or drivers may want to carry out |
5 | .suspend() routines, but shouldn't, because they can cause the hibernation or | 5 | before hibernation/suspend or after restore/resume, but they require the system |
6 | suspend to fail. For example, a driver may want to allocate a substantial amount | 6 | to be fully functional, so the drivers' and subsystems' .suspend() and .resume() |
7 | of memory (like 50 MB) in .suspend(), but that shouldn't be done after the | 7 | or even .prepare() and .complete() callbacks are not suitable for this purpose. |
8 | swsusp's memory shrinker has run. | 8 | For example, device drivers may want to upload firmware to their devices after |
9 | 9 | resume/restore, but they cannot do it by calling request_firmware() from their | |
10 | Also, there may be some operations, that subsystems want to carry out before a | 10 | .resume() or .complete() routines (user land processes are frozen at these |
11 | hibernation/suspend or after a restore/resume, requiring the system to be fully | 11 | points). The solution may be to load the firmware into memory before processes |
12 | functional, so the drivers' .suspend() and .resume() routines are not suitable | 12 | are frozen and upload it from there in the .resume() routine. |
13 | for this purpose. For example, device drivers may want to upload firmware to | 13 | A suspend/hibernation notifier may be used for this purpose. |
14 | their devices after a restore from a hibernation image, but they cannot do it by | 14 | |
15 | calling request_firmware() from their .resume() routines (user land processes | 15 | The subsystems or drivers having such needs can register suspend notifiers that |
16 | are frozen at this point). The solution may be to load the firmware into | 16 | will be called upon the following events by the PM core: |
17 | memory before processes are frozen and upload it from there in the .resume() | ||
18 | routine. Of course, a hibernation notifier may be used for this purpose. | ||
19 | |||
20 | The subsystems that have such needs can register suspend notifiers that will be | ||
21 | called upon the following events by the suspend core: | ||
22 | 17 | ||
23 | PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will | 18 | PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will |
24 | be frozen immediately. | 19 | be frozen immediately. |
25 | 20 | ||
26 | PM_POST_HIBERNATION The system memory state has been restored from a | 21 | PM_POST_HIBERNATION The system memory state has been restored from a |
27 | hibernation image or an error occurred during the | 22 | hibernation image or an error occurred during |
28 | hibernation. Device drivers' .resume() callbacks have | 23 | hibernation. Device drivers' restore callbacks have |
29 | been executed and tasks have been thawed. | 24 | been executed and tasks have been thawed. |
30 | 25 | ||
31 | PM_RESTORE_PREPARE The system is going to restore a hibernation image. | 26 | PM_RESTORE_PREPARE The system is going to restore a hibernation image. |
32 | If all goes well the restored kernel will issue a | 27 | If all goes well, the restored kernel will issue a |
33 | PM_POST_HIBERNATION notification. | 28 | PM_POST_HIBERNATION notification. |
34 | 29 | ||
35 | PM_POST_RESTORE An error occurred during the hibernation restore. | 30 | PM_POST_RESTORE An error occurred during restore from hibernation. |
36 | Device drivers' .resume() callbacks have been executed | 31 | Device drivers' restore callbacks have been executed |
37 | and tasks have been thawed. | 32 | and tasks have been thawed. |
38 | 33 | ||
39 | PM_SUSPEND_PREPARE The system is preparing for a suspend. | 34 | PM_SUSPEND_PREPARE The system is preparing for suspend. |
40 | 35 | ||
41 | PM_POST_SUSPEND The system has just resumed or an error occurred during | 36 | PM_POST_SUSPEND The system has just resumed or an error occurred during |
42 | the suspend. Device drivers' .resume() callbacks have | 37 | suspend. Device drivers' resume callbacks have been |
43 | been executed and tasks have been thawed. | 38 | executed and tasks have been thawed. |
44 | 39 | ||
45 | It is generally assumed that whatever the notifiers do for | 40 | It is generally assumed that whatever the notifiers do for |
46 | PM_HIBERNATION_PREPARE, should be undone for PM_POST_HIBERNATION. Analogously, | 41 | PM_HIBERNATION_PREPARE, should be undone for PM_POST_HIBERNATION. Analogously, |
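
The firmware example in the rewritten text maps onto the PM notifier API roughly as follows; my_cache_fw() and my_release_fw() are invented placeholders and this is a sketch, not code from the patch:

    #include <linux/notifier.h>
    #include <linux/suspend.h>

    static int my_cache_fw(void)    { return 0; }   /* load firmware into RAM */
    static void my_release_fw(void) { }             /* drop the cached copy   */

    static int my_pm_notify(struct notifier_block *nb, unsigned long event,
                            void *unused)
    {
            switch (event) {
            case PM_HIBERNATION_PREPARE:
            case PM_SUSPEND_PREPARE:
                    /* tasks are not frozen yet, so request_firmware() still works */
                    return my_cache_fw() ? NOTIFY_BAD : NOTIFY_OK;
            case PM_POST_HIBERNATION:
            case PM_POST_SUSPEND:
            case PM_POST_RESTORE:
                    my_release_fw();
                    return NOTIFY_OK;
            default:
                    return NOTIFY_DONE;
            }
    }

    static struct notifier_block my_pm_nb = {
            .notifier_call = my_pm_notify,
    };

    /* register_pm_notifier(&my_pm_nb) would typically be called at init time */
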
diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX new file mode 100644 index 000000000000..fe0251c4cfb7 --- /dev/null +++ b/Documentation/virtual/00-INDEX | |||
@@ -0,0 +1,10 @@ | |||
1 | Virtualization support in the Linux kernel. | ||
2 | |||
3 | 00-INDEX | ||
4 | - this file. | ||
5 | kvm/ | ||
6 | - Kernel Virtual Machine. See also http://linux-kvm.org | ||
7 | lguest/ | ||
8 | - Extremely simple hypervisor for experimental/educational use. | ||
9 | uml/ | ||
10 | - User Mode Linux, builds/runs Linux kernel as a userspace program. | ||
diff --git a/Documentation/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 9bef4e4cec50..9bef4e4cec50 100644 --- a/Documentation/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt | |||
diff --git a/Documentation/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt index 882068538c9c..882068538c9c 100644 --- a/Documentation/kvm/cpuid.txt +++ b/Documentation/virtual/kvm/cpuid.txt | |||
diff --git a/Documentation/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt index 3b4cd3bf5631..3b4cd3bf5631 100644 --- a/Documentation/kvm/locking.txt +++ b/Documentation/virtual/kvm/locking.txt | |||
diff --git a/Documentation/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt index f46aa58389ca..f46aa58389ca 100644 --- a/Documentation/kvm/mmu.txt +++ b/Documentation/virtual/kvm/mmu.txt | |||
diff --git a/Documentation/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt index d079aed27e03..d079aed27e03 100644 --- a/Documentation/kvm/msr.txt +++ b/Documentation/virtual/kvm/msr.txt | |||
diff --git a/Documentation/kvm/ppc-pv.txt b/Documentation/virtual/kvm/ppc-pv.txt index 3ab969c59046..3ab969c59046 100644 --- a/Documentation/kvm/ppc-pv.txt +++ b/Documentation/virtual/kvm/ppc-pv.txt | |||
diff --git a/Documentation/kvm/review-checklist.txt b/Documentation/virtual/kvm/review-checklist.txt index 730475ae1b8d..a850986ed684 100644 --- a/Documentation/kvm/review-checklist.txt +++ b/Documentation/virtual/kvm/review-checklist.txt | |||
@@ -7,7 +7,7 @@ Review checklist for kvm patches | |||
7 | 2. Patches should be against kvm.git master branch. | 7 | 2. Patches should be against kvm.git master branch. |
8 | 8 | ||
9 | 3. If the patch introduces or modifies a new userspace API: | 9 | 3. If the patch introduces or modifies a new userspace API: |
10 | - the API must be documented in Documentation/kvm/api.txt | 10 | - the API must be documented in Documentation/virtual/kvm/api.txt |
11 | - the API must be discoverable using KVM_CHECK_EXTENSION | 11 | - the API must be discoverable using KVM_CHECK_EXTENSION |
12 | 12 | ||
13 | 4. New state must include support for save/restore. | 13 | 4. New state must include support for save/restore. |
diff --git a/Documentation/kvm/timekeeping.txt b/Documentation/virtual/kvm/timekeeping.txt index df8946377cb6..df8946377cb6 100644 --- a/Documentation/kvm/timekeeping.txt +++ b/Documentation/virtual/kvm/timekeeping.txt | |||
diff --git a/Documentation/lguest/.gitignore b/Documentation/virtual/lguest/.gitignore index 115587fd5f65..115587fd5f65 100644 --- a/Documentation/lguest/.gitignore +++ b/Documentation/virtual/lguest/.gitignore | |||
diff --git a/Documentation/lguest/Makefile b/Documentation/virtual/lguest/Makefile index bebac6b4f332..bebac6b4f332 100644 --- a/Documentation/lguest/Makefile +++ b/Documentation/virtual/lguest/Makefile | |||
diff --git a/Documentation/lguest/extract b/Documentation/virtual/lguest/extract index 7730bb6e4b94..7730bb6e4b94 100644 --- a/Documentation/lguest/extract +++ b/Documentation/virtual/lguest/extract | |||
diff --git a/Documentation/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c index d9da7e148538..d9da7e148538 100644 --- a/Documentation/lguest/lguest.c +++ b/Documentation/virtual/lguest/lguest.c | |||
diff --git a/Documentation/lguest/lguest.txt b/Documentation/virtual/lguest/lguest.txt index dad99978a6a8..bff0c554485d 100644 --- a/Documentation/lguest/lguest.txt +++ b/Documentation/virtual/lguest/lguest.txt | |||
@@ -74,7 +74,8 @@ Running Lguest: | |||
74 | 74 | ||
75 | - Run an lguest as root: | 75 | - Run an lguest as root: |
76 | 76 | ||
77 | Documentation/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 --block=rootfile root=/dev/vda | 77 | Documentation/virtual/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 \ |
78 | --block=rootfile root=/dev/vda | ||
78 | 79 | ||
79 | Explanation: | 80 | Explanation: |
80 | 64: the amount of memory to use, in MB. | 81 | 64: the amount of memory to use, in MB. |
diff --git a/Documentation/uml/UserModeLinux-HOWTO.txt b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt index 9b7e1904db1c..9b7e1904db1c 100644 --- a/Documentation/uml/UserModeLinux-HOWTO.txt +++ b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt | |||
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt index 092e596a1301..c54b4f503e2a 100644 --- a/Documentation/x86/x86_64/boot-options.txt +++ b/Documentation/x86/x86_64/boot-options.txt | |||
@@ -206,7 +206,7 @@ IOMMU (input/output memory management unit) | |||
206 | (e.g. because you have < 3 GB memory). | 206 | (e.g. because you have < 3 GB memory). |
207 | Kernel boot message: "PCI-DMA: Disabling IOMMU" | 207 | Kernel boot message: "PCI-DMA: Disabling IOMMU" |
208 | 208 | ||
209 | 2. <arch/x86_64/kernel/pci-gart.c>: AMD GART based hardware IOMMU. | 209 | 2. <arch/x86/kernel/amd_gart_64.c>: AMD GART based hardware IOMMU. |
210 | Kernel boot message: "PCI-DMA: using GART IOMMU" | 210 | Kernel boot message: "PCI-DMA: using GART IOMMU" |
211 | 211 | ||
212 | 3. <arch/x86_64/kernel/pci-swiotlb.c> : Software IOMMU implementation. Used | 212 | 3. <arch/x86_64/kernel/pci-swiotlb.c> : Software IOMMU implementation. Used |
diff --git a/MAINTAINERS b/MAINTAINERS index 16a5c5f2c6a6..8df8d2dfba28 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -405,8 +405,8 @@ S: Maintained | |||
405 | F: sound/oss/aedsp16.c | 405 | F: sound/oss/aedsp16.c |
406 | 406 | ||
407 | AFFS FILE SYSTEM | 407 | AFFS FILE SYSTEM |
408 | M: Roman Zippel <zippel@linux-m68k.org> | 408 | L: linux-fsdevel@vger.kernel.org |
409 | S: Maintained | 409 | S: Orphan |
410 | F: Documentation/filesystems/affs.txt | 410 | F: Documentation/filesystems/affs.txt |
411 | F: fs/affs/ | 411 | F: fs/affs/ |
412 | 412 | ||
@@ -2813,38 +2813,19 @@ F: Documentation/gpio.txt | |||
2813 | F: drivers/gpio/ | 2813 | F: drivers/gpio/ |
2814 | F: include/linux/gpio* | 2814 | F: include/linux/gpio* |
2815 | 2815 | ||
2816 | GRE DEMULTIPLEXER DRIVER | ||
2817 | M: Dmitry Kozlov <xeb@mail.ru> | ||
2818 | L: netdev@vger.kernel.org | ||
2819 | S: Maintained | ||
2820 | F: net/ipv4/gre.c | ||
2821 | F: include/net/gre.h | ||
2822 | |||
2816 | GRETH 10/100/1G Ethernet MAC device driver | 2823 | GRETH 10/100/1G Ethernet MAC device driver |
2817 | M: Kristoffer Glembo <kristoffer@gaisler.com> | 2824 | M: Kristoffer Glembo <kristoffer@gaisler.com> |
2818 | L: netdev@vger.kernel.org | 2825 | L: netdev@vger.kernel.org |
2819 | S: Maintained | 2826 | S: Maintained |
2820 | F: drivers/net/greth* | 2827 | F: drivers/net/greth* |
2821 | 2828 | ||
2822 | HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER | ||
2823 | M: Frank Seidel <frank@f-seidel.de> | ||
2824 | L: platform-driver-x86@vger.kernel.org | ||
2825 | W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ | ||
2826 | S: Maintained | ||
2827 | F: drivers/platform/x86/hdaps.c | ||
2828 | |||
2829 | HWPOISON MEMORY FAILURE HANDLING | ||
2830 | M: Andi Kleen <andi@firstfloor.org> | ||
2831 | L: linux-mm@kvack.org | ||
2832 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison | ||
2833 | S: Maintained | ||
2834 | F: mm/memory-failure.c | ||
2835 | F: mm/hwpoison-inject.c | ||
2836 | |||
2837 | HYPERVISOR VIRTUAL CONSOLE DRIVER | ||
2838 | L: linuxppc-dev@lists.ozlabs.org | ||
2839 | S: Odd Fixes | ||
2840 | F: drivers/tty/hvc/ | ||
2841 | |||
2842 | iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER | ||
2843 | M: Peter Jones <pjones@redhat.com> | ||
2844 | M: Konrad Rzeszutek Wilk <konrad@kernel.org> | ||
2845 | S: Maintained | ||
2846 | F: drivers/firmware/iscsi_ibft* | ||
2847 | |||
2848 | GSPCA FINEPIX SUBDRIVER | 2829 | GSPCA FINEPIX SUBDRIVER |
2849 | M: Frank Zago <frank@zago.net> | 2830 | M: Frank Zago <frank@zago.net> |
2850 | L: linux-media@vger.kernel.org | 2831 | L: linux-media@vger.kernel.org |
@@ -2895,6 +2876,26 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | |||
2895 | S: Maintained | 2876 | S: Maintained |
2896 | F: drivers/media/video/gspca/ | 2877 | F: drivers/media/video/gspca/ |
2897 | 2878 | ||
2879 | HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER | ||
2880 | M: Frank Seidel <frank@f-seidel.de> | ||
2881 | L: platform-driver-x86@vger.kernel.org | ||
2882 | W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ | ||
2883 | S: Maintained | ||
2884 | F: drivers/platform/x86/hdaps.c | ||
2885 | |||
2886 | HWPOISON MEMORY FAILURE HANDLING | ||
2887 | M: Andi Kleen <andi@firstfloor.org> | ||
2888 | L: linux-mm@kvack.org | ||
2889 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison | ||
2890 | S: Maintained | ||
2891 | F: mm/memory-failure.c | ||
2892 | F: mm/hwpoison-inject.c | ||
2893 | |||
2894 | HYPERVISOR VIRTUAL CONSOLE DRIVER | ||
2895 | L: linuxppc-dev@lists.ozlabs.org | ||
2896 | S: Odd Fixes | ||
2897 | F: drivers/tty/hvc/ | ||
2898 | |||
2898 | HARDWARE MONITORING | 2899 | HARDWARE MONITORING |
2899 | M: Jean Delvare <khali@linux-fr.org> | 2900 | M: Jean Delvare <khali@linux-fr.org> |
2900 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 2901 | M: Guenter Roeck <guenter.roeck@ericsson.com> |
@@ -2945,8 +2946,8 @@ F: drivers/block/cciss* | |||
2945 | F: include/linux/cciss_ioctl.h | 2946 | F: include/linux/cciss_ioctl.h |
2946 | 2947 | ||
2947 | HFS FILESYSTEM | 2948 | HFS FILESYSTEM |
2948 | M: Roman Zippel <zippel@linux-m68k.org> | 2949 | L: linux-fsdevel@vger.kernel.org |
2949 | S: Maintained | 2950 | S: Orphan |
2950 | F: Documentation/filesystems/hfs.txt | 2951 | F: Documentation/filesystems/hfs.txt |
2951 | F: fs/hfs/ | 2952 | F: fs/hfs/ |
2952 | 2953 | ||
@@ -3478,6 +3479,12 @@ F: Documentation/isapnp.txt | |||
3478 | F: drivers/pnp/isapnp/ | 3479 | F: drivers/pnp/isapnp/ |
3479 | F: include/linux/isapnp.h | 3480 | F: include/linux/isapnp.h |
3480 | 3481 | ||
3482 | iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER | ||
3483 | M: Peter Jones <pjones@redhat.com> | ||
3484 | M: Konrad Rzeszutek Wilk <konrad@kernel.org> | ||
3485 | S: Maintained | ||
3486 | F: drivers/firmware/iscsi_ibft* | ||
3487 | |||
3481 | ISCSI | 3488 | ISCSI |
3482 | M: Mike Christie <michaelc@cs.wisc.edu> | 3489 | M: Mike Christie <michaelc@cs.wisc.edu> |
3483 | L: open-iscsi@googlegroups.com | 3490 | L: open-iscsi@googlegroups.com |
@@ -3807,7 +3814,7 @@ M: Rusty Russell <rusty@rustcorp.com.au> | |||
3807 | L: lguest@lists.ozlabs.org | 3814 | L: lguest@lists.ozlabs.org |
3808 | W: http://lguest.ozlabs.org/ | 3815 | W: http://lguest.ozlabs.org/ |
3809 | S: Odd Fixes | 3816 | S: Odd Fixes |
3810 | F: Documentation/lguest/ | 3817 | F: Documentation/virtual/lguest/ |
3811 | F: arch/x86/lguest/ | 3818 | F: arch/x86/lguest/ |
3812 | F: drivers/lguest/ | 3819 | F: drivers/lguest/ |
3813 | F: include/linux/lguest*.h | 3820 | F: include/linux/lguest*.h |
@@ -3994,7 +4001,6 @@ F: arch/m32r/ | |||
3994 | 4001 | ||
3995 | M68K ARCHITECTURE | 4002 | M68K ARCHITECTURE |
3996 | M: Geert Uytterhoeven <geert@linux-m68k.org> | 4003 | M: Geert Uytterhoeven <geert@linux-m68k.org> |
3997 | M: Roman Zippel <zippel@linux-m68k.org> | ||
3998 | L: linux-m68k@lists.linux-m68k.org | 4004 | L: linux-m68k@lists.linux-m68k.org |
3999 | W: http://www.linux-m68k.org/ | 4005 | W: http://www.linux-m68k.org/ |
4000 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k.git | 4006 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k.git |
@@ -4989,6 +4995,13 @@ F: Documentation/pps/ | |||
4989 | F: drivers/pps/ | 4995 | F: drivers/pps/ |
4990 | F: include/linux/pps*.h | 4996 | F: include/linux/pps*.h |
4991 | 4997 | ||
4998 | PPTP DRIVER | ||
4999 | M: Dmitry Kozlov <xeb@mail.ru> | ||
5000 | L: netdev@vger.kernel.org | ||
5001 | S: Maintained | ||
5002 | F: drivers/net/pptp.c | ||
5003 | W: http://sourceforge.net/projects/accel-pptp | ||
5004 | |||
4992 | PREEMPTIBLE KERNEL | 5005 | PREEMPTIBLE KERNEL |
4993 | M: Robert Love <rml@tech9.net> | 5006 | M: Robert Love <rml@tech9.net> |
4994 | L: kpreempt-tech@lists.sourceforge.net | 5007 | L: kpreempt-tech@lists.sourceforge.net |
@@ -6618,7 +6631,7 @@ L: user-mode-linux-devel@lists.sourceforge.net | |||
6618 | L: user-mode-linux-user@lists.sourceforge.net | 6631 | L: user-mode-linux-user@lists.sourceforge.net |
6619 | W: http://user-mode-linux.sourceforge.net | 6632 | W: http://user-mode-linux.sourceforge.net |
6620 | S: Maintained | 6633 | S: Maintained |
6621 | F: Documentation/uml/ | 6634 | F: Documentation/virtual/uml/ |
6622 | F: arch/um/ | 6635 | F: arch/um/ |
6623 | F: fs/hostfs/ | 6636 | F: fs/hostfs/ |
6624 | F: fs/hppfs/ | 6637 | F: fs/hppfs/ |
@@ -7024,20 +7037,6 @@ M: "Maciej W. Rozycki" <macro@linux-mips.org> | |||
7024 | S: Maintained | 7037 | S: Maintained |
7025 | F: drivers/tty/serial/zs.* | 7038 | F: drivers/tty/serial/zs.* |
7026 | 7039 | ||
7027 | GRE DEMULTIPLEXER DRIVER | ||
7028 | M: Dmitry Kozlov <xeb@mail.ru> | ||
7029 | L: netdev@vger.kernel.org | ||
7030 | S: Maintained | ||
7031 | F: net/ipv4/gre.c | ||
7032 | F: include/net/gre.h | ||
7033 | |||
7034 | PPTP DRIVER | ||
7035 | M: Dmitry Kozlov <xeb@mail.ru> | ||
7036 | L: netdev@vger.kernel.org | ||
7037 | S: Maintained | ||
7038 | F: drivers/net/pptp.c | ||
7039 | W: http://sourceforge.net/projects/accel-pptp | ||
7040 | |||
7041 | THE REST | 7040 | THE REST |
7042 | M: Linus Torvalds <torvalds@linux-foundation.org> | 7041 | M: Linus Torvalds <torvalds@linux-foundation.org> |
7043 | L: linux-kernel@vger.kernel.org | 7042 | L: linux-kernel@vger.kernel.org |
diff --git a/Makefile b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 39 | 3 | SUBLEVEL = 39 |
4 | EXTRAVERSION = -rc7 | 4 | EXTRAVERSION = |
5 | NAME = Flesh-Eating Bats with Fangs | 5 | NAME = Flesh-Eating Bats with Fangs |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
@@ -1268,6 +1268,7 @@ help: | |||
1268 | @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' | 1268 | @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' |
1269 | @echo ' make C=2 [targets] Force check of all c source with $$CHECK' | 1269 | @echo ' make C=2 [targets] Force check of all c source with $$CHECK' |
1270 | @echo ' make W=1 [targets] Enable extra gcc checks' | 1270 | @echo ' make W=1 [targets] Enable extra gcc checks' |
1271 | @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' | ||
1271 | @echo '' | 1272 | @echo '' |
1272 | @echo 'Execute "make" or "make all" to build all targets marked with [*] ' | 1273 | @echo 'Execute "make" or "make all" to build all targets marked with [*] ' |
1273 | @echo 'For further info see the ./README file' | 1274 | @echo 'For further info see the ./README file' |
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index 058937bf5a77..b1834166922d 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h | |||
@@ -452,10 +452,14 @@ | |||
452 | #define __NR_fanotify_init 494 | 452 | #define __NR_fanotify_init 494 |
453 | #define __NR_fanotify_mark 495 | 453 | #define __NR_fanotify_mark 495 |
454 | #define __NR_prlimit64 496 | 454 | #define __NR_prlimit64 496 |
455 | #define __NR_name_to_handle_at 497 | ||
456 | #define __NR_open_by_handle_at 498 | ||
457 | #define __NR_clock_adjtime 499 | ||
458 | #define __NR_syncfs 500 | ||
455 | 459 | ||
456 | #ifdef __KERNEL__ | 460 | #ifdef __KERNEL__ |
457 | 461 | ||
458 | #define NR_SYSCALLS 497 | 462 | #define NR_SYSCALLS 501 |
459 | 463 | ||
460 | #define __ARCH_WANT_IPC_PARSE_VERSION | 464 | #define __ARCH_WANT_IPC_PARSE_VERSION |
461 | #define __ARCH_WANT_OLD_READDIR | 465 | #define __ARCH_WANT_OLD_READDIR |
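
The four new numbers above (497-500) become usable once the syscall table below wires them up. As a hedged user-space illustration (not part of the patch), the new syncfs entry could be invoked by number on Alpha where libc has no wrapper yet:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_syncfs
    #define __NR_syncfs 500                 /* Alpha number added above */
    #endif

    int main(void)
    {
            int fd = open("/", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (syscall(__NR_syncfs, fd) < 0)       /* sync only this filesystem */
                    perror("syncfs");
            close(fd);
            return 0;
    }
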
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index a6a1de9db16f..15f999d41c75 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S | |||
@@ -498,23 +498,27 @@ sys_call_table: | |||
498 | .quad sys_ni_syscall /* sys_timerfd */ | 498 | .quad sys_ni_syscall /* sys_timerfd */ |
499 | .quad sys_eventfd | 499 | .quad sys_eventfd |
500 | .quad sys_recvmmsg | 500 | .quad sys_recvmmsg |
501 | .quad sys_fallocate /* 480 */ | 501 | .quad sys_fallocate /* 480 */ |
502 | .quad sys_timerfd_create | 502 | .quad sys_timerfd_create |
503 | .quad sys_timerfd_settime | 503 | .quad sys_timerfd_settime |
504 | .quad sys_timerfd_gettime | 504 | .quad sys_timerfd_gettime |
505 | .quad sys_signalfd4 | 505 | .quad sys_signalfd4 |
506 | .quad sys_eventfd2 /* 485 */ | 506 | .quad sys_eventfd2 /* 485 */ |
507 | .quad sys_epoll_create1 | 507 | .quad sys_epoll_create1 |
508 | .quad sys_dup3 | 508 | .quad sys_dup3 |
509 | .quad sys_pipe2 | 509 | .quad sys_pipe2 |
510 | .quad sys_inotify_init1 | 510 | .quad sys_inotify_init1 |
511 | .quad sys_preadv /* 490 */ | 511 | .quad sys_preadv /* 490 */ |
512 | .quad sys_pwritev | 512 | .quad sys_pwritev |
513 | .quad sys_rt_tgsigqueueinfo | 513 | .quad sys_rt_tgsigqueueinfo |
514 | .quad sys_perf_event_open | 514 | .quad sys_perf_event_open |
515 | .quad sys_fanotify_init | 515 | .quad sys_fanotify_init |
516 | .quad sys_fanotify_mark /* 495 */ | 516 | .quad sys_fanotify_mark /* 495 */ |
517 | .quad sys_prlimit64 | 517 | .quad sys_prlimit64 |
518 | .quad sys_name_to_handle_at | ||
519 | .quad sys_open_by_handle_at | ||
520 | .quad sys_clock_adjtime | ||
521 | .quad sys_syncfs /* 500 */ | ||
518 | 522 | ||
519 | .size sys_call_table, . - sys_call_table | 523 | .size sys_call_table, . - sys_call_table |
520 | .type sys_call_table, @object | 524 | .type sys_call_table, @object |
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 918e8e0b72ff..818e74ed45dc 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c | |||
@@ -375,8 +375,7 @@ static struct clocksource clocksource_rpcc = { | |||
375 | 375 | ||
376 | static inline void register_rpcc_clocksource(long cycle_freq) | 376 | static inline void register_rpcc_clocksource(long cycle_freq) |
377 | { | 377 | { |
378 | clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4); | 378 | clocksource_register_hz(&clocksource_rpcc, cycle_freq); |
379 | clocksource_register(&clocksource_rpcc); | ||
380 | } | 379 | } |
381 | #else /* !CONFIG_SMP */ | 380 | #else /* !CONFIG_SMP */ |
382 | static inline void register_rpcc_clocksource(long cycle_freq) | 381 | static inline void register_rpcc_clocksource(long cycle_freq) |
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 8ebbb511c783..0c6852d93506 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
@@ -74,7 +74,7 @@ ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT) | |||
74 | ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) | 74 | ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) |
75 | else | 75 | else |
76 | ZTEXTADDR := 0 | 76 | ZTEXTADDR := 0 |
77 | ZBSSADDR := ALIGN(4) | 77 | ZBSSADDR := ALIGN(8) |
78 | endif | 78 | endif |
79 | 79 | ||
80 | SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ | 80 | SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ |
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index adf583cd0c35..49f5b2eaaa87 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
@@ -179,15 +179,14 @@ not_angel: | |||
179 | bl cache_on | 179 | bl cache_on |
180 | 180 | ||
181 | restart: adr r0, LC0 | 181 | restart: adr r0, LC0 |
182 | ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12} | 182 | ldmia r0, {r1, r2, r3, r6, r9, r11, r12} |
183 | ldr sp, [r0, #32] | 183 | ldr sp, [r0, #28] |
184 | 184 | ||
185 | /* | 185 | /* |
186 | * We might be running at a different address. We need | 186 | * We might be running at a different address. We need |
187 | * to fix up various pointers. | 187 | * to fix up various pointers. |
188 | */ | 188 | */ |
189 | sub r0, r0, r1 @ calculate the delta offset | 189 | sub r0, r0, r1 @ calculate the delta offset |
190 | add r5, r5, r0 @ _start | ||
191 | add r6, r6, r0 @ _edata | 190 | add r6, r6, r0 @ _edata |
192 | 191 | ||
193 | #ifndef CONFIG_ZBOOT_ROM | 192 | #ifndef CONFIG_ZBOOT_ROM |
@@ -206,31 +205,40 @@ restart: adr r0, LC0 | |||
206 | /* | 205 | /* |
207 | * Check to see if we will overwrite ourselves. | 206 | * Check to see if we will overwrite ourselves. |
208 | * r4 = final kernel address | 207 | * r4 = final kernel address |
209 | * r5 = start of this image | ||
210 | * r9 = size of decompressed image | 208 | * r9 = size of decompressed image |
211 | * r10 = end of this image, including bss/stack/malloc space if non XIP | 209 | * r10 = end of this image, including bss/stack/malloc space if non XIP |
212 | * We basically want: | 210 | * We basically want: |
213 | * r4 >= r10 -> OK | 211 | * r4 - 16k page directory >= r10 -> OK |
214 | * r4 + image length <= r5 -> OK | 212 | * r4 + image length <= current position (pc) -> OK |
215 | */ | 213 | */ |
214 | add r10, r10, #16384 | ||
216 | cmp r4, r10 | 215 | cmp r4, r10 |
217 | bhs wont_overwrite | 216 | bhs wont_overwrite |
218 | add r10, r4, r9 | 217 | add r10, r4, r9 |
219 | cmp r10, r5 | 218 | ARM( cmp r10, pc ) |
219 | THUMB( mov lr, pc ) | ||
220 | THUMB( cmp r10, lr ) | ||
220 | bls wont_overwrite | 221 | bls wont_overwrite |
221 | 222 | ||
222 | /* | 223 | /* |
223 | * Relocate ourselves past the end of the decompressed kernel. | 224 | * Relocate ourselves past the end of the decompressed kernel. |
224 | * r5 = start of this image | ||
225 | * r6 = _edata | 225 | * r6 = _edata |
226 | * r10 = end of the decompressed kernel | 226 | * r10 = end of the decompressed kernel |
227 | * Because we always copy ahead, we need to do it from the end and go | 227 | * Because we always copy ahead, we need to do it from the end and go |
228 | * backward in case the source and destination overlap. | 228 | * backward in case the source and destination overlap. |
229 | */ | 229 | */ |
230 | /* Round up to next 256-byte boundary. */ | 230 | /* |
231 | add r10, r10, #256 | 231 | * Bump to the next 256-byte boundary with the size of |
232 | * the relocation code added. This avoids overwriting | ||
233 | * ourself when the offset is small. | ||
234 | */ | ||
235 | add r10, r10, #((reloc_code_end - restart + 256) & ~255) | ||
232 | bic r10, r10, #255 | 236 | bic r10, r10, #255 |
233 | 237 | ||
238 | /* Get start of code we want to copy and align it down. */ | ||
239 | adr r5, restart | ||
240 | bic r5, r5, #31 | ||
241 | |||
234 | sub r9, r6, r5 @ size to copy | 242 | sub r9, r6, r5 @ size to copy |
235 | add r9, r9, #31 @ rounded up to a multiple | 243 | add r9, r9, #31 @ rounded up to a multiple |
236 | bic r9, r9, #31 @ ... of 32 bytes | 244 | bic r9, r9, #31 @ ... of 32 bytes |
@@ -245,6 +253,11 @@ restart: adr r0, LC0 | |||
245 | /* Preserve offset to relocated code. */ | 253 | /* Preserve offset to relocated code. */ |
246 | sub r6, r9, r6 | 254 | sub r6, r9, r6 |
247 | 255 | ||
256 | #ifndef CONFIG_ZBOOT_ROM | ||
257 | /* cache_clean_flush may use the stack, so relocate it */ | ||
258 | add sp, sp, r6 | ||
259 | #endif | ||
260 | |||
248 | bl cache_clean_flush | 261 | bl cache_clean_flush |
249 | 262 | ||
250 | adr r0, BSYM(restart) | 263 | adr r0, BSYM(restart) |
@@ -333,7 +346,6 @@ not_relocated: mov r0, #0 | |||
333 | LC0: .word LC0 @ r1 | 346 | LC0: .word LC0 @ r1 |
334 | .word __bss_start @ r2 | 347 | .word __bss_start @ r2 |
335 | .word _end @ r3 | 348 | .word _end @ r3 |
336 | .word _start @ r5 | ||
337 | .word _edata @ r6 | 349 | .word _edata @ r6 |
338 | .word _image_size @ r9 | 350 | .word _image_size @ r9 |
339 | .word _got_start @ r11 | 351 | .word _got_start @ r11 |
@@ -1062,6 +1074,7 @@ memdump: mov r12, r0 | |||
1062 | #endif | 1074 | #endif |
1063 | 1075 | ||
1064 | .ltorg | 1076 | .ltorg |
1077 | reloc_code_end: | ||
1065 | 1078 | ||
1066 | .align | 1079 | .align |
1067 | .section ".stack", "aw", %nobits | 1080 | .section ".stack", "aw", %nobits |
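
The reworked overwrite check in the head.S hunk above is easier to follow in C. The sketch below only models the logic described in the comments (load_addr stands in for r4, image_end for r10, decompressed_size for r9, pc for the current position); it is not kernel code:

    static int must_relocate(unsigned long load_addr, unsigned long image_end,
                             unsigned long decompressed_size, unsigned long pc)
    {
            /* leave room below the kernel for the 16k page directory */
            if (load_addr >= image_end + 16384)
                    return 0;       /* kernel lands safely above this image */
            if (load_addr + decompressed_size <= pc)
                    return 0;       /* kernel lands safely below this image */
            return 1;               /* overlap: copy ourselves past the kernel */
    }
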
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in index 5309909d7282..ea80abe78844 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.in +++ b/arch/arm/boot/compressed/vmlinux.lds.in | |||
@@ -54,6 +54,7 @@ SECTIONS | |||
54 | .bss : { *(.bss) } | 54 | .bss : { *(.bss) } |
55 | _end = .; | 55 | _end = .; |
56 | 56 | ||
57 | . = ALIGN(8); /* the stack must be 64-bit aligned */ | ||
57 | .stack : { *(.stack) } | 58 | .stack : { *(.stack) } |
58 | 59 | ||
59 | .stab 0 : { *(.stab) } | 60 | .stab 0 : { *(.stab) } |
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c index 113085a77123..7aa4262ada7a 100644 --- a/arch/arm/common/vic.c +++ b/arch/arm/common/vic.c | |||
@@ -22,17 +22,16 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/list.h> | 23 | #include <linux/list.h> |
24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
25 | #include <linux/sysdev.h> | 25 | #include <linux/syscore_ops.h> |
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
27 | #include <linux/amba/bus.h> | 27 | #include <linux/amba/bus.h> |
28 | 28 | ||
29 | #include <asm/mach/irq.h> | 29 | #include <asm/mach/irq.h> |
30 | #include <asm/hardware/vic.h> | 30 | #include <asm/hardware/vic.h> |
31 | 31 | ||
32 | #if defined(CONFIG_PM) | 32 | #ifdef CONFIG_PM |
33 | /** | 33 | /** |
34 | * struct vic_device - VIC PM device | 34 | * struct vic_device - VIC PM device |
35 | * @sysdev: The system device which is registered. | ||
36 | * @irq: The IRQ number for the base of the VIC. | 35 | * @irq: The IRQ number for the base of the VIC. |
37 | * @base: The register base for the VIC. | 36 | * @base: The register base for the VIC. |
38 | * @resume_sources: A bitmask of interrupts for resume. | 37 | * @resume_sources: A bitmask of interrupts for resume. |
@@ -43,8 +42,6 @@ | |||
43 | * @protect: Save for VIC_PROTECT. | 42 | * @protect: Save for VIC_PROTECT. |
44 | */ | 43 | */ |
45 | struct vic_device { | 44 | struct vic_device { |
46 | struct sys_device sysdev; | ||
47 | |||
48 | void __iomem *base; | 45 | void __iomem *base; |
49 | int irq; | 46 | int irq; |
50 | u32 resume_sources; | 47 | u32 resume_sources; |
@@ -59,11 +56,6 @@ struct vic_device { | |||
59 | static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; | 56 | static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; |
60 | 57 | ||
61 | static int vic_id; | 58 | static int vic_id; |
62 | |||
63 | static inline struct vic_device *to_vic(struct sys_device *sys) | ||
64 | { | ||
65 | return container_of(sys, struct vic_device, sysdev); | ||
66 | } | ||
67 | #endif /* CONFIG_PM */ | 59 | #endif /* CONFIG_PM */ |
68 | 60 | ||
69 | /** | 61 | /** |
@@ -85,10 +77,9 @@ static void vic_init2(void __iomem *base) | |||
85 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); | 77 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); |
86 | } | 78 | } |
87 | 79 | ||
88 | #if defined(CONFIG_PM) | 80 | #ifdef CONFIG_PM |
89 | static int vic_class_resume(struct sys_device *dev) | 81 | static void resume_one_vic(struct vic_device *vic) |
90 | { | 82 | { |
91 | struct vic_device *vic = to_vic(dev); | ||
92 | void __iomem *base = vic->base; | 83 | void __iomem *base = vic->base; |
93 | 84 | ||
94 | printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); | 85 | printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); |
@@ -107,13 +98,18 @@ static int vic_class_resume(struct sys_device *dev) | |||
107 | 98 | ||
108 | writel(vic->soft_int, base + VIC_INT_SOFT); | 99 | writel(vic->soft_int, base + VIC_INT_SOFT); |
109 | writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); | 100 | writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); |
101 | } | ||
110 | 102 | ||
111 | return 0; | 103 | static void vic_resume(void) |
104 | { | ||
105 | int id; | ||
106 | |||
107 | for (id = vic_id - 1; id >= 0; id--) | ||
108 | resume_one_vic(vic_devices + id); | ||
112 | } | 109 | } |
113 | 110 | ||
114 | static int vic_class_suspend(struct sys_device *dev, pm_message_t state) | 111 | static void suspend_one_vic(struct vic_device *vic) |
115 | { | 112 | { |
116 | struct vic_device *vic = to_vic(dev); | ||
117 | void __iomem *base = vic->base; | 113 | void __iomem *base = vic->base; |
118 | 114 | ||
119 | printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); | 115 | printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); |
@@ -128,14 +124,21 @@ static int vic_class_suspend(struct sys_device *dev, pm_message_t state) | |||
128 | 124 | ||
129 | writel(vic->resume_irqs, base + VIC_INT_ENABLE); | 125 | writel(vic->resume_irqs, base + VIC_INT_ENABLE); |
130 | writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); | 126 | writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); |
127 | } | ||
128 | |||
129 | static int vic_suspend(void) | ||
130 | { | ||
131 | int id; | ||
132 | |||
133 | for (id = 0; id < vic_id; id++) | ||
134 | suspend_one_vic(vic_devices + id); | ||
131 | 135 | ||
132 | return 0; | 136 | return 0; |
133 | } | 137 | } |
134 | 138 | ||
135 | struct sysdev_class vic_class = { | 139 | struct syscore_ops vic_syscore_ops = { |
136 | .name = "vic", | 140 | .suspend = vic_suspend, |
137 | .suspend = vic_class_suspend, | 141 | .resume = vic_resume, |
138 | .resume = vic_class_resume, | ||
139 | }; | 142 | }; |
140 | 143 | ||
141 | /** | 144 | /** |
@@ -147,30 +150,8 @@ struct sysdev_class vic_class = { | |||
147 | */ | 150 | */ |
148 | static int __init vic_pm_init(void) | 151 | static int __init vic_pm_init(void) |
149 | { | 152 | { |
150 | struct vic_device *dev = vic_devices; | 153 | if (vic_id > 0) |
151 | int err; | 154 | register_syscore_ops(&vic_syscore_ops); |
152 | int id; | ||
153 | |||
154 | if (vic_id == 0) | ||
155 | return 0; | ||
156 | |||
157 | err = sysdev_class_register(&vic_class); | ||
158 | if (err) { | ||
159 | printk(KERN_ERR "%s: cannot register class\n", __func__); | ||
160 | return err; | ||
161 | } | ||
162 | |||
163 | for (id = 0; id < vic_id; id++, dev++) { | ||
164 | dev->sysdev.id = id; | ||
165 | dev->sysdev.cls = &vic_class; | ||
166 | |||
167 | err = sysdev_register(&dev->sysdev); | ||
168 | if (err) { | ||
169 | printk(KERN_ERR "%s: failed to register device\n", | ||
170 | __func__); | ||
171 | return err; | ||
172 | } | ||
173 | } | ||
174 | 155 | ||
175 | return 0; | 156 | return 0; |
176 | } | 157 | } |
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h index 883f6be5117a..d5adaae5ee2c 100644 --- a/arch/arm/include/asm/mach/time.h +++ b/arch/arm/include/asm/mach/time.h | |||
@@ -34,7 +34,6 @@ | |||
34 | * timer interrupt which may be pending. | 34 | * timer interrupt which may be pending. |
35 | */ | 35 | */ |
36 | struct sys_timer { | 36 | struct sys_timer { |
37 | struct sys_device dev; | ||
38 | void (*init)(void); | 37 | void (*init)(void); |
39 | void (*suspend)(void); | 38 | void (*suspend)(void); |
40 | void (*resume)(void); | 39 | void (*resume)(void); |
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 885be097769d..832888d0c20c 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h | |||
@@ -159,7 +159,7 @@ extern unsigned int user_debug; | |||
159 | #include <mach/barriers.h> | 159 | #include <mach/barriers.h> |
160 | #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) | 160 | #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) |
161 | #define mb() do { dsb(); outer_sync(); } while (0) | 161 | #define mb() do { dsb(); outer_sync(); } while (0) |
162 | #define rmb() dmb() | 162 | #define rmb() dsb() |
163 | #define wmb() mb() | 163 | #define wmb() mb() |
164 | #else | 164 | #else |
165 | #include <asm/memory.h> | 165 | #include <asm/memory.h> |
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c index 31a316c1777b..0f107dcb0347 100644 --- a/arch/arm/kernel/leds.c +++ b/arch/arm/kernel/leds.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/sysdev.h> | 12 | #include <linux/sysdev.h> |
13 | #include <linux/syscore_ops.h> | ||
13 | 14 | ||
14 | #include <asm/leds.h> | 15 | #include <asm/leds.h> |
15 | 16 | ||
@@ -69,36 +70,37 @@ static ssize_t leds_store(struct sys_device *dev, | |||
69 | 70 | ||
70 | static SYSDEV_ATTR(event, 0200, NULL, leds_store); | 71 | static SYSDEV_ATTR(event, 0200, NULL, leds_store); |
71 | 72 | ||
72 | static int leds_suspend(struct sys_device *dev, pm_message_t state) | 73 | static struct sysdev_class leds_sysclass = { |
74 | .name = "leds", | ||
75 | }; | ||
76 | |||
77 | static struct sys_device leds_device = { | ||
78 | .id = 0, | ||
79 | .cls = &leds_sysclass, | ||
80 | }; | ||
81 | |||
82 | static int leds_suspend(void) | ||
73 | { | 83 | { |
74 | leds_event(led_stop); | 84 | leds_event(led_stop); |
75 | return 0; | 85 | return 0; |
76 | } | 86 | } |
77 | 87 | ||
78 | static int leds_resume(struct sys_device *dev) | 88 | static void leds_resume(void) |
79 | { | 89 | { |
80 | leds_event(led_start); | 90 | leds_event(led_start); |
81 | return 0; | ||
82 | } | 91 | } |
83 | 92 | ||
84 | static int leds_shutdown(struct sys_device *dev) | 93 | static void leds_shutdown(void) |
85 | { | 94 | { |
86 | leds_event(led_halted); | 95 | leds_event(led_halted); |
87 | return 0; | ||
88 | } | 96 | } |
89 | 97 | ||
90 | static struct sysdev_class leds_sysclass = { | 98 | static struct syscore_ops leds_syscore_ops = { |
91 | .name = "leds", | ||
92 | .shutdown = leds_shutdown, | 99 | .shutdown = leds_shutdown, |
93 | .suspend = leds_suspend, | 100 | .suspend = leds_suspend, |
94 | .resume = leds_resume, | 101 | .resume = leds_resume, |
95 | }; | 102 | }; |
96 | 103 | ||
97 | static struct sys_device leds_device = { | ||
98 | .id = 0, | ||
99 | .cls = &leds_sysclass, | ||
100 | }; | ||
101 | |||
102 | static int __init leds_init(void) | 104 | static int __init leds_init(void) |
103 | { | 105 | { |
104 | int ret; | 106 | int ret; |
@@ -107,6 +109,8 @@ static int __init leds_init(void) | |||
107 | ret = sysdev_register(&leds_device); | 109 | ret = sysdev_register(&leds_device); |
108 | if (ret == 0) | 110 | if (ret == 0) |
109 | ret = sysdev_create_file(&leds_device, &attr_event); | 111 | ret = sysdev_create_file(&leds_device, &attr_event); |
112 | if (ret == 0) | ||
113 | register_syscore_ops(&leds_syscore_ops); | ||
110 | return ret; | 114 | return ret; |
111 | } | 115 | } |
112 | 116 | ||
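The leds.c conversion above is the pattern repeated throughout this series: a sysdev class (plus its sys_device) with suspend/resume/shutdown methods becomes a struct syscore_ops whose suspend keeps the int return while resume and shutdown become void, registered with a single register_syscore_ops() call. A minimal sketch of the new shape, using hypothetical foo_* names and assuming the 2.6.39-era <linux/syscore_ops.h> interface:

#include <linux/init.h>
#include <linux/syscore_ops.h>

/* was: int (*suspend)(struct sys_device *, pm_message_t) */
static int foo_suspend(void)
{
	/* save state; runs late, with interrupts disabled on one CPU */
	return 0;
}

/* was: int (*resume)(struct sys_device *); errors can no longer be returned */
static void foo_resume(void)
{
	/* restore state */
}

static struct syscore_ops foo_syscore_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static int __init foo_init(void)
{
	/* replaces sysdev_class_register() + sysdev_register() */
	register_syscore_ops(&foo_syscore_ops);
	return 0;
}
device_initcall(foo_init);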
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index cb8398317644..0340224cf73c 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -597,19 +597,13 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | |||
597 | return err; | 597 | return err; |
598 | } | 598 | } |
599 | 599 | ||
600 | static inline void setup_syscall_restart(struct pt_regs *regs) | ||
601 | { | ||
602 | regs->ARM_r0 = regs->ARM_ORIG_r0; | ||
603 | regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; | ||
604 | } | ||
605 | |||
606 | /* | 600 | /* |
607 | * OK, we're invoking a handler | 601 | * OK, we're invoking a handler |
608 | */ | 602 | */ |
609 | static int | 603 | static int |
610 | handle_signal(unsigned long sig, struct k_sigaction *ka, | 604 | handle_signal(unsigned long sig, struct k_sigaction *ka, |
611 | siginfo_t *info, sigset_t *oldset, | 605 | siginfo_t *info, sigset_t *oldset, |
612 | struct pt_regs * regs, int syscall) | 606 | struct pt_regs * regs) |
613 | { | 607 | { |
614 | struct thread_info *thread = current_thread_info(); | 608 | struct thread_info *thread = current_thread_info(); |
615 | struct task_struct *tsk = current; | 609 | struct task_struct *tsk = current; |
@@ -617,26 +611,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
617 | int ret; | 611 | int ret; |
618 | 612 | ||
619 | /* | 613 | /* |
620 | * If we were from a system call, check for system call restarting... | ||
621 | */ | ||
622 | if (syscall) { | ||
623 | switch (regs->ARM_r0) { | ||
624 | case -ERESTART_RESTARTBLOCK: | ||
625 | case -ERESTARTNOHAND: | ||
626 | regs->ARM_r0 = -EINTR; | ||
627 | break; | ||
628 | case -ERESTARTSYS: | ||
629 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
630 | regs->ARM_r0 = -EINTR; | ||
631 | break; | ||
632 | } | ||
633 | /* fallthrough */ | ||
634 | case -ERESTARTNOINTR: | ||
635 | setup_syscall_restart(regs); | ||
636 | } | ||
637 | } | ||
638 | |||
639 | /* | ||
640 | * translate the signal | 614 | * translate the signal |
641 | */ | 615 | */ |
642 | if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) | 616 | if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) |
@@ -685,6 +659,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
685 | */ | 659 | */ |
686 | static void do_signal(struct pt_regs *regs, int syscall) | 660 | static void do_signal(struct pt_regs *regs, int syscall) |
687 | { | 661 | { |
662 | unsigned int retval = 0, continue_addr = 0, restart_addr = 0; | ||
688 | struct k_sigaction ka; | 663 | struct k_sigaction ka; |
689 | siginfo_t info; | 664 | siginfo_t info; |
690 | int signr; | 665 | int signr; |
@@ -698,18 +673,61 @@ static void do_signal(struct pt_regs *regs, int syscall) | |||
698 | if (!user_mode(regs)) | 673 | if (!user_mode(regs)) |
699 | return; | 674 | return; |
700 | 675 | ||
676 | /* | ||
677 | * If we were from a system call, check for system call restarting... | ||
678 | */ | ||
679 | if (syscall) { | ||
680 | continue_addr = regs->ARM_pc; | ||
681 | restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4); | ||
682 | retval = regs->ARM_r0; | ||
683 | |||
684 | /* | ||
685 | * Prepare for system call restart. We do this here so that a | ||
686 | * debugger will see the already changed PSW. | ||
687 | */ | ||
688 | switch (retval) { | ||
689 | case -ERESTARTNOHAND: | ||
690 | case -ERESTARTSYS: | ||
691 | case -ERESTARTNOINTR: | ||
692 | regs->ARM_r0 = regs->ARM_ORIG_r0; | ||
693 | regs->ARM_pc = restart_addr; | ||
694 | break; | ||
695 | case -ERESTART_RESTARTBLOCK: | ||
696 | regs->ARM_r0 = -EINTR; | ||
697 | break; | ||
698 | } | ||
699 | } | ||
700 | |||
701 | if (try_to_freeze()) | 701 | if (try_to_freeze()) |
702 | goto no_signal; | 702 | goto no_signal; |
703 | 703 | ||
704 | /* | ||
705 | * Get the signal to deliver. When running under ptrace, at this | ||
706 | * point the debugger may change all our registers ... | ||
707 | */ | ||
704 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 708 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
705 | if (signr > 0) { | 709 | if (signr > 0) { |
706 | sigset_t *oldset; | 710 | sigset_t *oldset; |
707 | 711 | ||
712 | /* | ||
713 | * Depending on the signal settings we may need to revert the | ||
714 | * decision to restart the system call. But skip this if a | ||
715 | * debugger has chosen to restart at a different PC. | ||
716 | */ | ||
717 | if (regs->ARM_pc == restart_addr) { | ||
718 | if (retval == -ERESTARTNOHAND | ||
719 | || (retval == -ERESTARTSYS | ||
720 | && !(ka.sa.sa_flags & SA_RESTART))) { | ||
721 | regs->ARM_r0 = -EINTR; | ||
722 | regs->ARM_pc = continue_addr; | ||
723 | } | ||
724 | } | ||
725 | |||
708 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 726 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
709 | oldset = &current->saved_sigmask; | 727 | oldset = &current->saved_sigmask; |
710 | else | 728 | else |
711 | oldset = &current->blocked; | 729 | oldset = &current->blocked; |
712 | if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) { | 730 | if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { |
713 | /* | 731 | /* |
714 | * A signal was successfully delivered; the saved | 732 | * A signal was successfully delivered; the saved |
715 | * sigmask will have been stored in the signal frame, | 733 | * sigmask will have been stored in the signal frame, |
@@ -723,11 +741,14 @@ static void do_signal(struct pt_regs *regs, int syscall) | |||
723 | } | 741 | } |
724 | 742 | ||
725 | no_signal: | 743 | no_signal: |
726 | /* | ||
727 | * No signal to deliver to the process - restart the syscall. | ||
728 | */ | ||
729 | if (syscall) { | 744 | if (syscall) { |
730 | if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { | 745 | /* |
746 | * Handle restarting a different system call. As above, | ||
747 | * if a debugger has chosen to restart at a different PC, | ||
748 | * ignore the restart. | ||
749 | */ | ||
750 | if (retval == -ERESTART_RESTARTBLOCK | ||
751 | && regs->ARM_pc == continue_addr) { | ||
731 | if (thumb_mode(regs)) { | 752 | if (thumb_mode(regs)) { |
732 | regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; | 753 | regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; |
733 | regs->ARM_pc -= 2; | 754 | regs->ARM_pc -= 2; |
@@ -750,11 +771,6 @@ static void do_signal(struct pt_regs *regs, int syscall) | |||
750 | #endif | 771 | #endif |
751 | } | 772 | } |
752 | } | 773 | } |
753 | if (regs->ARM_r0 == -ERESTARTNOHAND || | ||
754 | regs->ARM_r0 == -ERESTARTSYS || | ||
755 | regs->ARM_r0 == -ERESTARTNOINTR) { | ||
756 | setup_syscall_restart(regs); | ||
757 | } | ||
758 | 774 | ||
759 | /* If there's no signal to deliver, we just put the saved sigmask | 775 | /* If there's no signal to deliver, we just put the saved sigmask |
760 | * back. | 776 | * back. |
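The signal.c rework above moves the syscall-restart bookkeeping from handle_signal() into do_signal(), ahead of get_signal_to_deliver(), so a debugger that rewrites the PC while the task is stopped is respected. As a worked example with assumed values: an ARM-mode task's syscall returns -ERESTARTSYS, so continue_addr is regs->ARM_pc (say 0x8004, the instruction after the SVC) and restart_addr is 0x8004 - 4 = 0x8000 (the SVC itself). do_signal() first rewinds ARM_pc to 0x8000 and reloads ARM_r0 from ARM_ORIG_r0. If the handler that is then chosen lacks SA_RESTART, and no debugger has moved ARM_pc away from 0x8000, the decision is reverted: ARM_pc goes back to 0x8004 and ARM_r0 becomes -EINTR; had a debugger changed ARM_pc, its choice is left alone.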
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 1ff46cabc7ef..cb634c3e28e9 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/timex.h> | 21 | #include <linux/timex.h> |
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/profile.h> | 23 | #include <linux/profile.h> |
24 | #include <linux/sysdev.h> | 24 | #include <linux/syscore_ops.h> |
25 | #include <linux/timer.h> | 25 | #include <linux/timer.h> |
26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
27 | 27 | ||
@@ -115,48 +115,37 @@ void timer_tick(void) | |||
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) | 117 | #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) |
118 | static int timer_suspend(struct sys_device *dev, pm_message_t state) | 118 | static int timer_suspend(void) |
119 | { | 119 | { |
120 | struct sys_timer *timer = container_of(dev, struct sys_timer, dev); | 120 | if (system_timer->suspend) |
121 | 121 | system_timer->suspend(); | |
122 | if (timer->suspend != NULL) | ||
123 | timer->suspend(); | ||
124 | 122 | ||
125 | return 0; | 123 | return 0; |
126 | } | 124 | } |
127 | 125 | ||
128 | static int timer_resume(struct sys_device *dev) | 126 | static void timer_resume(void) |
129 | { | 127 | { |
130 | struct sys_timer *timer = container_of(dev, struct sys_timer, dev); | 128 | if (system_timer->resume) |
131 | 129 | system_timer->resume(); | |
132 | if (timer->resume != NULL) | ||
133 | timer->resume(); | ||
134 | |||
135 | return 0; | ||
136 | } | 130 | } |
137 | #else | 131 | #else |
138 | #define timer_suspend NULL | 132 | #define timer_suspend NULL |
139 | #define timer_resume NULL | 133 | #define timer_resume NULL |
140 | #endif | 134 | #endif |
141 | 135 | ||
142 | static struct sysdev_class timer_sysclass = { | 136 | static struct syscore_ops timer_syscore_ops = { |
143 | .name = "timer", | ||
144 | .suspend = timer_suspend, | 137 | .suspend = timer_suspend, |
145 | .resume = timer_resume, | 138 | .resume = timer_resume, |
146 | }; | 139 | }; |
147 | 140 | ||
148 | static int __init timer_init_sysfs(void) | 141 | static int __init timer_init_syscore_ops(void) |
149 | { | 142 | { |
150 | int ret = sysdev_class_register(&timer_sysclass); | 143 | register_syscore_ops(&timer_syscore_ops); |
151 | if (ret == 0) { | ||
152 | system_timer->dev.cls = &timer_sysclass; | ||
153 | ret = sysdev_register(&system_timer->dev); | ||
154 | } | ||
155 | 144 | ||
156 | return ret; | 145 | return 0; |
157 | } | 146 | } |
158 | 147 | ||
159 | device_initcall(timer_init_sysfs); | 148 | device_initcall(timer_init_syscore_ops); |
160 | 149 | ||
161 | void __init time_init(void) | 150 | void __init time_init(void) |
162 | { | 151 | { |
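Together with the struct sys_timer change at the top of this section (its embedded struct sys_device is gone), a platform now only fills in the callbacks, and dispatch happens through the global system_timer pointer from the syscore hooks above. A sketch with hypothetical myplat_* names, assuming struct sys_timer still comes from <asm/mach/time.h>:

#include <asm/mach/time.h>

static void myplat_timer_init(void)
{
	/* program the hardware timer and request its interrupt */
}

static void myplat_timer_suspend(void)
{
	/* quiesce the timer before entering a sleep state */
}

static void myplat_timer_resume(void)
{
	/* reprogram the timer on wakeup */
}

struct sys_timer myplat_timer = {
	.init    = myplat_timer_init,
	.suspend = myplat_timer_suspend,
	.resume  = myplat_timer_resume,
};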
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c index 0a95be1512bb..41669ecc1f91 100644 --- a/arch/arm/mach-davinci/cpufreq.c +++ b/arch/arm/mach-davinci/cpufreq.c | |||
@@ -94,9 +94,7 @@ static int davinci_target(struct cpufreq_policy *policy, | |||
94 | if (freqs.old == freqs.new) | 94 | if (freqs.old == freqs.new) |
95 | return ret; | 95 | return ret; |
96 | 96 | ||
97 | cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, | 97 | dev_dbg(&cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); |
98 | dev_driver_string(cpufreq.dev), | ||
99 | "transition: %u --> %u\n", freqs.old, freqs.new); | ||
100 | 98 | ||
101 | ret = cpufreq_frequency_table_target(policy, pdata->freq_table, | 99 | ret = cpufreq_frequency_table_target(policy, pdata->freq_table, |
102 | freqs.new, relation, &idx); | 100 | freqs.new, relation, &idx); |
diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c index 10d917d9e3ad..8755ca8dd48d 100644 --- a/arch/arm/mach-exynos4/pm.c +++ b/arch/arm/mach-exynos4/pm.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/suspend.h> | 18 | #include <linux/suspend.h> |
19 | #include <linux/syscore_ops.h> | ||
19 | #include <linux/io.h> | 20 | #include <linux/io.h> |
20 | 21 | ||
21 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
@@ -372,7 +373,27 @@ void exynos4_scu_enable(void __iomem *scu_base) | |||
372 | flush_cache_all(); | 373 | flush_cache_all(); |
373 | } | 374 | } |
374 | 375 | ||
375 | static int exynos4_pm_resume(struct sys_device *dev) | 376 | static struct sysdev_driver exynos4_pm_driver = { |
377 | .add = exynos4_pm_add, | ||
378 | }; | ||
379 | |||
380 | static __init int exynos4_pm_drvinit(void) | ||
381 | { | ||
382 | unsigned int tmp; | ||
383 | |||
384 | s3c_pm_init(); | ||
385 | |||
386 | /* All wakeup disable */ | ||
387 | |||
388 | tmp = __raw_readl(S5P_WAKEUP_MASK); | ||
389 | tmp |= ((0xFF << 8) | (0x1F << 1)); | ||
390 | __raw_writel(tmp, S5P_WAKEUP_MASK); | ||
391 | |||
392 | return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver); | ||
393 | } | ||
394 | arch_initcall(exynos4_pm_drvinit); | ||
395 | |||
396 | static void exynos4_pm_resume(void) | ||
376 | { | 397 | { |
377 | /* For release retention */ | 398 | /* For release retention */ |
378 | 399 | ||
@@ -394,27 +415,15 @@ static int exynos4_pm_resume(struct sys_device *dev) | |||
394 | /* enable L2X0*/ | 415 | /* enable L2X0*/ |
395 | writel_relaxed(1, S5P_VA_L2CC + L2X0_CTRL); | 416 | writel_relaxed(1, S5P_VA_L2CC + L2X0_CTRL); |
396 | #endif | 417 | #endif |
397 | |||
398 | return 0; | ||
399 | } | 418 | } |
400 | 419 | ||
401 | static struct sysdev_driver exynos4_pm_driver = { | 420 | static struct syscore_ops exynos4_pm_syscore_ops = { |
402 | .add = exynos4_pm_add, | ||
403 | .resume = exynos4_pm_resume, | 421 | .resume = exynos4_pm_resume, |
404 | }; | 422 | }; |
405 | 423 | ||
406 | static __init int exynos4_pm_drvinit(void) | 424 | static __init int exynos4_pm_syscore_init(void) |
407 | { | 425 | { |
408 | unsigned int tmp; | 426 | register_syscore_ops(&exynos4_pm_syscore_ops); |
409 | 427 | return 0; | |
410 | s3c_pm_init(); | ||
411 | |||
412 | /* All wakeup disable */ | ||
413 | |||
414 | tmp = __raw_readl(S5P_WAKEUP_MASK); | ||
415 | tmp |= ((0xFF << 8) | (0x1F << 1)); | ||
416 | __raw_writel(tmp, S5P_WAKEUP_MASK); | ||
417 | |||
418 | return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver); | ||
419 | } | 428 | } |
420 | arch_initcall(exynos4_pm_drvinit); | 429 | arch_initcall(exynos4_pm_syscore_init); |
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index 980803ff348c..d3e96451529c 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | #include <linux/sysdev.h> | 27 | #include <linux/syscore_ops.h> |
28 | #include <linux/amba/bus.h> | 28 | #include <linux/amba/bus.h> |
29 | #include <linux/amba/kmi.h> | 29 | #include <linux/amba/kmi.h> |
30 | #include <linux/clocksource.h> | 30 | #include <linux/clocksource.h> |
@@ -180,13 +180,13 @@ static void __init ap_init_irq(void) | |||
180 | #ifdef CONFIG_PM | 180 | #ifdef CONFIG_PM |
181 | static unsigned long ic_irq_enable; | 181 | static unsigned long ic_irq_enable; |
182 | 182 | ||
183 | static int irq_suspend(struct sys_device *dev, pm_message_t state) | 183 | static int irq_suspend(void) |
184 | { | 184 | { |
185 | ic_irq_enable = readl(VA_IC_BASE + IRQ_ENABLE); | 185 | ic_irq_enable = readl(VA_IC_BASE + IRQ_ENABLE); |
186 | return 0; | 186 | return 0; |
187 | } | 187 | } |
188 | 188 | ||
189 | static int irq_resume(struct sys_device *dev) | 189 | static void irq_resume(void) |
190 | { | 190 | { |
191 | /* disable all irq sources */ | 191 | /* disable all irq sources */ |
192 | writel(-1, VA_CMIC_BASE + IRQ_ENABLE_CLEAR); | 192 | writel(-1, VA_CMIC_BASE + IRQ_ENABLE_CLEAR); |
@@ -194,33 +194,25 @@ static int irq_resume(struct sys_device *dev) | |||
194 | writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR); | 194 | writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR); |
195 | 195 | ||
196 | writel(ic_irq_enable, VA_IC_BASE + IRQ_ENABLE_SET); | 196 | writel(ic_irq_enable, VA_IC_BASE + IRQ_ENABLE_SET); |
197 | return 0; | ||
198 | } | 197 | } |
199 | #else | 198 | #else |
200 | #define irq_suspend NULL | 199 | #define irq_suspend NULL |
201 | #define irq_resume NULL | 200 | #define irq_resume NULL |
202 | #endif | 201 | #endif |
203 | 202 | ||
204 | static struct sysdev_class irq_class = { | 203 | static struct syscore_ops irq_syscore_ops = { |
205 | .name = "irq", | ||
206 | .suspend = irq_suspend, | 204 | .suspend = irq_suspend, |
207 | .resume = irq_resume, | 205 | .resume = irq_resume, |
208 | }; | 206 | }; |
209 | 207 | ||
210 | static struct sys_device irq_device = { | 208 | static int __init irq_syscore_init(void) |
211 | .id = 0, | ||
212 | .cls = &irq_class, | ||
213 | }; | ||
214 | |||
215 | static int __init irq_init_sysfs(void) | ||
216 | { | 209 | { |
217 | int ret = sysdev_class_register(&irq_class); | 210 | register_syscore_ops(&irq_syscore_ops); |
218 | if (ret == 0) | 211 | |
219 | ret = sysdev_register(&irq_device); | 212 | return 0; |
220 | return ret; | ||
221 | } | 213 | } |
222 | 214 | ||
223 | device_initcall(irq_init_sysfs); | 215 | device_initcall(irq_syscore_init); |
224 | 216 | ||
225 | /* | 217 | /* |
226 | * Flash handling. | 218 | * Flash handling. |
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c index 6588c22b8a64..fe31d933f0ed 100644 --- a/arch/arm/mach-omap1/pm_bus.c +++ b/arch/arm/mach-omap1/pm_bus.c | |||
@@ -24,75 +24,50 @@ | |||
24 | #ifdef CONFIG_PM_RUNTIME | 24 | #ifdef CONFIG_PM_RUNTIME |
25 | static int omap1_pm_runtime_suspend(struct device *dev) | 25 | static int omap1_pm_runtime_suspend(struct device *dev) |
26 | { | 26 | { |
27 | struct clk *iclk, *fclk; | 27 | int ret; |
28 | int ret = 0; | ||
29 | 28 | ||
30 | dev_dbg(dev, "%s\n", __func__); | 29 | dev_dbg(dev, "%s\n", __func__); |
31 | 30 | ||
32 | ret = pm_generic_runtime_suspend(dev); | 31 | ret = pm_generic_runtime_suspend(dev); |
32 | if (ret) | ||
33 | return ret; | ||
33 | 34 | ||
34 | fclk = clk_get(dev, "fck"); | 35 | ret = pm_runtime_clk_suspend(dev); |
35 | if (!IS_ERR(fclk)) { | 36 | if (ret) { |
36 | clk_disable(fclk); | 37 | pm_generic_runtime_resume(dev); |
37 | clk_put(fclk); | 38 | return ret; |
38 | } | ||
39 | |||
40 | iclk = clk_get(dev, "ick"); | ||
41 | if (!IS_ERR(iclk)) { | ||
42 | clk_disable(iclk); | ||
43 | clk_put(iclk); | ||
44 | } | 39 | } |
45 | 40 | ||
46 | return 0; | 41 | return 0; |
47 | }; | 42 | } |
48 | 43 | ||
49 | static int omap1_pm_runtime_resume(struct device *dev) | 44 | static int omap1_pm_runtime_resume(struct device *dev) |
50 | { | 45 | { |
51 | struct clk *iclk, *fclk; | ||
52 | |||
53 | dev_dbg(dev, "%s\n", __func__); | 46 | dev_dbg(dev, "%s\n", __func__); |
54 | 47 | ||
55 | iclk = clk_get(dev, "ick"); | 48 | pm_runtime_clk_resume(dev); |
56 | if (!IS_ERR(iclk)) { | 49 | return pm_generic_runtime_resume(dev); |
57 | clk_enable(iclk); | 50 | } |
58 | clk_put(iclk); | ||
59 | } | ||
60 | 51 | ||
61 | fclk = clk_get(dev, "fck"); | 52 | static struct dev_power_domain default_power_domain = { |
62 | if (!IS_ERR(fclk)) { | 53 | .ops = { |
63 | clk_enable(fclk); | 54 | .runtime_suspend = omap1_pm_runtime_suspend, |
64 | clk_put(fclk); | 55 | .runtime_resume = omap1_pm_runtime_resume, |
65 | } | 56 | USE_PLATFORM_PM_SLEEP_OPS |
57 | }, | ||
58 | }; | ||
66 | 59 | ||
67 | return pm_generic_runtime_resume(dev); | 60 | static struct pm_clk_notifier_block platform_bus_notifier = { |
61 | .pwr_domain = &default_power_domain, | ||
62 | .con_ids = { "ick", "fck", NULL, }, | ||
68 | }; | 63 | }; |
69 | 64 | ||
70 | static int __init omap1_pm_runtime_init(void) | 65 | static int __init omap1_pm_runtime_init(void) |
71 | { | 66 | { |
72 | const struct dev_pm_ops *pm; | ||
73 | struct dev_pm_ops *omap_pm; | ||
74 | |||
75 | if (!cpu_class_is_omap1()) | 67 | if (!cpu_class_is_omap1()) |
76 | return -ENODEV; | 68 | return -ENODEV; |
77 | 69 | ||
78 | pm = platform_bus_get_pm_ops(); | 70 | pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); |
79 | if (!pm) { | ||
80 | pr_err("%s: unable to get dev_pm_ops from platform_bus\n", | ||
81 | __func__); | ||
82 | return -ENODEV; | ||
83 | } | ||
84 | |||
85 | omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL); | ||
86 | if (!omap_pm) { | ||
87 | pr_err("%s: unable to alloc memory for new dev_pm_ops\n", | ||
88 | __func__); | ||
89 | return -ENOMEM; | ||
90 | } | ||
91 | |||
92 | omap_pm->runtime_suspend = omap1_pm_runtime_suspend; | ||
93 | omap_pm->runtime_resume = omap1_pm_runtime_resume; | ||
94 | |||
95 | platform_bus_set_pm_ops(omap_pm); | ||
96 | 71 | ||
97 | return 0; | 72 | return 0; |
98 | } | 73 | } |
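The OMAP1 change above drops the hand-rolled clk_get()/clk_disable() pairs in favour of the generic runtime-PM clock framework: the notifier attaches any "ick"/"fck" clocks to a device when it appears on the platform bus, and the default power domain gates them through pm_runtime_clk_suspend()/pm_runtime_clk_resume() around the generic runtime callbacks. A hedged sketch of a hypothetical consumer (foo_* is not from this diff): such a driver only issues ordinary runtime-PM calls and never touches the clocks directly.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);	/* power domain enables "ick"/"fck" */
	/* ... talk to the hardware ... */
	pm_runtime_put(&pdev->dev);		/* clocks are gated again */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo",
	},
};

static int __init foo_driver_init(void)
{
	return platform_driver_register(&foo_driver);
}
device_initcall(foo_driver_init);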
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 512b15204450..66dfbccacd25 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile | |||
@@ -59,10 +59,10 @@ endif | |||
59 | # Power Management | 59 | # Power Management |
60 | ifeq ($(CONFIG_PM),y) | 60 | ifeq ($(CONFIG_PM),y) |
61 | obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o | 61 | obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o |
62 | obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o pm_bus.o | 62 | obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o |
63 | obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ | 63 | obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ |
64 | cpuidle34xx.o pm_bus.o | 64 | cpuidle34xx.o |
65 | obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o pm_bus.o | 65 | obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o |
66 | obj-$(CONFIG_PM_DEBUG) += pm-debug.o | 66 | obj-$(CONFIG_PM_DEBUG) += pm-debug.o |
67 | obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o | 67 | obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o |
68 | obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o | 68 | obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o |
diff --git a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c index b2b1e37bb6bb..d6e34dd9e7e7 100644 --- a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c +++ b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c | |||
@@ -115,6 +115,7 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate) | |||
115 | sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla, | 115 | sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla, |
116 | sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, | 116 | sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, |
117 | 0, 0, 0, 0); | 117 | 0, 0, 0, 0); |
118 | clk->rate = rate; | ||
118 | 119 | ||
119 | return 0; | 120 | return 0; |
120 | } | 121 | } |
diff --git a/arch/arm/mach-omap2/pm_bus.c b/arch/arm/mach-omap2/pm_bus.c deleted file mode 100644 index 5acd2ab298b1..000000000000 --- a/arch/arm/mach-omap2/pm_bus.c +++ /dev/null | |||
@@ -1,85 +0,0 @@ | |||
1 | /* | ||
2 | * Runtime PM support code for OMAP | ||
3 | * | ||
4 | * Author: Kevin Hilman, Deep Root Systems, LLC | ||
5 | * | ||
6 | * Copyright (C) 2010 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This file is licensed under the terms of the GNU General Public | ||
9 | * License version 2. This program is licensed "as is" without any | ||
10 | * warranty of any kind, whether express or implied. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/pm_runtime.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/mutex.h> | ||
18 | |||
19 | #include <plat/omap_device.h> | ||
20 | #include <plat/omap-pm.h> | ||
21 | |||
22 | #ifdef CONFIG_PM_RUNTIME | ||
23 | static int omap_pm_runtime_suspend(struct device *dev) | ||
24 | { | ||
25 | struct platform_device *pdev = to_platform_device(dev); | ||
26 | int r, ret = 0; | ||
27 | |||
28 | dev_dbg(dev, "%s\n", __func__); | ||
29 | |||
30 | ret = pm_generic_runtime_suspend(dev); | ||
31 | |||
32 | if (!ret && dev->parent == &omap_device_parent) { | ||
33 | r = omap_device_idle(pdev); | ||
34 | WARN_ON(r); | ||
35 | } | ||
36 | |||
37 | return ret; | ||
38 | }; | ||
39 | |||
40 | static int omap_pm_runtime_resume(struct device *dev) | ||
41 | { | ||
42 | struct platform_device *pdev = to_platform_device(dev); | ||
43 | int r; | ||
44 | |||
45 | dev_dbg(dev, "%s\n", __func__); | ||
46 | |||
47 | if (dev->parent == &omap_device_parent) { | ||
48 | r = omap_device_enable(pdev); | ||
49 | WARN_ON(r); | ||
50 | } | ||
51 | |||
52 | return pm_generic_runtime_resume(dev); | ||
53 | }; | ||
54 | #else | ||
55 | #define omap_pm_runtime_suspend NULL | ||
56 | #define omap_pm_runtime_resume NULL | ||
57 | #endif /* CONFIG_PM_RUNTIME */ | ||
58 | |||
59 | static int __init omap_pm_runtime_init(void) | ||
60 | { | ||
61 | const struct dev_pm_ops *pm; | ||
62 | struct dev_pm_ops *omap_pm; | ||
63 | |||
64 | pm = platform_bus_get_pm_ops(); | ||
65 | if (!pm) { | ||
66 | pr_err("%s: unable to get dev_pm_ops from platform_bus\n", | ||
67 | __func__); | ||
68 | return -ENODEV; | ||
69 | } | ||
70 | |||
71 | omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL); | ||
72 | if (!omap_pm) { | ||
73 | pr_err("%s: unable to alloc memory for new dev_pm_ops\n", | ||
74 | __func__); | ||
75 | return -ENOMEM; | ||
76 | } | ||
77 | |||
78 | omap_pm->runtime_suspend = omap_pm_runtime_suspend; | ||
79 | omap_pm->runtime_resume = omap_pm_runtime_resume; | ||
80 | |||
81 | platform_bus_set_pm_ops(omap_pm); | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | core_initcall(omap_pm_runtime_init); | ||
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index bfbecec6d05f..810a982a66f8 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c | |||
@@ -15,7 +15,6 @@ | |||
15 | 15 | ||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sysdev.h> | ||
19 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
20 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
21 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
diff --git a/arch/arm/mach-pxa/clock-pxa2xx.c b/arch/arm/mach-pxa/clock-pxa2xx.c index 1ce090448493..1d5859d9a0e3 100644 --- a/arch/arm/mach-pxa/clock-pxa2xx.c +++ b/arch/arm/mach-pxa/clock-pxa2xx.c | |||
@@ -9,7 +9,7 @@ | |||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/sysdev.h> | 12 | #include <linux/syscore_ops.h> |
13 | 13 | ||
14 | #include <mach/pxa2xx-regs.h> | 14 | #include <mach/pxa2xx-regs.h> |
15 | 15 | ||
@@ -33,32 +33,22 @@ const struct clkops clk_pxa2xx_cken_ops = { | |||
33 | #ifdef CONFIG_PM | 33 | #ifdef CONFIG_PM |
34 | static uint32_t saved_cken; | 34 | static uint32_t saved_cken; |
35 | 35 | ||
36 | static int pxa2xx_clock_suspend(struct sys_device *d, pm_message_t state) | 36 | static int pxa2xx_clock_suspend(void) |
37 | { | 37 | { |
38 | saved_cken = CKEN; | 38 | saved_cken = CKEN; |
39 | return 0; | 39 | return 0; |
40 | } | 40 | } |
41 | 41 | ||
42 | static int pxa2xx_clock_resume(struct sys_device *d) | 42 | static void pxa2xx_clock_resume(void) |
43 | { | 43 | { |
44 | CKEN = saved_cken; | 44 | CKEN = saved_cken; |
45 | return 0; | ||
46 | } | 45 | } |
47 | #else | 46 | #else |
48 | #define pxa2xx_clock_suspend NULL | 47 | #define pxa2xx_clock_suspend NULL |
49 | #define pxa2xx_clock_resume NULL | 48 | #define pxa2xx_clock_resume NULL |
50 | #endif | 49 | #endif |
51 | 50 | ||
52 | struct sysdev_class pxa2xx_clock_sysclass = { | 51 | struct syscore_ops pxa2xx_clock_syscore_ops = { |
53 | .name = "pxa2xx-clock", | ||
54 | .suspend = pxa2xx_clock_suspend, | 52 | .suspend = pxa2xx_clock_suspend, |
55 | .resume = pxa2xx_clock_resume, | 53 | .resume = pxa2xx_clock_resume, |
56 | }; | 54 | }; |
57 | |||
58 | static int __init pxa2xx_clock_init(void) | ||
59 | { | ||
60 | if (cpu_is_pxa2xx()) | ||
61 | return sysdev_class_register(&pxa2xx_clock_sysclass); | ||
62 | return 0; | ||
63 | } | ||
64 | postcore_initcall(pxa2xx_clock_init); | ||
diff --git a/arch/arm/mach-pxa/clock-pxa3xx.c b/arch/arm/mach-pxa/clock-pxa3xx.c index 3f864cd0bd28..2a37a9a8f621 100644 --- a/arch/arm/mach-pxa/clock-pxa3xx.c +++ b/arch/arm/mach-pxa/clock-pxa3xx.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/io.h> | 12 | #include <linux/io.h> |
13 | #include <linux/syscore_ops.h> | ||
13 | 14 | ||
14 | #include <mach/smemc.h> | 15 | #include <mach/smemc.h> |
15 | #include <mach/pxa3xx-regs.h> | 16 | #include <mach/pxa3xx-regs.h> |
@@ -182,7 +183,7 @@ const struct clkops clk_pxa3xx_pout_ops = { | |||
182 | static uint32_t cken[2]; | 183 | static uint32_t cken[2]; |
183 | static uint32_t accr; | 184 | static uint32_t accr; |
184 | 185 | ||
185 | static int pxa3xx_clock_suspend(struct sys_device *d, pm_message_t state) | 186 | static int pxa3xx_clock_suspend(void) |
186 | { | 187 | { |
187 | cken[0] = CKENA; | 188 | cken[0] = CKENA; |
188 | cken[1] = CKENB; | 189 | cken[1] = CKENB; |
@@ -190,28 +191,18 @@ static int pxa3xx_clock_suspend(struct sys_device *d, pm_message_t state) | |||
190 | return 0; | 191 | return 0; |
191 | } | 192 | } |
192 | 193 | ||
193 | static int pxa3xx_clock_resume(struct sys_device *d) | 194 | static void pxa3xx_clock_resume(void) |
194 | { | 195 | { |
195 | ACCR = accr; | 196 | ACCR = accr; |
196 | CKENA = cken[0]; | 197 | CKENA = cken[0]; |
197 | CKENB = cken[1]; | 198 | CKENB = cken[1]; |
198 | return 0; | ||
199 | } | 199 | } |
200 | #else | 200 | #else |
201 | #define pxa3xx_clock_suspend NULL | 201 | #define pxa3xx_clock_suspend NULL |
202 | #define pxa3xx_clock_resume NULL | 202 | #define pxa3xx_clock_resume NULL |
203 | #endif | 203 | #endif |
204 | 204 | ||
205 | struct sysdev_class pxa3xx_clock_sysclass = { | 205 | struct syscore_ops pxa3xx_clock_syscore_ops = { |
206 | .name = "pxa3xx-clock", | ||
207 | .suspend = pxa3xx_clock_suspend, | 206 | .suspend = pxa3xx_clock_suspend, |
208 | .resume = pxa3xx_clock_resume, | 207 | .resume = pxa3xx_clock_resume, |
209 | }; | 208 | }; |
210 | |||
211 | static int __init pxa3xx_clock_init(void) | ||
212 | { | ||
213 | if (cpu_is_pxa3xx() || cpu_is_pxa95x()) | ||
214 | return sysdev_class_register(&pxa3xx_clock_sysclass); | ||
215 | return 0; | ||
216 | } | ||
217 | postcore_initcall(pxa3xx_clock_init); | ||
diff --git a/arch/arm/mach-pxa/clock.h b/arch/arm/mach-pxa/clock.h index f9f349a21b54..1f2fb9c43f06 100644 --- a/arch/arm/mach-pxa/clock.h +++ b/arch/arm/mach-pxa/clock.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/clkdev.h> | 1 | #include <linux/clkdev.h> |
2 | #include <linux/sysdev.h> | 2 | #include <linux/syscore_ops.h> |
3 | 3 | ||
4 | struct clkops { | 4 | struct clkops { |
5 | void (*enable)(struct clk *); | 5 | void (*enable)(struct clk *); |
@@ -54,7 +54,7 @@ extern const struct clkops clk_pxa2xx_cken_ops; | |||
54 | void clk_pxa2xx_cken_enable(struct clk *clk); | 54 | void clk_pxa2xx_cken_enable(struct clk *clk); |
55 | void clk_pxa2xx_cken_disable(struct clk *clk); | 55 | void clk_pxa2xx_cken_disable(struct clk *clk); |
56 | 56 | ||
57 | extern struct sysdev_class pxa2xx_clock_sysclass; | 57 | extern struct syscore_ops pxa2xx_clock_syscore_ops; |
58 | 58 | ||
59 | #if defined(CONFIG_PXA3xx) || defined(CONFIG_PXA95x) | 59 | #if defined(CONFIG_PXA3xx) || defined(CONFIG_PXA95x) |
60 | #define DEFINE_PXA3_CKEN(_name, _cken, _rate, _delay) \ | 60 | #define DEFINE_PXA3_CKEN(_name, _cken, _rate, _delay) \ |
@@ -74,5 +74,6 @@ extern const struct clkops clk_pxa3xx_smemc_ops; | |||
74 | extern void clk_pxa3xx_cken_enable(struct clk *); | 74 | extern void clk_pxa3xx_cken_enable(struct clk *); |
75 | extern void clk_pxa3xx_cken_disable(struct clk *); | 75 | extern void clk_pxa3xx_cken_disable(struct clk *); |
76 | 76 | ||
77 | extern struct sysdev_class pxa3xx_clock_sysclass; | 77 | extern struct syscore_ops pxa3xx_clock_syscore_ops; |
78 | |||
78 | #endif | 79 | #endif |
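With the postcore initcalls removed from clock-pxa2xx.c and clock-pxa3xx.c above, the syscore_ops declared in this header are presumably registered from the per-SoC init code instead (the pxa25x.c diff that follows is where one would expect a call such as register_syscore_ops(&pxa2xx_clock_syscore_ops) behind the appropriate cpu_is_*() check); that is an inference from these externs, not something shown in this hunk.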
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c index b88d601a8090..13518a705399 100644 --- a/arch/arm/mach-pxa/cm-x270.c +++ b/arch/arm/mach-pxa/cm-x270.c | |||
@@ -10,7 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
13 | #include <linux/sysdev.h> | ||
14 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
15 | #include <linux/gpio.h> | 14 | #include <linux/gpio.h> |
16 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c index 8225e2e58c6e..a10996782476 100644 --- a/arch/arm/mach-pxa/cm-x2xx.c +++ b/arch/arm/mach-pxa/cm-x2xx.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
13 | #include <linux/sysdev.h> | 13 | #include <linux/syscore_ops.h> |
14 | #include <linux/irq.h> | 14 | #include <linux/irq.h> |
15 | #include <linux/gpio.h> | 15 | #include <linux/gpio.h> |
16 | 16 | ||
@@ -388,7 +388,7 @@ static inline void cmx2xx_init_display(void) {} | |||
388 | #ifdef CONFIG_PM | 388 | #ifdef CONFIG_PM |
389 | static unsigned long sleep_save_msc[10]; | 389 | static unsigned long sleep_save_msc[10]; |
390 | 390 | ||
391 | static int cmx2xx_suspend(struct sys_device *dev, pm_message_t state) | 391 | static int cmx2xx_suspend(void) |
392 | { | 392 | { |
393 | cmx2xx_pci_suspend(); | 393 | cmx2xx_pci_suspend(); |
394 | 394 | ||
@@ -412,7 +412,7 @@ static int cmx2xx_suspend(struct sys_device *dev, pm_message_t state) | |||
412 | return 0; | 412 | return 0; |
413 | } | 413 | } |
414 | 414 | ||
415 | static int cmx2xx_resume(struct sys_device *dev) | 415 | static void cmx2xx_resume(void) |
416 | { | 416 | { |
417 | cmx2xx_pci_resume(); | 417 | cmx2xx_pci_resume(); |
418 | 418 | ||
@@ -420,27 +420,18 @@ static int cmx2xx_resume(struct sys_device *dev) | |||
420 | __raw_writel(sleep_save_msc[0], MSC0); | 420 | __raw_writel(sleep_save_msc[0], MSC0); |
421 | __raw_writel(sleep_save_msc[1], MSC1); | 421 | __raw_writel(sleep_save_msc[1], MSC1); |
422 | __raw_writel(sleep_save_msc[2], MSC2); | 422 | __raw_writel(sleep_save_msc[2], MSC2); |
423 | |||
424 | return 0; | ||
425 | } | 423 | } |
426 | 424 | ||
427 | static struct sysdev_class cmx2xx_pm_sysclass = { | 425 | static struct syscore_ops cmx2xx_pm_syscore_ops = { |
428 | .name = "pm", | ||
429 | .resume = cmx2xx_resume, | 426 | .resume = cmx2xx_resume, |
430 | .suspend = cmx2xx_suspend, | 427 | .suspend = cmx2xx_suspend, |
431 | }; | 428 | }; |
432 | 429 | ||
433 | static struct sys_device cmx2xx_pm_device = { | ||
434 | .cls = &cmx2xx_pm_sysclass, | ||
435 | }; | ||
436 | |||
437 | static int __init cmx2xx_pm_init(void) | 430 | static int __init cmx2xx_pm_init(void) |
438 | { | 431 | { |
439 | int error; | 432 | register_syscore_ops(&cmx2xx_pm_syscore_ops); |
440 | error = sysdev_class_register(&cmx2xx_pm_sysclass); | 433 | |
441 | if (error == 0) | 434 | return 0; |
442 | error = sysdev_register(&cmx2xx_pm_device); | ||
443 | return error; | ||
444 | } | 435 | } |
445 | #else | 436 | #else |
446 | static int __init cmx2xx_pm_init(void) { return 0; } | 437 | static int __init cmx2xx_pm_init(void) { return 0; } |
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c index 81c3c433e2d6..d28e802e2448 100644 --- a/arch/arm/mach-pxa/colibri-evalboard.c +++ b/arch/arm/mach-pxa/colibri-evalboard.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/sysdev.h> | ||
17 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
18 | #include <linux/gpio.h> | 17 | #include <linux/gpio.h> |
19 | #include <asm/mach-types.h> | 18 | #include <asm/mach-types.h> |
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c index 44c1b77ece67..80538b8806ed 100644 --- a/arch/arm/mach-pxa/colibri-pxa270-income.c +++ b/arch/arm/mach-pxa/colibri-pxa270-income.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/pwm_backlight.h> | 23 | #include <linux/pwm_backlight.h> |
24 | #include <linux/i2c/pxa-i2c.h> | 24 | #include <linux/i2c/pxa-i2c.h> |
25 | #include <linux/sysdev.h> | ||
26 | 25 | ||
27 | #include <asm/irq.h> | 26 | #include <asm/irq.h> |
28 | #include <asm/mach-types.h> | 27 | #include <asm/mach-types.h> |
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c index 6fc5d328ba7f..7545a48ed88b 100644 --- a/arch/arm/mach-pxa/colibri-pxa270.c +++ b/arch/arm/mach-pxa/colibri-pxa270.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/mtd/partitions.h> | 17 | #include <linux/mtd/partitions.h> |
18 | #include <linux/mtd/physmap.h> | 18 | #include <linux/mtd/physmap.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/sysdev.h> | ||
21 | #include <linux/ucb1400.h> | 20 | #include <linux/ucb1400.h> |
22 | 21 | ||
23 | #include <asm/mach/arch.h> | 22 | #include <asm/mach/arch.h> |
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h index a079d8baa45a..e6c9344a95ae 100644 --- a/arch/arm/mach-pxa/generic.h +++ b/arch/arm/mach-pxa/generic.h | |||
@@ -61,10 +61,10 @@ extern unsigned pxa3xx_get_clk_frequency_khz(int); | |||
61 | #define pxa3xx_get_clk_frequency_khz(x) (0) | 61 | #define pxa3xx_get_clk_frequency_khz(x) (0) |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | extern struct sysdev_class pxa_irq_sysclass; | 64 | extern struct syscore_ops pxa_irq_syscore_ops; |
65 | extern struct sysdev_class pxa_gpio_sysclass; | 65 | extern struct syscore_ops pxa_gpio_syscore_ops; |
66 | extern struct sysdev_class pxa2xx_mfp_sysclass; | 66 | extern struct syscore_ops pxa2xx_mfp_syscore_ops; |
67 | extern struct sysdev_class pxa3xx_mfp_sysclass; | 67 | extern struct syscore_ops pxa3xx_mfp_syscore_ops; |
68 | 68 | ||
69 | void __init pxa_set_ffuart_info(void *info); | 69 | void __init pxa_set_ffuart_info(void *info); |
70 | void __init pxa_set_btuart_info(void *info); | 70 | void __init pxa_set_btuart_info(void *info); |
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 6251e3f5c62c..32ed551bf9c5 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/sysdev.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <linux/irq.h> | 20 | #include <linux/irq.h> |
21 | 21 | ||
@@ -183,7 +183,7 @@ void __init pxa_init_irq(int irq_nr, set_wake_t fn) | |||
183 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; | 183 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; |
184 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; | 184 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; |
185 | 185 | ||
186 | static int pxa_irq_suspend(struct sys_device *dev, pm_message_t state) | 186 | static int pxa_irq_suspend(void) |
187 | { | 187 | { |
188 | int i; | 188 | int i; |
189 | 189 | ||
@@ -202,7 +202,7 @@ static int pxa_irq_suspend(struct sys_device *dev, pm_message_t state) | |||
202 | return 0; | 202 | return 0; |
203 | } | 203 | } |
204 | 204 | ||
205 | static int pxa_irq_resume(struct sys_device *dev) | 205 | static void pxa_irq_resume(void) |
206 | { | 206 | { |
207 | int i; | 207 | int i; |
208 | 208 | ||
@@ -218,22 +218,13 @@ static int pxa_irq_resume(struct sys_device *dev) | |||
218 | __raw_writel(saved_ipr[i], IRQ_BASE + IPR(i)); | 218 | __raw_writel(saved_ipr[i], IRQ_BASE + IPR(i)); |
219 | 219 | ||
220 | __raw_writel(1, IRQ_BASE + ICCR); | 220 | __raw_writel(1, IRQ_BASE + ICCR); |
221 | return 0; | ||
222 | } | 221 | } |
223 | #else | 222 | #else |
224 | #define pxa_irq_suspend NULL | 223 | #define pxa_irq_suspend NULL |
225 | #define pxa_irq_resume NULL | 224 | #define pxa_irq_resume NULL |
226 | #endif | 225 | #endif |
227 | 226 | ||
228 | struct sysdev_class pxa_irq_sysclass = { | 227 | struct syscore_ops pxa_irq_syscore_ops = { |
229 | .name = "irq", | ||
230 | .suspend = pxa_irq_suspend, | 228 | .suspend = pxa_irq_suspend, |
231 | .resume = pxa_irq_resume, | 229 | .resume = pxa_irq_resume, |
232 | }; | 230 | }; |
233 | |||
234 | static int __init pxa_irq_init(void) | ||
235 | { | ||
236 | return sysdev_class_register(&pxa_irq_sysclass); | ||
237 | } | ||
238 | |||
239 | core_initcall(pxa_irq_init); | ||
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index f5de541725b1..6cf8180bf5bd 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sysdev.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
@@ -159,30 +159,22 @@ static void __init lpd270_init_irq(void) | |||
159 | 159 | ||
160 | 160 | ||
161 | #ifdef CONFIG_PM | 161 | #ifdef CONFIG_PM |
162 | static int lpd270_irq_resume(struct sys_device *dev) | 162 | static void lpd270_irq_resume(void) |
163 | { | 163 | { |
164 | __raw_writew(lpd270_irq_enabled, LPD270_INT_MASK); | 164 | __raw_writew(lpd270_irq_enabled, LPD270_INT_MASK); |
165 | return 0; | ||
166 | } | 165 | } |
167 | 166 | ||
168 | static struct sysdev_class lpd270_irq_sysclass = { | 167 | static struct syscore_ops lpd270_irq_syscore_ops = { |
169 | .name = "cpld_irq", | ||
170 | .resume = lpd270_irq_resume, | 168 | .resume = lpd270_irq_resume, |
171 | }; | 169 | }; |
172 | 170 | ||
173 | static struct sys_device lpd270_irq_device = { | ||
174 | .cls = &lpd270_irq_sysclass, | ||
175 | }; | ||
176 | |||
177 | static int __init lpd270_irq_device_init(void) | 171 | static int __init lpd270_irq_device_init(void) |
178 | { | 172 | { |
179 | int ret = -ENODEV; | ||
180 | if (machine_is_logicpd_pxa270()) { | 173 | if (machine_is_logicpd_pxa270()) { |
181 | ret = sysdev_class_register(&lpd270_irq_sysclass); | 174 | register_syscore_ops(&lpd270_irq_syscore_ops); |
182 | if (ret == 0) | 175 | return 0; |
183 | ret = sysdev_register(&lpd270_irq_device); | ||
184 | } | 176 | } |
185 | return ret; | 177 | return -ENODEV; |
186 | } | 178 | } |
187 | 179 | ||
188 | device_initcall(lpd270_irq_device_init); | 180 | device_initcall(lpd270_irq_device_init); |
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index 3ede978c83d9..e10ddb827147 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sysdev.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/major.h> | 19 | #include <linux/major.h> |
20 | #include <linux/fb.h> | 20 | #include <linux/fb.h> |
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
@@ -176,31 +176,22 @@ static void __init lubbock_init_irq(void) | |||
176 | 176 | ||
177 | #ifdef CONFIG_PM | 177 | #ifdef CONFIG_PM |
178 | 178 | ||
179 | static int lubbock_irq_resume(struct sys_device *dev) | 179 | static void lubbock_irq_resume(void) |
180 | { | 180 | { |
181 | LUB_IRQ_MASK_EN = lubbock_irq_enabled; | 181 | LUB_IRQ_MASK_EN = lubbock_irq_enabled; |
182 | return 0; | ||
183 | } | 182 | } |
184 | 183 | ||
185 | static struct sysdev_class lubbock_irq_sysclass = { | 184 | static struct syscore_ops lubbock_irq_syscore_ops = { |
186 | .name = "cpld_irq", | ||
187 | .resume = lubbock_irq_resume, | 185 | .resume = lubbock_irq_resume, |
188 | }; | 186 | }; |
189 | 187 | ||
190 | static struct sys_device lubbock_irq_device = { | ||
191 | .cls = &lubbock_irq_sysclass, | ||
192 | }; | ||
193 | |||
194 | static int __init lubbock_irq_device_init(void) | 188 | static int __init lubbock_irq_device_init(void) |
195 | { | 189 | { |
196 | int ret = -ENODEV; | ||
197 | |||
198 | if (machine_is_lubbock()) { | 190 | if (machine_is_lubbock()) { |
199 | ret = sysdev_class_register(&lubbock_irq_sysclass); | 191 | register_syscore_ops(&lubbock_irq_syscore_ops); |
200 | if (ret == 0) | 192 | return 0; |
201 | ret = sysdev_register(&lubbock_irq_device); | ||
202 | } | 193 | } |
203 | return ret; | 194 | return -ENODEV; |
204 | } | 195 | } |
205 | 196 | ||
206 | device_initcall(lubbock_irq_device_init); | 197 | device_initcall(lubbock_irq_device_init); |
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index 95163baca29e..3479e2b3b511 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sysdev.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
@@ -185,31 +185,21 @@ static void __init mainstone_init_irq(void) | |||
185 | 185 | ||
186 | #ifdef CONFIG_PM | 186 | #ifdef CONFIG_PM |
187 | 187 | ||
188 | static int mainstone_irq_resume(struct sys_device *dev) | 188 | static void mainstone_irq_resume(void) |
189 | { | 189 | { |
190 | MST_INTMSKENA = mainstone_irq_enabled; | 190 | MST_INTMSKENA = mainstone_irq_enabled; |
191 | return 0; | ||
192 | } | 191 | } |
193 | 192 | ||
194 | static struct sysdev_class mainstone_irq_sysclass = { | 193 | static struct syscore_ops mainstone_irq_syscore_ops = { |
195 | .name = "cpld_irq", | ||
196 | .resume = mainstone_irq_resume, | 194 | .resume = mainstone_irq_resume, |
197 | }; | 195 | }; |
198 | 196 | ||
199 | static struct sys_device mainstone_irq_device = { | ||
200 | .cls = &mainstone_irq_sysclass, | ||
201 | }; | ||
202 | |||
203 | static int __init mainstone_irq_device_init(void) | 197 | static int __init mainstone_irq_device_init(void) |
204 | { | 198 | { |
205 | int ret = -ENODEV; | 199 | if (machine_is_mainstone()) |
200 | register_syscore_ops(&mainstone_irq_syscore_ops); | ||
206 | 201 | ||
207 | if (machine_is_mainstone()) { | 202 | return 0; |
208 | ret = sysdev_class_register(&mainstone_irq_sysclass); | ||
209 | if (ret == 0) | ||
210 | ret = sysdev_register(&mainstone_irq_device); | ||
211 | } | ||
212 | return ret; | ||
213 | } | 203 | } |
214 | 204 | ||
215 | device_initcall(mainstone_irq_device_init); | 205 | device_initcall(mainstone_irq_device_init); |
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c index 1d1419b73457..87ae3129f4f7 100644 --- a/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/arch/arm/mach-pxa/mfp-pxa2xx.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/sysdev.h> | 19 | #include <linux/syscore_ops.h> |
20 | 20 | ||
21 | #include <mach/gpio.h> | 21 | #include <mach/gpio.h> |
22 | #include <mach/pxa2xx-regs.h> | 22 | #include <mach/pxa2xx-regs.h> |
@@ -338,7 +338,7 @@ static unsigned long saved_gafr[2][4]; | |||
338 | static unsigned long saved_gpdr[4]; | 338 | static unsigned long saved_gpdr[4]; |
339 | static unsigned long saved_pgsr[4]; | 339 | static unsigned long saved_pgsr[4]; |
340 | 340 | ||
341 | static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state) | 341 | static int pxa2xx_mfp_suspend(void) |
342 | { | 342 | { |
343 | int i; | 343 | int i; |
344 | 344 | ||
@@ -365,7 +365,7 @@ static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state) | |||
365 | return 0; | 365 | return 0; |
366 | } | 366 | } |
367 | 367 | ||
368 | static int pxa2xx_mfp_resume(struct sys_device *d) | 368 | static void pxa2xx_mfp_resume(void) |
369 | { | 369 | { |
370 | int i; | 370 | int i; |
371 | 371 | ||
@@ -376,15 +376,13 @@ static int pxa2xx_mfp_resume(struct sys_device *d) | |||
376 | PGSR(i) = saved_pgsr[i]; | 376 | PGSR(i) = saved_pgsr[i]; |
377 | } | 377 | } |
378 | PSSR = PSSR_RDH | PSSR_PH; | 378 | PSSR = PSSR_RDH | PSSR_PH; |
379 | return 0; | ||
380 | } | 379 | } |
381 | #else | 380 | #else |
382 | #define pxa2xx_mfp_suspend NULL | 381 | #define pxa2xx_mfp_suspend NULL |
383 | #define pxa2xx_mfp_resume NULL | 382 | #define pxa2xx_mfp_resume NULL |
384 | #endif | 383 | #endif |
385 | 384 | ||
386 | struct sysdev_class pxa2xx_mfp_sysclass = { | 385 | struct syscore_ops pxa2xx_mfp_syscore_ops = { |
387 | .name = "mfp", | ||
388 | .suspend = pxa2xx_mfp_suspend, | 386 | .suspend = pxa2xx_mfp_suspend, |
389 | .resume = pxa2xx_mfp_resume, | 387 | .resume = pxa2xx_mfp_resume, |
390 | }; | 388 | }; |
@@ -409,6 +407,6 @@ static int __init pxa2xx_mfp_init(void) | |||
409 | for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) | 407 | for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) |
410 | gpdr_lpm[i] = GPDR(i * 32); | 408 | gpdr_lpm[i] = GPDR(i * 32); |
411 | 409 | ||
412 | return sysdev_class_register(&pxa2xx_mfp_sysclass); | 410 | return 0; |
413 | } | 411 | } |
414 | postcore_initcall(pxa2xx_mfp_init); | 412 | postcore_initcall(pxa2xx_mfp_init); |
diff --git a/arch/arm/mach-pxa/mfp-pxa3xx.c b/arch/arm/mach-pxa/mfp-pxa3xx.c index 7a270eecd480..89863a01ecd7 100644 --- a/arch/arm/mach-pxa/mfp-pxa3xx.c +++ b/arch/arm/mach-pxa/mfp-pxa3xx.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <linux/sysdev.h> | 20 | #include <linux/syscore_ops.h> |
21 | 21 | ||
22 | #include <mach/hardware.h> | 22 | #include <mach/hardware.h> |
23 | #include <mach/mfp-pxa3xx.h> | 23 | #include <mach/mfp-pxa3xx.h> |
@@ -31,13 +31,13 @@ | |||
31 | * a pull-down mode if they're an active low chip select, and we're | 31 | * a pull-down mode if they're an active low chip select, and we're |
32 | * just entering standby. | 32 | * just entering standby. |
33 | */ | 33 | */ |
34 | static int pxa3xx_mfp_suspend(struct sys_device *d, pm_message_t state) | 34 | static int pxa3xx_mfp_suspend(void) |
35 | { | 35 | { |
36 | mfp_config_lpm(); | 36 | mfp_config_lpm(); |
37 | return 0; | 37 | return 0; |
38 | } | 38 | } |
39 | 39 | ||
40 | static int pxa3xx_mfp_resume(struct sys_device *d) | 40 | static void pxa3xx_mfp_resume(void) |
41 | { | 41 | { |
42 | mfp_config_run(); | 42 | mfp_config_run(); |
43 | 43 | ||
@@ -47,24 +47,13 @@ static int pxa3xx_mfp_resume(struct sys_device *d) | |||
47 | * preserve them here in case they will be referenced later | 47 | * preserve them here in case they will be referenced later |
48 | */ | 48 | */ |
49 | ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); | 49 | ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); |
50 | return 0; | ||
51 | } | 50 | } |
52 | #else | 51 | #else |
53 | #define pxa3xx_mfp_suspend NULL | 52 | #define pxa3xx_mfp_suspend NULL |
54 | #define pxa3xx_mfp_resume NULL | 53 | #define pxa3xx_mfp_resume NULL |
55 | #endif | 54 | #endif |
56 | 55 | ||
57 | struct sysdev_class pxa3xx_mfp_sysclass = { | 56 | struct syscore_ops pxa3xx_mfp_syscore_ops = { |
58 | .name = "mfp", | ||
59 | .suspend = pxa3xx_mfp_suspend, | 57 | .suspend = pxa3xx_mfp_suspend, |
60 | .resume = pxa3xx_mfp_resume, | 58 | .resume = pxa3xx_mfp_resume, |
61 | }; | 59 | }; |
62 | |||
63 | static int __init mfp_init_devicefs(void) | ||
64 | { | ||
65 | if (cpu_is_pxa3xx()) | ||
66 | return sysdev_class_register(&pxa3xx_mfp_sysclass); | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | postcore_initcall(mfp_init_devicefs); | ||
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index 23925db8ff74..e3470137c934 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/sysdev.h> | 25 | #include <linux/syscore_ops.h> |
26 | #include <linux/input.h> | 26 | #include <linux/input.h> |
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | #include <linux/gpio_keys.h> | 28 | #include <linux/gpio_keys.h> |
@@ -488,7 +488,7 @@ static void install_bootstrap(void) | |||
488 | } | 488 | } |
489 | 489 | ||
490 | 490 | ||
491 | static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state) | 491 | static int mioa701_sys_suspend(void) |
492 | { | 492 | { |
493 | int i = 0, is_bt_on; | 493 | int i = 0, is_bt_on; |
494 | u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); | 494 | u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); |
@@ -514,7 +514,7 @@ static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state) | |||
514 | return 0; | 514 | return 0; |
515 | } | 515 | } |
516 | 516 | ||
517 | static int mioa701_sys_resume(struct sys_device *sysdev) | 517 | static void mioa701_sys_resume(void) |
518 | { | 518 | { |
519 | int i = 0; | 519 | int i = 0; |
520 | u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); | 520 | u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); |
@@ -527,43 +527,18 @@ static int mioa701_sys_resume(struct sys_device *sysdev) | |||
527 | *mem_resume_enabler = save_buffer[i++]; | 527 | *mem_resume_enabler = save_buffer[i++]; |
528 | *mem_resume_bt = save_buffer[i++]; | 528 | *mem_resume_bt = save_buffer[i++]; |
529 | *mem_resume_unknown = save_buffer[i++]; | 529 | *mem_resume_unknown = save_buffer[i++]; |
530 | |||
531 | return 0; | ||
532 | } | 530 | } |
533 | 531 | ||
534 | static struct sysdev_class mioa701_sysclass = { | 532 | static struct syscore_ops mioa701_syscore_ops = { |
535 | .name = "mioa701", | 533 | .suspend = mioa701_sys_suspend, |
536 | }; | 534 | .resume = mioa701_sys_resume, |
537 | |||
538 | static struct sys_device sysdev_bootstrap = { | ||
539 | .cls = &mioa701_sysclass, | ||
540 | }; | ||
541 | |||
542 | static struct sysdev_driver driver_bootstrap = { | ||
543 | .suspend = &mioa701_sys_suspend, | ||
544 | .resume = &mioa701_sys_resume, | ||
545 | }; | 535 | }; |
546 | 536 | ||
547 | static int __init bootstrap_init(void) | 537 | static int __init bootstrap_init(void) |
548 | { | 538 | { |
549 | int rc; | ||
550 | int save_size = mioa701_bootstrap_lg + (sizeof(u32) * 3); | 539 | int save_size = mioa701_bootstrap_lg + (sizeof(u32) * 3); |
551 | 540 | ||
552 | rc = sysdev_class_register(&mioa701_sysclass); | 541 | register_syscore_ops(&mioa701_syscore_ops); |
553 | if (rc) { | ||
554 | printk(KERN_ERR "Failed registering mioa701 sys class\n"); | ||
555 | return -ENODEV; | ||
556 | } | ||
557 | rc = sysdev_register(&sysdev_bootstrap); | ||
558 | if (rc) { | ||
559 | printk(KERN_ERR "Failed registering mioa701 sys device\n"); | ||
560 | return -ENODEV; | ||
561 | } | ||
562 | rc = sysdev_driver_register(&mioa701_sysclass, &driver_bootstrap); | ||
563 | if (rc) { | ||
564 | printk(KERN_ERR "Failed registering PMU sys driver\n"); | ||
565 | return -ENODEV; | ||
566 | } | ||
567 | 542 | ||
568 | save_buffer = kmalloc(save_size, GFP_KERNEL); | 543 | save_buffer = kmalloc(save_size, GFP_KERNEL); |
569 | if (!save_buffer) | 544 | if (!save_buffer) |
@@ -576,9 +551,7 @@ static int __init bootstrap_init(void) | |||
576 | static void bootstrap_exit(void) | 551 | static void bootstrap_exit(void) |
577 | { | 552 | { |
578 | kfree(save_buffer); | 553 | kfree(save_buffer); |
579 | sysdev_driver_unregister(&mioa701_sysclass, &driver_bootstrap); | 554 | unregister_syscore_ops(&mioa701_syscore_ops); |
580 | sysdev_unregister(&sysdev_bootstrap); | ||
581 | sysdev_class_unregister(&mioa701_sysclass); | ||
582 | 555 | ||
583 | printk(KERN_CRIT "Unregistering mioa701 suspend will hang next" | 556 | printk(KERN_CRIT "Unregistering mioa701 suspend will hang next" |
584 | "resume !!!\n"); | 557 | "resume !!!\n"); |
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c index a6f898cbfac9..4061ecddee70 100644 --- a/arch/arm/mach-pxa/palmld.c +++ b/arch/arm/mach-pxa/palmld.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/gpio.h> | 24 | #include <linux/gpio.h> |
25 | #include <linux/wm97xx.h> | 25 | #include <linux/wm97xx.h> |
26 | #include <linux/power_supply.h> | 26 | #include <linux/power_supply.h> |
27 | #include <linux/sysdev.h> | ||
28 | #include <linux/mtd/mtd.h> | 27 | #include <linux/mtd/mtd.h> |
29 | #include <linux/mtd/partitions.h> | 28 | #include <linux/mtd/partitions.h> |
30 | #include <linux/mtd/physmap.h> | 29 | #include <linux/mtd/physmap.h> |
diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c index 8aadad55fbe4..20d1b18b1733 100644 --- a/arch/arm/mach-pxa/palmtreo.c +++ b/arch/arm/mach-pxa/palmtreo.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/pwm_backlight.h> | 25 | #include <linux/pwm_backlight.h> |
26 | #include <linux/gpio.h> | 26 | #include <linux/gpio.h> |
27 | #include <linux/power_supply.h> | 27 | #include <linux/power_supply.h> |
28 | #include <linux/sysdev.h> | ||
29 | #include <linux/w1-gpio.h> | 28 | #include <linux/w1-gpio.h> |
30 | 29 | ||
31 | #include <asm/mach-types.h> | 30 | #include <asm/mach-types.h> |
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 3b8a4f37dbbe..65f24f0b77e8 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c | |||
@@ -19,7 +19,7 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/sysdev.h> | 22 | #include <linux/syscore_ops.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/gpio_keys.h> | 25 | #include <linux/gpio_keys.h> |
@@ -233,9 +233,9 @@ static struct palmz72_resume_info palmz72_resume_info = { | |||
233 | 233 | ||
234 | static unsigned long store_ptr; | 234 | static unsigned long store_ptr; |
235 | 235 | ||
236 | /* sys_device for Palm Zire 72 PM */ | 236 | /* syscore_ops for Palm Zire 72 PM */ |
237 | 237 | ||
238 | static int palmz72_pm_suspend(struct sys_device *dev, pm_message_t msg) | 238 | static int palmz72_pm_suspend(void) |
239 | { | 239 | { |
240 | /* setup the resume_info struct for the original bootloader */ | 240 | /* setup the resume_info struct for the original bootloader */ |
241 | palmz72_resume_info.resume_addr = (u32) cpu_resume; | 241 | palmz72_resume_info.resume_addr = (u32) cpu_resume; |
@@ -249,31 +249,23 @@ static int palmz72_pm_suspend(struct sys_device *dev, pm_message_t msg) | |||
249 | return 0; | 249 | return 0; |
250 | } | 250 | } |
251 | 251 | ||
252 | static int palmz72_pm_resume(struct sys_device *dev) | 252 | static void palmz72_pm_resume(void) |
253 | { | 253 | { |
254 | *PALMZ72_SAVE_DWORD = store_ptr; | 254 | *PALMZ72_SAVE_DWORD = store_ptr; |
255 | return 0; | ||
256 | } | 255 | } |
257 | 256 | ||
258 | static struct sysdev_class palmz72_pm_sysclass = { | 257 | static struct syscore_ops palmz72_pm_syscore_ops = { |
259 | .name = "palmz72_pm", | ||
260 | .suspend = palmz72_pm_suspend, | 258 | .suspend = palmz72_pm_suspend, |
261 | .resume = palmz72_pm_resume, | 259 | .resume = palmz72_pm_resume, |
262 | }; | 260 | }; |
263 | 261 | ||
264 | static struct sys_device palmz72_pm_device = { | ||
265 | .cls = &palmz72_pm_sysclass, | ||
266 | }; | ||
267 | |||
268 | static int __init palmz72_pm_init(void) | 262 | static int __init palmz72_pm_init(void) |
269 | { | 263 | { |
270 | int ret = -ENODEV; | ||
271 | if (machine_is_palmz72()) { | 264 | if (machine_is_palmz72()) { |
272 | ret = sysdev_class_register(&palmz72_pm_sysclass); | 265 | register_syscore_ops(&palmz72_pm_syscore_ops); |
273 | if (ret == 0) | 266 | return 0; |
274 | ret = sysdev_register(&palmz72_pm_device); | ||
275 | } | 267 | } |
276 | return ret; | 268 | return -ENODEV; |
277 | } | 269 | } |
278 | 270 | ||
279 | device_initcall(palmz72_pm_init); | 271 | device_initcall(palmz72_pm_init); |
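
The palmz72 hunk shows the prototype change that recurs in every file here: the syscore callbacks take no struct sys_device or pm_message_t argument, suspend keeps its int return (a non-zero value aborts the transition), and resume becomes void because it is not allowed to fail. A compact sketch with placeholder names:

#include <linux/syscore_ops.h>

static int example_pm_suspend(void)
{
        /* stash state needed by the bootloader, as the hunk above does */
        return 0;               /* returning non-zero aborts the suspend */
}

static void example_pm_resume(void)
{
        /* put the stashed state back; errors cannot be reported here */
}

static struct syscore_ops example_pm_syscore_ops = {
        .suspend = example_pm_suspend,
        .resume  = example_pm_resume,
};

In the hunk above the registration stays gated on machine_is_palmz72(); since register_syscore_ops() returns void, the init function can return 0 right after registering and -ENODEV otherwise.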
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index a4af8c52d7ee..fed363cec9c6 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/suspend.h> | 23 | #include <linux/suspend.h> |
24 | #include <linux/sysdev.h> | 24 | #include <linux/syscore_ops.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | 26 | ||
27 | #include <asm/mach/map.h> | 27 | #include <asm/mach/map.h> |
@@ -350,21 +350,9 @@ static struct platform_device *pxa25x_devices[] __initdata = { | |||
350 | &pxa_device_asoc_platform, | 350 | &pxa_device_asoc_platform, |
351 | }; | 351 | }; |
352 | 352 | ||
353 | static struct sys_device pxa25x_sysdev[] = { | ||
354 | { | ||
355 | .cls = &pxa_irq_sysclass, | ||
356 | }, { | ||
357 | .cls = &pxa2xx_mfp_sysclass, | ||
358 | }, { | ||
359 | .cls = &pxa_gpio_sysclass, | ||
360 | }, { | ||
361 | .cls = &pxa2xx_clock_sysclass, | ||
362 | } | ||
363 | }; | ||
364 | |||
365 | static int __init pxa25x_init(void) | 353 | static int __init pxa25x_init(void) |
366 | { | 354 | { |
367 | int i, ret = 0; | 355 | int ret = 0; |
368 | 356 | ||
369 | if (cpu_is_pxa25x()) { | 357 | if (cpu_is_pxa25x()) { |
370 | 358 | ||
@@ -377,11 +365,10 @@ static int __init pxa25x_init(void) | |||
377 | 365 | ||
378 | pxa25x_init_pm(); | 366 | pxa25x_init_pm(); |
379 | 367 | ||
380 | for (i = 0; i < ARRAY_SIZE(pxa25x_sysdev); i++) { | 368 | register_syscore_ops(&pxa_irq_syscore_ops); |
381 | ret = sysdev_register(&pxa25x_sysdev[i]); | 369 | register_syscore_ops(&pxa2xx_mfp_syscore_ops); |
382 | if (ret) | 370 | register_syscore_ops(&pxa_gpio_syscore_ops); |
383 | pr_err("failed to register sysdev[%d]\n", i); | 371 | register_syscore_ops(&pxa2xx_clock_syscore_ops); |
384 | } | ||
385 | 372 | ||
386 | ret = platform_add_devices(pxa25x_devices, | 373 | ret = platform_add_devices(pxa25x_devices, |
387 | ARRAY_SIZE(pxa25x_devices)); | 374 | ARRAY_SIZE(pxa25x_devices)); |
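
On the PXA SoC files the conversion also deletes the per-SoC array of struct sys_device and its registration loop: the shared ops objects (pxa_irq_syscore_ops, pxa2xx_mfp_syscore_ops and friends) are presumably defined next to the IRQ/MFP/GPIO/clock code they cover in other hunks of this series, so the SoC init path only has to register them. The same transformation repeats for pxa27x, pxa3xx and pxa95x below. Roughly, with invented names standing in for the shared objects:

#include <linux/init.h>
#include <linux/syscore_ops.h>

/* assumed to be declared in a shared header and defined elsewhere */
extern struct syscore_ops example_irq_syscore_ops;
extern struct syscore_ops example_clock_syscore_ops;

static int __init example_soc_init(void)
{
        /* no array, no loop, no error path: registration cannot fail */
        register_syscore_ops(&example_irq_syscore_ops);
        register_syscore_ops(&example_clock_syscore_ops);

        return 0;
}
postcore_initcall(example_soc_init);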
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 909756eaf4b7..2fecbec58d88 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/suspend.h> | 17 | #include <linux/suspend.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/sysdev.h> | 19 | #include <linux/syscore_ops.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
22 | #include <linux/i2c/pxa-i2c.h> | 22 | #include <linux/i2c/pxa-i2c.h> |
@@ -428,21 +428,9 @@ static struct platform_device *devices[] __initdata = { | |||
428 | &pxa27x_device_pwm1, | 428 | &pxa27x_device_pwm1, |
429 | }; | 429 | }; |
430 | 430 | ||
431 | static struct sys_device pxa27x_sysdev[] = { | ||
432 | { | ||
433 | .cls = &pxa_irq_sysclass, | ||
434 | }, { | ||
435 | .cls = &pxa2xx_mfp_sysclass, | ||
436 | }, { | ||
437 | .cls = &pxa_gpio_sysclass, | ||
438 | }, { | ||
439 | .cls = &pxa2xx_clock_sysclass, | ||
440 | } | ||
441 | }; | ||
442 | |||
443 | static int __init pxa27x_init(void) | 431 | static int __init pxa27x_init(void) |
444 | { | 432 | { |
445 | int i, ret = 0; | 433 | int ret = 0; |
446 | 434 | ||
447 | if (cpu_is_pxa27x()) { | 435 | if (cpu_is_pxa27x()) { |
448 | 436 | ||
@@ -455,11 +443,10 @@ static int __init pxa27x_init(void) | |||
455 | 443 | ||
456 | pxa27x_init_pm(); | 444 | pxa27x_init_pm(); |
457 | 445 | ||
458 | for (i = 0; i < ARRAY_SIZE(pxa27x_sysdev); i++) { | 446 | register_syscore_ops(&pxa_irq_syscore_ops); |
459 | ret = sysdev_register(&pxa27x_sysdev[i]); | 447 | register_syscore_ops(&pxa2xx_mfp_syscore_ops); |
460 | if (ret) | 448 | register_syscore_ops(&pxa_gpio_syscore_ops); |
461 | pr_err("failed to register sysdev[%d]\n", i); | 449 | register_syscore_ops(&pxa2xx_clock_syscore_ops); |
462 | } | ||
463 | 450 | ||
464 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); | 451 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); |
465 | } | 452 | } |
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 8dd107391157..8521d7d6f1da 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/sysdev.h> | 23 | #include <linux/syscore_ops.h> |
24 | #include <linux/i2c/pxa-i2c.h> | 24 | #include <linux/i2c/pxa-i2c.h> |
25 | 25 | ||
26 | #include <asm/mach/map.h> | 26 | #include <asm/mach/map.h> |
@@ -427,21 +427,9 @@ static struct platform_device *devices[] __initdata = { | |||
427 | &pxa27x_device_pwm1, | 427 | &pxa27x_device_pwm1, |
428 | }; | 428 | }; |
429 | 429 | ||
430 | static struct sys_device pxa3xx_sysdev[] = { | ||
431 | { | ||
432 | .cls = &pxa_irq_sysclass, | ||
433 | }, { | ||
434 | .cls = &pxa3xx_mfp_sysclass, | ||
435 | }, { | ||
436 | .cls = &pxa_gpio_sysclass, | ||
437 | }, { | ||
438 | .cls = &pxa3xx_clock_sysclass, | ||
439 | } | ||
440 | }; | ||
441 | |||
442 | static int __init pxa3xx_init(void) | 430 | static int __init pxa3xx_init(void) |
443 | { | 431 | { |
444 | int i, ret = 0; | 432 | int ret = 0; |
445 | 433 | ||
446 | if (cpu_is_pxa3xx()) { | 434 | if (cpu_is_pxa3xx()) { |
447 | 435 | ||
@@ -462,11 +450,10 @@ static int __init pxa3xx_init(void) | |||
462 | 450 | ||
463 | pxa3xx_init_pm(); | 451 | pxa3xx_init_pm(); |
464 | 452 | ||
465 | for (i = 0; i < ARRAY_SIZE(pxa3xx_sysdev); i++) { | 453 | register_syscore_ops(&pxa_irq_syscore_ops); |
466 | ret = sysdev_register(&pxa3xx_sysdev[i]); | 454 | register_syscore_ops(&pxa3xx_mfp_syscore_ops); |
467 | if (ret) | 455 | register_syscore_ops(&pxa_gpio_syscore_ops); |
468 | pr_err("failed to register sysdev[%d]\n", i); | 456 | register_syscore_ops(&pxa3xx_clock_syscore_ops); |
469 | } | ||
470 | 457 | ||
471 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); | 458 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); |
472 | } | 459 | } |
diff --git a/arch/arm/mach-pxa/pxa95x.c b/arch/arm/mach-pxa/pxa95x.c index 23b229bd06e9..ecc82a330fad 100644 --- a/arch/arm/mach-pxa/pxa95x.c +++ b/arch/arm/mach-pxa/pxa95x.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/i2c/pxa-i2c.h> | 18 | #include <linux/i2c/pxa-i2c.h> |
19 | #include <linux/irq.h> | 19 | #include <linux/irq.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/syscore_ops.h> |
22 | 22 | ||
23 | #include <mach/hardware.h> | 23 | #include <mach/hardware.h> |
24 | #include <mach/gpio.h> | 24 | #include <mach/gpio.h> |
@@ -260,16 +260,6 @@ static struct platform_device *devices[] __initdata = { | |||
260 | &pxa27x_device_pwm1, | 260 | &pxa27x_device_pwm1, |
261 | }; | 261 | }; |
262 | 262 | ||
263 | static struct sys_device pxa95x_sysdev[] = { | ||
264 | { | ||
265 | .cls = &pxa_irq_sysclass, | ||
266 | }, { | ||
267 | .cls = &pxa_gpio_sysclass, | ||
268 | }, { | ||
269 | .cls = &pxa3xx_clock_sysclass, | ||
270 | } | ||
271 | }; | ||
272 | |||
273 | static int __init pxa95x_init(void) | 263 | static int __init pxa95x_init(void) |
274 | { | 264 | { |
275 | int ret = 0, i; | 265 | int ret = 0, i; |
@@ -293,11 +283,9 @@ static int __init pxa95x_init(void) | |||
293 | if ((ret = pxa_init_dma(IRQ_DMA, 32))) | 283 | if ((ret = pxa_init_dma(IRQ_DMA, 32))) |
294 | return ret; | 284 | return ret; |
295 | 285 | ||
296 | for (i = 0; i < ARRAY_SIZE(pxa95x_sysdev); i++) { | 286 | register_syscore_ops(&pxa_irq_syscore_ops); |
297 | ret = sysdev_register(&pxa95x_sysdev[i]); | 287 | register_syscore_ops(&pxa_gpio_syscore_ops); |
298 | if (ret) | 288 | register_syscore_ops(&pxa3xx_clock_syscore_ops); |
299 | pr_err("failed to register sysdev[%d]\n", i); | ||
300 | } | ||
301 | 289 | ||
302 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); | 290 | ret = platform_add_devices(devices, ARRAY_SIZE(devices)); |
303 | } | 291 | } |
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index cd1861351f75..d130f77b6d11 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c | |||
@@ -18,7 +18,6 @@ | |||
18 | 18 | ||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/sysdev.h> | ||
22 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
23 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
24 | #include <linux/gpio.h> | 23 | #include <linux/gpio.h> |
diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c index 232b7316ec08..79923058d10f 100644 --- a/arch/arm/mach-pxa/smemc.c +++ b/arch/arm/mach-pxa/smemc.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/io.h> | 8 | #include <linux/io.h> |
9 | #include <linux/sysdev.h> | 9 | #include <linux/syscore_ops.h> |
10 | 10 | ||
11 | #include <mach/hardware.h> | 11 | #include <mach/hardware.h> |
12 | #include <mach/smemc.h> | 12 | #include <mach/smemc.h> |
@@ -16,7 +16,7 @@ static unsigned long msc[2]; | |||
16 | static unsigned long sxcnfg, memclkcfg; | 16 | static unsigned long sxcnfg, memclkcfg; |
17 | static unsigned long csadrcfg[4]; | 17 | static unsigned long csadrcfg[4]; |
18 | 18 | ||
19 | static int pxa3xx_smemc_suspend(struct sys_device *dev, pm_message_t state) | 19 | static int pxa3xx_smemc_suspend(void) |
20 | { | 20 | { |
21 | msc[0] = __raw_readl(MSC0); | 21 | msc[0] = __raw_readl(MSC0); |
22 | msc[1] = __raw_readl(MSC1); | 22 | msc[1] = __raw_readl(MSC1); |
@@ -30,7 +30,7 @@ static int pxa3xx_smemc_suspend(struct sys_device *dev, pm_message_t state) | |||
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
32 | 32 | ||
33 | static int pxa3xx_smemc_resume(struct sys_device *dev) | 33 | static void pxa3xx_smemc_resume(void) |
34 | { | 34 | { |
35 | __raw_writel(msc[0], MSC0); | 35 | __raw_writel(msc[0], MSC0); |
36 | __raw_writel(msc[1], MSC1); | 36 | __raw_writel(msc[1], MSC1); |
@@ -40,34 +40,19 @@ static int pxa3xx_smemc_resume(struct sys_device *dev) | |||
40 | __raw_writel(csadrcfg[1], CSADRCFG1); | 40 | __raw_writel(csadrcfg[1], CSADRCFG1); |
41 | __raw_writel(csadrcfg[2], CSADRCFG2); | 41 | __raw_writel(csadrcfg[2], CSADRCFG2); |
42 | __raw_writel(csadrcfg[3], CSADRCFG3); | 42 | __raw_writel(csadrcfg[3], CSADRCFG3); |
43 | |||
44 | return 0; | ||
45 | } | 43 | } |
46 | 44 | ||
47 | static struct sysdev_class smemc_sysclass = { | 45 | static struct syscore_ops smemc_syscore_ops = { |
48 | .name = "smemc", | ||
49 | .suspend = pxa3xx_smemc_suspend, | 46 | .suspend = pxa3xx_smemc_suspend, |
50 | .resume = pxa3xx_smemc_resume, | 47 | .resume = pxa3xx_smemc_resume, |
51 | }; | 48 | }; |
52 | 49 | ||
53 | static struct sys_device smemc_sysdev = { | ||
54 | .id = 0, | ||
55 | .cls = &smemc_sysclass, | ||
56 | }; | ||
57 | |||
58 | static int __init smemc_init(void) | 50 | static int __init smemc_init(void) |
59 | { | 51 | { |
60 | int ret = 0; | 52 | if (cpu_is_pxa3xx()) |
53 | register_syscore_ops(&smemc_syscore_ops); | ||
61 | 54 | ||
62 | if (cpu_is_pxa3xx()) { | 55 | return 0; |
63 | ret = sysdev_class_register(&smemc_sysclass); | ||
64 | if (ret) | ||
65 | return ret; | ||
66 | |||
67 | ret = sysdev_register(&smemc_sysdev); | ||
68 | } | ||
69 | |||
70 | return ret; | ||
71 | } | 56 | } |
72 | subsys_initcall(smemc_init); | 57 | subsys_initcall(smemc_init); |
73 | #endif | 58 | #endif |
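
The smemc hunk is a small worked example of what most of these syscore users do: latch a handful of memory-controller registers on suspend and write them back on resume. Because the ops are not tied to any device, registration can simply be skipped on CPUs that do not need it and the initcall always returns 0. A sketch of the save/restore shape, using a hypothetical ioremap()ed base instead of the real MSC/CSADRCFG addresses:

#include <linux/io.h>
#include <linux/syscore_ops.h>

static void __iomem *example_base;      /* assumed mapped during boot */
static u32 example_saved[2];

static int example_mc_suspend(void)
{
        example_saved[0] = __raw_readl(example_base + 0x00);
        example_saved[1] = __raw_readl(example_base + 0x04);
        return 0;
}

static void example_mc_resume(void)
{
        __raw_writel(example_saved[0], example_base + 0x00);
        __raw_writel(example_saved[1], example_base + 0x04);
}

static struct syscore_ops example_mc_syscore_ops = {
        .suspend = example_mc_suspend,
        .resume  = example_mc_resume,
};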
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c index b9cfbebdfe9c..687417a93698 100644 --- a/arch/arm/mach-pxa/trizeps4.c +++ b/arch/arm/mach-pxa/trizeps4.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/sysdev.h> | ||
19 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
20 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
21 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index b523f119e0f0..903218eab56d 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/mtd/mtd.h> | 44 | #include <linux/mtd/mtd.h> |
45 | #include <linux/mtd/partitions.h> | 45 | #include <linux/mtd/partitions.h> |
46 | #include <linux/mtd/physmap.h> | 46 | #include <linux/mtd/physmap.h> |
47 | #include <linux/syscore_ops.h> | ||
47 | 48 | ||
48 | #include <mach/pxa25x.h> | 49 | #include <mach/pxa25x.h> |
49 | #include <mach/audio.h> | 50 | #include <mach/audio.h> |
@@ -130,20 +131,19 @@ static u8 viper_hw_version(void) | |||
130 | return v1; | 131 | return v1; |
131 | } | 132 | } |
132 | 133 | ||
133 | /* CPU sysdev */ | 134 | /* CPU system core operations. */ |
134 | static int viper_cpu_suspend(struct sys_device *sysdev, pm_message_t state) | 135 | static int viper_cpu_suspend(void) |
135 | { | 136 | { |
136 | viper_icr_set_bit(VIPER_ICR_R_DIS); | 137 | viper_icr_set_bit(VIPER_ICR_R_DIS); |
137 | return 0; | 138 | return 0; |
138 | } | 139 | } |
139 | 140 | ||
140 | static int viper_cpu_resume(struct sys_device *sysdev) | 141 | static void viper_cpu_resume(void) |
141 | { | 142 | { |
142 | viper_icr_clear_bit(VIPER_ICR_R_DIS); | 143 | viper_icr_clear_bit(VIPER_ICR_R_DIS); |
143 | return 0; | ||
144 | } | 144 | } |
145 | 145 | ||
146 | static struct sysdev_driver viper_cpu_sysdev_driver = { | 146 | static struct syscore_ops viper_cpu_syscore_ops = { |
147 | .suspend = viper_cpu_suspend, | 147 | .suspend = viper_cpu_suspend, |
148 | .resume = viper_cpu_resume, | 148 | .resume = viper_cpu_resume, |
149 | }; | 149 | }; |
@@ -945,7 +945,7 @@ static void __init viper_init(void) | |||
945 | viper_init_vcore_gpios(); | 945 | viper_init_vcore_gpios(); |
946 | viper_init_cpufreq(); | 946 | viper_init_cpufreq(); |
947 | 947 | ||
948 | sysdev_driver_register(&cpu_sysdev_class, &viper_cpu_sysdev_driver); | 948 | register_syscore_ops(&viper_cpu_syscore_ops); |
949 | 949 | ||
950 | if (version) { | 950 | if (version) { |
951 | pr_info("viper: hardware v%di%d detected. " | 951 | pr_info("viper: hardware v%di%d detected. " |
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c index f71d377c8640..67bd41488bf8 100644 --- a/arch/arm/mach-pxa/vpac270.c +++ b/arch/arm/mach-pxa/vpac270.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/gpio_keys.h> | 16 | #include <linux/gpio_keys.h> |
17 | #include <linux/input.h> | 17 | #include <linux/input.h> |
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/sysdev.h> | ||
20 | #include <linux/usb/gpio_vbus.h> | 19 | #include <linux/usb/gpio_vbus.h> |
21 | #include <linux/mtd/mtd.h> | 20 | #include <linux/mtd/mtd.h> |
22 | #include <linux/mtd/partitions.h> | 21 | #include <linux/mtd/partitions.h> |
diff --git a/arch/arm/mach-realview/include/mach/barriers.h b/arch/arm/mach-realview/include/mach/barriers.h index 0c5d749d7b5f..9a732195aa1c 100644 --- a/arch/arm/mach-realview/include/mach/barriers.h +++ b/arch/arm/mach-realview/include/mach/barriers.h | |||
@@ -4,5 +4,5 @@ | |||
4 | * operation to deadlock the system. | 4 | * operation to deadlock the system. |
5 | */ | 5 | */ |
6 | #define mb() dsb() | 6 | #define mb() dsb() |
7 | #define rmb() dmb() | 7 | #define rmb() dsb() |
8 | #define wmb() mb() | 8 | #define wmb() mb() |
diff --git a/arch/arm/mach-s3c2410/irq.c b/arch/arm/mach-s3c2410/irq.c index 5e2f35332056..2854129f8cc7 100644 --- a/arch/arm/mach-s3c2410/irq.c +++ b/arch/arm/mach-s3c2410/irq.c | |||
@@ -23,38 +23,12 @@ | |||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/ioport.h> | 25 | #include <linux/ioport.h> |
26 | #include <linux/sysdev.h> | 26 | #include <linux/syscore_ops.h> |
27 | 27 | ||
28 | #include <plat/cpu.h> | 28 | #include <plat/cpu.h> |
29 | #include <plat/pm.h> | 29 | #include <plat/pm.h> |
30 | 30 | ||
31 | static int s3c2410_irq_add(struct sys_device *sysdev) | 31 | struct syscore_ops s3c24xx_irq_syscore_ops = { |
32 | { | ||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | static struct sysdev_driver s3c2410_irq_driver = { | ||
37 | .add = s3c2410_irq_add, | ||
38 | .suspend = s3c24xx_irq_suspend, | 32 | .suspend = s3c24xx_irq_suspend, |
39 | .resume = s3c24xx_irq_resume, | 33 | .resume = s3c24xx_irq_resume, |
40 | }; | 34 | }; |
41 | |||
42 | static int __init s3c2410_irq_init(void) | ||
43 | { | ||
44 | return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_irq_driver); | ||
45 | } | ||
46 | |||
47 | arch_initcall(s3c2410_irq_init); | ||
48 | |||
49 | static struct sysdev_driver s3c2410a_irq_driver = { | ||
50 | .add = s3c2410_irq_add, | ||
51 | .suspend = s3c24xx_irq_suspend, | ||
52 | .resume = s3c24xx_irq_resume, | ||
53 | }; | ||
54 | |||
55 | static int __init s3c2410a_irq_init(void) | ||
56 | { | ||
57 | return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_irq_driver); | ||
58 | } | ||
59 | |||
60 | arch_initcall(s3c2410a_irq_init); | ||
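
For the S3C2410 interrupt code the change goes a little further: two per-SoC sysdev_driver instances with dummy add() hooks and two arch_initcalls are replaced by a single, non-static ops object with no initcall in this file at all; registration now happens from the SoC init functions (see the s3c2410.c hunk below). Schematically, assuming the suspend/resume helpers are shared and declared elsewhere:

#include <linux/syscore_ops.h>

/* shared helpers, assumed to be declared in a platform header */
extern int example_irq_suspend(void);
extern void example_irq_resume(void);

/* one shared object, no dummy add() hooks, no per-SoC initcalls */
struct syscore_ops example_irq_syscore_ops = {
        .suspend = example_irq_suspend,
        .resume  = example_irq_resume,
};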
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c index 2970ea9f7c2b..1e2d536adda9 100644 --- a/arch/arm/mach-s3c2410/mach-bast.c +++ b/arch/arm/mach-s3c2410/mach-bast.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
20 | #include <linux/sysdev.h> | 20 | #include <linux/syscore_ops.h> |
21 | #include <linux/serial_core.h> | 21 | #include <linux/serial_core.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/dm9000.h> | 23 | #include <linux/dm9000.h> |
@@ -214,17 +214,16 @@ static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = { | |||
214 | /* NAND Flash on BAST board */ | 214 | /* NAND Flash on BAST board */ |
215 | 215 | ||
216 | #ifdef CONFIG_PM | 216 | #ifdef CONFIG_PM |
217 | static int bast_pm_suspend(struct sys_device *sd, pm_message_t state) | 217 | static int bast_pm_suspend(void) |
218 | { | 218 | { |
219 | /* ensure that an nRESET is not generated on resume. */ | 219 | /* ensure that an nRESET is not generated on resume. */ |
220 | gpio_direction_output(S3C2410_GPA(21), 1); | 220 | gpio_direction_output(S3C2410_GPA(21), 1); |
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
223 | 223 | ||
224 | static int bast_pm_resume(struct sys_device *sd) | 224 | static void bast_pm_resume(void) |
225 | { | 225 | { |
226 | s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); | 226 | s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); |
227 | return 0; | ||
228 | } | 227 | } |
229 | 228 | ||
230 | #else | 229 | #else |
@@ -232,16 +231,11 @@ static int bast_pm_resume(struct sys_device *sd) | |||
232 | #define bast_pm_resume NULL | 231 | #define bast_pm_resume NULL |
233 | #endif | 232 | #endif |
234 | 233 | ||
235 | static struct sysdev_class bast_pm_sysclass = { | 234 | static struct syscore_ops bast_pm_syscore_ops = { |
236 | .name = "mach-bast", | ||
237 | .suspend = bast_pm_suspend, | 235 | .suspend = bast_pm_suspend, |
238 | .resume = bast_pm_resume, | 236 | .resume = bast_pm_resume, |
239 | }; | 237 | }; |
240 | 238 | ||
241 | static struct sys_device bast_pm_sysdev = { | ||
242 | .cls = &bast_pm_sysclass, | ||
243 | }; | ||
244 | |||
245 | static int smartmedia_map[] = { 0 }; | 239 | static int smartmedia_map[] = { 0 }; |
246 | static int chip0_map[] = { 1 }; | 240 | static int chip0_map[] = { 1 }; |
247 | static int chip1_map[] = { 2 }; | 241 | static int chip1_map[] = { 2 }; |
@@ -642,8 +636,7 @@ static void __init bast_map_io(void) | |||
642 | 636 | ||
643 | static void __init bast_init(void) | 637 | static void __init bast_init(void) |
644 | { | 638 | { |
645 | sysdev_class_register(&bast_pm_sysclass); | 639 | register_syscore_ops(&bast_pm_syscore_ops); |
646 | sysdev_register(&bast_pm_sysdev); | ||
647 | 640 | ||
648 | s3c_i2c0_set_platdata(&bast_i2c_info); | 641 | s3c_i2c0_set_platdata(&bast_i2c_info); |
649 | s3c_nand_set_platdata(&bast_nand_info); | 642 | s3c_nand_set_platdata(&bast_nand_info); |
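
The BAST board (like the Jive and Osiris boards further down) keeps the usual CONFIG_PM idiom: the callbacks are only compiled when power management is enabled and fall back to NULL otherwise, which works because the syscore core skips NULL methods. Schematically, with invented names:

#include <linux/syscore_ops.h>

#ifdef CONFIG_PM
static int example_board_suspend(void)
{
        /* board quirk before sleep, e.g. parking a reset line */
        return 0;
}

static void example_board_resume(void)
{
        /* undo the quirk */
}
#else
#define example_board_suspend NULL
#define example_board_resume  NULL
#endif

static struct syscore_ops example_board_syscore_ops = {
        .suspend = example_board_suspend,
        .resume  = example_board_resume,
};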
diff --git a/arch/arm/mach-s3c2410/pm.c b/arch/arm/mach-s3c2410/pm.c index 725636fc4dc3..4728f9aa7df1 100644 --- a/arch/arm/mach-s3c2410/pm.c +++ b/arch/arm/mach-s3c2410/pm.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/time.h> | 26 | #include <linux/time.h> |
27 | #include <linux/sysdev.h> | 27 | #include <linux/sysdev.h> |
28 | #include <linux/syscore_ops.h> | ||
28 | #include <linux/gpio.h> | 29 | #include <linux/gpio.h> |
29 | #include <linux/io.h> | 30 | #include <linux/io.h> |
30 | 31 | ||
@@ -92,7 +93,7 @@ static void s3c2410_pm_prepare(void) | |||
92 | } | 93 | } |
93 | } | 94 | } |
94 | 95 | ||
95 | static int s3c2410_pm_resume(struct sys_device *dev) | 96 | static void s3c2410_pm_resume(void) |
96 | { | 97 | { |
97 | unsigned long tmp; | 98 | unsigned long tmp; |
98 | 99 | ||
@@ -104,10 +105,12 @@ static int s3c2410_pm_resume(struct sys_device *dev) | |||
104 | 105 | ||
105 | if ( machine_is_aml_m5900() ) | 106 | if ( machine_is_aml_m5900() ) |
106 | s3c2410_gpio_setpin(S3C2410_GPF(2), 0); | 107 | s3c2410_gpio_setpin(S3C2410_GPF(2), 0); |
107 | |||
108 | return 0; | ||
109 | } | 108 | } |
110 | 109 | ||
110 | struct syscore_ops s3c2410_pm_syscore_ops = { | ||
111 | .resume = s3c2410_pm_resume, | ||
112 | }; | ||
113 | |||
111 | static int s3c2410_pm_add(struct sys_device *dev) | 114 | static int s3c2410_pm_add(struct sys_device *dev) |
112 | { | 115 | { |
113 | pm_cpu_prep = s3c2410_pm_prepare; | 116 | pm_cpu_prep = s3c2410_pm_prepare; |
@@ -119,7 +122,6 @@ static int s3c2410_pm_add(struct sys_device *dev) | |||
119 | #if defined(CONFIG_CPU_S3C2410) | 122 | #if defined(CONFIG_CPU_S3C2410) |
120 | static struct sysdev_driver s3c2410_pm_driver = { | 123 | static struct sysdev_driver s3c2410_pm_driver = { |
121 | .add = s3c2410_pm_add, | 124 | .add = s3c2410_pm_add, |
122 | .resume = s3c2410_pm_resume, | ||
123 | }; | 125 | }; |
124 | 126 | ||
125 | /* register ourselves */ | 127 | /* register ourselves */ |
@@ -133,7 +135,6 @@ arch_initcall(s3c2410_pm_drvinit); | |||
133 | 135 | ||
134 | static struct sysdev_driver s3c2410a_pm_driver = { | 136 | static struct sysdev_driver s3c2410a_pm_driver = { |
135 | .add = s3c2410_pm_add, | 137 | .add = s3c2410_pm_add, |
136 | .resume = s3c2410_pm_resume, | ||
137 | }; | 138 | }; |
138 | 139 | ||
139 | static int __init s3c2410a_pm_drvinit(void) | 140 | static int __init s3c2410a_pm_drvinit(void) |
@@ -147,7 +148,6 @@ arch_initcall(s3c2410a_pm_drvinit); | |||
147 | #if defined(CONFIG_CPU_S3C2440) | 148 | #if defined(CONFIG_CPU_S3C2440) |
148 | static struct sysdev_driver s3c2440_pm_driver = { | 149 | static struct sysdev_driver s3c2440_pm_driver = { |
149 | .add = s3c2410_pm_add, | 150 | .add = s3c2410_pm_add, |
150 | .resume = s3c2410_pm_resume, | ||
151 | }; | 151 | }; |
152 | 152 | ||
153 | static int __init s3c2440_pm_drvinit(void) | 153 | static int __init s3c2440_pm_drvinit(void) |
@@ -161,7 +161,6 @@ arch_initcall(s3c2440_pm_drvinit); | |||
161 | #if defined(CONFIG_CPU_S3C2442) | 161 | #if defined(CONFIG_CPU_S3C2442) |
162 | static struct sysdev_driver s3c2442_pm_driver = { | 162 | static struct sysdev_driver s3c2442_pm_driver = { |
163 | .add = s3c2410_pm_add, | 163 | .add = s3c2410_pm_add, |
164 | .resume = s3c2410_pm_resume, | ||
165 | }; | 164 | }; |
166 | 165 | ||
167 | static int __init s3c2442_pm_drvinit(void) | 166 | static int __init s3c2442_pm_drvinit(void) |
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c index adc90a3c5890..f1d3bd8f6f17 100644 --- a/arch/arm/mach-s3c2410/s3c2410.c +++ b/arch/arm/mach-s3c2410/s3c2410.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
20 | #include <linux/clk.h> | 20 | #include <linux/clk.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
22 | #include <linux/syscore_ops.h> | ||
22 | #include <linux/serial_core.h> | 23 | #include <linux/serial_core.h> |
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
24 | #include <linux/io.h> | 25 | #include <linux/io.h> |
@@ -40,6 +41,7 @@ | |||
40 | #include <plat/devs.h> | 41 | #include <plat/devs.h> |
41 | #include <plat/clock.h> | 42 | #include <plat/clock.h> |
42 | #include <plat/pll.h> | 43 | #include <plat/pll.h> |
44 | #include <plat/pm.h> | ||
43 | 45 | ||
44 | #include <plat/gpio-core.h> | 46 | #include <plat/gpio-core.h> |
45 | #include <plat/gpio-cfg.h> | 47 | #include <plat/gpio-cfg.h> |
@@ -168,6 +170,9 @@ int __init s3c2410_init(void) | |||
168 | { | 170 | { |
169 | printk("S3C2410: Initialising architecture\n"); | 171 | printk("S3C2410: Initialising architecture\n"); |
170 | 172 | ||
173 | register_syscore_ops(&s3c2410_pm_syscore_ops); | ||
174 | register_syscore_ops(&s3c24xx_irq_syscore_ops); | ||
175 | |||
171 | return sysdev_register(&s3c2410_sysdev); | 176 | return sysdev_register(&s3c2410_sysdev); |
172 | } | 177 | } |
173 | 178 | ||
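
Across the Samsung pm.c files the sysdev_driver structures are not removed outright: they keep their .add() hook (which installs the CPU-specific prepare/sleep pointers) while the resume work migrates into a non-static struct syscore_ops, presumably declared in plat/pm.h given the include added above, and registered from the SoC init function together with the shared IRQ ops. A rough sketch of that split, with invented names:

#include <linux/sysdev.h>
#include <linux/syscore_ops.h>

/* the sysdev driver keeps only its probe-time hook; it is still
 * registered through sysdev_driver_register() elsewhere */
static int example_pm_add(struct sys_device *sysdev)
{
        /* install cpu-specific prepare/sleep callbacks here */
        return 0;
}

static struct sysdev_driver example_pm_driver = {
        .add = example_pm_add,
};

/* the resume path now lives in a globally visible syscore object */
static void example_pm_resume(void)
{
        /* clear wake-up status, restore core registers, ... */
}

struct syscore_ops example_pm_syscore_ops = {
        .resume = example_pm_resume,
};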
diff --git a/arch/arm/mach-s3c2412/irq.c b/arch/arm/mach-s3c2412/irq.c index f3355d2ec634..1a1aa220972b 100644 --- a/arch/arm/mach-s3c2412/irq.c +++ b/arch/arm/mach-s3c2412/irq.c | |||
@@ -202,8 +202,6 @@ static int s3c2412_irq_add(struct sys_device *sysdev) | |||
202 | 202 | ||
203 | static struct sysdev_driver s3c2412_irq_driver = { | 203 | static struct sysdev_driver s3c2412_irq_driver = { |
204 | .add = s3c2412_irq_add, | 204 | .add = s3c2412_irq_add, |
205 | .suspend = s3c24xx_irq_suspend, | ||
206 | .resume = s3c24xx_irq_resume, | ||
207 | }; | 205 | }; |
208 | 206 | ||
209 | static int s3c2412_irq_init(void) | 207 | static int s3c2412_irq_init(void) |
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c index 923e01bdf017..85dcaeb9e62f 100644 --- a/arch/arm/mach-s3c2412/mach-jive.c +++ b/arch/arm/mach-s3c2412/mach-jive.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
20 | #include <linux/sysdev.h> | 20 | #include <linux/syscore_ops.h> |
21 | #include <linux/serial_core.h> | 21 | #include <linux/serial_core.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/i2c.h> | 23 | #include <linux/i2c.h> |
@@ -486,7 +486,7 @@ static struct s3c2410_udc_mach_info jive_udc_cfg __initdata = { | |||
486 | /* Jive power management device */ | 486 | /* Jive power management device */ |
487 | 487 | ||
488 | #ifdef CONFIG_PM | 488 | #ifdef CONFIG_PM |
489 | static int jive_pm_suspend(struct sys_device *sd, pm_message_t state) | 489 | static int jive_pm_suspend(void) |
490 | { | 490 | { |
491 | /* Write the magic value u-boot uses to check for resume into | 491 | /* Write the magic value u-boot uses to check for resume into |
492 | * the INFORM0 register, and ensure INFORM1 is set to the | 492 | * the INFORM0 register, and ensure INFORM1 is set to the |
@@ -498,10 +498,9 @@ static int jive_pm_suspend(struct sys_device *sd, pm_message_t state) | |||
498 | return 0; | 498 | return 0; |
499 | } | 499 | } |
500 | 500 | ||
501 | static int jive_pm_resume(struct sys_device *sd) | 501 | static void jive_pm_resume(void) |
502 | { | 502 | { |
503 | __raw_writel(0x0, S3C2412_INFORM0); | 503 | __raw_writel(0x0, S3C2412_INFORM0); |
504 | return 0; | ||
505 | } | 504 | } |
506 | 505 | ||
507 | #else | 506 | #else |
@@ -509,16 +508,11 @@ static int jive_pm_resume(struct sys_device *sd) | |||
509 | #define jive_pm_resume NULL | 508 | #define jive_pm_resume NULL |
510 | #endif | 509 | #endif |
511 | 510 | ||
512 | static struct sysdev_class jive_pm_sysclass = { | 511 | static struct syscore_ops jive_pm_syscore_ops = { |
513 | .name = "jive-pm", | ||
514 | .suspend = jive_pm_suspend, | 512 | .suspend = jive_pm_suspend, |
515 | .resume = jive_pm_resume, | 513 | .resume = jive_pm_resume, |
516 | }; | 514 | }; |
517 | 515 | ||
518 | static struct sys_device jive_pm_sysdev = { | ||
519 | .cls = &jive_pm_sysclass, | ||
520 | }; | ||
521 | |||
522 | static void __init jive_map_io(void) | 516 | static void __init jive_map_io(void) |
523 | { | 517 | { |
524 | s3c24xx_init_io(jive_iodesc, ARRAY_SIZE(jive_iodesc)); | 518 | s3c24xx_init_io(jive_iodesc, ARRAY_SIZE(jive_iodesc)); |
@@ -536,10 +530,9 @@ static void jive_power_off(void) | |||
536 | 530 | ||
537 | static void __init jive_machine_init(void) | 531 | static void __init jive_machine_init(void) |
538 | { | 532 | { |
539 | /* register system devices for managing low level suspend */ | 533 | /* register system core operations for managing low level suspend */ |
540 | 534 | ||
541 | sysdev_class_register(&jive_pm_sysclass); | 535 | register_syscore_ops(&jive_pm_syscore_ops); |
542 | sysdev_register(&jive_pm_sysdev); | ||
543 | 536 | ||
544 | /* write our sleep configurations for the IO. Pull down all unused | 537 | /* write our sleep configurations for the IO. Pull down all unused |
545 | * IO, ensure that we have turned off all peripherals we do not | 538 | * IO, ensure that we have turned off all peripherals we do not |
diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c index a7417c479ffe..752b13a7b3db 100644 --- a/arch/arm/mach-s3c2412/pm.c +++ b/arch/arm/mach-s3c2412/pm.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/sysdev.h> | 19 | #include <linux/sysdev.h> |
20 | #include <linux/syscore_ops.h> | ||
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
21 | #include <linux/io.h> | 22 | #include <linux/io.h> |
22 | 23 | ||
@@ -86,13 +87,24 @@ static struct sleep_save s3c2412_sleep[] = { | |||
86 | SAVE_ITEM(S3C2413_GPJSLPCON), | 87 | SAVE_ITEM(S3C2413_GPJSLPCON), |
87 | }; | 88 | }; |
88 | 89 | ||
89 | static int s3c2412_pm_suspend(struct sys_device *dev, pm_message_t state) | 90 | static struct sysdev_driver s3c2412_pm_driver = { |
91 | .add = s3c2412_pm_add, | ||
92 | }; | ||
93 | |||
94 | static __init int s3c2412_pm_init(void) | ||
95 | { | ||
96 | return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver); | ||
97 | } | ||
98 | |||
99 | arch_initcall(s3c2412_pm_init); | ||
100 | |||
101 | static int s3c2412_pm_suspend(void) | ||
90 | { | 102 | { |
91 | s3c_pm_do_save(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); | 103 | s3c_pm_do_save(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); |
92 | return 0; | 104 | return 0; |
93 | } | 105 | } |
94 | 106 | ||
95 | static int s3c2412_pm_resume(struct sys_device *dev) | 107 | static void s3c2412_pm_resume(void) |
96 | { | 108 | { |
97 | unsigned long tmp; | 109 | unsigned long tmp; |
98 | 110 | ||
@@ -102,18 +114,9 @@ static int s3c2412_pm_resume(struct sys_device *dev) | |||
102 | __raw_writel(tmp, S3C2412_PWRCFG); | 114 | __raw_writel(tmp, S3C2412_PWRCFG); |
103 | 115 | ||
104 | s3c_pm_do_restore(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); | 116 | s3c_pm_do_restore(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); |
105 | return 0; | ||
106 | } | 117 | } |
107 | 118 | ||
108 | static struct sysdev_driver s3c2412_pm_driver = { | 119 | struct syscore_ops s3c2412_pm_syscore_ops = { |
109 | .add = s3c2412_pm_add, | ||
110 | .suspend = s3c2412_pm_suspend, | 120 | .suspend = s3c2412_pm_suspend, |
111 | .resume = s3c2412_pm_resume, | 121 | .resume = s3c2412_pm_resume, |
112 | }; | 122 | }; |
113 | |||
114 | static __init int s3c2412_pm_init(void) | ||
115 | { | ||
116 | return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver); | ||
117 | } | ||
118 | |||
119 | arch_initcall(s3c2412_pm_init); | ||
diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c index 4c6df51ddf33..ef0958d3e5c6 100644 --- a/arch/arm/mach-s3c2412/s3c2412.c +++ b/arch/arm/mach-s3c2412/s3c2412.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
22 | #include <linux/syscore_ops.h> | ||
22 | #include <linux/serial_core.h> | 23 | #include <linux/serial_core.h> |
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
24 | #include <linux/io.h> | 25 | #include <linux/io.h> |
@@ -244,5 +245,8 @@ int __init s3c2412_init(void) | |||
244 | { | 245 | { |
245 | printk("S3C2412: Initialising architecture\n"); | 246 | printk("S3C2412: Initialising architecture\n"); |
246 | 247 | ||
248 | register_syscore_ops(&s3c2412_pm_syscore_ops); | ||
249 | register_syscore_ops(&s3c24xx_irq_syscore_ops); | ||
250 | |||
247 | return sysdev_register(&s3c2412_sysdev); | 251 | return sysdev_register(&s3c2412_sysdev); |
248 | } | 252 | } |
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c index 77b38f2381c1..28ad20d42445 100644 --- a/arch/arm/mach-s3c2416/irq.c +++ b/arch/arm/mach-s3c2416/irq.c | |||
@@ -236,8 +236,6 @@ static int __init s3c2416_irq_add(struct sys_device *sysdev) | |||
236 | 236 | ||
237 | static struct sysdev_driver s3c2416_irq_driver = { | 237 | static struct sysdev_driver s3c2416_irq_driver = { |
238 | .add = s3c2416_irq_add, | 238 | .add = s3c2416_irq_add, |
239 | .suspend = s3c24xx_irq_suspend, | ||
240 | .resume = s3c24xx_irq_resume, | ||
241 | }; | 239 | }; |
242 | 240 | ||
243 | static int __init s3c2416_irq_init(void) | 241 | static int __init s3c2416_irq_init(void) |
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c index 4a04205b04d5..41db2b21e213 100644 --- a/arch/arm/mach-s3c2416/pm.c +++ b/arch/arm/mach-s3c2416/pm.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/sysdev.h> | 13 | #include <linux/sysdev.h> |
14 | #include <linux/syscore_ops.h> | ||
14 | #include <linux/io.h> | 15 | #include <linux/io.h> |
15 | 16 | ||
16 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
@@ -55,30 +56,26 @@ static int s3c2416_pm_add(struct sys_device *sysdev) | |||
55 | return 0; | 56 | return 0; |
56 | } | 57 | } |
57 | 58 | ||
58 | static int s3c2416_pm_suspend(struct sys_device *dev, pm_message_t state) | 59 | static struct sysdev_driver s3c2416_pm_driver = { |
60 | .add = s3c2416_pm_add, | ||
61 | }; | ||
62 | |||
63 | static __init int s3c2416_pm_init(void) | ||
59 | { | 64 | { |
60 | return 0; | 65 | return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver); |
61 | } | 66 | } |
62 | 67 | ||
63 | static int s3c2416_pm_resume(struct sys_device *dev) | 68 | arch_initcall(s3c2416_pm_init); |
69 | |||
70 | |||
71 | static void s3c2416_pm_resume(void) | ||
64 | { | 72 | { |
65 | /* unset the return-from-sleep and inform flags */ | 73 | /* unset the return-from-sleep and inform flags */ |
66 | __raw_writel(0x0, S3C2443_PWRMODE); | 74 | __raw_writel(0x0, S3C2443_PWRMODE); |
67 | __raw_writel(0x0, S3C2412_INFORM0); | 75 | __raw_writel(0x0, S3C2412_INFORM0); |
68 | __raw_writel(0x0, S3C2412_INFORM1); | 76 | __raw_writel(0x0, S3C2412_INFORM1); |
69 | |||
70 | return 0; | ||
71 | } | 77 | } |
72 | 78 | ||
73 | static struct sysdev_driver s3c2416_pm_driver = { | 79 | struct syscore_ops s3c2416_pm_syscore_ops = { |
74 | .add = s3c2416_pm_add, | ||
75 | .suspend = s3c2416_pm_suspend, | ||
76 | .resume = s3c2416_pm_resume, | 80 | .resume = s3c2416_pm_resume, |
77 | }; | 81 | }; |
78 | |||
79 | static __init int s3c2416_pm_init(void) | ||
80 | { | ||
81 | return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver); | ||
82 | } | ||
83 | |||
84 | arch_initcall(s3c2416_pm_init); | ||
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c index ba7fd8737434..494ce913dc95 100644 --- a/arch/arm/mach-s3c2416/s3c2416.c +++ b/arch/arm/mach-s3c2416/s3c2416.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
33 | #include <linux/serial_core.h> | 33 | #include <linux/serial_core.h> |
34 | #include <linux/sysdev.h> | 34 | #include <linux/sysdev.h> |
35 | #include <linux/syscore_ops.h> | ||
35 | #include <linux/clk.h> | 36 | #include <linux/clk.h> |
36 | #include <linux/io.h> | 37 | #include <linux/io.h> |
37 | 38 | ||
@@ -54,6 +55,7 @@ | |||
54 | #include <plat/devs.h> | 55 | #include <plat/devs.h> |
55 | #include <plat/cpu.h> | 56 | #include <plat/cpu.h> |
56 | #include <plat/sdhci.h> | 57 | #include <plat/sdhci.h> |
58 | #include <plat/pm.h> | ||
57 | 59 | ||
58 | #include <plat/iic-core.h> | 60 | #include <plat/iic-core.h> |
59 | #include <plat/fb-core.h> | 61 | #include <plat/fb-core.h> |
@@ -95,6 +97,9 @@ int __init s3c2416_init(void) | |||
95 | 97 | ||
96 | s3c_fb_setname("s3c2443-fb"); | 98 | s3c_fb_setname("s3c2443-fb"); |
97 | 99 | ||
100 | register_syscore_ops(&s3c2416_pm_syscore_ops); | ||
101 | register_syscore_ops(&s3c24xx_irq_syscore_ops); | ||
102 | |||
98 | return sysdev_register(&s3c2416_sysdev); | 103 | return sysdev_register(&s3c2416_sysdev); |
99 | } | 104 | } |
100 | 105 | ||
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c index 14dc67897757..d88536393310 100644 --- a/arch/arm/mach-s3c2440/mach-osiris.c +++ b/arch/arm/mach-s3c2440/mach-osiris.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/device.h> | 19 | #include <linux/device.h> |
20 | #include <linux/sysdev.h> | 20 | #include <linux/syscore_ops.h> |
21 | #include <linux/serial_core.h> | 21 | #include <linux/serial_core.h> |
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/i2c.h> | 23 | #include <linux/i2c.h> |
@@ -284,7 +284,7 @@ static struct platform_device osiris_pcmcia = { | |||
284 | #ifdef CONFIG_PM | 284 | #ifdef CONFIG_PM |
285 | static unsigned char pm_osiris_ctrl0; | 285 | static unsigned char pm_osiris_ctrl0; |
286 | 286 | ||
287 | static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state) | 287 | static int osiris_pm_suspend(void) |
288 | { | 288 | { |
289 | unsigned int tmp; | 289 | unsigned int tmp; |
290 | 290 | ||
@@ -304,7 +304,7 @@ static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state) | |||
304 | return 0; | 304 | return 0; |
305 | } | 305 | } |
306 | 306 | ||
307 | static int osiris_pm_resume(struct sys_device *sd) | 307 | static void osiris_pm_resume(void) |
308 | { | 308 | { |
309 | if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8) | 309 | if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8) |
310 | __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1); | 310 | __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1); |
@@ -312,8 +312,6 @@ static int osiris_pm_resume(struct sys_device *sd) | |||
312 | __raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0); | 312 | __raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0); |
313 | 313 | ||
314 | s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); | 314 | s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); |
315 | |||
316 | return 0; | ||
317 | } | 315 | } |
318 | 316 | ||
319 | #else | 317 | #else |
@@ -321,16 +319,11 @@ static int osiris_pm_resume(struct sys_device *sd) | |||
321 | #define osiris_pm_resume NULL | 319 | #define osiris_pm_resume NULL |
322 | #endif | 320 | #endif |
323 | 321 | ||
324 | static struct sysdev_class osiris_pm_sysclass = { | 322 | static struct syscore_ops osiris_pm_syscore_ops = { |
325 | .name = "mach-osiris", | ||
326 | .suspend = osiris_pm_suspend, | 323 | .suspend = osiris_pm_suspend, |
327 | .resume = osiris_pm_resume, | 324 | .resume = osiris_pm_resume, |
328 | }; | 325 | }; |
329 | 326 | ||
330 | static struct sys_device osiris_pm_sysdev = { | ||
331 | .cls = &osiris_pm_sysclass, | ||
332 | }; | ||
333 | |||
334 | /* Link for DVS driver to TPS65011 */ | 327 | /* Link for DVS driver to TPS65011 */ |
335 | 328 | ||
336 | static void osiris_tps_release(struct device *dev) | 329 | static void osiris_tps_release(struct device *dev) |
@@ -439,8 +432,7 @@ static void __init osiris_map_io(void) | |||
439 | 432 | ||
440 | static void __init osiris_init(void) | 433 | static void __init osiris_init(void) |
441 | { | 434 | { |
442 | sysdev_class_register(&osiris_pm_sysclass); | 435 | register_syscore_ops(&osiris_pm_syscore_ops); |
443 | sysdev_register(&osiris_pm_sysdev); | ||
444 | 436 | ||
445 | s3c_i2c0_set_platdata(NULL); | 437 | s3c_i2c0_set_platdata(NULL); |
446 | s3c_nand_set_platdata(&osiris_nand_info); | 438 | s3c_nand_set_platdata(&osiris_nand_info); |
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c index f7663f731ea0..ce99ff72838d 100644 --- a/arch/arm/mach-s3c2440/s3c2440.c +++ b/arch/arm/mach-s3c2440/s3c2440.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/serial_core.h> | 20 | #include <linux/serial_core.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
22 | #include <linux/syscore_ops.h> | ||
22 | #include <linux/gpio.h> | 23 | #include <linux/gpio.h> |
23 | #include <linux/clk.h> | 24 | #include <linux/clk.h> |
24 | #include <linux/io.h> | 25 | #include <linux/io.h> |
@@ -33,6 +34,7 @@ | |||
33 | #include <plat/devs.h> | 34 | #include <plat/devs.h> |
34 | #include <plat/cpu.h> | 35 | #include <plat/cpu.h> |
35 | #include <plat/s3c244x.h> | 36 | #include <plat/s3c244x.h> |
37 | #include <plat/pm.h> | ||
36 | 38 | ||
37 | #include <plat/gpio-core.h> | 39 | #include <plat/gpio-core.h> |
38 | #include <plat/gpio-cfg.h> | 40 | #include <plat/gpio-cfg.h> |
@@ -51,6 +53,12 @@ int __init s3c2440_init(void) | |||
51 | s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT; | 53 | s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT; |
52 | s3c_device_wdt.resource[1].end = IRQ_S3C2440_WDT; | 54 | s3c_device_wdt.resource[1].end = IRQ_S3C2440_WDT; |
53 | 55 | ||
56 | /* register suspend/resume handlers */ | ||
57 | |||
58 | register_syscore_ops(&s3c2410_pm_syscore_ops); | ||
59 | register_syscore_ops(&s3c244x_pm_syscore_ops); | ||
60 | register_syscore_ops(&s3c24xx_irq_syscore_ops); | ||
61 | |||
54 | /* register our system device for everything else */ | 62 | /* register our system device for everything else */ |
55 | 63 | ||
56 | return sysdev_register(&s3c2440_sysdev); | 64 | return sysdev_register(&s3c2440_sysdev); |
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c index ecf813546554..6224bad4d604 100644 --- a/arch/arm/mach-s3c2440/s3c2442.c +++ b/arch/arm/mach-s3c2440/s3c2442.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/err.h> | 29 | #include <linux/err.h> |
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include <linux/sysdev.h> | 31 | #include <linux/sysdev.h> |
32 | #include <linux/syscore_ops.h> | ||
32 | #include <linux/interrupt.h> | 33 | #include <linux/interrupt.h> |
33 | #include <linux/ioport.h> | 34 | #include <linux/ioport.h> |
34 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
@@ -45,6 +46,7 @@ | |||
45 | #include <plat/clock.h> | 46 | #include <plat/clock.h> |
46 | #include <plat/cpu.h> | 47 | #include <plat/cpu.h> |
47 | #include <plat/s3c244x.h> | 48 | #include <plat/s3c244x.h> |
49 | #include <plat/pm.h> | ||
48 | 50 | ||
49 | #include <plat/gpio-core.h> | 51 | #include <plat/gpio-core.h> |
50 | #include <plat/gpio-cfg.h> | 52 | #include <plat/gpio-cfg.h> |
@@ -167,6 +169,10 @@ int __init s3c2442_init(void) | |||
167 | { | 169 | { |
168 | printk("S3C2442: Initialising architecture\n"); | 170 | printk("S3C2442: Initialising architecture\n"); |
169 | 171 | ||
172 | register_syscore_ops(&s3c2410_pm_syscore_ops); | ||
173 | register_syscore_ops(&s3c244x_pm_syscore_ops); | ||
174 | register_syscore_ops(&s3c24xx_irq_syscore_ops); | ||
175 | |||
170 | return sysdev_register(&s3c2442_sysdev); | 176 | return sysdev_register(&s3c2442_sysdev); |
171 | } | 177 | } |
172 | 178 | ||
diff --git a/arch/arm/mach-s3c2440/s3c244x-irq.c b/arch/arm/mach-s3c2440/s3c244x-irq.c index de07c2feaa32..c63e8f26d901 100644 --- a/arch/arm/mach-s3c2440/s3c244x-irq.c +++ b/arch/arm/mach-s3c2440/s3c244x-irq.c | |||
@@ -116,8 +116,6 @@ static int s3c244x_irq_add(struct sys_device *sysdev) | |||
116 | 116 | ||
117 | static struct sysdev_driver s3c2440_irq_driver = { | 117 | static struct sysdev_driver s3c2440_irq_driver = { |
118 | .add = s3c244x_irq_add, | 118 | .add = s3c244x_irq_add, |
119 | .suspend = s3c24xx_irq_suspend, | ||
120 | .resume = s3c24xx_irq_resume, | ||
121 | }; | 119 | }; |
122 | 120 | ||
123 | static int s3c2440_irq_init(void) | 121 | static int s3c2440_irq_init(void) |
@@ -129,8 +127,6 @@ arch_initcall(s3c2440_irq_init); | |||
129 | 127 | ||
130 | static struct sysdev_driver s3c2442_irq_driver = { | 128 | static struct sysdev_driver s3c2442_irq_driver = { |
131 | .add = s3c244x_irq_add, | 129 | .add = s3c244x_irq_add, |
132 | .suspend = s3c24xx_irq_suspend, | ||
133 | .resume = s3c24xx_irq_resume, | ||
134 | }; | 130 | }; |
135 | 131 | ||
136 | 132 | ||
diff --git a/arch/arm/mach-s3c2440/s3c244x.c b/arch/arm/mach-s3c2440/s3c244x.c index 90c1707b9c95..7e8a23d2098a 100644 --- a/arch/arm/mach-s3c2440/s3c244x.c +++ b/arch/arm/mach-s3c2440/s3c244x.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/serial_core.h> | 19 | #include <linux/serial_core.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
22 | #include <linux/syscore_ops.h> | ||
22 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
23 | #include <linux/io.h> | 24 | #include <linux/io.h> |
24 | 25 | ||
@@ -134,45 +135,14 @@ void __init s3c244x_init_clocks(int xtal) | |||
134 | s3c2410_baseclk_add(); | 135 | s3c2410_baseclk_add(); |
135 | } | 136 | } |
136 | 137 | ||
137 | #ifdef CONFIG_PM | ||
138 | |||
139 | static struct sleep_save s3c244x_sleep[] = { | ||
140 | SAVE_ITEM(S3C2440_DSC0), | ||
141 | SAVE_ITEM(S3C2440_DSC1), | ||
142 | SAVE_ITEM(S3C2440_GPJDAT), | ||
143 | SAVE_ITEM(S3C2440_GPJCON), | ||
144 | SAVE_ITEM(S3C2440_GPJUP) | ||
145 | }; | ||
146 | |||
147 | static int s3c244x_suspend(struct sys_device *dev, pm_message_t state) | ||
148 | { | ||
149 | s3c_pm_do_save(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static int s3c244x_resume(struct sys_device *dev) | ||
154 | { | ||
155 | s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | #else | ||
160 | #define s3c244x_suspend NULL | ||
161 | #define s3c244x_resume NULL | ||
162 | #endif | ||
163 | |||
164 | /* Since the S3C2442 and S3C2440 share items, put both sysclasses here */ | 138 | /* Since the S3C2442 and S3C2440 share items, put both sysclasses here */ |
165 | 139 | ||
166 | struct sysdev_class s3c2440_sysclass = { | 140 | struct sysdev_class s3c2440_sysclass = { |
167 | .name = "s3c2440-core", | 141 | .name = "s3c2440-core", |
168 | .suspend = s3c244x_suspend, | ||
169 | .resume = s3c244x_resume | ||
170 | }; | 142 | }; |
171 | 143 | ||
172 | struct sysdev_class s3c2442_sysclass = { | 144 | struct sysdev_class s3c2442_sysclass = { |
173 | .name = "s3c2442-core", | 145 | .name = "s3c2442-core", |
174 | .suspend = s3c244x_suspend, | ||
175 | .resume = s3c244x_resume | ||
176 | }; | 146 | }; |
177 | 147 | ||
178 | /* need to register class before we actually register the device, and | 148 | /* need to register class before we actually register the device, and |
@@ -194,3 +164,33 @@ static int __init s3c2442_core_init(void) | |||
194 | } | 164 | } |
195 | 165 | ||
196 | core_initcall(s3c2442_core_init); | 166 | core_initcall(s3c2442_core_init); |
167 | |||
168 | |||
169 | #ifdef CONFIG_PM | ||
170 | static struct sleep_save s3c244x_sleep[] = { | ||
171 | SAVE_ITEM(S3C2440_DSC0), | ||
172 | SAVE_ITEM(S3C2440_DSC1), | ||
173 | SAVE_ITEM(S3C2440_GPJDAT), | ||
174 | SAVE_ITEM(S3C2440_GPJCON), | ||
175 | SAVE_ITEM(S3C2440_GPJUP) | ||
176 | }; | ||
177 | |||
178 | static int s3c244x_suspend(void) | ||
179 | { | ||
180 | s3c_pm_do_save(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); | ||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static void s3c244x_resume(void) | ||
185 | { | ||
186 | s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); | ||
187 | } | ||
188 | #else | ||
189 | #define s3c244x_suspend NULL | ||
190 | #define s3c244x_resume NULL | ||
191 | #endif | ||
192 | |||
193 | struct syscore_ops s3c244x_pm_syscore_ops = { | ||
194 | .suspend = s3c244x_suspend, | ||
195 | .resume = s3c244x_resume, | ||
196 | }; | ||
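
The s3c244x hunk also shows the plat-samsung helpers that most of these callbacks are built on: a sleep_save array of SAVE_ITEM() entries processed by s3c_pm_do_save() and s3c_pm_do_restore(), now wrapped by an exported syscore_ops instead of the shared sysclasses. A sketch of that shape; the register names are the ones from the hunk and their definitions come from the machine's register headers, not shown here:

#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <plat/pm.h>            /* sleep_save, SAVE_ITEM, s3c_pm_do_*() */

static struct sleep_save example_sleep[] = {
        SAVE_ITEM(S3C2440_DSC0),
        SAVE_ITEM(S3C2440_DSC1),
};

static int example_244x_suspend(void)
{
        s3c_pm_do_save(example_sleep, ARRAY_SIZE(example_sleep));
        return 0;
}

static void example_244x_resume(void)
{
        s3c_pm_do_restore(example_sleep, ARRAY_SIZE(example_sleep));
}

struct syscore_ops example_244x_pm_syscore_ops = {
        .suspend = example_244x_suspend,
        .resume  = example_244x_resume,
};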
diff --git a/arch/arm/mach-s3c64xx/irq-pm.c b/arch/arm/mach-s3c64xx/irq-pm.c index da1bec64b9da..8bec61e242c7 100644 --- a/arch/arm/mach-s3c64xx/irq-pm.c +++ b/arch/arm/mach-s3c64xx/irq-pm.c | |||
@@ -13,7 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/sysdev.h> | 16 | #include <linux/syscore_ops.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/serial_core.h> | 18 | #include <linux/serial_core.h> |
19 | #include <linux/irq.h> | 19 | #include <linux/irq.h> |
@@ -54,7 +54,7 @@ static struct irq_grp_save { | |||
54 | 54 | ||
55 | static u32 irq_uart_mask[CONFIG_SERIAL_SAMSUNG_UARTS]; | 55 | static u32 irq_uart_mask[CONFIG_SERIAL_SAMSUNG_UARTS]; |
56 | 56 | ||
57 | static int s3c64xx_irq_pm_suspend(struct sys_device *dev, pm_message_t state) | 57 | static int s3c64xx_irq_pm_suspend(void) |
58 | { | 58 | { |
59 | struct irq_grp_save *grp = eint_grp_save; | 59 | struct irq_grp_save *grp = eint_grp_save; |
60 | int i; | 60 | int i; |
@@ -75,7 +75,7 @@ static int s3c64xx_irq_pm_suspend(struct sys_device *dev, pm_message_t state) | |||
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | 77 | ||
78 | static int s3c64xx_irq_pm_resume(struct sys_device *dev) | 78 | static void s3c64xx_irq_pm_resume(void) |
79 | { | 79 | { |
80 | struct irq_grp_save *grp = eint_grp_save; | 80 | struct irq_grp_save *grp = eint_grp_save; |
81 | int i; | 81 | int i; |
@@ -94,18 +94,18 @@ static int s3c64xx_irq_pm_resume(struct sys_device *dev) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | S3C_PMDBG("%s: IRQ configuration restored\n", __func__); | 96 | S3C_PMDBG("%s: IRQ configuration restored\n", __func__); |
97 | return 0; | ||
98 | } | 97 | } |
99 | 98 | ||
100 | static struct sysdev_driver s3c64xx_irq_driver = { | 99 | struct syscore_ops s3c64xx_irq_syscore_ops = { |
101 | .suspend = s3c64xx_irq_pm_suspend, | 100 | .suspend = s3c64xx_irq_pm_suspend, |
102 | .resume = s3c64xx_irq_pm_resume, | 101 | .resume = s3c64xx_irq_pm_resume, |
103 | }; | 102 | }; |
104 | 103 | ||
105 | static int __init s3c64xx_irq_pm_init(void) | 104 | static __init int s3c64xx_syscore_init(void) |
106 | { | 105 | { |
107 | return sysdev_driver_register(&s3c64xx_sysclass, &s3c64xx_irq_driver); | 106 | register_syscore_ops(&s3c64xx_irq_syscore_ops); |
108 | } | ||
109 | 107 | ||
110 | arch_initcall(s3c64xx_irq_pm_init); | 108 | return 0; |
109 | } | ||
111 | 110 | ||
111 | core_initcall(s3c64xx_syscore_init); | ||
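
One side effect visible in the s3c64xx hunk: the initcall moves from arch_initcall() to core_initcall(), presumably because the registration no longer has to wait for the s3c64xx sysdev class to exist. A minimal sketch of the new init path, with invented names:

#include <linux/init.h>
#include <linux/syscore_ops.h>

extern struct syscore_ops example_irq_syscore_ops;     /* defined with the IRQ code */

static int __init example_syscore_init(void)
{
        /* nothing to depend on: just hook the ops into the syscore list */
        register_syscore_ops(&example_irq_syscore_ops);

        return 0;
}
core_initcall(example_syscore_init);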
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c index 549d7924fd4c..24febae3d4c0 100644 --- a/arch/arm/mach-s5pv210/pm.c +++ b/arch/arm/mach-s5pv210/pm.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/suspend.h> | 18 | #include <linux/suspend.h> |
19 | #include <linux/syscore_ops.h> | ||
19 | #include <linux/io.h> | 20 | #include <linux/io.h> |
20 | 21 | ||
21 | #include <plat/cpu.h> | 22 | #include <plat/cpu.h> |
@@ -140,7 +141,17 @@ static int s5pv210_pm_add(struct sys_device *sysdev) | |||
140 | return 0; | 141 | return 0; |
141 | } | 142 | } |
142 | 143 | ||
143 | static int s5pv210_pm_resume(struct sys_device *dev) | 144 | static struct sysdev_driver s5pv210_pm_driver = { |
145 | .add = s5pv210_pm_add, | ||
146 | }; | ||
147 | |||
148 | static __init int s5pv210_pm_drvinit(void) | ||
149 | { | ||
150 | return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver); | ||
151 | } | ||
152 | arch_initcall(s5pv210_pm_drvinit); | ||
153 | |||
154 | static void s5pv210_pm_resume(void) | ||
144 | { | 155 | { |
145 | u32 tmp; | 156 | u32 tmp; |
146 | 157 | ||
@@ -150,17 +161,15 @@ static int s5pv210_pm_resume(struct sys_device *dev) | |||
150 | __raw_writel(tmp , S5P_OTHERS); | 161 | __raw_writel(tmp , S5P_OTHERS); |
151 | 162 | ||
152 | s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); | 163 | s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); |
153 | |||
154 | return 0; | ||
155 | } | 164 | } |
156 | 165 | ||
157 | static struct sysdev_driver s5pv210_pm_driver = { | 166 | static struct syscore_ops s5pv210_pm_syscore_ops = { |
158 | .add = s5pv210_pm_add, | ||
159 | .resume = s5pv210_pm_resume, | 167 | .resume = s5pv210_pm_resume, |
160 | }; | 168 | }; |
161 | 169 | ||
162 | static __init int s5pv210_pm_drvinit(void) | 170 | static __init int s5pv210_pm_syscore_init(void) |
163 | { | 171 | { |
164 | return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver); | 172 | register_syscore_ops(&s5pv210_pm_syscore_ops); |
173 | return 0; | ||
165 | } | 174 | } |
166 | arch_initcall(s5pv210_pm_drvinit); | 175 | arch_initcall(s5pv210_pm_syscore_init); |
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c index 423ddb3d65e9..dfbf824a69fa 100644 --- a/arch/arm/mach-sa1100/irq.c +++ b/arch/arm/mach-sa1100/irq.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
16 | #include <linux/ioport.h> | 16 | #include <linux/ioport.h> |
17 | #include <linux/sysdev.h> | 17 | #include <linux/syscore_ops.h> |
18 | 18 | ||
19 | #include <mach/hardware.h> | 19 | #include <mach/hardware.h> |
20 | #include <asm/mach/irq.h> | 20 | #include <asm/mach/irq.h> |
@@ -234,7 +234,7 @@ static struct sa1100irq_state { | |||
234 | unsigned int iccr; | 234 | unsigned int iccr; |
235 | } sa1100irq_state; | 235 | } sa1100irq_state; |
236 | 236 | ||
237 | static int sa1100irq_suspend(struct sys_device *dev, pm_message_t state) | 237 | static int sa1100irq_suspend(void) |
238 | { | 238 | { |
239 | struct sa1100irq_state *st = &sa1100irq_state; | 239 | struct sa1100irq_state *st = &sa1100irq_state; |
240 | 240 | ||
@@ -264,7 +264,7 @@ static int sa1100irq_suspend(struct sys_device *dev, pm_message_t state) | |||
264 | return 0; | 264 | return 0; |
265 | } | 265 | } |
266 | 266 | ||
267 | static int sa1100irq_resume(struct sys_device *dev) | 267 | static void sa1100irq_resume(void) |
268 | { | 268 | { |
269 | struct sa1100irq_state *st = &sa1100irq_state; | 269 | struct sa1100irq_state *st = &sa1100irq_state; |
270 | 270 | ||
@@ -277,24 +277,17 @@ static int sa1100irq_resume(struct sys_device *dev) | |||
277 | 277 | ||
278 | ICMR = st->icmr; | 278 | ICMR = st->icmr; |
279 | } | 279 | } |
280 | return 0; | ||
281 | } | 280 | } |
282 | 281 | ||
283 | static struct sysdev_class sa1100irq_sysclass = { | 282 | static struct syscore_ops sa1100irq_syscore_ops = { |
284 | .name = "sa11x0-irq", | ||
285 | .suspend = sa1100irq_suspend, | 283 | .suspend = sa1100irq_suspend, |
286 | .resume = sa1100irq_resume, | 284 | .resume = sa1100irq_resume, |
287 | }; | 285 | }; |
288 | 286 | ||
289 | static struct sys_device sa1100irq_device = { | ||
290 | .id = 0, | ||
291 | .cls = &sa1100irq_sysclass, | ||
292 | }; | ||
293 | |||
294 | static int __init sa1100irq_init_devicefs(void) | 287 | static int __init sa1100irq_init_devicefs(void) |
295 | { | 288 | { |
296 | sysdev_class_register(&sa1100irq_sysclass); | 289 | register_syscore_ops(&sa1100irq_syscore_ops); |
297 | return sysdev_register(&sa1100irq_device); | 290 | return 0; |
298 | } | 291 | } |
299 | 292 | ||
300 | device_initcall(sa1100irq_init_devicefs); | 293 | device_initcall(sa1100irq_init_devicefs); |
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c index 94912d3944d3..2d1b67a59e4a 100644 --- a/arch/arm/mach-shmobile/pm_runtime.c +++ b/arch/arm/mach-shmobile/pm_runtime.c | |||
@@ -18,152 +18,41 @@ | |||
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/sh_clk.h> | 19 | #include <linux/sh_clk.h> |
20 | #include <linux/bitmap.h> | 20 | #include <linux/bitmap.h> |
21 | #include <linux/slab.h> | ||
21 | 22 | ||
22 | #ifdef CONFIG_PM_RUNTIME | 23 | #ifdef CONFIG_PM_RUNTIME |
23 | #define BIT_ONCE 0 | ||
24 | #define BIT_ACTIVE 1 | ||
25 | #define BIT_CLK_ENABLED 2 | ||
26 | 24 | ||
27 | struct pm_runtime_data { | 25 | static int default_platform_runtime_idle(struct device *dev) |
28 | unsigned long flags; | ||
29 | struct clk *clk; | ||
30 | }; | ||
31 | |||
32 | static void __devres_release(struct device *dev, void *res) | ||
33 | { | ||
34 | struct pm_runtime_data *prd = res; | ||
35 | |||
36 | dev_dbg(dev, "__devres_release()\n"); | ||
37 | |||
38 | if (test_bit(BIT_CLK_ENABLED, &prd->flags)) | ||
39 | clk_disable(prd->clk); | ||
40 | |||
41 | if (test_bit(BIT_ACTIVE, &prd->flags)) | ||
42 | clk_put(prd->clk); | ||
43 | } | ||
44 | |||
45 | static struct pm_runtime_data *__to_prd(struct device *dev) | ||
46 | { | ||
47 | return devres_find(dev, __devres_release, NULL, NULL); | ||
48 | } | ||
49 | |||
50 | static void platform_pm_runtime_init(struct device *dev, | ||
51 | struct pm_runtime_data *prd) | ||
52 | { | ||
53 | if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) { | ||
54 | prd->clk = clk_get(dev, NULL); | ||
55 | if (!IS_ERR(prd->clk)) { | ||
56 | set_bit(BIT_ACTIVE, &prd->flags); | ||
57 | dev_info(dev, "clocks managed by runtime pm\n"); | ||
58 | } | ||
59 | } | ||
60 | } | ||
61 | |||
62 | static void platform_pm_runtime_bug(struct device *dev, | ||
63 | struct pm_runtime_data *prd) | ||
64 | { | ||
65 | if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) | ||
66 | dev_err(dev, "runtime pm suspend before resume\n"); | ||
67 | } | ||
68 | |||
69 | int platform_pm_runtime_suspend(struct device *dev) | ||
70 | { | ||
71 | struct pm_runtime_data *prd = __to_prd(dev); | ||
72 | |||
73 | dev_dbg(dev, "platform_pm_runtime_suspend()\n"); | ||
74 | |||
75 | platform_pm_runtime_bug(dev, prd); | ||
76 | |||
77 | if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { | ||
78 | clk_disable(prd->clk); | ||
79 | clear_bit(BIT_CLK_ENABLED, &prd->flags); | ||
80 | } | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | int platform_pm_runtime_resume(struct device *dev) | ||
86 | { | ||
87 | struct pm_runtime_data *prd = __to_prd(dev); | ||
88 | |||
89 | dev_dbg(dev, "platform_pm_runtime_resume()\n"); | ||
90 | |||
91 | platform_pm_runtime_init(dev, prd); | ||
92 | |||
93 | if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { | ||
94 | clk_enable(prd->clk); | ||
95 | set_bit(BIT_CLK_ENABLED, &prd->flags); | ||
96 | } | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | int platform_pm_runtime_idle(struct device *dev) | ||
102 | { | 26 | { |
103 | /* suspend synchronously to disable clocks immediately */ | 27 | /* suspend synchronously to disable clocks immediately */ |
104 | return pm_runtime_suspend(dev); | 28 | return pm_runtime_suspend(dev); |
105 | } | 29 | } |
106 | 30 | ||
107 | static int platform_bus_notify(struct notifier_block *nb, | 31 | static struct dev_power_domain default_power_domain = { |
108 | unsigned long action, void *data) | 32 | .ops = { |
109 | { | 33 | .runtime_suspend = pm_runtime_clk_suspend, |
110 | struct device *dev = data; | 34 | .runtime_resume = pm_runtime_clk_resume, |
111 | struct pm_runtime_data *prd; | 35 | .runtime_idle = default_platform_runtime_idle, |
112 | 36 | USE_PLATFORM_PM_SLEEP_OPS | |
113 | dev_dbg(dev, "platform_bus_notify() %ld !\n", action); | 37 | }, |
114 | 38 | }; | |
115 | if (action == BUS_NOTIFY_BIND_DRIVER) { | ||
116 | prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL); | ||
117 | if (prd) | ||
118 | devres_add(dev, prd); | ||
119 | else | ||
120 | dev_err(dev, "unable to alloc memory for runtime pm\n"); | ||
121 | } | ||
122 | |||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | #else /* CONFIG_PM_RUNTIME */ | ||
127 | |||
128 | static int platform_bus_notify(struct notifier_block *nb, | ||
129 | unsigned long action, void *data) | ||
130 | { | ||
131 | struct device *dev = data; | ||
132 | struct clk *clk; | ||
133 | 39 | ||
134 | dev_dbg(dev, "platform_bus_notify() %ld !\n", action); | 40 | #define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain) |
135 | 41 | ||
136 | switch (action) { | 42 | #else |
137 | case BUS_NOTIFY_BIND_DRIVER: | ||
138 | clk = clk_get(dev, NULL); | ||
139 | if (!IS_ERR(clk)) { | ||
140 | clk_enable(clk); | ||
141 | clk_put(clk); | ||
142 | dev_info(dev, "runtime pm disabled, clock forced on\n"); | ||
143 | } | ||
144 | break; | ||
145 | case BUS_NOTIFY_UNBOUND_DRIVER: | ||
146 | clk = clk_get(dev, NULL); | ||
147 | if (!IS_ERR(clk)) { | ||
148 | clk_disable(clk); | ||
149 | clk_put(clk); | ||
150 | dev_info(dev, "runtime pm disabled, clock forced off\n"); | ||
151 | } | ||
152 | break; | ||
153 | } | ||
154 | 43 | ||
155 | return 0; | 44 | #define DEFAULT_PWR_DOMAIN_PTR NULL |
156 | } | ||
157 | 45 | ||
158 | #endif /* CONFIG_PM_RUNTIME */ | 46 | #endif /* CONFIG_PM_RUNTIME */ |
159 | 47 | ||
160 | static struct notifier_block platform_bus_notifier = { | 48 | static struct pm_clk_notifier_block platform_bus_notifier = { |
161 | .notifier_call = platform_bus_notify | 49 | .pwr_domain = DEFAULT_PWR_DOMAIN_PTR, |
50 | .con_ids = { NULL, }, | ||
162 | }; | 51 | }; |
163 | 52 | ||
164 | static int __init sh_pm_runtime_init(void) | 53 | static int __init sh_pm_runtime_init(void) |
165 | { | 54 | { |
166 | bus_register_notifier(&platform_bus_type, &platform_bus_notifier); | 55 | pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); |
167 | return 0; | 56 | return 0; |
168 | } | 57 | } |
169 | core_initcall(sh_pm_runtime_init); | 58 | core_initcall(sh_pm_runtime_init); |
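Here the hand-rolled bus notifier and devres clock bookkeeping give way to the generic runtime-PM clock helpers: pm_runtime_clk_add_notifier() attaches a struct pm_clk_notifier_block to the platform bus, and its NULL-terminated con_ids array names the clocks to manage (a lone NULL, as above, means the device's default clock). A hedged variant that names specific clock connection IDs; "fck" and "ick" are placeholders, not identifiers taken from this patch:

  /* Sketch only: the con_ids entries are hypothetical connection IDs. */
  static struct pm_clk_notifier_block example_clk_notifier = {
  	.pwr_domain	= &default_power_domain,
  	.con_ids	= { "fck", "ick", NULL },
  };

  static int __init example_pm_runtime_init(void)
  {
  	pm_runtime_clk_add_notifier(&platform_bus_type, &example_clk_notifier);
  	return 0;
  }
  core_initcall(example_pm_runtime_init);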
diff --git a/arch/arm/mach-tegra/include/mach/barriers.h b/arch/arm/mach-tegra/include/mach/barriers.h index cc115174899b..425b42e91ef6 100644 --- a/arch/arm/mach-tegra/include/mach/barriers.h +++ b/arch/arm/mach-tegra/include/mach/barriers.h | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | #include <asm/outercache.h> | 24 | #include <asm/outercache.h> |
25 | 25 | ||
26 | #define rmb() dmb() | 26 | #define rmb() dsb() |
27 | #define wmb() do { dsb(); outer_sync(); } while (0) | 27 | #define wmb() do { dsb(); outer_sync(); } while (0) |
28 | #define mb() wmb() | 28 | #define mb() wmb() |
29 | 29 | ||
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index e5f6fc428348..e591513bb53e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -392,7 +392,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) | |||
392 | * Convert start_pfn/end_pfn to a struct page pointer. | 392 | * Convert start_pfn/end_pfn to a struct page pointer. |
393 | */ | 393 | */ |
394 | start_pg = pfn_to_page(start_pfn - 1) + 1; | 394 | start_pg = pfn_to_page(start_pfn - 1) + 1; |
395 | end_pg = pfn_to_page(end_pfn); | 395 | end_pg = pfn_to_page(end_pfn - 1) + 1; |
396 | 396 | ||
397 | /* | 397 | /* |
398 | * Convert to physical addresses, and | 398 | * Convert to physical addresses, and |
@@ -426,6 +426,14 @@ static void __init free_unused_memmap(struct meminfo *mi) | |||
426 | 426 | ||
427 | bank_start = bank_pfn_start(bank); | 427 | bank_start = bank_pfn_start(bank); |
428 | 428 | ||
429 | #ifdef CONFIG_SPARSEMEM | ||
430 | /* | ||
431 | * Take care not to free memmap entries that don't exist | ||
432 | * due to SPARSEMEM sections which aren't present. | ||
433 | */ | ||
434 | bank_start = min(bank_start, | ||
435 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | ||
436 | #endif | ||
429 | /* | 437 | /* |
430 | * If we had a previous bank, and there is a space | 438 | * If we had a previous bank, and there is a space |
431 | * between the current bank and the previous, free it. | 439 | * between the current bank and the previous, free it. |
@@ -440,6 +448,12 @@ static void __init free_unused_memmap(struct meminfo *mi) | |||
440 | */ | 448 | */ |
441 | prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); | 449 | prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); |
442 | } | 450 | } |
451 | |||
452 | #ifdef CONFIG_SPARSEMEM | ||
453 | if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) | ||
454 | free_memmap(prev_bank_end, | ||
455 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | ||
456 | #endif | ||
443 | } | 457 | } |
444 | 458 | ||
445 | static void __init free_highpages(void) | 459 | static void __init free_highpages(void) |
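The two SPARSEMEM guards keep free_memmap() inside sections that actually exist: no memmap is ever allocated for an absent section, so the gap freed between banks must stop at the section boundary, and the unused tail of the last present section is trimmed separately after the loop. A worked example of the clamp with invented PFNs:

  /*
   * Illustrative arithmetic only -- the PFN values are made up.
   * Assume PAGES_PER_SECTION == 0x10000.
   *
   *   prev_bank_end = 0x38000   (previous bank ends mid-section)
   *   bank_start    = 0x58000   (next bank starts two sections later)
   *
   * ALIGN(0x38000, 0x10000) == 0x40000, so
   *
   *   bank_start = min(bank_start, ALIGN(prev_bank_end, PAGES_PER_SECTION));
   *
   * lowers bank_start to 0x40000, and only [0x38000, 0x40000) -- the unused
   * tail of the section that is actually present -- is handed to
   * free_memmap().  Without the clamp the range would run through sections
   * that have no memmap at all.
   */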
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index d2adcdda23cf..bd9e32187eab 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/sysdev.h> | 20 | #include <linux/syscore_ops.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
@@ -1372,9 +1372,7 @@ static const struct dev_pm_ops omap_mpuio_dev_pm_ops = { | |||
1372 | .resume_noirq = omap_mpuio_resume_noirq, | 1372 | .resume_noirq = omap_mpuio_resume_noirq, |
1373 | }; | 1373 | }; |
1374 | 1374 | ||
1375 | /* use platform_driver for this, now that there's no longer any | 1375 | /* use platform_driver for this. */ |
1376 | * point to sys_device (other than not disturbing old code). | ||
1377 | */ | ||
1378 | static struct platform_driver omap_mpuio_driver = { | 1376 | static struct platform_driver omap_mpuio_driver = { |
1379 | .driver = { | 1377 | .driver = { |
1380 | .name = "mpuio", | 1378 | .name = "mpuio", |
@@ -1745,7 +1743,7 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev) | |||
1745 | } | 1743 | } |
1746 | 1744 | ||
1747 | #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) | 1745 | #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) |
1748 | static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg) | 1746 | static int omap_gpio_suspend(void) |
1749 | { | 1747 | { |
1750 | int i; | 1748 | int i; |
1751 | 1749 | ||
@@ -1795,12 +1793,12 @@ static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg) | |||
1795 | return 0; | 1793 | return 0; |
1796 | } | 1794 | } |
1797 | 1795 | ||
1798 | static int omap_gpio_resume(struct sys_device *dev) | 1796 | static void omap_gpio_resume(void) |
1799 | { | 1797 | { |
1800 | int i; | 1798 | int i; |
1801 | 1799 | ||
1802 | if (!cpu_class_is_omap2() && !cpu_is_omap16xx()) | 1800 | if (!cpu_class_is_omap2() && !cpu_is_omap16xx()) |
1803 | return 0; | 1801 | return; |
1804 | 1802 | ||
1805 | for (i = 0; i < gpio_bank_count; i++) { | 1803 | for (i = 0; i < gpio_bank_count; i++) { |
1806 | struct gpio_bank *bank = &gpio_bank[i]; | 1804 | struct gpio_bank *bank = &gpio_bank[i]; |
@@ -1836,21 +1834,13 @@ static int omap_gpio_resume(struct sys_device *dev) | |||
1836 | __raw_writel(bank->saved_wakeup, wake_set); | 1834 | __raw_writel(bank->saved_wakeup, wake_set); |
1837 | spin_unlock_irqrestore(&bank->lock, flags); | 1835 | spin_unlock_irqrestore(&bank->lock, flags); |
1838 | } | 1836 | } |
1839 | |||
1840 | return 0; | ||
1841 | } | 1837 | } |
1842 | 1838 | ||
1843 | static struct sysdev_class omap_gpio_sysclass = { | 1839 | static struct syscore_ops omap_gpio_syscore_ops = { |
1844 | .name = "gpio", | ||
1845 | .suspend = omap_gpio_suspend, | 1840 | .suspend = omap_gpio_suspend, |
1846 | .resume = omap_gpio_resume, | 1841 | .resume = omap_gpio_resume, |
1847 | }; | 1842 | }; |
1848 | 1843 | ||
1849 | static struct sys_device omap_gpio_device = { | ||
1850 | .id = 0, | ||
1851 | .cls = &omap_gpio_sysclass, | ||
1852 | }; | ||
1853 | |||
1854 | #endif | 1844 | #endif |
1855 | 1845 | ||
1856 | #ifdef CONFIG_ARCH_OMAP2PLUS | 1846 | #ifdef CONFIG_ARCH_OMAP2PLUS |
@@ -2108,21 +2098,14 @@ postcore_initcall(omap_gpio_drv_reg); | |||
2108 | 2098 | ||
2109 | static int __init omap_gpio_sysinit(void) | 2099 | static int __init omap_gpio_sysinit(void) |
2110 | { | 2100 | { |
2111 | int ret = 0; | ||
2112 | |||
2113 | mpuio_init(); | 2101 | mpuio_init(); |
2114 | 2102 | ||
2115 | #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) | 2103 | #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) |
2116 | if (cpu_is_omap16xx() || cpu_class_is_omap2()) { | 2104 | if (cpu_is_omap16xx() || cpu_class_is_omap2()) |
2117 | if (ret == 0) { | 2105 | register_syscore_ops(&omap_gpio_syscore_ops); |
2118 | ret = sysdev_class_register(&omap_gpio_sysclass); | ||
2119 | if (ret == 0) | ||
2120 | ret = sysdev_register(&omap_gpio_device); | ||
2121 | } | ||
2122 | } | ||
2123 | #endif | 2106 | #endif |
2124 | 2107 | ||
2125 | return ret; | 2108 | return 0; |
2126 | } | 2109 | } |
2127 | 2110 | ||
2128 | arch_initcall(omap_gpio_sysinit); | 2111 | arch_initcall(omap_gpio_sysinit); |
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c index 8a51fd58f656..34fc31ee9081 100644 --- a/arch/arm/plat-omap/iommu.c +++ b/arch/arm/plat-omap/iommu.c | |||
@@ -793,6 +793,8 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) | |||
793 | clk_enable(obj->clk); | 793 | clk_enable(obj->clk); |
794 | errs = iommu_report_fault(obj, &da); | 794 | errs = iommu_report_fault(obj, &da); |
795 | clk_disable(obj->clk); | 795 | clk_disable(obj->clk); |
796 | if (errs == 0) | ||
797 | return IRQ_HANDLED; | ||
796 | 798 | ||
797 | /* Fault callback or TLB/PTE Dynamic loading */ | 799 | /* Fault callback or TLB/PTE Dynamic loading */ |
798 | if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) | 800 | if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) |
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index 9bbda9acb73b..a37b8eb65b76 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c | |||
@@ -536,6 +536,28 @@ int omap_early_device_register(struct omap_device *od) | |||
536 | return 0; | 536 | return 0; |
537 | } | 537 | } |
538 | 538 | ||
539 | static int _od_runtime_suspend(struct device *dev) | ||
540 | { | ||
541 | struct platform_device *pdev = to_platform_device(dev); | ||
542 | |||
543 | return omap_device_idle(pdev); | ||
544 | } | ||
545 | |||
546 | static int _od_runtime_resume(struct device *dev) | ||
547 | { | ||
548 | struct platform_device *pdev = to_platform_device(dev); | ||
549 | |||
550 | return omap_device_enable(pdev); | ||
551 | } | ||
552 | |||
553 | static struct dev_power_domain omap_device_power_domain = { | ||
554 | .ops = { | ||
555 | .runtime_suspend = _od_runtime_suspend, | ||
556 | .runtime_resume = _od_runtime_resume, | ||
557 | USE_PLATFORM_PM_SLEEP_OPS | ||
558 | } | ||
559 | }; | ||
560 | |||
539 | /** | 561 | /** |
540 | * omap_device_register - register an omap_device with one omap_hwmod | 562 | * omap_device_register - register an omap_device with one omap_hwmod |
541 | * @od: struct omap_device * to register | 563 | * @od: struct omap_device * to register |
@@ -549,6 +571,7 @@ int omap_device_register(struct omap_device *od) | |||
549 | pr_debug("omap_device: %s: registering\n", od->pdev.name); | 571 | pr_debug("omap_device: %s: registering\n", od->pdev.name); |
550 | 572 | ||
551 | od->pdev.dev.parent = &omap_device_parent; | 573 | od->pdev.dev.parent = &omap_device_parent; |
574 | od->pdev.dev.pwr_domain = &omap_device_power_domain; | ||
552 | return platform_device_register(&od->pdev); | 575 | return platform_device_register(&od->pdev); |
553 | } | 576 | } |
554 | 577 | ||
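Attaching omap_device_power_domain in omap_device_register() means the PM core now routes a driver's runtime-PM calls through omap_device_enable()/omap_device_idle() instead of the bus defaults. A sketch of the consumer side under that assumption; the driver and function names are hypothetical:

  #include <linux/platform_device.h>
  #include <linux/pm_runtime.h>

  static int example_probe(struct platform_device *pdev)
  {
  	pm_runtime_enable(&pdev->dev);		/* opt in to runtime PM */
  	return 0;
  }

  static int example_do_io(struct device *dev)
  {
  	int ret;

  	ret = pm_runtime_get_sync(dev);		/* -> _od_runtime_resume()
  						 *    -> omap_device_enable() */
  	if (ret < 0)
  		return ret;

  	/* ... access the hardware ... */

  	pm_runtime_put_sync(dev);		/* -> _od_runtime_suspend()
  						 *    -> omap_device_idle() */
  	return 0;
  }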
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c index dce088f45678..48ebb9479b61 100644 --- a/arch/arm/plat-pxa/gpio.c +++ b/arch/arm/plat-pxa/gpio.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/sysdev.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | 20 | ||
21 | #include <mach/gpio.h> | 21 | #include <mach/gpio.h> |
@@ -295,7 +295,7 @@ void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn) | |||
295 | } | 295 | } |
296 | 296 | ||
297 | #ifdef CONFIG_PM | 297 | #ifdef CONFIG_PM |
298 | static int pxa_gpio_suspend(struct sys_device *dev, pm_message_t state) | 298 | static int pxa_gpio_suspend(void) |
299 | { | 299 | { |
300 | struct pxa_gpio_chip *c; | 300 | struct pxa_gpio_chip *c; |
301 | int gpio; | 301 | int gpio; |
@@ -312,7 +312,7 @@ static int pxa_gpio_suspend(struct sys_device *dev, pm_message_t state) | |||
312 | return 0; | 312 | return 0; |
313 | } | 313 | } |
314 | 314 | ||
315 | static int pxa_gpio_resume(struct sys_device *dev) | 315 | static void pxa_gpio_resume(void) |
316 | { | 316 | { |
317 | struct pxa_gpio_chip *c; | 317 | struct pxa_gpio_chip *c; |
318 | int gpio; | 318 | int gpio; |
@@ -326,22 +326,13 @@ static int pxa_gpio_resume(struct sys_device *dev) | |||
326 | __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET); | 326 | __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET); |
327 | __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET); | 327 | __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET); |
328 | } | 328 | } |
329 | return 0; | ||
330 | } | 329 | } |
331 | #else | 330 | #else |
332 | #define pxa_gpio_suspend NULL | 331 | #define pxa_gpio_suspend NULL |
333 | #define pxa_gpio_resume NULL | 332 | #define pxa_gpio_resume NULL |
334 | #endif | 333 | #endif |
335 | 334 | ||
336 | struct sysdev_class pxa_gpio_sysclass = { | 335 | struct syscore_ops pxa_gpio_syscore_ops = { |
337 | .name = "gpio", | ||
338 | .suspend = pxa_gpio_suspend, | 336 | .suspend = pxa_gpio_suspend, |
339 | .resume = pxa_gpio_resume, | 337 | .resume = pxa_gpio_resume, |
340 | }; | 338 | }; |
341 | |||
342 | static int __init pxa_gpio_init(void) | ||
343 | { | ||
344 | return sysdev_class_register(&pxa_gpio_sysclass); | ||
345 | } | ||
346 | |||
347 | core_initcall(pxa_gpio_init); | ||
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c index a9aa5ad3f4eb..be12eadcce20 100644 --- a/arch/arm/plat-pxa/mfp.c +++ b/arch/arm/plat-pxa/mfp.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <linux/sysdev.h> | ||
21 | 20 | ||
22 | #include <plat/mfp.h> | 21 | #include <plat/mfp.h> |
23 | 22 | ||
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 27ea852e3370..c10d10c56e2e 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/sysdev.h> | 25 | #include <linux/syscore_ops.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
28 | #include <linux/io.h> | 28 | #include <linux/io.h> |
@@ -1195,19 +1195,12 @@ int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *d | |||
1195 | 1195 | ||
1196 | EXPORT_SYMBOL(s3c2410_dma_getposition); | 1196 | EXPORT_SYMBOL(s3c2410_dma_getposition); |
1197 | 1197 | ||
1198 | static inline struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) | 1198 | /* system core operations */ |
1199 | { | ||
1200 | return container_of(dev, struct s3c2410_dma_chan, dev); | ||
1201 | } | ||
1202 | |||
1203 | /* system device class */ | ||
1204 | 1199 | ||
1205 | #ifdef CONFIG_PM | 1200 | #ifdef CONFIG_PM |
1206 | 1201 | ||
1207 | static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state) | 1202 | static void s3c2410_dma_suspend_chan(s3c2410_dma_chan *cp) |
1208 | { | 1203 | { |
1209 | struct s3c2410_dma_chan *cp = to_dma_chan(dev); | ||
1210 | |||
1211 | printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); | 1204 | printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); |
1212 | 1205 | ||
1213 | if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) { | 1206 | if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) { |
@@ -1222,13 +1215,21 @@ static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state) | |||
1222 | 1215 | ||
1223 | s3c2410_dma_dostop(cp); | 1216 | s3c2410_dma_dostop(cp); |
1224 | } | 1217 | } |
1218 | } | ||
1219 | |||
1220 | static int s3c2410_dma_suspend(void) | ||
1221 | { | ||
1222 | struct s3c2410_dma_chan *cp = s3c2410_chans; | ||
1223 | int channel; | ||
1224 | |||
1225 | for (channel = 0; channel < dma_channels; cp++, channel++) | ||
1226 | s3c2410_dma_suspend_chan(cp); | ||
1225 | 1227 | ||
1226 | return 0; | 1228 | return 0; |
1227 | } | 1229 | } |
1228 | 1230 | ||
1229 | static int s3c2410_dma_resume(struct sys_device *dev) | 1231 | static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) |
1230 | { | 1232 | { |
1231 | struct s3c2410_dma_chan *cp = to_dma_chan(dev); | ||
1232 | unsigned int no = cp->number | DMACH_LOW_LEVEL; | 1233 | unsigned int no = cp->number | DMACH_LOW_LEVEL; |
1233 | 1234 | ||
1234 | /* restore channel's hardware configuration */ | 1235 | /* restore channel's hardware configuration */ |
@@ -1249,13 +1250,21 @@ static int s3c2410_dma_resume(struct sys_device *dev) | |||
1249 | return 0; | 1250 | return 0; |
1250 | } | 1251 | } |
1251 | 1252 | ||
1253 | static void s3c2410_dma_resume(void) | ||
1254 | { | ||
1255 | struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1; | ||
1256 | int channel; | ||
1257 | |||
1258 | for (channel = dma_channels - 1; channel >= 0; cp++, channel--) | ||
1259 | s3c2410_dma_resume_chan(cp); | ||
1260 | } | ||
1261 | |||
1252 | #else | 1262 | #else |
1253 | #define s3c2410_dma_suspend NULL | 1263 | #define s3c2410_dma_suspend NULL |
1254 | #define s3c2410_dma_resume NULL | 1264 | #define s3c2410_dma_resume NULL |
1255 | #endif /* CONFIG_PM */ | 1265 | #endif /* CONFIG_PM */ |
1256 | 1266 | ||
1257 | struct sysdev_class dma_sysclass = { | 1267 | struct syscore_ops dma_syscore_ops = { |
1258 | .name = "s3c24xx-dma", | ||
1259 | .suspend = s3c2410_dma_suspend, | 1268 | .suspend = s3c2410_dma_suspend, |
1260 | .resume = s3c2410_dma_resume, | 1269 | .resume = s3c2410_dma_resume, |
1261 | }; | 1270 | }; |
@@ -1269,39 +1278,14 @@ static void s3c2410_dma_cache_ctor(void *p) | |||
1269 | 1278 | ||
1270 | /* initialisation code */ | 1279 | /* initialisation code */ |
1271 | 1280 | ||
1272 | static int __init s3c24xx_dma_sysclass_init(void) | 1281 | static int __init s3c24xx_dma_syscore_init(void) |
1273 | { | 1282 | { |
1274 | int ret = sysdev_class_register(&dma_sysclass); | 1283 | register_syscore_ops(&dma_syscore_ops); |
1275 | |||
1276 | if (ret != 0) | ||
1277 | printk(KERN_ERR "dma sysclass registration failed\n"); | ||
1278 | |||
1279 | return ret; | ||
1280 | } | ||
1281 | |||
1282 | core_initcall(s3c24xx_dma_sysclass_init); | ||
1283 | |||
1284 | static int __init s3c24xx_dma_sysdev_register(void) | ||
1285 | { | ||
1286 | struct s3c2410_dma_chan *cp = s3c2410_chans; | ||
1287 | int channel, ret; | ||
1288 | |||
1289 | for (channel = 0; channel < dma_channels; cp++, channel++) { | ||
1290 | cp->dev.cls = &dma_sysclass; | ||
1291 | cp->dev.id = channel; | ||
1292 | ret = sysdev_register(&cp->dev); | ||
1293 | |||
1294 | if (ret) { | ||
1295 | printk(KERN_ERR "error registering dev for dma %d\n", | ||
1296 | channel); | ||
1297 | return ret; | ||
1298 | } | ||
1299 | } | ||
1300 | 1284 | ||
1301 | return 0; | 1285 | return 0; |
1302 | } | 1286 | } |
1303 | 1287 | ||
1304 | late_initcall(s3c24xx_dma_sysdev_register); | 1288 | late_initcall(s3c24xx_dma_syscore_init); |
1305 | 1289 | ||
1306 | int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq, | 1290 | int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq, |
1307 | unsigned int stride) | 1291 | unsigned int stride) |
diff --git a/arch/arm/plat-s3c24xx/irq-pm.c b/arch/arm/plat-s3c24xx/irq-pm.c index c3624d898630..0efb2e2848c8 100644 --- a/arch/arm/plat-s3c24xx/irq-pm.c +++ b/arch/arm/plat-s3c24xx/irq-pm.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/sysdev.h> | ||
18 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
19 | 18 | ||
20 | #include <plat/cpu.h> | 19 | #include <plat/cpu.h> |
@@ -65,7 +64,7 @@ static unsigned long save_extint[3]; | |||
65 | static unsigned long save_eintflt[4]; | 64 | static unsigned long save_eintflt[4]; |
66 | static unsigned long save_eintmask; | 65 | static unsigned long save_eintmask; |
67 | 66 | ||
68 | int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) | 67 | int s3c24xx_irq_suspend(void) |
69 | { | 68 | { |
70 | unsigned int i; | 69 | unsigned int i; |
71 | 70 | ||
@@ -81,7 +80,7 @@ int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) | |||
81 | return 0; | 80 | return 0; |
82 | } | 81 | } |
83 | 82 | ||
84 | int s3c24xx_irq_resume(struct sys_device *dev) | 83 | void s3c24xx_irq_resume(void) |
85 | { | 84 | { |
86 | unsigned int i; | 85 | unsigned int i; |
87 | 86 | ||
@@ -93,6 +92,4 @@ int s3c24xx_irq_resume(struct sys_device *dev) | |||
93 | 92 | ||
94 | s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save)); | 93 | s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save)); |
95 | __raw_writel(save_eintmask, S3C24XX_EINTMASK); | 94 | __raw_writel(save_eintmask, S3C24XX_EINTMASK); |
96 | |||
97 | return 0; | ||
98 | } | 95 | } |
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-s5p/irq-pm.c index 5259ad458bc8..327acb3a4464 100644 --- a/arch/arm/plat-s5p/irq-pm.c +++ b/arch/arm/plat-s5p/irq-pm.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <linux/sysdev.h> | ||
20 | 19 | ||
21 | #include <plat/cpu.h> | 20 | #include <plat/cpu.h> |
22 | #include <plat/irqs.h> | 21 | #include <plat/irqs.h> |
@@ -77,17 +76,15 @@ static struct sleep_save eint_save[] = { | |||
77 | SAVE_ITEM(S5P_EINT_MASK(3)), | 76 | SAVE_ITEM(S5P_EINT_MASK(3)), |
78 | }; | 77 | }; |
79 | 78 | ||
80 | int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) | 79 | int s3c24xx_irq_suspend(void) |
81 | { | 80 | { |
82 | s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save)); | 81 | s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save)); |
83 | 82 | ||
84 | return 0; | 83 | return 0; |
85 | } | 84 | } |
86 | 85 | ||
87 | int s3c24xx_irq_resume(struct sys_device *dev) | 86 | void s3c24xx_irq_resume(void) |
88 | { | 87 | { |
89 | s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save)); | 88 | s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save)); |
90 | |||
91 | return 0; | ||
92 | } | 89 | } |
93 | 90 | ||
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h index cedfff51c82b..3aedac0034ba 100644 --- a/arch/arm/plat-samsung/include/plat/cpu.h +++ b/arch/arm/plat-samsung/include/plat/cpu.h | |||
@@ -68,6 +68,12 @@ extern void s3c24xx_init_uartdevs(char *name, | |||
68 | struct sys_timer; | 68 | struct sys_timer; |
69 | extern struct sys_timer s3c24xx_timer; | 69 | extern struct sys_timer s3c24xx_timer; |
70 | 70 | ||
71 | extern struct syscore_ops s3c2410_pm_syscore_ops; | ||
72 | extern struct syscore_ops s3c2412_pm_syscore_ops; | ||
73 | extern struct syscore_ops s3c2416_pm_syscore_ops; | ||
74 | extern struct syscore_ops s3c244x_pm_syscore_ops; | ||
75 | extern struct syscore_ops s3c64xx_irq_syscore_ops; | ||
76 | |||
71 | /* system device classes */ | 77 | /* system device classes */ |
72 | 78 | ||
73 | extern struct sysdev_class s3c2410_sysclass; | 79 | extern struct sysdev_class s3c2410_sysclass; |
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h index 937cc2ace517..7fb6f6be8c81 100644 --- a/arch/arm/plat-samsung/include/plat/pm.h +++ b/arch/arm/plat-samsung/include/plat/pm.h | |||
@@ -103,14 +103,16 @@ extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count); | |||
103 | 103 | ||
104 | #ifdef CONFIG_PM | 104 | #ifdef CONFIG_PM |
105 | extern int s3c_irqext_wake(struct irq_data *data, unsigned int state); | 105 | extern int s3c_irqext_wake(struct irq_data *data, unsigned int state); |
106 | extern int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state); | 106 | extern int s3c24xx_irq_suspend(void); |
107 | extern int s3c24xx_irq_resume(struct sys_device *dev); | 107 | extern void s3c24xx_irq_resume(void); |
108 | #else | 108 | #else |
109 | #define s3c_irqext_wake NULL | 109 | #define s3c_irqext_wake NULL |
110 | #define s3c24xx_irq_suspend NULL | 110 | #define s3c24xx_irq_suspend NULL |
111 | #define s3c24xx_irq_resume NULL | 111 | #define s3c24xx_irq_resume NULL |
112 | #endif | 112 | #endif |
113 | 113 | ||
114 | extern struct syscore_ops s3c24xx_irq_syscore_ops; | ||
115 | |||
114 | /* PM debug functions */ | 116 | /* PM debug functions */ |
115 | 117 | ||
116 | #ifdef CONFIG_SAMSUNG_PM_DEBUG | 118 | #ifdef CONFIG_SAMSUNG_PM_DEBUG |
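s3c24xx_irq_syscore_ops is only declared here; its definition lives in platform code outside this hunk. A hypothetical definition consistent with the new prototypes (the real one may differ in detail):

  /* Hypothetical wiring of the two helpers declared above. */
  struct syscore_ops s3c24xx_irq_syscore_ops = {
  	.suspend	= s3c24xx_irq_suspend,	/* int  (*)(void) */
  	.resume		= s3c24xx_irq_resume,	/* void (*)(void) */
  };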
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index f74695075e64..f25e7ec89416 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
@@ -398,9 +398,9 @@ static void vfp_enable(void *unused) | |||
398 | } | 398 | } |
399 | 399 | ||
400 | #ifdef CONFIG_PM | 400 | #ifdef CONFIG_PM |
401 | #include <linux/sysdev.h> | 401 | #include <linux/syscore_ops.h> |
402 | 402 | ||
403 | static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) | 403 | static int vfp_pm_suspend(void) |
404 | { | 404 | { |
405 | struct thread_info *ti = current_thread_info(); | 405 | struct thread_info *ti = current_thread_info(); |
406 | u32 fpexc = fmrx(FPEXC); | 406 | u32 fpexc = fmrx(FPEXC); |
@@ -420,34 +420,25 @@ static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) | |||
420 | return 0; | 420 | return 0; |
421 | } | 421 | } |
422 | 422 | ||
423 | static int vfp_pm_resume(struct sys_device *dev) | 423 | static void vfp_pm_resume(void) |
424 | { | 424 | { |
425 | /* ensure we have access to the vfp */ | 425 | /* ensure we have access to the vfp */ |
426 | vfp_enable(NULL); | 426 | vfp_enable(NULL); |
427 | 427 | ||
428 | /* and disable it to ensure the next usage restores the state */ | 428 | /* and disable it to ensure the next usage restores the state */ |
429 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | 429 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
430 | |||
431 | return 0; | ||
432 | } | 430 | } |
433 | 431 | ||
434 | static struct sysdev_class vfp_pm_sysclass = { | 432 | static struct syscore_ops vfp_pm_syscore_ops = { |
435 | .name = "vfp", | ||
436 | .suspend = vfp_pm_suspend, | 433 | .suspend = vfp_pm_suspend, |
437 | .resume = vfp_pm_resume, | 434 | .resume = vfp_pm_resume, |
438 | }; | 435 | }; |
439 | 436 | ||
440 | static struct sys_device vfp_pm_sysdev = { | ||
441 | .cls = &vfp_pm_sysclass, | ||
442 | }; | ||
443 | |||
444 | static void vfp_pm_init(void) | 437 | static void vfp_pm_init(void) |
445 | { | 438 | { |
446 | sysdev_class_register(&vfp_pm_sysclass); | 439 | register_syscore_ops(&vfp_pm_syscore_ops); |
447 | sysdev_register(&vfp_pm_sysdev); | ||
448 | } | 440 | } |
449 | 441 | ||
450 | |||
451 | #else | 442 | #else |
452 | static inline void vfp_pm_init(void) { } | 443 | static inline void vfp_pm_init(void) { } |
453 | #endif /* CONFIG_PM */ | 444 | #endif /* CONFIG_PM */ |
diff --git a/arch/avr32/mach-at32ap/intc.c b/arch/avr32/mach-at32ap/intc.c index 21ce35f33aa5..3e3646186c9f 100644 --- a/arch/avr32/mach-at32ap/intc.c +++ b/arch/avr32/mach-at32ap/intc.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/sysdev.h> | 15 | #include <linux/syscore_ops.h> |
16 | 16 | ||
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
@@ -21,7 +21,6 @@ | |||
21 | struct intc { | 21 | struct intc { |
22 | void __iomem *regs; | 22 | void __iomem *regs; |
23 | struct irq_chip chip; | 23 | struct irq_chip chip; |
24 | struct sys_device sysdev; | ||
25 | #ifdef CONFIG_PM | 24 | #ifdef CONFIG_PM |
26 | unsigned long suspend_ipr; | 25 | unsigned long suspend_ipr; |
27 | unsigned long saved_ipr[64]; | 26 | unsigned long saved_ipr[64]; |
@@ -146,9 +145,8 @@ void intc_set_suspend_handler(unsigned long offset) | |||
146 | intc0.suspend_ipr = offset; | 145 | intc0.suspend_ipr = offset; |
147 | } | 146 | } |
148 | 147 | ||
149 | static int intc_suspend(struct sys_device *sdev, pm_message_t state) | 148 | static int intc_suspend(void) |
150 | { | 149 | { |
151 | struct intc *intc = container_of(sdev, struct intc, sysdev); | ||
152 | int i; | 150 | int i; |
153 | 151 | ||
154 | if (unlikely(!irqs_disabled())) { | 152 | if (unlikely(!irqs_disabled())) { |
@@ -156,28 +154,25 @@ static int intc_suspend(struct sys_device *sdev, pm_message_t state) | |||
156 | return -EINVAL; | 154 | return -EINVAL; |
157 | } | 155 | } |
158 | 156 | ||
159 | if (unlikely(!intc->suspend_ipr)) { | 157 | if (unlikely(!intc0.suspend_ipr)) { |
160 | pr_err("intc_suspend: suspend_ipr not initialized\n"); | 158 | pr_err("intc_suspend: suspend_ipr not initialized\n"); |
161 | return -EINVAL; | 159 | return -EINVAL; |
162 | } | 160 | } |
163 | 161 | ||
164 | for (i = 0; i < 64; i++) { | 162 | for (i = 0; i < 64; i++) { |
165 | intc->saved_ipr[i] = intc_readl(intc, INTPR0 + 4 * i); | 163 | intc0.saved_ipr[i] = intc_readl(&intc0, INTPR0 + 4 * i); |
166 | intc_writel(intc, INTPR0 + 4 * i, intc->suspend_ipr); | 164 | intc_writel(&intc0, INTPR0 + 4 * i, intc0.suspend_ipr); |
167 | } | 165 | } |
168 | 166 | ||
169 | return 0; | 167 | return 0; |
170 | } | 168 | } |
171 | 169 | ||
172 | static int intc_resume(struct sys_device *sdev) | 170 | static int intc_resume(void) |
173 | { | 171 | { |
174 | struct intc *intc = container_of(sdev, struct intc, sysdev); | ||
175 | int i; | 172 | int i; |
176 | 173 | ||
177 | WARN_ON(!irqs_disabled()); | ||
178 | |||
179 | for (i = 0; i < 64; i++) | 174 | for (i = 0; i < 64; i++) |
180 | intc_writel(intc, INTPR0 + 4 * i, intc->saved_ipr[i]); | 175 | intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]); |
181 | 176 | ||
182 | return 0; | 177 | return 0; |
183 | } | 178 | } |
@@ -186,27 +181,18 @@ static int intc_resume(struct sys_device *sdev) | |||
186 | #define intc_resume NULL | 181 | #define intc_resume NULL |
187 | #endif | 182 | #endif |
188 | 183 | ||
189 | static struct sysdev_class intc_class = { | 184 | static struct syscore_ops intc_syscore_ops = { |
190 | .name = "intc", | ||
191 | .suspend = intc_suspend, | 185 | .suspend = intc_suspend, |
192 | .resume = intc_resume, | 186 | .resume = intc_resume, |
193 | }; | 187 | }; |
194 | 188 | ||
195 | static int __init intc_init_sysdev(void) | 189 | static int __init intc_init_syscore(void) |
196 | { | 190 | { |
197 | int ret; | 191 | register_syscore_ops(&intc_syscore_ops); |
198 | |||
199 | ret = sysdev_class_register(&intc_class); | ||
200 | if (ret) | ||
201 | return ret; | ||
202 | 192 | ||
203 | intc0.sysdev.id = 0; | 193 | return 0; |
204 | intc0.sysdev.cls = &intc_class; | ||
205 | ret = sysdev_register(&intc0.sysdev); | ||
206 | |||
207 | return ret; | ||
208 | } | 194 | } |
209 | device_initcall(intc_init_sysdev); | 195 | device_initcall(intc_init_syscore); |
210 | 196 | ||
211 | unsigned long intc_get_pending(unsigned int group) | 197 | unsigned long intc_get_pending(unsigned int group) |
212 | { | 198 | { |
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c index 0b5f72f17fd0..401eb1d8e3b4 100644 --- a/arch/blackfin/kernel/nmi.c +++ b/arch/blackfin/kernel/nmi.c | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/bitops.h> | 13 | #include <linux/bitops.h> |
14 | #include <linux/hardirq.h> | 14 | #include <linux/hardirq.h> |
15 | #include <linux/sysdev.h> | 15 | #include <linux/syscore_ops.h> |
16 | #include <linux/pm.h> | 16 | #include <linux/pm.h> |
17 | #include <linux/nmi.h> | 17 | #include <linux/nmi.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
@@ -196,43 +196,31 @@ void touch_nmi_watchdog(void) | |||
196 | 196 | ||
197 | /* Suspend/resume support */ | 197 | /* Suspend/resume support */ |
198 | #ifdef CONFIG_PM | 198 | #ifdef CONFIG_PM |
199 | static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state) | 199 | static int nmi_wdt_suspend(void) |
200 | { | 200 | { |
201 | nmi_wdt_stop(); | 201 | nmi_wdt_stop(); |
202 | return 0; | 202 | return 0; |
203 | } | 203 | } |
204 | 204 | ||
205 | static int nmi_wdt_resume(struct sys_device *dev) | 205 | static void nmi_wdt_resume(void) |
206 | { | 206 | { |
207 | if (nmi_active) | 207 | if (nmi_active) |
208 | nmi_wdt_start(); | 208 | nmi_wdt_start(); |
209 | return 0; | ||
210 | } | 209 | } |
211 | 210 | ||
212 | static struct sysdev_class nmi_sysclass = { | 211 | static struct syscore_ops nmi_syscore_ops = { |
213 | .name = DRV_NAME, | ||
214 | .resume = nmi_wdt_resume, | 212 | .resume = nmi_wdt_resume, |
215 | .suspend = nmi_wdt_suspend, | 213 | .suspend = nmi_wdt_suspend, |
216 | }; | 214 | }; |
217 | 215 | ||
218 | static struct sys_device device_nmi_wdt = { | 216 | static int __init init_nmi_wdt_syscore(void) |
219 | .id = 0, | ||
220 | .cls = &nmi_sysclass, | ||
221 | }; | ||
222 | |||
223 | static int __init init_nmi_wdt_sysfs(void) | ||
224 | { | 217 | { |
225 | int error; | 218 | if (nmi_active) |
226 | 219 | register_syscore_ops(&nmi_syscore_ops); | |
227 | if (!nmi_active) | ||
228 | return 0; | ||
229 | 220 | ||
230 | error = sysdev_class_register(&nmi_sysclass); | 221 | return 0; |
231 | if (!error) | ||
232 | error = sysdev_register(&device_nmi_wdt); | ||
233 | return error; | ||
234 | } | 222 | } |
235 | late_initcall(init_nmi_wdt_sysfs); | 223 | late_initcall(init_nmi_wdt_syscore); |
236 | 224 | ||
237 | #endif /* CONFIG_PM */ | 225 | #endif /* CONFIG_PM */ |
238 | 226 | ||
diff --git a/arch/blackfin/mach-common/dpmc.c b/arch/blackfin/mach-common/dpmc.c index 382099fd5561..5e4112e518a9 100644 --- a/arch/blackfin/mach-common/dpmc.c +++ b/arch/blackfin/mach-common/dpmc.c | |||
@@ -19,9 +19,6 @@ | |||
19 | 19 | ||
20 | #define DRIVER_NAME "bfin dpmc" | 20 | #define DRIVER_NAME "bfin dpmc" |
21 | 21 | ||
22 | #define dprintk(msg...) \ | ||
23 | cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, DRIVER_NAME, msg) | ||
24 | |||
25 | struct bfin_dpmc_platform_data *pdata; | 22 | struct bfin_dpmc_platform_data *pdata; |
26 | 23 | ||
27 | /** | 24 | /** |
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index 22f61526a8e1..f09b174244d5 100644 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c | |||
@@ -23,8 +23,6 @@ | |||
23 | #include <linux/acpi.h> | 23 | #include <linux/acpi.h> |
24 | #include <acpi/processor.h> | 24 | #include <acpi/processor.h> |
25 | 25 | ||
26 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) | ||
27 | |||
28 | MODULE_AUTHOR("Venkatesh Pallipadi"); | 26 | MODULE_AUTHOR("Venkatesh Pallipadi"); |
29 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); | 27 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); |
30 | MODULE_LICENSE("GPL"); | 28 | MODULE_LICENSE("GPL"); |
@@ -47,12 +45,12 @@ processor_set_pstate ( | |||
47 | { | 45 | { |
48 | s64 retval; | 46 | s64 retval; |
49 | 47 | ||
50 | dprintk("processor_set_pstate\n"); | 48 | pr_debug("processor_set_pstate\n"); |
51 | 49 | ||
52 | retval = ia64_pal_set_pstate((u64)value); | 50 | retval = ia64_pal_set_pstate((u64)value); |
53 | 51 | ||
54 | if (retval) { | 52 | if (retval) { |
55 | dprintk("Failed to set freq to 0x%x, with error 0x%lx\n", | 53 | pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", |
56 | value, retval); | 54 | value, retval); |
57 | return -ENODEV; | 55 | return -ENODEV; |
58 | } | 56 | } |
@@ -67,14 +65,14 @@ processor_get_pstate ( | |||
67 | u64 pstate_index = 0; | 65 | u64 pstate_index = 0; |
68 | s64 retval; | 66 | s64 retval; |
69 | 67 | ||
70 | dprintk("processor_get_pstate\n"); | 68 | pr_debug("processor_get_pstate\n"); |
71 | 69 | ||
72 | retval = ia64_pal_get_pstate(&pstate_index, | 70 | retval = ia64_pal_get_pstate(&pstate_index, |
73 | PAL_GET_PSTATE_TYPE_INSTANT); | 71 | PAL_GET_PSTATE_TYPE_INSTANT); |
74 | *value = (u32) pstate_index; | 72 | *value = (u32) pstate_index; |
75 | 73 | ||
76 | if (retval) | 74 | if (retval) |
77 | dprintk("Failed to get current freq with " | 75 | pr_debug("Failed to get current freq with " |
78 | "error 0x%lx, idx 0x%x\n", retval, *value); | 76 | "error 0x%lx, idx 0x%x\n", retval, *value); |
79 | 77 | ||
80 | return (int)retval; | 78 | return (int)retval; |
@@ -90,7 +88,7 @@ extract_clock ( | |||
90 | { | 88 | { |
91 | unsigned long i; | 89 | unsigned long i; |
92 | 90 | ||
93 | dprintk("extract_clock\n"); | 91 | pr_debug("extract_clock\n"); |
94 | 92 | ||
95 | for (i = 0; i < data->acpi_data.state_count; i++) { | 93 | for (i = 0; i < data->acpi_data.state_count; i++) { |
96 | if (value == data->acpi_data.states[i].status) | 94 | if (value == data->acpi_data.states[i].status) |
@@ -110,7 +108,7 @@ processor_get_freq ( | |||
110 | cpumask_t saved_mask; | 108 | cpumask_t saved_mask; |
111 | unsigned long clock_freq; | 109 | unsigned long clock_freq; |
112 | 110 | ||
113 | dprintk("processor_get_freq\n"); | 111 | pr_debug("processor_get_freq\n"); |
114 | 112 | ||
115 | saved_mask = current->cpus_allowed; | 113 | saved_mask = current->cpus_allowed; |
116 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | 114 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
@@ -148,7 +146,7 @@ processor_set_freq ( | |||
148 | cpumask_t saved_mask; | 146 | cpumask_t saved_mask; |
149 | int retval; | 147 | int retval; |
150 | 148 | ||
151 | dprintk("processor_set_freq\n"); | 149 | pr_debug("processor_set_freq\n"); |
152 | 150 | ||
153 | saved_mask = current->cpus_allowed; | 151 | saved_mask = current->cpus_allowed; |
154 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | 152 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); |
@@ -159,16 +157,16 @@ processor_set_freq ( | |||
159 | 157 | ||
160 | if (state == data->acpi_data.state) { | 158 | if (state == data->acpi_data.state) { |
161 | if (unlikely(data->resume)) { | 159 | if (unlikely(data->resume)) { |
162 | dprintk("Called after resume, resetting to P%d\n", state); | 160 | pr_debug("Called after resume, resetting to P%d\n", state); |
163 | data->resume = 0; | 161 | data->resume = 0; |
164 | } else { | 162 | } else { |
165 | dprintk("Already at target state (P%d)\n", state); | 163 | pr_debug("Already at target state (P%d)\n", state); |
166 | retval = 0; | 164 | retval = 0; |
167 | goto migrate_end; | 165 | goto migrate_end; |
168 | } | 166 | } |
169 | } | 167 | } |
170 | 168 | ||
171 | dprintk("Transitioning from P%d to P%d\n", | 169 | pr_debug("Transitioning from P%d to P%d\n", |
172 | data->acpi_data.state, state); | 170 | data->acpi_data.state, state); |
173 | 171 | ||
174 | /* cpufreq frequency struct */ | 172 | /* cpufreq frequency struct */ |
@@ -186,7 +184,7 @@ processor_set_freq ( | |||
186 | 184 | ||
187 | value = (u32) data->acpi_data.states[state].control; | 185 | value = (u32) data->acpi_data.states[state].control; |
188 | 186 | ||
189 | dprintk("Transitioning to state: 0x%08x\n", value); | 187 | pr_debug("Transitioning to state: 0x%08x\n", value); |
190 | 188 | ||
191 | ret = processor_set_pstate(value); | 189 | ret = processor_set_pstate(value); |
192 | if (ret) { | 190 | if (ret) { |
@@ -219,7 +217,7 @@ acpi_cpufreq_get ( | |||
219 | { | 217 | { |
220 | struct cpufreq_acpi_io *data = acpi_io_data[cpu]; | 218 | struct cpufreq_acpi_io *data = acpi_io_data[cpu]; |
221 | 219 | ||
222 | dprintk("acpi_cpufreq_get\n"); | 220 | pr_debug("acpi_cpufreq_get\n"); |
223 | 221 | ||
224 | return processor_get_freq(data, cpu); | 222 | return processor_get_freq(data, cpu); |
225 | } | 223 | } |
@@ -235,7 +233,7 @@ acpi_cpufreq_target ( | |||
235 | unsigned int next_state = 0; | 233 | unsigned int next_state = 0; |
236 | unsigned int result = 0; | 234 | unsigned int result = 0; |
237 | 235 | ||
238 | dprintk("acpi_cpufreq_setpolicy\n"); | 236 | pr_debug("acpi_cpufreq_setpolicy\n"); |
239 | 237 | ||
240 | result = cpufreq_frequency_table_target(policy, | 238 | result = cpufreq_frequency_table_target(policy, |
241 | data->freq_table, target_freq, relation, &next_state); | 239 | data->freq_table, target_freq, relation, &next_state); |
@@ -255,7 +253,7 @@ acpi_cpufreq_verify ( | |||
255 | unsigned int result = 0; | 253 | unsigned int result = 0; |
256 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | 254 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; |
257 | 255 | ||
258 | dprintk("acpi_cpufreq_verify\n"); | 256 | pr_debug("acpi_cpufreq_verify\n"); |
259 | 257 | ||
260 | result = cpufreq_frequency_table_verify(policy, | 258 | result = cpufreq_frequency_table_verify(policy, |
261 | data->freq_table); | 259 | data->freq_table); |
@@ -273,7 +271,7 @@ acpi_cpufreq_cpu_init ( | |||
273 | struct cpufreq_acpi_io *data; | 271 | struct cpufreq_acpi_io *data; |
274 | unsigned int result = 0; | 272 | unsigned int result = 0; |
275 | 273 | ||
276 | dprintk("acpi_cpufreq_cpu_init\n"); | 274 | pr_debug("acpi_cpufreq_cpu_init\n"); |
277 | 275 | ||
278 | data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); | 276 | data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); |
279 | if (!data) | 277 | if (!data) |
@@ -288,7 +286,7 @@ acpi_cpufreq_cpu_init ( | |||
288 | 286 | ||
289 | /* capability check */ | 287 | /* capability check */ |
290 | if (data->acpi_data.state_count <= 1) { | 288 | if (data->acpi_data.state_count <= 1) { |
291 | dprintk("No P-States\n"); | 289 | pr_debug("No P-States\n"); |
292 | result = -ENODEV; | 290 | result = -ENODEV; |
293 | goto err_unreg; | 291 | goto err_unreg; |
294 | } | 292 | } |
@@ -297,7 +295,7 @@ acpi_cpufreq_cpu_init ( | |||
297 | ACPI_ADR_SPACE_FIXED_HARDWARE) || | 295 | ACPI_ADR_SPACE_FIXED_HARDWARE) || |
298 | (data->acpi_data.status_register.space_id != | 296 | (data->acpi_data.status_register.space_id != |
299 | ACPI_ADR_SPACE_FIXED_HARDWARE)) { | 297 | ACPI_ADR_SPACE_FIXED_HARDWARE)) { |
300 | dprintk("Unsupported address space [%d, %d]\n", | 298 | pr_debug("Unsupported address space [%d, %d]\n", |
301 | (u32) (data->acpi_data.control_register.space_id), | 299 | (u32) (data->acpi_data.control_register.space_id), |
302 | (u32) (data->acpi_data.status_register.space_id)); | 300 | (u32) (data->acpi_data.status_register.space_id)); |
303 | result = -ENODEV; | 301 | result = -ENODEV; |
@@ -348,7 +346,7 @@ acpi_cpufreq_cpu_init ( | |||
348 | "activated.\n", cpu); | 346 | "activated.\n", cpu); |
349 | 347 | ||
350 | for (i = 0; i < data->acpi_data.state_count; i++) | 348 | for (i = 0; i < data->acpi_data.state_count; i++) |
351 | dprintk(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", | 349 | pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", |
352 | (i == data->acpi_data.state?'*':' '), i, | 350 | (i == data->acpi_data.state?'*':' '), i, |
353 | (u32) data->acpi_data.states[i].core_frequency, | 351 | (u32) data->acpi_data.states[i].core_frequency, |
354 | (u32) data->acpi_data.states[i].power, | 352 | (u32) data->acpi_data.states[i].power, |
@@ -383,7 +381,7 @@ acpi_cpufreq_cpu_exit ( | |||
383 | { | 381 | { |
384 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | 382 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; |
385 | 383 | ||
386 | dprintk("acpi_cpufreq_cpu_exit\n"); | 384 | pr_debug("acpi_cpufreq_cpu_exit\n"); |
387 | 385 | ||
388 | if (data) { | 386 | if (data) { |
389 | cpufreq_frequency_table_put_attr(policy->cpu); | 387 | cpufreq_frequency_table_put_attr(policy->cpu); |
@@ -418,7 +416,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = { | |||
418 | static int __init | 416 | static int __init |
419 | acpi_cpufreq_init (void) | 417 | acpi_cpufreq_init (void) |
420 | { | 418 | { |
421 | dprintk("acpi_cpufreq_init\n"); | 419 | pr_debug("acpi_cpufreq_init\n"); |
422 | 420 | ||
423 | return cpufreq_register_driver(&acpi_cpufreq_driver); | 421 | return cpufreq_register_driver(&acpi_cpufreq_driver); |
424 | } | 422 | } |
@@ -427,7 +425,7 @@ acpi_cpufreq_init (void) | |||
427 | static void __exit | 425 | static void __exit |
428 | acpi_cpufreq_exit (void) | 426 | acpi_cpufreq_exit (void) |
429 | { | 427 | { |
430 | dprintk("acpi_cpufreq_exit\n"); | 428 | pr_debug("acpi_cpufreq_exit\n"); |
431 | 429 | ||
432 | cpufreq_unregister_driver(&acpi_cpufreq_driver); | 430 | cpufreq_unregister_driver(&acpi_cpufreq_driver); |
433 | return; | 431 | return; |
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c index b995513d527f..95022b04b62d 100644 --- a/arch/m68k/atari/atakeyb.c +++ b/arch/m68k/atari/atakeyb.c | |||
@@ -36,13 +36,10 @@ | |||
36 | 36 | ||
37 | /* Hook for MIDI serial driver */ | 37 | /* Hook for MIDI serial driver */ |
38 | void (*atari_MIDI_interrupt_hook) (void); | 38 | void (*atari_MIDI_interrupt_hook) (void); |
39 | /* Hook for mouse driver */ | ||
40 | void (*atari_mouse_interrupt_hook) (char *); | ||
41 | /* Hook for keyboard inputdev driver */ | 39 | /* Hook for keyboard inputdev driver */ |
42 | void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); | 40 | void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); |
43 | /* Hook for mouse inputdev driver */ | 41 | /* Hook for mouse inputdev driver */ |
44 | void (*atari_input_mouse_interrupt_hook) (char *); | 42 | void (*atari_input_mouse_interrupt_hook) (char *); |
45 | EXPORT_SYMBOL(atari_mouse_interrupt_hook); | ||
46 | EXPORT_SYMBOL(atari_input_keyboard_interrupt_hook); | 43 | EXPORT_SYMBOL(atari_input_keyboard_interrupt_hook); |
47 | EXPORT_SYMBOL(atari_input_mouse_interrupt_hook); | 44 | EXPORT_SYMBOL(atari_input_mouse_interrupt_hook); |
48 | 45 | ||
@@ -263,8 +260,8 @@ repeat: | |||
263 | kb_state.buf[kb_state.len++] = scancode; | 260 | kb_state.buf[kb_state.len++] = scancode; |
264 | if (kb_state.len == 3) { | 261 | if (kb_state.len == 3) { |
265 | kb_state.state = KEYBOARD; | 262 | kb_state.state = KEYBOARD; |
266 | if (atari_mouse_interrupt_hook) | 263 | if (atari_input_mouse_interrupt_hook) |
267 | atari_mouse_interrupt_hook(kb_state.buf); | 264 | atari_input_mouse_interrupt_hook(kb_state.buf); |
268 | } | 265 | } |
269 | break; | 266 | break; |
270 | 267 | ||
@@ -575,7 +572,7 @@ int atari_keyb_init(void) | |||
575 | kb_state.len = 0; | 572 | kb_state.len = 0; |
576 | 573 | ||
577 | error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, | 574 | error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, |
578 | IRQ_TYPE_SLOW, "keyboard/mouse/MIDI", | 575 | IRQ_TYPE_SLOW, "keyboard,mouse,MIDI", |
579 | atari_keyboard_interrupt); | 576 | atari_keyboard_interrupt); |
580 | if (error) | 577 | if (error) |
581 | return error; | 578 | return error; |
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c index 604329fafbb8..ddbf43ca8858 100644 --- a/arch/m68k/atari/stdma.c +++ b/arch/m68k/atari/stdma.c | |||
@@ -180,7 +180,7 @@ void __init stdma_init(void) | |||
180 | { | 180 | { |
181 | stdma_isr = NULL; | 181 | stdma_isr = NULL; |
182 | if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, | 182 | if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, |
183 | "ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int)) | 183 | "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int)) |
184 | pr_err("Couldn't register ST-DMA interrupt\n"); | 184 | pr_err("Couldn't register ST-DMA interrupt\n"); |
185 | } | 185 | } |
186 | 186 | ||
diff --git a/arch/m68k/include/asm/atarikb.h b/arch/m68k/include/asm/atarikb.h index 546e7da5804f..68f3622bf591 100644 --- a/arch/m68k/include/asm/atarikb.h +++ b/arch/m68k/include/asm/atarikb.h | |||
@@ -34,8 +34,6 @@ void ikbd_joystick_disable(void); | |||
34 | 34 | ||
35 | /* Hook for MIDI serial driver */ | 35 | /* Hook for MIDI serial driver */ |
36 | extern void (*atari_MIDI_interrupt_hook) (void); | 36 | extern void (*atari_MIDI_interrupt_hook) (void); |
37 | /* Hook for mouse driver */ | ||
38 | extern void (*atari_mouse_interrupt_hook) (char *); | ||
39 | /* Hook for keyboard inputdev driver */ | 37 | /* Hook for keyboard inputdev driver */ |
40 | extern void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); | 38 | extern void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); |
41 | /* Hook for mouse inputdev driver */ | 39 | /* Hook for mouse inputdev driver */ |
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h index 9d69f6e62365..e9020f88a748 100644 --- a/arch/m68k/include/asm/bitops_mm.h +++ b/arch/m68k/include/asm/bitops_mm.h | |||
@@ -181,14 +181,15 @@ static inline int find_first_zero_bit(const unsigned long *vaddr, | |||
181 | { | 181 | { |
182 | const unsigned long *p = vaddr; | 182 | const unsigned long *p = vaddr; |
183 | int res = 32; | 183 | int res = 32; |
184 | unsigned int words; | ||
184 | unsigned long num; | 185 | unsigned long num; |
185 | 186 | ||
186 | if (!size) | 187 | if (!size) |
187 | return 0; | 188 | return 0; |
188 | 189 | ||
189 | size = (size + 31) >> 5; | 190 | words = (size + 31) >> 5; |
190 | while (!(num = ~*p++)) { | 191 | while (!(num = ~*p++)) { |
191 | if (!--size) | 192 | if (!--words) |
192 | goto out; | 193 | goto out; |
193 | } | 194 | } |
194 | 195 | ||
@@ -196,7 +197,8 @@ static inline int find_first_zero_bit(const unsigned long *vaddr, | |||
196 | : "=d" (res) : "d" (num & -num)); | 197 | : "=d" (res) : "d" (num & -num)); |
197 | res ^= 31; | 198 | res ^= 31; |
198 | out: | 199 | out: |
199 | return ((long)p - (long)vaddr - 4) * 8 + res; | 200 | res += ((long)p - (long)vaddr - 4) * 8; |
201 | return res < size ? res : size; | ||
200 | } | 202 | } |
201 | 203 | ||
202 | static inline int find_next_zero_bit(const unsigned long *vaddr, int size, | 204 | static inline int find_next_zero_bit(const unsigned long *vaddr, int size, |
@@ -215,27 +217,32 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size, | |||
215 | /* Look for zero in first longword */ | 217 | /* Look for zero in first longword */ |
216 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 218 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
217 | : "=d" (res) : "d" (num & -num)); | 219 | : "=d" (res) : "d" (num & -num)); |
218 | if (res < 32) | 220 | if (res < 32) { |
219 | return offset + (res ^ 31); | 221 | offset += res ^ 31; |
222 | return offset < size ? offset : size; | ||
223 | } | ||
220 | offset += 32; | 224 | offset += 32; |
225 | |||
226 | if (offset >= size) | ||
227 | return size; | ||
221 | } | 228 | } |
222 | /* No zero yet, search remaining full bytes for a zero */ | 229 | /* No zero yet, search remaining full bytes for a zero */ |
223 | res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8); | 230 | return offset + find_first_zero_bit(p, size - offset); |
224 | return offset + res; | ||
225 | } | 231 | } |
226 | 232 | ||
227 | static inline int find_first_bit(const unsigned long *vaddr, unsigned size) | 233 | static inline int find_first_bit(const unsigned long *vaddr, unsigned size) |
228 | { | 234 | { |
229 | const unsigned long *p = vaddr; | 235 | const unsigned long *p = vaddr; |
230 | int res = 32; | 236 | int res = 32; |
237 | unsigned int words; | ||
231 | unsigned long num; | 238 | unsigned long num; |
232 | 239 | ||
233 | if (!size) | 240 | if (!size) |
234 | return 0; | 241 | return 0; |
235 | 242 | ||
236 | size = (size + 31) >> 5; | 243 | words = (size + 31) >> 5; |
237 | while (!(num = *p++)) { | 244 | while (!(num = *p++)) { |
238 | if (!--size) | 245 | if (!--words) |
239 | goto out; | 246 | goto out; |
240 | } | 247 | } |
241 | 248 | ||
@@ -243,7 +250,8 @@ static inline int find_first_bit(const unsigned long *vaddr, unsigned size) | |||
243 | : "=d" (res) : "d" (num & -num)); | 250 | : "=d" (res) : "d" (num & -num)); |
244 | res ^= 31; | 251 | res ^= 31; |
245 | out: | 252 | out: |
246 | return ((long)p - (long)vaddr - 4) * 8 + res; | 253 | res += ((long)p - (long)vaddr - 4) * 8; |
254 | return res < size ? res : size; | ||
247 | } | 255 | } |
248 | 256 | ||
249 | static inline int find_next_bit(const unsigned long *vaddr, int size, | 257 | static inline int find_next_bit(const unsigned long *vaddr, int size, |
@@ -262,13 +270,17 @@ static inline int find_next_bit(const unsigned long *vaddr, int size, | |||
262 | /* Look for one in first longword */ | 270 | /* Look for one in first longword */ |
263 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" | 271 | __asm__ __volatile__ ("bfffo %1{#0,#0},%0" |
264 | : "=d" (res) : "d" (num & -num)); | 272 | : "=d" (res) : "d" (num & -num)); |
265 | if (res < 32) | 273 | if (res < 32) { |
266 | return offset + (res ^ 31); | 274 | offset += res ^ 31; |
275 | return offset < size ? offset : size; | ||
276 | } | ||
267 | offset += 32; | 277 | offset += 32; |
278 | |||
279 | if (offset >= size) | ||
280 | return size; | ||
268 | } | 281 | } |
269 | /* No one yet, search remaining full bytes for a one */ | 282 | /* No one yet, search remaining full bytes for a one */ |
270 | res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8); | 283 | return offset + find_first_bit(p, size - offset); |
271 | return offset + res; | ||
272 | } | 284 | } |
273 | 285 | ||
274 | /* | 286 | /* |
@@ -366,23 +378,25 @@ static inline int test_bit_le(int nr, const void *vaddr) | |||
366 | static inline int find_first_zero_bit_le(const void *vaddr, unsigned size) | 378 | static inline int find_first_zero_bit_le(const void *vaddr, unsigned size) |
367 | { | 379 | { |
368 | const unsigned long *p = vaddr, *addr = vaddr; | 380 | const unsigned long *p = vaddr, *addr = vaddr; |
369 | int res; | 381 | int res = 0; |
382 | unsigned int words; | ||
370 | 383 | ||
371 | if (!size) | 384 | if (!size) |
372 | return 0; | 385 | return 0; |
373 | 386 | ||
374 | size = (size >> 5) + ((size & 31) > 0); | 387 | words = (size >> 5) + ((size & 31) > 0); |
375 | while (*p++ == ~0UL) | 388 | while (*p++ == ~0UL) { |
376 | { | 389 | if (--words == 0) |
377 | if (--size == 0) | 390 | goto out; |
378 | return (p - addr) << 5; | ||
379 | } | 391 | } |
380 | 392 | ||
381 | --p; | 393 | --p; |
382 | for (res = 0; res < 32; res++) | 394 | for (res = 0; res < 32; res++) |
383 | if (!test_bit_le(res, p)) | 395 | if (!test_bit_le(res, p)) |
384 | break; | 396 | break; |
385 | return (p - addr) * 32 + res; | 397 | out: |
398 | res += (p - addr) * 32; | ||
399 | return res < size ? res : size; | ||
386 | } | 400 | } |
387 | 401 | ||
388 | static inline unsigned long find_next_zero_bit_le(const void *addr, | 402 | static inline unsigned long find_next_zero_bit_le(const void *addr, |
@@ -400,10 +414,15 @@ static inline unsigned long find_next_zero_bit_le(const void *addr, | |||
400 | offset -= bit; | 414 | offset -= bit; |
401 | /* Look for zero in first longword */ | 415 | /* Look for zero in first longword */ |
402 | for (res = bit; res < 32; res++) | 416 | for (res = bit; res < 32; res++) |
403 | if (!test_bit_le(res, p)) | 417 | if (!test_bit_le(res, p)) { |
404 | return offset + res; | 418 | offset += res; |
419 | return offset < size ? offset : size; | ||
420 | } | ||
405 | p++; | 421 | p++; |
406 | offset += 32; | 422 | offset += 32; |
423 | |||
424 | if (offset >= size) | ||
425 | return size; | ||
407 | } | 426 | } |
408 | /* No zero yet, search remaining full bytes for a zero */ | 427 | /* No zero yet, search remaining full bytes for a zero */ |
409 | return offset + find_first_zero_bit_le(p, size - offset); | 428 | return offset + find_first_zero_bit_le(p, size - offset); |
@@ -412,22 +431,25 @@ static inline unsigned long find_next_zero_bit_le(const void *addr, | |||
412 | static inline int find_first_bit_le(const void *vaddr, unsigned size) | 431 | static inline int find_first_bit_le(const void *vaddr, unsigned size) |
413 | { | 432 | { |
414 | const unsigned long *p = vaddr, *addr = vaddr; | 433 | const unsigned long *p = vaddr, *addr = vaddr; |
415 | int res; | 434 | int res = 0; |
435 | unsigned int words; | ||
416 | 436 | ||
417 | if (!size) | 437 | if (!size) |
418 | return 0; | 438 | return 0; |
419 | 439 | ||
420 | size = (size >> 5) + ((size & 31) > 0); | 440 | words = (size >> 5) + ((size & 31) > 0); |
421 | while (*p++ == 0UL) { | 441 | while (*p++ == 0UL) { |
422 | if (--size == 0) | 442 | if (--words == 0) |
423 | return (p - addr) << 5; | 443 | goto out; |
424 | } | 444 | } |
425 | 445 | ||
426 | --p; | 446 | --p; |
427 | for (res = 0; res < 32; res++) | 447 | for (res = 0; res < 32; res++) |
428 | if (test_bit_le(res, p)) | 448 | if (test_bit_le(res, p)) |
429 | break; | 449 | break; |
430 | return (p - addr) * 32 + res; | 450 | out: |
451 | res += (p - addr) * 32; | ||
452 | return res < size ? res : size; | ||
431 | } | 453 | } |
432 | 454 | ||
433 | static inline unsigned long find_next_bit_le(const void *addr, | 455 | static inline unsigned long find_next_bit_le(const void *addr, |
@@ -445,10 +467,15 @@ static inline unsigned long find_next_bit_le(const void *addr, | |||
445 | offset -= bit; | 467 | offset -= bit; |
446 | /* Look for one in first longword */ | 468 | /* Look for one in first longword */ |
447 | for (res = bit; res < 32; res++) | 469 | for (res = bit; res < 32; res++) |
448 | if (test_bit_le(res, p)) | 470 | if (test_bit_le(res, p)) { |
449 | return offset + res; | 471 | offset += res; |
472 | return offset < size ? offset : size; | ||
473 | } | ||
450 | p++; | 474 | p++; |
451 | offset += 32; | 475 | offset += 32; |
476 | |||
477 | if (offset >= size) | ||
478 | return size; | ||
452 | } | 479 | } |
453 | /* No set bit yet, search remaining full bytes for a set bit */ | 480 | /* No set bit yet, search remaining full bytes for a set bit */ |
454 | return offset + find_first_bit_le(p, size - offset); | 481 | return offset + find_first_bit_le(p, size - offset); |
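The bitops_mm.h hunks above fix two problems in the m68k bit-search helpers: the word-scanning loop no longer clobbers the size argument (it decrements a separate words counter), and the computed index is clamped so that a search which only "finds" a bit in the padding of the last partial word returns size, the conventional "not found" value, instead of an index past the end. A runnable sketch of the fixed shape, assuming LSB-first bit numbering for simplicity (the real m68k code numbers bits through bfffo and res ^= 31):

#include <stdio.h>
#include <strings.h>    /* ffs() */

/*
 * Simplified, portable version of the fixed helper: scan 32-bit words for a
 * zero bit and clamp the answer to size, so callers can rely on
 * (result == size) meaning "no zero bit within the first size bits".
 */
static unsigned int find_first_zero_bit32(const unsigned int *p,
                                          unsigned int size)
{
        unsigned int words = (size + 31) >> 5;  /* like the new 'words' variable */
        unsigned int res = 0;
        unsigned int num;

        if (!size)
                return 0;

        while (words--) {
                num = ~*p++;
                if (num) {
                        res += ffs(num) - 1;    /* bit index inside this word */
                        break;
                }
                res += 32;
        }
        return res < size ? res : size;         /* the clamp added by the patch */
}

int main(void)
{
        /* 40 valid bits, all set: bits 32..39 live in the second word. */
        unsigned int map[2] = { 0xffffffff, 0x000000ff };

        /* The old code could report bit 40, a zero past the end of the map;
         * with the clamp, 40 is returned only because it equals size. */
        printf("%u\n", find_first_zero_bit32(map, 40)); /* prints 40: not found */
        printf("%u\n", find_first_zero_bit32(map, 64)); /* prints 40: a real zero bit */
        return 0;
}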
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 29e17907d9f2..f3b649de2a1b 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -22,7 +22,7 @@ | |||
22 | #define __NR_mknod 14 | 22 | #define __NR_mknod 14 |
23 | #define __NR_chmod 15 | 23 | #define __NR_chmod 15 |
24 | #define __NR_chown 16 | 24 | #define __NR_chown 16 |
25 | #define __NR_break 17 | 25 | /*#define __NR_break 17*/ |
26 | #define __NR_oldstat 18 | 26 | #define __NR_oldstat 18 |
27 | #define __NR_lseek 19 | 27 | #define __NR_lseek 19 |
28 | #define __NR_getpid 20 | 28 | #define __NR_getpid 20 |
@@ -36,11 +36,11 @@ | |||
36 | #define __NR_oldfstat 28 | 36 | #define __NR_oldfstat 28 |
37 | #define __NR_pause 29 | 37 | #define __NR_pause 29 |
38 | #define __NR_utime 30 | 38 | #define __NR_utime 30 |
39 | #define __NR_stty 31 | 39 | /*#define __NR_stty 31*/ |
40 | #define __NR_gtty 32 | 40 | /*#define __NR_gtty 32*/ |
41 | #define __NR_access 33 | 41 | #define __NR_access 33 |
42 | #define __NR_nice 34 | 42 | #define __NR_nice 34 |
43 | #define __NR_ftime 35 | 43 | /*#define __NR_ftime 35*/ |
44 | #define __NR_sync 36 | 44 | #define __NR_sync 36 |
45 | #define __NR_kill 37 | 45 | #define __NR_kill 37 |
46 | #define __NR_rename 38 | 46 | #define __NR_rename 38 |
@@ -49,7 +49,7 @@ | |||
49 | #define __NR_dup 41 | 49 | #define __NR_dup 41 |
50 | #define __NR_pipe 42 | 50 | #define __NR_pipe 42 |
51 | #define __NR_times 43 | 51 | #define __NR_times 43 |
52 | #define __NR_prof 44 | 52 | /*#define __NR_prof 44*/ |
53 | #define __NR_brk 45 | 53 | #define __NR_brk 45 |
54 | #define __NR_setgid 46 | 54 | #define __NR_setgid 46 |
55 | #define __NR_getgid 47 | 55 | #define __NR_getgid 47 |
@@ -58,13 +58,13 @@ | |||
58 | #define __NR_getegid 50 | 58 | #define __NR_getegid 50 |
59 | #define __NR_acct 51 | 59 | #define __NR_acct 51 |
60 | #define __NR_umount2 52 | 60 | #define __NR_umount2 52 |
61 | #define __NR_lock 53 | 61 | /*#define __NR_lock 53*/ |
62 | #define __NR_ioctl 54 | 62 | #define __NR_ioctl 54 |
63 | #define __NR_fcntl 55 | 63 | #define __NR_fcntl 55 |
64 | #define __NR_mpx 56 | 64 | /*#define __NR_mpx 56*/ |
65 | #define __NR_setpgid 57 | 65 | #define __NR_setpgid 57 |
66 | #define __NR_ulimit 58 | 66 | /*#define __NR_ulimit 58*/ |
67 | #define __NR_oldolduname 59 | 67 | /*#define __NR_oldolduname 59*/ |
68 | #define __NR_umask 60 | 68 | #define __NR_umask 60 |
69 | #define __NR_chroot 61 | 69 | #define __NR_chroot 61 |
70 | #define __NR_ustat 62 | 70 | #define __NR_ustat 62 |
@@ -103,10 +103,10 @@ | |||
103 | #define __NR_fchown 95 | 103 | #define __NR_fchown 95 |
104 | #define __NR_getpriority 96 | 104 | #define __NR_getpriority 96 |
105 | #define __NR_setpriority 97 | 105 | #define __NR_setpriority 97 |
106 | #define __NR_profil 98 | 106 | /*#define __NR_profil 98*/ |
107 | #define __NR_statfs 99 | 107 | #define __NR_statfs 99 |
108 | #define __NR_fstatfs 100 | 108 | #define __NR_fstatfs 100 |
109 | #define __NR_ioperm 101 | 109 | /*#define __NR_ioperm 101*/ |
110 | #define __NR_socketcall 102 | 110 | #define __NR_socketcall 102 |
111 | #define __NR_syslog 103 | 111 | #define __NR_syslog 103 |
112 | #define __NR_setitimer 104 | 112 | #define __NR_setitimer 104 |
@@ -114,11 +114,11 @@ | |||
114 | #define __NR_stat 106 | 114 | #define __NR_stat 106 |
115 | #define __NR_lstat 107 | 115 | #define __NR_lstat 107 |
116 | #define __NR_fstat 108 | 116 | #define __NR_fstat 108 |
117 | #define __NR_olduname 109 | 117 | /*#define __NR_olduname 109*/ |
118 | #define __NR_iopl /* 110 */ not supported | 118 | /*#define __NR_iopl 110*/ /* not supported */ |
119 | #define __NR_vhangup 111 | 119 | #define __NR_vhangup 111 |
120 | #define __NR_idle /* 112 */ Obsolete | 120 | /*#define __NR_idle 112*/ /* Obsolete */ |
121 | #define __NR_vm86 /* 113 */ not supported | 121 | /*#define __NR_vm86 113*/ /* not supported */ |
122 | #define __NR_wait4 114 | 122 | #define __NR_wait4 114 |
123 | #define __NR_swapoff 115 | 123 | #define __NR_swapoff 115 |
124 | #define __NR_sysinfo 116 | 124 | #define __NR_sysinfo 116 |
@@ -132,17 +132,17 @@ | |||
132 | #define __NR_adjtimex 124 | 132 | #define __NR_adjtimex 124 |
133 | #define __NR_mprotect 125 | 133 | #define __NR_mprotect 125 |
134 | #define __NR_sigprocmask 126 | 134 | #define __NR_sigprocmask 126 |
135 | #define __NR_create_module 127 | 135 | /*#define __NR_create_module 127*/ |
136 | #define __NR_init_module 128 | 136 | #define __NR_init_module 128 |
137 | #define __NR_delete_module 129 | 137 | #define __NR_delete_module 129 |
138 | #define __NR_get_kernel_syms 130 | 138 | /*#define __NR_get_kernel_syms 130*/ |
139 | #define __NR_quotactl 131 | 139 | #define __NR_quotactl 131 |
140 | #define __NR_getpgid 132 | 140 | #define __NR_getpgid 132 |
141 | #define __NR_fchdir 133 | 141 | #define __NR_fchdir 133 |
142 | #define __NR_bdflush 134 | 142 | #define __NR_bdflush 134 |
143 | #define __NR_sysfs 135 | 143 | #define __NR_sysfs 135 |
144 | #define __NR_personality 136 | 144 | #define __NR_personality 136 |
145 | #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ | 145 | /*#define __NR_afs_syscall 137*/ /* Syscall for Andrew File System */ |
146 | #define __NR_setfsuid 138 | 146 | #define __NR_setfsuid 138 |
147 | #define __NR_setfsgid 139 | 147 | #define __NR_setfsgid 139 |
148 | #define __NR__llseek 140 | 148 | #define __NR__llseek 140 |
@@ -172,7 +172,7 @@ | |||
172 | #define __NR_setresuid 164 | 172 | #define __NR_setresuid 164 |
173 | #define __NR_getresuid 165 | 173 | #define __NR_getresuid 165 |
174 | #define __NR_getpagesize 166 | 174 | #define __NR_getpagesize 166 |
175 | #define __NR_query_module 167 | 175 | /*#define __NR_query_module 167*/ |
176 | #define __NR_poll 168 | 176 | #define __NR_poll 168 |
177 | #define __NR_nfsservctl 169 | 177 | #define __NR_nfsservctl 169 |
178 | #define __NR_setresgid 170 | 178 | #define __NR_setresgid 170 |
@@ -193,8 +193,8 @@ | |||
193 | #define __NR_capset 185 | 193 | #define __NR_capset 185 |
194 | #define __NR_sigaltstack 186 | 194 | #define __NR_sigaltstack 186 |
195 | #define __NR_sendfile 187 | 195 | #define __NR_sendfile 187 |
196 | #define __NR_getpmsg 188 /* some people actually want streams */ | 196 | /*#define __NR_getpmsg 188*/ /* some people actually want streams */ |
197 | #define __NR_putpmsg 189 /* some people actually want streams */ | 197 | /*#define __NR_putpmsg 189*/ /* some people actually want streams */ |
198 | #define __NR_vfork 190 | 198 | #define __NR_vfork 190 |
199 | #define __NR_ugetrlimit 191 | 199 | #define __NR_ugetrlimit 191 |
200 | #define __NR_mmap2 192 | 200 | #define __NR_mmap2 192 |
@@ -223,6 +223,8 @@ | |||
223 | #define __NR_setfsuid32 215 | 223 | #define __NR_setfsuid32 215 |
224 | #define __NR_setfsgid32 216 | 224 | #define __NR_setfsgid32 216 |
225 | #define __NR_pivot_root 217 | 225 | #define __NR_pivot_root 217 |
226 | /* 218*/ | ||
227 | /* 219*/ | ||
226 | #define __NR_getdents64 220 | 228 | #define __NR_getdents64 220 |
227 | #define __NR_gettid 221 | 229 | #define __NR_gettid 221 |
228 | #define __NR_tkill 222 | 230 | #define __NR_tkill 222 |
@@ -281,7 +283,7 @@ | |||
281 | #define __NR_mq_notify 275 | 283 | #define __NR_mq_notify 275 |
282 | #define __NR_mq_getsetattr 276 | 284 | #define __NR_mq_getsetattr 276 |
283 | #define __NR_waitid 277 | 285 | #define __NR_waitid 277 |
284 | #define __NR_vserver 278 | 286 | /*#define __NR_vserver 278*/ |
285 | #define __NR_add_key 279 | 287 | #define __NR_add_key 279 |
286 | #define __NR_request_key 280 | 288 | #define __NR_request_key 280 |
287 | #define __NR_keyctl 281 | 289 | #define __NR_keyctl 281 |
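The unistd.h hunks above comment out numbers that were never implemented on m68k (break, stty, gtty, ftime, prof, lock, mpx, ulimit, profil, ioperm, create_module, get_kernel_syms, query_module, afs_syscall, getpmsg, putpmsg, vserver and the iopl/idle/vm86 i386 leftovers); the corresponding table slots keep pointing at sys_ni_syscall, so user space still gets -ENOSYS when it calls them. A small user-space probe of one such hole, assuming the m68k numbering shown above (17 is the retired break slot; on other architectures the same number may be a live syscall, so treat this purely as an illustration of the ENOSYS convention):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        long ret = syscall(17);         /* old "break" slot on m68k */

        if (ret == -1 && errno == ENOSYS)
                printf("slot 17 is a hole: %s\n", strerror(errno));
        else
                printf("slot 17 returned %ld\n", ret);
        return 0;
}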
diff --git a/arch/m68k/kernel/Makefile_mm b/arch/m68k/kernel/Makefile_mm index 55d5d6b680a2..aced67804579 100644 --- a/arch/m68k/kernel/Makefile_mm +++ b/arch/m68k/kernel/Makefile_mm | |||
@@ -10,7 +10,7 @@ endif | |||
10 | extra-y += vmlinux.lds | 10 | extra-y += vmlinux.lds |
11 | 11 | ||
12 | obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ | 12 | obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ |
13 | sys_m68k.o time.o setup.o m68k_ksyms.o devres.o | 13 | sys_m68k.o time.o setup.o m68k_ksyms.o devres.o syscalltable.o |
14 | 14 | ||
15 | devres-y = ../../../kernel/irq/devres.o | 15 | devres-y = ../../../kernel/irq/devres.o |
16 | 16 | ||
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S index 1359ee659574..bd0ec05263b2 100644 --- a/arch/m68k/kernel/entry_mm.S +++ b/arch/m68k/kernel/entry_mm.S | |||
@@ -407,351 +407,3 @@ resume: | |||
407 | 407 | ||
408 | rts | 408 | rts |
409 | 409 | ||
410 | .data | ||
411 | ALIGN | ||
412 | sys_call_table: | ||
413 | .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ | ||
414 | .long sys_exit | ||
415 | .long sys_fork | ||
416 | .long sys_read | ||
417 | .long sys_write | ||
418 | .long sys_open /* 5 */ | ||
419 | .long sys_close | ||
420 | .long sys_waitpid | ||
421 | .long sys_creat | ||
422 | .long sys_link | ||
423 | .long sys_unlink /* 10 */ | ||
424 | .long sys_execve | ||
425 | .long sys_chdir | ||
426 | .long sys_time | ||
427 | .long sys_mknod | ||
428 | .long sys_chmod /* 15 */ | ||
429 | .long sys_chown16 | ||
430 | .long sys_ni_syscall /* old break syscall holder */ | ||
431 | .long sys_stat | ||
432 | .long sys_lseek | ||
433 | .long sys_getpid /* 20 */ | ||
434 | .long sys_mount | ||
435 | .long sys_oldumount | ||
436 | .long sys_setuid16 | ||
437 | .long sys_getuid16 | ||
438 | .long sys_stime /* 25 */ | ||
439 | .long sys_ptrace | ||
440 | .long sys_alarm | ||
441 | .long sys_fstat | ||
442 | .long sys_pause | ||
443 | .long sys_utime /* 30 */ | ||
444 | .long sys_ni_syscall /* old stty syscall holder */ | ||
445 | .long sys_ni_syscall /* old gtty syscall holder */ | ||
446 | .long sys_access | ||
447 | .long sys_nice | ||
448 | .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ | ||
449 | .long sys_sync | ||
450 | .long sys_kill | ||
451 | .long sys_rename | ||
452 | .long sys_mkdir | ||
453 | .long sys_rmdir /* 40 */ | ||
454 | .long sys_dup | ||
455 | .long sys_pipe | ||
456 | .long sys_times | ||
457 | .long sys_ni_syscall /* old prof syscall holder */ | ||
458 | .long sys_brk /* 45 */ | ||
459 | .long sys_setgid16 | ||
460 | .long sys_getgid16 | ||
461 | .long sys_signal | ||
462 | .long sys_geteuid16 | ||
463 | .long sys_getegid16 /* 50 */ | ||
464 | .long sys_acct | ||
465 | .long sys_umount /* recycled never used phys() */ | ||
466 | .long sys_ni_syscall /* old lock syscall holder */ | ||
467 | .long sys_ioctl | ||
468 | .long sys_fcntl /* 55 */ | ||
469 | .long sys_ni_syscall /* old mpx syscall holder */ | ||
470 | .long sys_setpgid | ||
471 | .long sys_ni_syscall /* old ulimit syscall holder */ | ||
472 | .long sys_ni_syscall | ||
473 | .long sys_umask /* 60 */ | ||
474 | .long sys_chroot | ||
475 | .long sys_ustat | ||
476 | .long sys_dup2 | ||
477 | .long sys_getppid | ||
478 | .long sys_getpgrp /* 65 */ | ||
479 | .long sys_setsid | ||
480 | .long sys_sigaction | ||
481 | .long sys_sgetmask | ||
482 | .long sys_ssetmask | ||
483 | .long sys_setreuid16 /* 70 */ | ||
484 | .long sys_setregid16 | ||
485 | .long sys_sigsuspend | ||
486 | .long sys_sigpending | ||
487 | .long sys_sethostname | ||
488 | .long sys_setrlimit /* 75 */ | ||
489 | .long sys_old_getrlimit | ||
490 | .long sys_getrusage | ||
491 | .long sys_gettimeofday | ||
492 | .long sys_settimeofday | ||
493 | .long sys_getgroups16 /* 80 */ | ||
494 | .long sys_setgroups16 | ||
495 | .long sys_old_select | ||
496 | .long sys_symlink | ||
497 | .long sys_lstat | ||
498 | .long sys_readlink /* 85 */ | ||
499 | .long sys_uselib | ||
500 | .long sys_swapon | ||
501 | .long sys_reboot | ||
502 | .long sys_old_readdir | ||
503 | .long sys_old_mmap /* 90 */ | ||
504 | .long sys_munmap | ||
505 | .long sys_truncate | ||
506 | .long sys_ftruncate | ||
507 | .long sys_fchmod | ||
508 | .long sys_fchown16 /* 95 */ | ||
509 | .long sys_getpriority | ||
510 | .long sys_setpriority | ||
511 | .long sys_ni_syscall /* old profil syscall holder */ | ||
512 | .long sys_statfs | ||
513 | .long sys_fstatfs /* 100 */ | ||
514 | .long sys_ni_syscall /* ioperm for i386 */ | ||
515 | .long sys_socketcall | ||
516 | .long sys_syslog | ||
517 | .long sys_setitimer | ||
518 | .long sys_getitimer /* 105 */ | ||
519 | .long sys_newstat | ||
520 | .long sys_newlstat | ||
521 | .long sys_newfstat | ||
522 | .long sys_ni_syscall | ||
523 | .long sys_ni_syscall /* 110 */ /* iopl for i386 */ | ||
524 | .long sys_vhangup | ||
525 | .long sys_ni_syscall /* obsolete idle() syscall */ | ||
526 | .long sys_ni_syscall /* vm86old for i386 */ | ||
527 | .long sys_wait4 | ||
528 | .long sys_swapoff /* 115 */ | ||
529 | .long sys_sysinfo | ||
530 | .long sys_ipc | ||
531 | .long sys_fsync | ||
532 | .long sys_sigreturn | ||
533 | .long sys_clone /* 120 */ | ||
534 | .long sys_setdomainname | ||
535 | .long sys_newuname | ||
536 | .long sys_cacheflush /* modify_ldt for i386 */ | ||
537 | .long sys_adjtimex | ||
538 | .long sys_mprotect /* 125 */ | ||
539 | .long sys_sigprocmask | ||
540 | .long sys_ni_syscall /* old "create_module" */ | ||
541 | .long sys_init_module | ||
542 | .long sys_delete_module | ||
543 | .long sys_ni_syscall /* 130 - old "get_kernel_syms" */ | ||
544 | .long sys_quotactl | ||
545 | .long sys_getpgid | ||
546 | .long sys_fchdir | ||
547 | .long sys_bdflush | ||
548 | .long sys_sysfs /* 135 */ | ||
549 | .long sys_personality | ||
550 | .long sys_ni_syscall /* for afs_syscall */ | ||
551 | .long sys_setfsuid16 | ||
552 | .long sys_setfsgid16 | ||
553 | .long sys_llseek /* 140 */ | ||
554 | .long sys_getdents | ||
555 | .long sys_select | ||
556 | .long sys_flock | ||
557 | .long sys_msync | ||
558 | .long sys_readv /* 145 */ | ||
559 | .long sys_writev | ||
560 | .long sys_getsid | ||
561 | .long sys_fdatasync | ||
562 | .long sys_sysctl | ||
563 | .long sys_mlock /* 150 */ | ||
564 | .long sys_munlock | ||
565 | .long sys_mlockall | ||
566 | .long sys_munlockall | ||
567 | .long sys_sched_setparam | ||
568 | .long sys_sched_getparam /* 155 */ | ||
569 | .long sys_sched_setscheduler | ||
570 | .long sys_sched_getscheduler | ||
571 | .long sys_sched_yield | ||
572 | .long sys_sched_get_priority_max | ||
573 | .long sys_sched_get_priority_min /* 160 */ | ||
574 | .long sys_sched_rr_get_interval | ||
575 | .long sys_nanosleep | ||
576 | .long sys_mremap | ||
577 | .long sys_setresuid16 | ||
578 | .long sys_getresuid16 /* 165 */ | ||
579 | .long sys_getpagesize | ||
580 | .long sys_ni_syscall /* old sys_query_module */ | ||
581 | .long sys_poll | ||
582 | .long sys_nfsservctl | ||
583 | .long sys_setresgid16 /* 170 */ | ||
584 | .long sys_getresgid16 | ||
585 | .long sys_prctl | ||
586 | .long sys_rt_sigreturn | ||
587 | .long sys_rt_sigaction | ||
588 | .long sys_rt_sigprocmask /* 175 */ | ||
589 | .long sys_rt_sigpending | ||
590 | .long sys_rt_sigtimedwait | ||
591 | .long sys_rt_sigqueueinfo | ||
592 | .long sys_rt_sigsuspend | ||
593 | .long sys_pread64 /* 180 */ | ||
594 | .long sys_pwrite64 | ||
595 | .long sys_lchown16; | ||
596 | .long sys_getcwd | ||
597 | .long sys_capget | ||
598 | .long sys_capset /* 185 */ | ||
599 | .long sys_sigaltstack | ||
600 | .long sys_sendfile | ||
601 | .long sys_ni_syscall /* streams1 */ | ||
602 | .long sys_ni_syscall /* streams2 */ | ||
603 | .long sys_vfork /* 190 */ | ||
604 | .long sys_getrlimit | ||
605 | .long sys_mmap2 | ||
606 | .long sys_truncate64 | ||
607 | .long sys_ftruncate64 | ||
608 | .long sys_stat64 /* 195 */ | ||
609 | .long sys_lstat64 | ||
610 | .long sys_fstat64 | ||
611 | .long sys_chown | ||
612 | .long sys_getuid | ||
613 | .long sys_getgid /* 200 */ | ||
614 | .long sys_geteuid | ||
615 | .long sys_getegid | ||
616 | .long sys_setreuid | ||
617 | .long sys_setregid | ||
618 | .long sys_getgroups /* 205 */ | ||
619 | .long sys_setgroups | ||
620 | .long sys_fchown | ||
621 | .long sys_setresuid | ||
622 | .long sys_getresuid | ||
623 | .long sys_setresgid /* 210 */ | ||
624 | .long sys_getresgid | ||
625 | .long sys_lchown | ||
626 | .long sys_setuid | ||
627 | .long sys_setgid | ||
628 | .long sys_setfsuid /* 215 */ | ||
629 | .long sys_setfsgid | ||
630 | .long sys_pivot_root | ||
631 | .long sys_ni_syscall | ||
632 | .long sys_ni_syscall | ||
633 | .long sys_getdents64 /* 220 */ | ||
634 | .long sys_gettid | ||
635 | .long sys_tkill | ||
636 | .long sys_setxattr | ||
637 | .long sys_lsetxattr | ||
638 | .long sys_fsetxattr /* 225 */ | ||
639 | .long sys_getxattr | ||
640 | .long sys_lgetxattr | ||
641 | .long sys_fgetxattr | ||
642 | .long sys_listxattr | ||
643 | .long sys_llistxattr /* 230 */ | ||
644 | .long sys_flistxattr | ||
645 | .long sys_removexattr | ||
646 | .long sys_lremovexattr | ||
647 | .long sys_fremovexattr | ||
648 | .long sys_futex /* 235 */ | ||
649 | .long sys_sendfile64 | ||
650 | .long sys_mincore | ||
651 | .long sys_madvise | ||
652 | .long sys_fcntl64 | ||
653 | .long sys_readahead /* 240 */ | ||
654 | .long sys_io_setup | ||
655 | .long sys_io_destroy | ||
656 | .long sys_io_getevents | ||
657 | .long sys_io_submit | ||
658 | .long sys_io_cancel /* 245 */ | ||
659 | .long sys_fadvise64 | ||
660 | .long sys_exit_group | ||
661 | .long sys_lookup_dcookie | ||
662 | .long sys_epoll_create | ||
663 | .long sys_epoll_ctl /* 250 */ | ||
664 | .long sys_epoll_wait | ||
665 | .long sys_remap_file_pages | ||
666 | .long sys_set_tid_address | ||
667 | .long sys_timer_create | ||
668 | .long sys_timer_settime /* 255 */ | ||
669 | .long sys_timer_gettime | ||
670 | .long sys_timer_getoverrun | ||
671 | .long sys_timer_delete | ||
672 | .long sys_clock_settime | ||
673 | .long sys_clock_gettime /* 260 */ | ||
674 | .long sys_clock_getres | ||
675 | .long sys_clock_nanosleep | ||
676 | .long sys_statfs64 | ||
677 | .long sys_fstatfs64 | ||
678 | .long sys_tgkill /* 265 */ | ||
679 | .long sys_utimes | ||
680 | .long sys_fadvise64_64 | ||
681 | .long sys_mbind | ||
682 | .long sys_get_mempolicy | ||
683 | .long sys_set_mempolicy /* 270 */ | ||
684 | .long sys_mq_open | ||
685 | .long sys_mq_unlink | ||
686 | .long sys_mq_timedsend | ||
687 | .long sys_mq_timedreceive | ||
688 | .long sys_mq_notify /* 275 */ | ||
689 | .long sys_mq_getsetattr | ||
690 | .long sys_waitid | ||
691 | .long sys_ni_syscall /* for sys_vserver */ | ||
692 | .long sys_add_key | ||
693 | .long sys_request_key /* 280 */ | ||
694 | .long sys_keyctl | ||
695 | .long sys_ioprio_set | ||
696 | .long sys_ioprio_get | ||
697 | .long sys_inotify_init | ||
698 | .long sys_inotify_add_watch /* 285 */ | ||
699 | .long sys_inotify_rm_watch | ||
700 | .long sys_migrate_pages | ||
701 | .long sys_openat | ||
702 | .long sys_mkdirat | ||
703 | .long sys_mknodat /* 290 */ | ||
704 | .long sys_fchownat | ||
705 | .long sys_futimesat | ||
706 | .long sys_fstatat64 | ||
707 | .long sys_unlinkat | ||
708 | .long sys_renameat /* 295 */ | ||
709 | .long sys_linkat | ||
710 | .long sys_symlinkat | ||
711 | .long sys_readlinkat | ||
712 | .long sys_fchmodat | ||
713 | .long sys_faccessat /* 300 */ | ||
714 | .long sys_ni_syscall /* Reserved for pselect6 */ | ||
715 | .long sys_ni_syscall /* Reserved for ppoll */ | ||
716 | .long sys_unshare | ||
717 | .long sys_set_robust_list | ||
718 | .long sys_get_robust_list /* 305 */ | ||
719 | .long sys_splice | ||
720 | .long sys_sync_file_range | ||
721 | .long sys_tee | ||
722 | .long sys_vmsplice | ||
723 | .long sys_move_pages /* 310 */ | ||
724 | .long sys_sched_setaffinity | ||
725 | .long sys_sched_getaffinity | ||
726 | .long sys_kexec_load | ||
727 | .long sys_getcpu | ||
728 | .long sys_epoll_pwait /* 315 */ | ||
729 | .long sys_utimensat | ||
730 | .long sys_signalfd | ||
731 | .long sys_timerfd_create | ||
732 | .long sys_eventfd | ||
733 | .long sys_fallocate /* 320 */ | ||
734 | .long sys_timerfd_settime | ||
735 | .long sys_timerfd_gettime | ||
736 | .long sys_signalfd4 | ||
737 | .long sys_eventfd2 | ||
738 | .long sys_epoll_create1 /* 325 */ | ||
739 | .long sys_dup3 | ||
740 | .long sys_pipe2 | ||
741 | .long sys_inotify_init1 | ||
742 | .long sys_preadv | ||
743 | .long sys_pwritev /* 330 */ | ||
744 | .long sys_rt_tgsigqueueinfo | ||
745 | .long sys_perf_event_open | ||
746 | .long sys_get_thread_area | ||
747 | .long sys_set_thread_area | ||
748 | .long sys_atomic_cmpxchg_32 /* 335 */ | ||
749 | .long sys_atomic_barrier | ||
750 | .long sys_fanotify_init | ||
751 | .long sys_fanotify_mark | ||
752 | .long sys_prlimit64 | ||
753 | .long sys_name_to_handle_at /* 340 */ | ||
754 | .long sys_open_by_handle_at | ||
755 | .long sys_clock_adjtime | ||
756 | .long sys_syncfs | ||
757 | |||
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 9b8393d8adb8..5909e392cb1e 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/m68knommu/kernel/syscalltable.S | ||
3 | * | ||
4 | * Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com) | 2 | * Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com) |
5 | * | 3 | * |
6 | * Based on older entry.S files, the following copyrights apply: | 4 | * Based on older entry.S files, the following copyrights apply: |
@@ -9,171 +7,176 @@ | |||
9 | * Kenneth Albanowski <kjahds@kjahds.com>, | 7 | * Kenneth Albanowski <kjahds@kjahds.com>, |
10 | * Copyright (C) 2000 Lineo Inc. (www.lineo.com) | 8 | * Copyright (C) 2000 Lineo Inc. (www.lineo.com) |
11 | * Copyright (C) 1991, 1992 Linus Torvalds | 9 | * Copyright (C) 1991, 1992 Linus Torvalds |
10 | * | ||
11 | * Linux/m68k support by Hamish Macdonald | ||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/sys.h> | 14 | #include <linux/sys.h> |
15 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
16 | #include <asm/unistd.h> | ||
17 | 16 | ||
18 | .text | 17 | #ifndef CONFIG_MMU |
18 | #define sys_mmap2 sys_mmap_pgoff | ||
19 | #endif | ||
20 | |||
21 | .section .rodata | ||
19 | ALIGN | 22 | ALIGN |
20 | ENTRY(sys_call_table) | 23 | ENTRY(sys_call_table) |
21 | .long sys_restart_syscall /* 0 - old "setup()" system call */ | 24 | .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ |
22 | .long sys_exit | 25 | .long sys_exit |
23 | .long sys_fork | 26 | .long sys_fork |
24 | .long sys_read | 27 | .long sys_read |
25 | .long sys_write | 28 | .long sys_write |
26 | .long sys_open /* 5 */ | 29 | .long sys_open /* 5 */ |
27 | .long sys_close | 30 | .long sys_close |
28 | .long sys_waitpid | 31 | .long sys_waitpid |
29 | .long sys_creat | 32 | .long sys_creat |
30 | .long sys_link | 33 | .long sys_link |
31 | .long sys_unlink /* 10 */ | 34 | .long sys_unlink /* 10 */ |
32 | .long sys_execve | 35 | .long sys_execve |
33 | .long sys_chdir | 36 | .long sys_chdir |
34 | .long sys_time | 37 | .long sys_time |
35 | .long sys_mknod | 38 | .long sys_mknod |
36 | .long sys_chmod /* 15 */ | 39 | .long sys_chmod /* 15 */ |
37 | .long sys_chown16 | 40 | .long sys_chown16 |
38 | .long sys_ni_syscall /* old break syscall holder */ | 41 | .long sys_ni_syscall /* old break syscall holder */ |
39 | .long sys_stat | 42 | .long sys_stat |
40 | .long sys_lseek | 43 | .long sys_lseek |
41 | .long sys_getpid /* 20 */ | 44 | .long sys_getpid /* 20 */ |
42 | .long sys_mount | 45 | .long sys_mount |
43 | .long sys_oldumount | 46 | .long sys_oldumount |
44 | .long sys_setuid16 | 47 | .long sys_setuid16 |
45 | .long sys_getuid16 | 48 | .long sys_getuid16 |
46 | .long sys_stime /* 25 */ | 49 | .long sys_stime /* 25 */ |
47 | .long sys_ptrace | 50 | .long sys_ptrace |
48 | .long sys_alarm | 51 | .long sys_alarm |
49 | .long sys_fstat | 52 | .long sys_fstat |
50 | .long sys_pause | 53 | .long sys_pause |
51 | .long sys_utime /* 30 */ | 54 | .long sys_utime /* 30 */ |
52 | .long sys_ni_syscall /* old stty syscall holder */ | 55 | .long sys_ni_syscall /* old stty syscall holder */ |
53 | .long sys_ni_syscall /* old gtty syscall holder */ | 56 | .long sys_ni_syscall /* old gtty syscall holder */ |
54 | .long sys_access | 57 | .long sys_access |
55 | .long sys_nice | 58 | .long sys_nice |
56 | .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ | 59 | .long sys_ni_syscall /* 35 - old ftime syscall holder */ |
57 | .long sys_sync | 60 | .long sys_sync |
58 | .long sys_kill | 61 | .long sys_kill |
59 | .long sys_rename | 62 | .long sys_rename |
60 | .long sys_mkdir | 63 | .long sys_mkdir |
61 | .long sys_rmdir /* 40 */ | 64 | .long sys_rmdir /* 40 */ |
62 | .long sys_dup | 65 | .long sys_dup |
63 | .long sys_pipe | 66 | .long sys_pipe |
64 | .long sys_times | 67 | .long sys_times |
65 | .long sys_ni_syscall /* old prof syscall holder */ | 68 | .long sys_ni_syscall /* old prof syscall holder */ |
66 | .long sys_brk /* 45 */ | 69 | .long sys_brk /* 45 */ |
67 | .long sys_setgid16 | 70 | .long sys_setgid16 |
68 | .long sys_getgid16 | 71 | .long sys_getgid16 |
69 | .long sys_signal | 72 | .long sys_signal |
70 | .long sys_geteuid16 | 73 | .long sys_geteuid16 |
71 | .long sys_getegid16 /* 50 */ | 74 | .long sys_getegid16 /* 50 */ |
72 | .long sys_acct | 75 | .long sys_acct |
73 | .long sys_umount /* recycled never used phys() */ | 76 | .long sys_umount /* recycled never used phys() */ |
74 | .long sys_ni_syscall /* old lock syscall holder */ | 77 | .long sys_ni_syscall /* old lock syscall holder */ |
75 | .long sys_ioctl | 78 | .long sys_ioctl |
76 | .long sys_fcntl /* 55 */ | 79 | .long sys_fcntl /* 55 */ |
77 | .long sys_ni_syscall /* old mpx syscall holder */ | 80 | .long sys_ni_syscall /* old mpx syscall holder */ |
78 | .long sys_setpgid | 81 | .long sys_setpgid |
79 | .long sys_ni_syscall /* old ulimit syscall holder */ | 82 | .long sys_ni_syscall /* old ulimit syscall holder */ |
80 | .long sys_ni_syscall | 83 | .long sys_ni_syscall |
81 | .long sys_umask /* 60 */ | 84 | .long sys_umask /* 60 */ |
82 | .long sys_chroot | 85 | .long sys_chroot |
83 | .long sys_ustat | 86 | .long sys_ustat |
84 | .long sys_dup2 | 87 | .long sys_dup2 |
85 | .long sys_getppid | 88 | .long sys_getppid |
86 | .long sys_getpgrp /* 65 */ | 89 | .long sys_getpgrp /* 65 */ |
87 | .long sys_setsid | 90 | .long sys_setsid |
88 | .long sys_sigaction | 91 | .long sys_sigaction |
89 | .long sys_sgetmask | 92 | .long sys_sgetmask |
90 | .long sys_ssetmask | 93 | .long sys_ssetmask |
91 | .long sys_setreuid16 /* 70 */ | 94 | .long sys_setreuid16 /* 70 */ |
92 | .long sys_setregid16 | 95 | .long sys_setregid16 |
93 | .long sys_sigsuspend | 96 | .long sys_sigsuspend |
94 | .long sys_sigpending | 97 | .long sys_sigpending |
95 | .long sys_sethostname | 98 | .long sys_sethostname |
96 | .long sys_setrlimit /* 75 */ | 99 | .long sys_setrlimit /* 75 */ |
97 | .long sys_old_getrlimit | 100 | .long sys_old_getrlimit |
98 | .long sys_getrusage | 101 | .long sys_getrusage |
99 | .long sys_gettimeofday | 102 | .long sys_gettimeofday |
100 | .long sys_settimeofday | 103 | .long sys_settimeofday |
101 | .long sys_getgroups16 /* 80 */ | 104 | .long sys_getgroups16 /* 80 */ |
102 | .long sys_setgroups16 | 105 | .long sys_setgroups16 |
103 | .long sys_old_select | 106 | .long sys_old_select |
104 | .long sys_symlink | 107 | .long sys_symlink |
105 | .long sys_lstat | 108 | .long sys_lstat |
106 | .long sys_readlink /* 85 */ | 109 | .long sys_readlink /* 85 */ |
107 | .long sys_uselib | 110 | .long sys_uselib |
108 | .long sys_ni_syscall /* sys_swapon */ | 111 | .long sys_swapon |
109 | .long sys_reboot | 112 | .long sys_reboot |
110 | .long sys_old_readdir | 113 | .long sys_old_readdir |
111 | .long sys_old_mmap /* 90 */ | 114 | .long sys_old_mmap /* 90 */ |
112 | .long sys_munmap | 115 | .long sys_munmap |
113 | .long sys_truncate | 116 | .long sys_truncate |
114 | .long sys_ftruncate | 117 | .long sys_ftruncate |
115 | .long sys_fchmod | 118 | .long sys_fchmod |
116 | .long sys_fchown16 /* 95 */ | 119 | .long sys_fchown16 /* 95 */ |
117 | .long sys_getpriority | 120 | .long sys_getpriority |
118 | .long sys_setpriority | 121 | .long sys_setpriority |
119 | .long sys_ni_syscall /* old profil syscall holder */ | 122 | .long sys_ni_syscall /* old profil syscall holder */ |
120 | .long sys_statfs | 123 | .long sys_statfs |
121 | .long sys_fstatfs /* 100 */ | 124 | .long sys_fstatfs /* 100 */ |
122 | .long sys_ni_syscall /* ioperm for i386 */ | 125 | .long sys_ni_syscall /* ioperm for i386 */ |
123 | .long sys_socketcall | 126 | .long sys_socketcall |
124 | .long sys_syslog | 127 | .long sys_syslog |
125 | .long sys_setitimer | 128 | .long sys_setitimer |
126 | .long sys_getitimer /* 105 */ | 129 | .long sys_getitimer /* 105 */ |
127 | .long sys_newstat | 130 | .long sys_newstat |
128 | .long sys_newlstat | 131 | .long sys_newlstat |
129 | .long sys_newfstat | 132 | .long sys_newfstat |
130 | .long sys_ni_syscall | 133 | .long sys_ni_syscall |
131 | .long sys_ni_syscall /* iopl for i386 */ /* 110 */ | 134 | .long sys_ni_syscall /* 110 - iopl for i386 */ |
132 | .long sys_vhangup | 135 | .long sys_vhangup |
133 | .long sys_ni_syscall /* obsolete idle() syscall */ | 136 | .long sys_ni_syscall /* obsolete idle() syscall */ |
134 | .long sys_ni_syscall /* vm86old for i386 */ | 137 | .long sys_ni_syscall /* vm86old for i386 */ |
135 | .long sys_wait4 | 138 | .long sys_wait4 |
136 | .long sys_ni_syscall /* 115 */ /* sys_swapoff */ | 139 | .long sys_swapoff /* 115 */ |
137 | .long sys_sysinfo | 140 | .long sys_sysinfo |
138 | .long sys_ipc | 141 | .long sys_ipc |
139 | .long sys_fsync | 142 | .long sys_fsync |
140 | .long sys_sigreturn | 143 | .long sys_sigreturn |
141 | .long sys_clone /* 120 */ | 144 | .long sys_clone /* 120 */ |
142 | .long sys_setdomainname | 145 | .long sys_setdomainname |
143 | .long sys_newuname | 146 | .long sys_newuname |
144 | .long sys_cacheflush /* modify_ldt for i386 */ | 147 | .long sys_cacheflush /* modify_ldt for i386 */ |
145 | .long sys_adjtimex | 148 | .long sys_adjtimex |
146 | .long sys_ni_syscall /* 125 */ /* sys_mprotect */ | 149 | .long sys_mprotect /* 125 */ |
147 | .long sys_sigprocmask | 150 | .long sys_sigprocmask |
148 | .long sys_ni_syscall /* old "creat_module" */ | 151 | .long sys_ni_syscall /* old "create_module" */ |
149 | .long sys_init_module | 152 | .long sys_init_module |
150 | .long sys_delete_module | 153 | .long sys_delete_module |
151 | .long sys_ni_syscall /* 130: old "get_kernel_syms" */ | 154 | .long sys_ni_syscall /* 130 - old "get_kernel_syms" */ |
152 | .long sys_quotactl | 155 | .long sys_quotactl |
153 | .long sys_getpgid | 156 | .long sys_getpgid |
154 | .long sys_fchdir | 157 | .long sys_fchdir |
155 | .long sys_bdflush | 158 | .long sys_bdflush |
156 | .long sys_sysfs /* 135 */ | 159 | .long sys_sysfs /* 135 */ |
157 | .long sys_personality | 160 | .long sys_personality |
158 | .long sys_ni_syscall /* for afs_syscall */ | 161 | .long sys_ni_syscall /* for afs_syscall */ |
159 | .long sys_setfsuid16 | 162 | .long sys_setfsuid16 |
160 | .long sys_setfsgid16 | 163 | .long sys_setfsgid16 |
161 | .long sys_llseek /* 140 */ | 164 | .long sys_llseek /* 140 */ |
162 | .long sys_getdents | 165 | .long sys_getdents |
163 | .long sys_select | 166 | .long sys_select |
164 | .long sys_flock | 167 | .long sys_flock |
165 | .long sys_ni_syscall /* sys_msync */ | 168 | .long sys_msync |
166 | .long sys_readv /* 145 */ | 169 | .long sys_readv /* 145 */ |
167 | .long sys_writev | 170 | .long sys_writev |
168 | .long sys_getsid | 171 | .long sys_getsid |
169 | .long sys_fdatasync | 172 | .long sys_fdatasync |
170 | .long sys_sysctl | 173 | .long sys_sysctl |
171 | .long sys_ni_syscall /* 150 */ /* sys_mlock */ | 174 | .long sys_mlock /* 150 */ |
172 | .long sys_ni_syscall /* sys_munlock */ | 175 | .long sys_munlock |
173 | .long sys_ni_syscall /* sys_mlockall */ | 176 | .long sys_mlockall |
174 | .long sys_ni_syscall /* sys_munlockall */ | 177 | .long sys_munlockall |
175 | .long sys_sched_setparam | 178 | .long sys_sched_setparam |
176 | .long sys_sched_getparam /* 155 */ | 179 | .long sys_sched_getparam /* 155 */ |
177 | .long sys_sched_setscheduler | 180 | .long sys_sched_setscheduler |
178 | .long sys_sched_getscheduler | 181 | .long sys_sched_getscheduler |
179 | .long sys_sched_yield | 182 | .long sys_sched_yield |
@@ -181,124 +184,124 @@ ENTRY(sys_call_table) | |||
181 | .long sys_sched_get_priority_min /* 160 */ | 184 | .long sys_sched_get_priority_min /* 160 */ |
182 | .long sys_sched_rr_get_interval | 185 | .long sys_sched_rr_get_interval |
183 | .long sys_nanosleep | 186 | .long sys_nanosleep |
184 | .long sys_ni_syscall /* sys_mremap */ | 187 | .long sys_mremap |
185 | .long sys_setresuid16 | 188 | .long sys_setresuid16 |
186 | .long sys_getresuid16 /* 165 */ | 189 | .long sys_getresuid16 /* 165 */ |
187 | .long sys_getpagesize /* sys_getpagesize */ | 190 | .long sys_getpagesize |
188 | .long sys_ni_syscall /* old "query_module" */ | 191 | .long sys_ni_syscall /* old "query_module" */ |
189 | .long sys_poll | 192 | .long sys_poll |
190 | .long sys_ni_syscall /* sys_nfsservctl */ | 193 | .long sys_nfsservctl |
191 | .long sys_setresgid16 /* 170 */ | 194 | .long sys_setresgid16 /* 170 */ |
192 | .long sys_getresgid16 | 195 | .long sys_getresgid16 |
193 | .long sys_prctl | 196 | .long sys_prctl |
194 | .long sys_rt_sigreturn | 197 | .long sys_rt_sigreturn |
195 | .long sys_rt_sigaction | 198 | .long sys_rt_sigaction |
196 | .long sys_rt_sigprocmask /* 175 */ | 199 | .long sys_rt_sigprocmask /* 175 */ |
197 | .long sys_rt_sigpending | 200 | .long sys_rt_sigpending |
198 | .long sys_rt_sigtimedwait | 201 | .long sys_rt_sigtimedwait |
199 | .long sys_rt_sigqueueinfo | 202 | .long sys_rt_sigqueueinfo |
200 | .long sys_rt_sigsuspend | 203 | .long sys_rt_sigsuspend |
201 | .long sys_pread64 /* 180 */ | 204 | .long sys_pread64 /* 180 */ |
202 | .long sys_pwrite64 | 205 | .long sys_pwrite64 |
203 | .long sys_lchown16 | 206 | .long sys_lchown16 |
204 | .long sys_getcwd | 207 | .long sys_getcwd |
205 | .long sys_capget | 208 | .long sys_capget |
206 | .long sys_capset /* 185 */ | 209 | .long sys_capset /* 185 */ |
207 | .long sys_sigaltstack | 210 | .long sys_sigaltstack |
208 | .long sys_sendfile | 211 | .long sys_sendfile |
209 | .long sys_ni_syscall /* streams1 */ | 212 | .long sys_ni_syscall /* streams1 */ |
210 | .long sys_ni_syscall /* streams2 */ | 213 | .long sys_ni_syscall /* streams2 */ |
211 | .long sys_vfork /* 190 */ | 214 | .long sys_vfork /* 190 */ |
212 | .long sys_getrlimit | 215 | .long sys_getrlimit |
213 | .long sys_mmap_pgoff | 216 | .long sys_mmap2 |
214 | .long sys_truncate64 | 217 | .long sys_truncate64 |
215 | .long sys_ftruncate64 | 218 | .long sys_ftruncate64 |
216 | .long sys_stat64 /* 195 */ | 219 | .long sys_stat64 /* 195 */ |
217 | .long sys_lstat64 | 220 | .long sys_lstat64 |
218 | .long sys_fstat64 | 221 | .long sys_fstat64 |
219 | .long sys_chown | 222 | .long sys_chown |
220 | .long sys_getuid | 223 | .long sys_getuid |
221 | .long sys_getgid /* 200 */ | 224 | .long sys_getgid /* 200 */ |
222 | .long sys_geteuid | 225 | .long sys_geteuid |
223 | .long sys_getegid | 226 | .long sys_getegid |
224 | .long sys_setreuid | 227 | .long sys_setreuid |
225 | .long sys_setregid | 228 | .long sys_setregid |
226 | .long sys_getgroups /* 205 */ | 229 | .long sys_getgroups /* 205 */ |
227 | .long sys_setgroups | 230 | .long sys_setgroups |
228 | .long sys_fchown | 231 | .long sys_fchown |
229 | .long sys_setresuid | 232 | .long sys_setresuid |
230 | .long sys_getresuid | 233 | .long sys_getresuid |
231 | .long sys_setresgid /* 210 */ | 234 | .long sys_setresgid /* 210 */ |
232 | .long sys_getresgid | 235 | .long sys_getresgid |
233 | .long sys_lchown | 236 | .long sys_lchown |
234 | .long sys_setuid | 237 | .long sys_setuid |
235 | .long sys_setgid | 238 | .long sys_setgid |
236 | .long sys_setfsuid /* 215 */ | 239 | .long sys_setfsuid /* 215 */ |
237 | .long sys_setfsgid | 240 | .long sys_setfsgid |
238 | .long sys_pivot_root | 241 | .long sys_pivot_root |
239 | .long sys_ni_syscall | 242 | .long sys_ni_syscall |
240 | .long sys_ni_syscall | 243 | .long sys_ni_syscall |
241 | .long sys_getdents64 /* 220 */ | 244 | .long sys_getdents64 /* 220 */ |
242 | .long sys_gettid | 245 | .long sys_gettid |
243 | .long sys_tkill | 246 | .long sys_tkill |
244 | .long sys_setxattr | 247 | .long sys_setxattr |
245 | .long sys_lsetxattr | 248 | .long sys_lsetxattr |
246 | .long sys_fsetxattr /* 225 */ | 249 | .long sys_fsetxattr /* 225 */ |
247 | .long sys_getxattr | 250 | .long sys_getxattr |
248 | .long sys_lgetxattr | 251 | .long sys_lgetxattr |
249 | .long sys_fgetxattr | 252 | .long sys_fgetxattr |
250 | .long sys_listxattr | 253 | .long sys_listxattr |
251 | .long sys_llistxattr /* 230 */ | 254 | .long sys_llistxattr /* 230 */ |
252 | .long sys_flistxattr | 255 | .long sys_flistxattr |
253 | .long sys_removexattr | 256 | .long sys_removexattr |
254 | .long sys_lremovexattr | 257 | .long sys_lremovexattr |
255 | .long sys_fremovexattr | 258 | .long sys_fremovexattr |
256 | .long sys_futex /* 235 */ | 259 | .long sys_futex /* 235 */ |
257 | .long sys_sendfile64 | 260 | .long sys_sendfile64 |
258 | .long sys_ni_syscall /* sys_mincore */ | 261 | .long sys_mincore |
259 | .long sys_ni_syscall /* sys_madvise */ | 262 | .long sys_madvise |
260 | .long sys_fcntl64 | 263 | .long sys_fcntl64 |
261 | .long sys_readahead /* 240 */ | 264 | .long sys_readahead /* 240 */ |
262 | .long sys_io_setup | 265 | .long sys_io_setup |
263 | .long sys_io_destroy | 266 | .long sys_io_destroy |
264 | .long sys_io_getevents | 267 | .long sys_io_getevents |
265 | .long sys_io_submit | 268 | .long sys_io_submit |
266 | .long sys_io_cancel /* 245 */ | 269 | .long sys_io_cancel /* 245 */ |
267 | .long sys_fadvise64 | 270 | .long sys_fadvise64 |
268 | .long sys_exit_group | 271 | .long sys_exit_group |
269 | .long sys_lookup_dcookie | 272 | .long sys_lookup_dcookie |
270 | .long sys_epoll_create | 273 | .long sys_epoll_create |
271 | .long sys_epoll_ctl /* 250 */ | 274 | .long sys_epoll_ctl /* 250 */ |
272 | .long sys_epoll_wait | 275 | .long sys_epoll_wait |
273 | .long sys_ni_syscall /* sys_remap_file_pages */ | 276 | .long sys_remap_file_pages |
274 | .long sys_set_tid_address | 277 | .long sys_set_tid_address |
275 | .long sys_timer_create | 278 | .long sys_timer_create |
276 | .long sys_timer_settime /* 255 */ | 279 | .long sys_timer_settime /* 255 */ |
277 | .long sys_timer_gettime | 280 | .long sys_timer_gettime |
278 | .long sys_timer_getoverrun | 281 | .long sys_timer_getoverrun |
279 | .long sys_timer_delete | 282 | .long sys_timer_delete |
280 | .long sys_clock_settime | 283 | .long sys_clock_settime |
281 | .long sys_clock_gettime /* 260 */ | 284 | .long sys_clock_gettime /* 260 */ |
282 | .long sys_clock_getres | 285 | .long sys_clock_getres |
283 | .long sys_clock_nanosleep | 286 | .long sys_clock_nanosleep |
284 | .long sys_statfs64 | 287 | .long sys_statfs64 |
285 | .long sys_fstatfs64 | 288 | .long sys_fstatfs64 |
286 | .long sys_tgkill /* 265 */ | 289 | .long sys_tgkill /* 265 */ |
287 | .long sys_utimes | 290 | .long sys_utimes |
288 | .long sys_fadvise64_64 | 291 | .long sys_fadvise64_64 |
289 | .long sys_mbind | 292 | .long sys_mbind |
290 | .long sys_get_mempolicy | 293 | .long sys_get_mempolicy |
291 | .long sys_set_mempolicy /* 270 */ | 294 | .long sys_set_mempolicy /* 270 */ |
292 | .long sys_mq_open | 295 | .long sys_mq_open |
293 | .long sys_mq_unlink | 296 | .long sys_mq_unlink |
294 | .long sys_mq_timedsend | 297 | .long sys_mq_timedsend |
295 | .long sys_mq_timedreceive | 298 | .long sys_mq_timedreceive |
296 | .long sys_mq_notify /* 275 */ | 299 | .long sys_mq_notify /* 275 */ |
297 | .long sys_mq_getsetattr | 300 | .long sys_mq_getsetattr |
298 | .long sys_waitid | 301 | .long sys_waitid |
299 | .long sys_ni_syscall /* for sys_vserver */ | 302 | .long sys_ni_syscall /* for sys_vserver */ |
300 | .long sys_add_key | 303 | .long sys_add_key |
301 | .long sys_request_key /* 280 */ | 304 | .long sys_request_key /* 280 */ |
302 | .long sys_keyctl | 305 | .long sys_keyctl |
303 | .long sys_ioprio_set | 306 | .long sys_ioprio_set |
304 | .long sys_ioprio_get | 307 | .long sys_ioprio_get |
@@ -319,8 +322,8 @@ ENTRY(sys_call_table) | |||
319 | .long sys_readlinkat | 322 | .long sys_readlinkat |
320 | .long sys_fchmodat | 323 | .long sys_fchmodat |
321 | .long sys_faccessat /* 300 */ | 324 | .long sys_faccessat /* 300 */ |
322 | .long sys_ni_syscall /* Reserved for pselect6 */ | 325 | .long sys_pselect6 |
323 | .long sys_ni_syscall /* Reserved for ppoll */ | 326 | .long sys_ppoll |
324 | .long sys_unshare | 327 | .long sys_unshare |
325 | .long sys_set_robust_list | 328 | .long sys_set_robust_list |
326 | .long sys_get_robust_list /* 305 */ | 329 | .long sys_get_robust_list /* 305 */ |
@@ -363,7 +366,3 @@ ENTRY(sys_call_table) | |||
363 | .long sys_clock_adjtime | 366 | .long sys_clock_adjtime |
364 | .long sys_syncfs | 367 | .long sys_syncfs |
365 | 368 | ||
366 | .rept NR_syscalls-(.-sys_call_table)/4 | ||
367 | .long sys_ni_syscall | ||
368 | .endr | ||
369 | |||
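The two hunks above complete the move of the syscall table out of entry_mm.S into the shared syscalltable.S: the table now lives in .rodata, the non-MMU build aliases sys_mmap2 to sys_mmap_pgoff through the #define at the top, formerly stubbed nommu entries (swapon, swapoff, mprotect, msync, the mlock family, mremap, nfsservctl, mincore, madvise, remap_file_pages, pselect6, ppoll) point at their real implementations, and the trailing .rept padding of sys_ni_syscall entries is gone, leaving the table length determined by the entries actually listed. A small user-space analogue of such a dispatch table, with illustrative names only, not the kernel's entry path:

#include <stdio.h>

/* Illustrative handlers; sys_ni() plays the role of sys_ni_syscall. */
static long sys_ni(void)    { return -38; /* -ENOSYS on Linux */ }
static long sys_hello(void) { return 0; }

typedef long (*syscall_fn)(void);

/* The table is const (".rodata"), and every retired slot points at sys_ni(). */
static const syscall_fn sys_call_table[] = {
        sys_hello,      /* 0 */
        sys_ni,         /* 1 - retired slot, like the old "break" entry */
        sys_hello,      /* 2 */
};

#define NR_SYSCALLS (sizeof(sys_call_table) / sizeof(sys_call_table[0]))

static long dispatch(unsigned int nr)
{
        /* Out-of-range numbers take the same "not implemented" path. */
        if (nr >= NR_SYSCALLS)
                return sys_ni();
        return sys_call_table[nr]();
}

int main(void)
{
        printf("%ld %ld %ld\n", dispatch(0), dispatch(1), dispatch(99));
        return 0;
}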
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index 7ff9b5492041..aef6c917b45a 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms | |||
@@ -11,6 +11,7 @@ platforms += dec | |||
11 | platforms += emma | 11 | platforms += emma |
12 | platforms += jazz | 12 | platforms += jazz |
13 | platforms += jz4740 | 13 | platforms += jz4740 |
14 | platforms += lantiq | ||
14 | platforms += lasat | 15 | platforms += lasat |
15 | platforms += loongson | 16 | platforms += loongson |
16 | platforms += mipssim | 17 | platforms += mipssim |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 8e256cc5dcd9..2d1cf9740953 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -212,6 +212,24 @@ config MACH_JZ4740 | |||
212 | select HAVE_PWM | 212 | select HAVE_PWM |
213 | select HAVE_CLK | 213 | select HAVE_CLK |
214 | 214 | ||
215 | config LANTIQ | ||
216 | bool "Lantiq based platforms" | ||
217 | select DMA_NONCOHERENT | ||
218 | select IRQ_CPU | ||
219 | select CEVT_R4K | ||
220 | select CSRC_R4K | ||
221 | select SYS_HAS_CPU_MIPS32_R1 | ||
222 | select SYS_HAS_CPU_MIPS32_R2 | ||
223 | select SYS_SUPPORTS_BIG_ENDIAN | ||
224 | select SYS_SUPPORTS_32BIT_KERNEL | ||
225 | select SYS_SUPPORTS_MULTITHREADING | ||
226 | select SYS_HAS_EARLY_PRINTK | ||
227 | select ARCH_REQUIRE_GPIOLIB | ||
228 | select SWAP_IO_SPACE | ||
229 | select BOOT_RAW | ||
230 | select HAVE_CLK | ||
231 | select MIPS_MACHINE | ||
232 | |||
215 | config LASAT | 233 | config LASAT |
216 | bool "LASAT Networks platforms" | 234 | bool "LASAT Networks platforms" |
217 | select CEVT_R4K | 235 | select CEVT_R4K |
@@ -736,6 +754,33 @@ config CAVIUM_OCTEON_REFERENCE_BOARD | |||
736 | Hikari | 754 | Hikari |
737 | Say Y here for most Octeon reference boards. | 755 | Say Y here for most Octeon reference boards. |
738 | 756 | ||
757 | config NLM_XLR_BOARD | ||
758 | bool "Netlogic XLR/XLS based systems" | ||
759 | depends on EXPERIMENTAL | ||
760 | select BOOT_ELF32 | ||
761 | select NLM_COMMON | ||
762 | select NLM_XLR | ||
763 | select SYS_HAS_CPU_XLR | ||
764 | select SYS_SUPPORTS_SMP | ||
765 | select HW_HAS_PCI | ||
766 | select SWAP_IO_SPACE | ||
767 | select SYS_SUPPORTS_32BIT_KERNEL | ||
768 | select SYS_SUPPORTS_64BIT_KERNEL | ||
769 | select 64BIT_PHYS_ADDR | ||
770 | select SYS_SUPPORTS_BIG_ENDIAN | ||
771 | select SYS_SUPPORTS_HIGHMEM | ||
772 | select DMA_COHERENT | ||
773 | select NR_CPUS_DEFAULT_32 | ||
774 | select CEVT_R4K | ||
775 | select CSRC_R4K | ||
776 | select IRQ_CPU | ||
777 | select ZONE_DMA if 64BIT | ||
778 | select SYNC_R4K | ||
779 | select SYS_HAS_EARLY_PRINTK | ||
780 | help | ||
781 | Support for systems based on Netlogic XLR and XLS processors. | ||
782 | Say Y here if you have a XLR or XLS based board. | ||
783 | |||
739 | endchoice | 784 | endchoice |
740 | 785 | ||
741 | source "arch/mips/alchemy/Kconfig" | 786 | source "arch/mips/alchemy/Kconfig" |
@@ -743,6 +788,7 @@ source "arch/mips/ath79/Kconfig" | |||
743 | source "arch/mips/bcm63xx/Kconfig" | 788 | source "arch/mips/bcm63xx/Kconfig" |
744 | source "arch/mips/jazz/Kconfig" | 789 | source "arch/mips/jazz/Kconfig" |
745 | source "arch/mips/jz4740/Kconfig" | 790 | source "arch/mips/jz4740/Kconfig" |
791 | source "arch/mips/lantiq/Kconfig" | ||
746 | source "arch/mips/lasat/Kconfig" | 792 | source "arch/mips/lasat/Kconfig" |
747 | source "arch/mips/pmc-sierra/Kconfig" | 793 | source "arch/mips/pmc-sierra/Kconfig" |
748 | source "arch/mips/powertv/Kconfig" | 794 | source "arch/mips/powertv/Kconfig" |
@@ -752,6 +798,7 @@ source "arch/mips/txx9/Kconfig" | |||
752 | source "arch/mips/vr41xx/Kconfig" | 798 | source "arch/mips/vr41xx/Kconfig" |
753 | source "arch/mips/cavium-octeon/Kconfig" | 799 | source "arch/mips/cavium-octeon/Kconfig" |
754 | source "arch/mips/loongson/Kconfig" | 800 | source "arch/mips/loongson/Kconfig" |
801 | source "arch/mips/netlogic/Kconfig" | ||
755 | 802 | ||
756 | endmenu | 803 | endmenu |
757 | 804 | ||
@@ -997,9 +1044,6 @@ config IRQ_GT641XX | |||
997 | config IRQ_GIC | 1044 | config IRQ_GIC |
998 | bool | 1045 | bool |
999 | 1046 | ||
1000 | config IRQ_CPU_OCTEON | ||
1001 | bool | ||
1002 | |||
1003 | config MIPS_BOARDS_GEN | 1047 | config MIPS_BOARDS_GEN |
1004 | bool | 1048 | bool |
1005 | 1049 | ||
@@ -1359,8 +1403,6 @@ config CPU_SB1 | |||
1359 | config CPU_CAVIUM_OCTEON | 1403 | config CPU_CAVIUM_OCTEON |
1360 | bool "Cavium Octeon processor" | 1404 | bool "Cavium Octeon processor" |
1361 | depends on SYS_HAS_CPU_CAVIUM_OCTEON | 1405 | depends on SYS_HAS_CPU_CAVIUM_OCTEON |
1362 | select IRQ_CPU | ||
1363 | select IRQ_CPU_OCTEON | ||
1364 | select CPU_HAS_PREFETCH | 1406 | select CPU_HAS_PREFETCH |
1365 | select CPU_SUPPORTS_64BIT_KERNEL | 1407 | select CPU_SUPPORTS_64BIT_KERNEL |
1366 | select SYS_SUPPORTS_SMP | 1408 | select SYS_SUPPORTS_SMP |
@@ -1425,6 +1467,17 @@ config CPU_BMIPS5000 | |||
1425 | help | 1467 | help |
1426 | Broadcom BMIPS5000 processors. | 1468 | Broadcom BMIPS5000 processors. |
1427 | 1469 | ||
1470 | config CPU_XLR | ||
1471 | bool "Netlogic XLR SoC" | ||
1472 | depends on SYS_HAS_CPU_XLR | ||
1473 | select CPU_SUPPORTS_32BIT_KERNEL | ||
1474 | select CPU_SUPPORTS_64BIT_KERNEL | ||
1475 | select CPU_SUPPORTS_HIGHMEM | ||
1476 | select WEAK_ORDERING | ||
1477 | select WEAK_REORDERING_BEYOND_LLSC | ||
1478 | select CPU_SUPPORTS_HUGEPAGES | ||
1479 | help | ||
1480 | Netlogic Microsystems XLR/XLS processors. | ||
1428 | endchoice | 1481 | endchoice |
1429 | 1482 | ||
1430 | if CPU_LOONGSON2F | 1483 | if CPU_LOONGSON2F |
@@ -1555,6 +1608,9 @@ config SYS_HAS_CPU_BMIPS4380 | |||
1555 | config SYS_HAS_CPU_BMIPS5000 | 1608 | config SYS_HAS_CPU_BMIPS5000 |
1556 | bool | 1609 | bool |
1557 | 1610 | ||
1611 | config SYS_HAS_CPU_XLR | ||
1612 | bool | ||
1613 | |||
1558 | # | 1614 | # |
1559 | # CPU may reorder R->R, R->W, W->R, W->W | 1615 | # CPU may reorder R->R, R->W, W->R, W->W |
1560 | # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC | 1616 | # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC |
diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 53e3514ba10e..884819cd0607 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile | |||
@@ -191,6 +191,18 @@ endif | |||
191 | # | 191 | # |
192 | include $(srctree)/arch/mips/Kbuild.platforms | 192 | include $(srctree)/arch/mips/Kbuild.platforms |
193 | 193 | ||
194 | # | ||
195 | # NETLOGIC SOC Common (common) | ||
196 | # | ||
197 | cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic | ||
198 | cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic | ||
199 | |||
200 | # | ||
201 | # NETLOGIC XLR/XLS SoC, Simulator and boards | ||
202 | # | ||
203 | core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/ | ||
204 | load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000 | ||
205 | |||
194 | cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic | 206 | cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic |
195 | drivers-$(CONFIG_PCI) += arch/mips/pci/ | 207 | drivers-$(CONFIG_PCI) += arch/mips/pci/ |
196 | 208 | ||
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c index ca0506a8585a..3a5abb54d505 100644 --- a/arch/mips/alchemy/common/dbdma.c +++ b/arch/mips/alchemy/common/dbdma.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/interrupt.h> | 37 | #include <linux/interrupt.h> |
38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
39 | #include <linux/sysdev.h> | 39 | #include <linux/syscore_ops.h> |
40 | #include <asm/mach-au1x00/au1000.h> | 40 | #include <asm/mach-au1x00/au1000.h> |
41 | #include <asm/mach-au1x00/au1xxx_dbdma.h> | 41 | #include <asm/mach-au1x00/au1xxx_dbdma.h> |
42 | 42 | ||
@@ -58,7 +58,8 @@ static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock); | |||
58 | /* I couldn't find a macro that did this... */ | 58 | /* I couldn't find a macro that did this... */ |
59 | #define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1)) | 59 | #define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1)) |
60 | 60 | ||
61 | static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE; | 61 | static dbdma_global_t *dbdma_gptr = |
62 | (dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); | ||
62 | static int dbdma_initialized; | 63 | static int dbdma_initialized; |
63 | 64 | ||
64 | static dbdev_tab_t dbdev_tab[] = { | 65 | static dbdev_tab_t dbdev_tab[] = { |
@@ -299,7 +300,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, | |||
299 | if (ctp != NULL) { | 300 | if (ctp != NULL) { |
300 | memset(ctp, 0, sizeof(chan_tab_t)); | 301 | memset(ctp, 0, sizeof(chan_tab_t)); |
301 | ctp->chan_index = chan = i; | 302 | ctp->chan_index = chan = i; |
302 | dcp = DDMA_CHANNEL_BASE; | 303 | dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); |
303 | dcp += (0x0100 * chan); | 304 | dcp += (0x0100 * chan); |
304 | ctp->chan_ptr = (au1x_dma_chan_t *)dcp; | 305 | ctp->chan_ptr = (au1x_dma_chan_t *)dcp; |
305 | cp = (au1x_dma_chan_t *)dcp; | 306 | cp = (au1x_dma_chan_t *)dcp; |
@@ -958,105 +959,75 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr) | |||
958 | } | 959 | } |
959 | 960 | ||
960 | 961 | ||
961 | struct alchemy_dbdma_sysdev { | 962 | static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6]; |
962 | struct sys_device sysdev; | ||
963 | u32 pm_regs[NUM_DBDMA_CHANS + 1][6]; | ||
964 | }; | ||
965 | 963 | ||
966 | static int alchemy_dbdma_suspend(struct sys_device *dev, | 964 | static int alchemy_dbdma_suspend(void) |
967 | pm_message_t state) | ||
968 | { | 965 | { |
969 | struct alchemy_dbdma_sysdev *sdev = | ||
970 | container_of(dev, struct alchemy_dbdma_sysdev, sysdev); | ||
971 | int i; | 966 | int i; |
972 | u32 addr; | 967 | void __iomem *addr; |
973 | 968 | ||
974 | addr = DDMA_GLOBAL_BASE; | 969 | addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); |
975 | sdev->pm_regs[0][0] = au_readl(addr + 0x00); | 970 | alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00); |
976 | sdev->pm_regs[0][1] = au_readl(addr + 0x04); | 971 | alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04); |
977 | sdev->pm_regs[0][2] = au_readl(addr + 0x08); | 972 | alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08); |
978 | sdev->pm_regs[0][3] = au_readl(addr + 0x0c); | 973 | alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c); |
979 | 974 | ||
980 | /* save channel configurations */ | 975 | /* save channel configurations */ |
981 | for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) { | 976 | addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); |
982 | sdev->pm_regs[i][0] = au_readl(addr + 0x00); | 977 | for (i = 1; i <= NUM_DBDMA_CHANS; i++) { |
983 | sdev->pm_regs[i][1] = au_readl(addr + 0x04); | 978 | alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00); |
984 | sdev->pm_regs[i][2] = au_readl(addr + 0x08); | 979 | alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04); |
985 | sdev->pm_regs[i][3] = au_readl(addr + 0x0c); | 980 | alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08); |
986 | sdev->pm_regs[i][4] = au_readl(addr + 0x10); | 981 | alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c); |
987 | sdev->pm_regs[i][5] = au_readl(addr + 0x14); | 982 | alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10); |
983 | alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14); | ||
988 | 984 | ||
989 | /* halt channel */ | 985 | /* halt channel */ |
990 | au_writel(sdev->pm_regs[i][0] & ~1, addr + 0x00); | 986 | __raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00); |
991 | au_sync(); | 987 | wmb(); |
992 | while (!(au_readl(addr + 0x14) & 1)) | 988 | while (!(__raw_readl(addr + 0x14) & 1)) |
993 | au_sync(); | 989 | wmb(); |
994 | 990 | ||
995 | addr += 0x100; /* next channel base */ | 991 | addr += 0x100; /* next channel base */ |
996 | } | 992 | } |
997 | /* disable channel interrupts */ | 993 | /* disable channel interrupts */ |
998 | au_writel(0, DDMA_GLOBAL_BASE + 0x0c); | 994 | addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); |
999 | au_sync(); | 995 | __raw_writel(0, addr + 0x0c); |
996 | wmb(); | ||
1000 | 997 | ||
1001 | return 0; | 998 | return 0; |
1002 | } | 999 | } |
1003 | 1000 | ||
1004 | static int alchemy_dbdma_resume(struct sys_device *dev) | 1001 | static void alchemy_dbdma_resume(void) |
1005 | { | 1002 | { |
1006 | struct alchemy_dbdma_sysdev *sdev = | ||
1007 | container_of(dev, struct alchemy_dbdma_sysdev, sysdev); | ||
1008 | int i; | 1003 | int i; |
1009 | u32 addr; | 1004 | void __iomem *addr; |
1010 | 1005 | ||
1011 | addr = DDMA_GLOBAL_BASE; | 1006 | addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); |
1012 | au_writel(sdev->pm_regs[0][0], addr + 0x00); | 1007 | __raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00); |
1013 | au_writel(sdev->pm_regs[0][1], addr + 0x04); | 1008 | __raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04); |
1014 | au_writel(sdev->pm_regs[0][2], addr + 0x08); | 1009 | __raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08); |
1015 | au_writel(sdev->pm_regs[0][3], addr + 0x0c); | 1010 | __raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c); |
1016 | 1011 | ||
1017 | /* restore channel configurations */ | 1012 | /* restore channel configurations */ |
1018 | for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) { | 1013 | addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); |
1019 | au_writel(sdev->pm_regs[i][0], addr + 0x00); | 1014 | for (i = 1; i <= NUM_DBDMA_CHANS; i++) { |
1020 | au_writel(sdev->pm_regs[i][1], addr + 0x04); | 1015 | __raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00); |
1021 | au_writel(sdev->pm_regs[i][2], addr + 0x08); | 1016 | __raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04); |
1022 | au_writel(sdev->pm_regs[i][3], addr + 0x0c); | 1017 | __raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08); |
1023 | au_writel(sdev->pm_regs[i][4], addr + 0x10); | 1018 | __raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c); |
1024 | au_writel(sdev->pm_regs[i][5], addr + 0x14); | 1019 | __raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10); |
1025 | au_sync(); | 1020 | __raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14); |
1021 | wmb(); | ||
1026 | addr += 0x100; /* next channel base */ | 1022 | addr += 0x100; /* next channel base */ |
1027 | } | 1023 | } |
1028 | |||
1029 | return 0; | ||
1030 | } | 1024 | } |
1031 | 1025 | ||
1032 | static struct sysdev_class alchemy_dbdma_sysdev_class = { | 1026 | static struct syscore_ops alchemy_dbdma_syscore_ops = { |
1033 | .name = "dbdma", | ||
1034 | .suspend = alchemy_dbdma_suspend, | 1027 | .suspend = alchemy_dbdma_suspend, |
1035 | .resume = alchemy_dbdma_resume, | 1028 | .resume = alchemy_dbdma_resume, |
1036 | }; | 1029 | }; |
1037 | 1030 | ||
1038 | static int __init alchemy_dbdma_sysdev_init(void) | ||
1039 | { | ||
1040 | struct alchemy_dbdma_sysdev *sdev; | ||
1041 | int ret; | ||
1042 | |||
1043 | ret = sysdev_class_register(&alchemy_dbdma_sysdev_class); | ||
1044 | if (ret) | ||
1045 | return ret; | ||
1046 | |||
1047 | sdev = kzalloc(sizeof(struct alchemy_dbdma_sysdev), GFP_KERNEL); | ||
1048 | if (!sdev) | ||
1049 | return -ENOMEM; | ||
1050 | |||
1051 | sdev->sysdev.id = -1; | ||
1052 | sdev->sysdev.cls = &alchemy_dbdma_sysdev_class; | ||
1053 | ret = sysdev_register(&sdev->sysdev); | ||
1054 | if (ret) | ||
1055 | kfree(sdev); | ||
1056 | |||
1057 | return ret; | ||
1058 | } | ||
1059 | |||
1060 | static int __init au1xxx_dbdma_init(void) | 1031 | static int __init au1xxx_dbdma_init(void) |
1061 | { | 1032 | { |
1062 | int irq_nr, ret; | 1033 | int irq_nr, ret; |
@@ -1084,11 +1055,7 @@ static int __init au1xxx_dbdma_init(void) | |||
1084 | else { | 1055 | else { |
1085 | dbdma_initialized = 1; | 1056 | dbdma_initialized = 1; |
1086 | printk(KERN_INFO "Alchemy DBDMA initialized\n"); | 1057 | printk(KERN_INFO "Alchemy DBDMA initialized\n"); |
1087 | ret = alchemy_dbdma_sysdev_init(); | 1058 | register_syscore_ops(&alchemy_dbdma_syscore_ops); |
1088 | if (ret) { | ||
1089 | printk(KERN_ERR "DBDMA PM init failed\n"); | ||
1090 | ret = 0; | ||
1091 | } | ||
1092 | } | 1059 | } |
1093 | 1060 | ||
1094 | return ret; | 1061 | return ret; |
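The dbdma.c hunk above converts the old sys_device suspend/resume hooks to struct syscore_ops, whose callbacks take no device argument and run late in the suspend path with interrupts disabled. Below is a minimal, hypothetical sketch of that pattern; the my_block_* names, physical address and register offset are placeholders, only the syscore_ops/ioremap/__raw_* calls are real kernel API.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#define MY_BLOCK_PHYS_ADDR 0x10100000   /* hypothetical register block */

static void __iomem *my_block_base;     /* mapped at init time */
static u32 my_block_saved_cfg;          /* state kept across suspend */

static int my_block_suspend(void)       /* no device argument, IRQs are off */
{
        my_block_saved_cfg = __raw_readl(my_block_base + 0x00);
        return 0;
}

static void my_block_resume(void)       /* returns void, unlike sysdev resume */
{
        __raw_writel(my_block_saved_cfg, my_block_base + 0x00);
        wmb();                          /* order the MMIO write */
}

static struct syscore_ops my_block_syscore_ops = {
        .suspend = my_block_suspend,
        .resume  = my_block_resume,
};

static int __init my_block_pm_init(void)
{
        my_block_base = ioremap(MY_BLOCK_PHYS_ADDR, 0x100);
        if (!my_block_base)
                return -ENOMEM;
        register_syscore_ops(&my_block_syscore_ops);
        return 0;
}
device_initcall(my_block_pm_init);

Note that register_syscore_ops() returns void, which is why the patch can drop the error handling that sysdev_class_register()/sysdev_register() previously required.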
diff --git a/arch/mips/alchemy/common/dma.c b/arch/mips/alchemy/common/dma.c index d5278877891d..347980e79a89 100644 --- a/arch/mips/alchemy/common/dma.c +++ b/arch/mips/alchemy/common/dma.c | |||
@@ -58,6 +58,9 @@ | |||
58 | * returned from request_dma. | 58 | * returned from request_dma. |
59 | */ | 59 | */ |
60 | 60 | ||
61 | /* DMA Channel register block spacing */ | ||
62 | #define DMA_CHANNEL_LEN 0x00000100 | ||
63 | |||
61 | DEFINE_SPINLOCK(au1000_dma_spin_lock); | 64 | DEFINE_SPINLOCK(au1000_dma_spin_lock); |
62 | 65 | ||
63 | struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = { | 66 | struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = { |
@@ -77,22 +80,23 @@ static const struct dma_dev { | |||
77 | unsigned int fifo_addr; | 80 | unsigned int fifo_addr; |
78 | unsigned int dma_mode; | 81 | unsigned int dma_mode; |
79 | } dma_dev_table[DMA_NUM_DEV] = { | 82 | } dma_dev_table[DMA_NUM_DEV] = { |
80 | {UART0_ADDR + UART_TX, 0}, | 83 | { AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 }, /* UART0_TX */ |
81 | {UART0_ADDR + UART_RX, 0}, | 84 | { AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR }, /* UART0_RX */ |
82 | {0, 0}, | 85 | { 0, 0 }, /* DMA_REQ0 */ |
83 | {0, 0}, | 86 | { 0, 0 }, /* DMA_REQ1 */ |
84 | {AC97C_DATA, DMA_DW16 }, /* coherent */ | 87 | { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 }, /* AC97 TX c */ |
85 | {AC97C_DATA, DMA_DR | DMA_DW16 }, /* coherent */ | 88 | { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR }, /* AC97 RX c */ |
86 | {UART3_ADDR + UART_TX, DMA_DW8 | DMA_NC}, | 89 | { AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* UART3_TX */ |
87 | {UART3_ADDR + UART_RX, DMA_DR | DMA_DW8 | DMA_NC}, | 90 | { AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */ |
88 | {USBD_EP0RD, DMA_DR | DMA_DW8 | DMA_NC}, | 91 | { AU1000_USBD_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */ |
89 | {USBD_EP0WR, DMA_DW8 | DMA_NC}, | 92 | { AU1000_USBD_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */ |
90 | {USBD_EP2WR, DMA_DW8 | DMA_NC}, | 93 | { AU1000_USBD_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */ |
91 | {USBD_EP3WR, DMA_DW8 | DMA_NC}, | 94 | { AU1000_USBD_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */ |
92 | {USBD_EP4RD, DMA_DR | DMA_DW8 | DMA_NC}, | 95 | { AU1000_USBD_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */ |
93 | {USBD_EP5RD, DMA_DR | DMA_DW8 | DMA_NC}, | 96 | { AU1000_USBD_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */ |
94 | {I2S_DATA, DMA_DW32 | DMA_NC}, | 97 | /* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */ |
95 | {I2S_DATA, DMA_DR | DMA_DW32 | DMA_NC} | 98 | { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC}, /* I2S TX */ |
99 | { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */ | ||
96 | }; | 100 | }; |
97 | 101 | ||
98 | int au1000_dma_read_proc(char *buf, char **start, off_t fpos, | 102 | int au1000_dma_read_proc(char *buf, char **start, off_t fpos, |
@@ -123,10 +127,10 @@ int au1000_dma_read_proc(char *buf, char **start, off_t fpos, | |||
123 | 127 | ||
124 | /* Device FIFO addresses and default DMA modes - 2nd bank */ | 128 | /* Device FIFO addresses and default DMA modes - 2nd bank */ |
125 | static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = { | 129 | static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = { |
126 | { SD0_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */ | 130 | { AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */ |
127 | { SD0_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 }, /* coherent */ | 131 | { AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }, /* coherent */ |
128 | { SD1_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */ | 132 | { AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */ |
129 | { SD1_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 } /* coherent */ | 133 | { AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */ |
130 | }; | 134 | }; |
131 | 135 | ||
132 | void dump_au1000_dma_channel(unsigned int dmanr) | 136 | void dump_au1000_dma_channel(unsigned int dmanr) |
@@ -202,7 +206,7 @@ int request_au1000_dma(int dev_id, const char *dev_str, | |||
202 | } | 206 | } |
203 | 207 | ||
204 | /* fill it in */ | 208 | /* fill it in */ |
205 | chan->io = DMA_CHANNEL_BASE + i * DMA_CHANNEL_LEN; | 209 | chan->io = KSEG1ADDR(AU1000_DMA_PHYS_ADDR) + i * DMA_CHANNEL_LEN; |
206 | chan->dev_id = dev_id; | 210 | chan->dev_id = dev_id; |
207 | chan->dev_str = dev_str; | 211 | chan->dev_str = dev_str; |
208 | chan->fifo_addr = dev->fifo_addr; | 212 | chan->fifo_addr = dev->fifo_addr; |
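The dma.c hunk computes channel register addresses from physical bases at run time instead of per-SoC macros. On 32-bit MIPS this is commonly done through the uncached kseg1 window, as in the hypothetical sketch below; the MY_DMA_* values are placeholders, not addresses from this commit.

/* KSEG1ADDR() maps a physical address into the uncached kseg1 window,
 * so fixed SoC registers need no ioremap() on 32-bit MIPS.
 */
#include <asm/addrspace.h>

#define MY_DMA_PHYS_ADDR  0x14002000    /* hypothetical controller base */
#define MY_DMA_CHAN_LEN   0x00000100    /* register stride per channel */

static inline unsigned long my_dma_chan_io(unsigned int chan)
{
        return KSEG1ADDR(MY_DMA_PHYS_ADDR) + chan * MY_DMA_CHAN_LEN;
}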
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c index 55dd7c888517..8b60ba0675e2 100644 --- a/arch/mips/alchemy/common/irq.c +++ b/arch/mips/alchemy/common/irq.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
31 | #include <linux/irq.h> | 31 | #include <linux/irq.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/sysdev.h> | 33 | #include <linux/syscore_ops.h> |
34 | 34 | ||
35 | #include <asm/irq_cpu.h> | 35 | #include <asm/irq_cpu.h> |
36 | #include <asm/mipsregs.h> | 36 | #include <asm/mipsregs.h> |
@@ -39,6 +39,36 @@ | |||
39 | #include <asm/mach-pb1x00/pb1000.h> | 39 | #include <asm/mach-pb1x00/pb1000.h> |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | /* Interrupt Controller register offsets */ | ||
43 | #define IC_CFG0RD 0x40 | ||
44 | #define IC_CFG0SET 0x40 | ||
45 | #define IC_CFG0CLR 0x44 | ||
46 | #define IC_CFG1RD 0x48 | ||
47 | #define IC_CFG1SET 0x48 | ||
48 | #define IC_CFG1CLR 0x4C | ||
49 | #define IC_CFG2RD 0x50 | ||
50 | #define IC_CFG2SET 0x50 | ||
51 | #define IC_CFG2CLR 0x54 | ||
52 | #define IC_REQ0INT 0x54 | ||
53 | #define IC_SRCRD 0x58 | ||
54 | #define IC_SRCSET 0x58 | ||
55 | #define IC_SRCCLR 0x5C | ||
56 | #define IC_REQ1INT 0x5C | ||
57 | #define IC_ASSIGNRD 0x60 | ||
58 | #define IC_ASSIGNSET 0x60 | ||
59 | #define IC_ASSIGNCLR 0x64 | ||
60 | #define IC_WAKERD 0x68 | ||
61 | #define IC_WAKESET 0x68 | ||
62 | #define IC_WAKECLR 0x6C | ||
63 | #define IC_MASKRD 0x70 | ||
64 | #define IC_MASKSET 0x70 | ||
65 | #define IC_MASKCLR 0x74 | ||
66 | #define IC_RISINGRD 0x78 | ||
67 | #define IC_RISINGCLR 0x78 | ||
68 | #define IC_FALLINGRD 0x7C | ||
69 | #define IC_FALLINGCLR 0x7C | ||
70 | #define IC_TESTBIT 0x80 | ||
71 | |||
42 | static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type); | 72 | static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type); |
43 | 73 | ||
44 | /* NOTE on interrupt priorities: The original writers of this code said: | 74 | /* NOTE on interrupt priorities: The original writers of this code said: |
@@ -221,89 +251,101 @@ struct au1xxx_irqmap au1200_irqmap[] __initdata = { | |||
221 | static void au1x_ic0_unmask(struct irq_data *d) | 251 | static void au1x_ic0_unmask(struct irq_data *d) |
222 | { | 252 | { |
223 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; | 253 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; |
224 | au_writel(1 << bit, IC0_MASKSET); | 254 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); |
225 | au_writel(1 << bit, IC0_WAKESET); | 255 | |
226 | au_sync(); | 256 | __raw_writel(1 << bit, base + IC_MASKSET); |
257 | __raw_writel(1 << bit, base + IC_WAKESET); | ||
258 | wmb(); | ||
227 | } | 259 | } |
228 | 260 | ||
229 | static void au1x_ic1_unmask(struct irq_data *d) | 261 | static void au1x_ic1_unmask(struct irq_data *d) |
230 | { | 262 | { |
231 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; | 263 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; |
232 | au_writel(1 << bit, IC1_MASKSET); | 264 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); |
233 | au_writel(1 << bit, IC1_WAKESET); | 265 | |
266 | __raw_writel(1 << bit, base + IC_MASKSET); | ||
267 | __raw_writel(1 << bit, base + IC_WAKESET); | ||
234 | 268 | ||
235 | /* very hacky. does the pb1000 cpld auto-disable this int? | 269 | /* very hacky. does the pb1000 cpld auto-disable this int? |
236 | * nowhere in the current kernel sources is it disabled. --mlau | 270 | * nowhere in the current kernel sources is it disabled. --mlau |
237 | */ | 271 | */ |
238 | #if defined(CONFIG_MIPS_PB1000) | 272 | #if defined(CONFIG_MIPS_PB1000) |
239 | if (d->irq == AU1000_GPIO15_INT) | 273 | if (d->irq == AU1000_GPIO15_INT) |
240 | au_writel(0x4000, PB1000_MDR); /* enable int */ | 274 | __raw_writel(0x4000, (void __iomem *)PB1000_MDR); /* enable int */ |
241 | #endif | 275 | #endif |
242 | au_sync(); | 276 | wmb(); |
243 | } | 277 | } |
244 | 278 | ||
245 | static void au1x_ic0_mask(struct irq_data *d) | 279 | static void au1x_ic0_mask(struct irq_data *d) |
246 | { | 280 | { |
247 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; | 281 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; |
248 | au_writel(1 << bit, IC0_MASKCLR); | 282 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); |
249 | au_writel(1 << bit, IC0_WAKECLR); | 283 | |
250 | au_sync(); | 284 | __raw_writel(1 << bit, base + IC_MASKCLR); |
285 | __raw_writel(1 << bit, base + IC_WAKECLR); | ||
286 | wmb(); | ||
251 | } | 287 | } |
252 | 288 | ||
253 | static void au1x_ic1_mask(struct irq_data *d) | 289 | static void au1x_ic1_mask(struct irq_data *d) |
254 | { | 290 | { |
255 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; | 291 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; |
256 | au_writel(1 << bit, IC1_MASKCLR); | 292 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); |
257 | au_writel(1 << bit, IC1_WAKECLR); | 293 | |
258 | au_sync(); | 294 | __raw_writel(1 << bit, base + IC_MASKCLR); |
295 | __raw_writel(1 << bit, base + IC_WAKECLR); | ||
296 | wmb(); | ||
259 | } | 297 | } |
260 | 298 | ||
261 | static void au1x_ic0_ack(struct irq_data *d) | 299 | static void au1x_ic0_ack(struct irq_data *d) |
262 | { | 300 | { |
263 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; | 301 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; |
302 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); | ||
264 | 303 | ||
265 | /* | 304 | /* |
266 | * This may assume that we don't get interrupts from | 305 | * This may assume that we don't get interrupts from |
267 | * both edges at once, or if we do, that we don't care. | 306 | * both edges at once, or if we do, that we don't care. |
268 | */ | 307 | */ |
269 | au_writel(1 << bit, IC0_FALLINGCLR); | 308 | __raw_writel(1 << bit, base + IC_FALLINGCLR); |
270 | au_writel(1 << bit, IC0_RISINGCLR); | 309 | __raw_writel(1 << bit, base + IC_RISINGCLR); |
271 | au_sync(); | 310 | wmb(); |
272 | } | 311 | } |
273 | 312 | ||
274 | static void au1x_ic1_ack(struct irq_data *d) | 313 | static void au1x_ic1_ack(struct irq_data *d) |
275 | { | 314 | { |
276 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; | 315 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; |
316 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); | ||
277 | 317 | ||
278 | /* | 318 | /* |
279 | * This may assume that we don't get interrupts from | 319 | * This may assume that we don't get interrupts from |
280 | * both edges at once, or if we do, that we don't care. | 320 | * both edges at once, or if we do, that we don't care. |
281 | */ | 321 | */ |
282 | au_writel(1 << bit, IC1_FALLINGCLR); | 322 | __raw_writel(1 << bit, base + IC_FALLINGCLR); |
283 | au_writel(1 << bit, IC1_RISINGCLR); | 323 | __raw_writel(1 << bit, base + IC_RISINGCLR); |
284 | au_sync(); | 324 | wmb(); |
285 | } | 325 | } |
286 | 326 | ||
287 | static void au1x_ic0_maskack(struct irq_data *d) | 327 | static void au1x_ic0_maskack(struct irq_data *d) |
288 | { | 328 | { |
289 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; | 329 | unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; |
330 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); | ||
290 | 331 | ||
291 | au_writel(1 << bit, IC0_WAKECLR); | 332 | __raw_writel(1 << bit, base + IC_WAKECLR); |
292 | au_writel(1 << bit, IC0_MASKCLR); | 333 | __raw_writel(1 << bit, base + IC_MASKCLR); |
293 | au_writel(1 << bit, IC0_RISINGCLR); | 334 | __raw_writel(1 << bit, base + IC_RISINGCLR); |
294 | au_writel(1 << bit, IC0_FALLINGCLR); | 335 | __raw_writel(1 << bit, base + IC_FALLINGCLR); |
295 | au_sync(); | 336 | wmb(); |
296 | } | 337 | } |
297 | 338 | ||
298 | static void au1x_ic1_maskack(struct irq_data *d) | 339 | static void au1x_ic1_maskack(struct irq_data *d) |
299 | { | 340 | { |
300 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; | 341 | unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; |
342 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); | ||
301 | 343 | ||
302 | au_writel(1 << bit, IC1_WAKECLR); | 344 | __raw_writel(1 << bit, base + IC_WAKECLR); |
303 | au_writel(1 << bit, IC1_MASKCLR); | 345 | __raw_writel(1 << bit, base + IC_MASKCLR); |
304 | au_writel(1 << bit, IC1_RISINGCLR); | 346 | __raw_writel(1 << bit, base + IC_RISINGCLR); |
305 | au_writel(1 << bit, IC1_FALLINGCLR); | 347 | __raw_writel(1 << bit, base + IC_FALLINGCLR); |
306 | au_sync(); | 348 | wmb(); |
307 | } | 349 | } |
308 | 350 | ||
309 | static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) | 351 | static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) |
@@ -318,13 +360,13 @@ static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) | |||
318 | return -EINVAL; | 360 | return -EINVAL; |
319 | 361 | ||
320 | local_irq_save(flags); | 362 | local_irq_save(flags); |
321 | wakemsk = au_readl(SYS_WAKEMSK); | 363 | wakemsk = __raw_readl((void __iomem *)SYS_WAKEMSK); |
322 | if (on) | 364 | if (on) |
323 | wakemsk |= 1 << bit; | 365 | wakemsk |= 1 << bit; |
324 | else | 366 | else |
325 | wakemsk &= ~(1 << bit); | 367 | wakemsk &= ~(1 << bit); |
326 | au_writel(wakemsk, SYS_WAKEMSK); | 368 | __raw_writel(wakemsk, (void __iomem *)SYS_WAKEMSK); |
327 | au_sync(); | 369 | wmb(); |
328 | local_irq_restore(flags); | 370 | local_irq_restore(flags); |
329 | 371 | ||
330 | return 0; | 372 | return 0; |
@@ -356,81 +398,74 @@ static struct irq_chip au1x_ic1_chip = { | |||
356 | static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type) | 398 | static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type) |
357 | { | 399 | { |
358 | struct irq_chip *chip; | 400 | struct irq_chip *chip; |
359 | unsigned long icr[6]; | 401 | unsigned int bit, irq = d->irq; |
360 | unsigned int bit, ic, irq = d->irq; | ||
361 | irq_flow_handler_t handler = NULL; | 402 | irq_flow_handler_t handler = NULL; |
362 | unsigned char *name = NULL; | 403 | unsigned char *name = NULL; |
404 | void __iomem *base; | ||
363 | int ret; | 405 | int ret; |
364 | 406 | ||
365 | if (irq >= AU1000_INTC1_INT_BASE) { | 407 | if (irq >= AU1000_INTC1_INT_BASE) { |
366 | bit = irq - AU1000_INTC1_INT_BASE; | 408 | bit = irq - AU1000_INTC1_INT_BASE; |
367 | chip = &au1x_ic1_chip; | 409 | chip = &au1x_ic1_chip; |
368 | ic = 1; | 410 | base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); |
369 | } else { | 411 | } else { |
370 | bit = irq - AU1000_INTC0_INT_BASE; | 412 | bit = irq - AU1000_INTC0_INT_BASE; |
371 | chip = &au1x_ic0_chip; | 413 | chip = &au1x_ic0_chip; |
372 | ic = 0; | 414 | base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); |
373 | } | 415 | } |
374 | 416 | ||
375 | if (bit > 31) | 417 | if (bit > 31) |
376 | return -EINVAL; | 418 | return -EINVAL; |
377 | 419 | ||
378 | icr[0] = ic ? IC1_CFG0SET : IC0_CFG0SET; | ||
379 | icr[1] = ic ? IC1_CFG1SET : IC0_CFG1SET; | ||
380 | icr[2] = ic ? IC1_CFG2SET : IC0_CFG2SET; | ||
381 | icr[3] = ic ? IC1_CFG0CLR : IC0_CFG0CLR; | ||
382 | icr[4] = ic ? IC1_CFG1CLR : IC0_CFG1CLR; | ||
383 | icr[5] = ic ? IC1_CFG2CLR : IC0_CFG2CLR; | ||
384 | |||
385 | ret = 0; | 420 | ret = 0; |
386 | 421 | ||
387 | switch (flow_type) { /* cfgregs 2:1:0 */ | 422 | switch (flow_type) { /* cfgregs 2:1:0 */ |
388 | case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */ | 423 | case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */ |
389 | au_writel(1 << bit, icr[5]); | 424 | __raw_writel(1 << bit, base + IC_CFG2CLR); |
390 | au_writel(1 << bit, icr[4]); | 425 | __raw_writel(1 << bit, base + IC_CFG1CLR); |
391 | au_writel(1 << bit, icr[0]); | 426 | __raw_writel(1 << bit, base + IC_CFG0SET); |
392 | handler = handle_edge_irq; | 427 | handler = handle_edge_irq; |
393 | name = "riseedge"; | 428 | name = "riseedge"; |
394 | break; | 429 | break; |
395 | case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */ | 430 | case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */ |
396 | au_writel(1 << bit, icr[5]); | 431 | __raw_writel(1 << bit, base + IC_CFG2CLR); |
397 | au_writel(1 << bit, icr[1]); | 432 | __raw_writel(1 << bit, base + IC_CFG1SET); |
398 | au_writel(1 << bit, icr[3]); | 433 | __raw_writel(1 << bit, base + IC_CFG0CLR); |
399 | handler = handle_edge_irq; | 434 | handler = handle_edge_irq; |
400 | name = "falledge"; | 435 | name = "falledge"; |
401 | break; | 436 | break; |
402 | case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */ | 437 | case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */ |
403 | au_writel(1 << bit, icr[5]); | 438 | __raw_writel(1 << bit, base + IC_CFG2CLR); |
404 | au_writel(1 << bit, icr[1]); | 439 | __raw_writel(1 << bit, base + IC_CFG1SET); |
405 | au_writel(1 << bit, icr[0]); | 440 | __raw_writel(1 << bit, base + IC_CFG0SET); |
406 | handler = handle_edge_irq; | 441 | handler = handle_edge_irq; |
407 | name = "bothedge"; | 442 | name = "bothedge"; |
408 | break; | 443 | break; |
409 | case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */ | 444 | case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */ |
410 | au_writel(1 << bit, icr[2]); | 445 | __raw_writel(1 << bit, base + IC_CFG2SET); |
411 | au_writel(1 << bit, icr[4]); | 446 | __raw_writel(1 << bit, base + IC_CFG1CLR); |
412 | au_writel(1 << bit, icr[0]); | 447 | __raw_writel(1 << bit, base + IC_CFG0SET); |
413 | handler = handle_level_irq; | 448 | handler = handle_level_irq; |
414 | name = "hilevel"; | 449 | name = "hilevel"; |
415 | break; | 450 | break; |
416 | case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */ | 451 | case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */ |
417 | au_writel(1 << bit, icr[2]); | 452 | __raw_writel(1 << bit, base + IC_CFG2SET); |
418 | au_writel(1 << bit, icr[1]); | 453 | __raw_writel(1 << bit, base + IC_CFG1SET); |
419 | au_writel(1 << bit, icr[3]); | 454 | __raw_writel(1 << bit, base + IC_CFG0CLR); |
420 | handler = handle_level_irq; | 455 | handler = handle_level_irq; |
421 | name = "lowlevel"; | 456 | name = "lowlevel"; |
422 | break; | 457 | break; |
423 | case IRQ_TYPE_NONE: /* 0:0:0 */ | 458 | case IRQ_TYPE_NONE: /* 0:0:0 */ |
424 | au_writel(1 << bit, icr[5]); | 459 | __raw_writel(1 << bit, base + IC_CFG2CLR); |
425 | au_writel(1 << bit, icr[4]); | 460 | __raw_writel(1 << bit, base + IC_CFG1CLR); |
426 | au_writel(1 << bit, icr[3]); | 461 | __raw_writel(1 << bit, base + IC_CFG0CLR); |
427 | break; | 462 | break; |
428 | default: | 463 | default: |
429 | ret = -EINVAL; | 464 | ret = -EINVAL; |
430 | } | 465 | } |
431 | __irq_set_chip_handler_name_locked(d->irq, chip, handler, name); | 466 | __irq_set_chip_handler_name_locked(d->irq, chip, handler, name); |
432 | 467 | ||
433 | au_sync(); | 468 | wmb(); |
434 | 469 | ||
435 | return ret; | 470 | return ret; |
436 | } | 471 | } |
@@ -444,21 +479,21 @@ asmlinkage void plat_irq_dispatch(void) | |||
444 | off = MIPS_CPU_IRQ_BASE + 7; | 479 | off = MIPS_CPU_IRQ_BASE + 7; |
445 | goto handle; | 480 | goto handle; |
446 | } else if (pending & CAUSEF_IP2) { | 481 | } else if (pending & CAUSEF_IP2) { |
447 | s = IC0_REQ0INT; | 482 | s = KSEG1ADDR(AU1000_IC0_PHYS_ADDR) + IC_REQ0INT; |
448 | off = AU1000_INTC0_INT_BASE; | 483 | off = AU1000_INTC0_INT_BASE; |
449 | } else if (pending & CAUSEF_IP3) { | 484 | } else if (pending & CAUSEF_IP3) { |
450 | s = IC0_REQ1INT; | 485 | s = KSEG1ADDR(AU1000_IC0_PHYS_ADDR) + IC_REQ1INT; |
451 | off = AU1000_INTC0_INT_BASE; | 486 | off = AU1000_INTC0_INT_BASE; |
452 | } else if (pending & CAUSEF_IP4) { | 487 | } else if (pending & CAUSEF_IP4) { |
453 | s = IC1_REQ0INT; | 488 | s = KSEG1ADDR(AU1000_IC1_PHYS_ADDR) + IC_REQ0INT; |
454 | off = AU1000_INTC1_INT_BASE; | 489 | off = AU1000_INTC1_INT_BASE; |
455 | } else if (pending & CAUSEF_IP5) { | 490 | } else if (pending & CAUSEF_IP5) { |
456 | s = IC1_REQ1INT; | 491 | s = KSEG1ADDR(AU1000_IC1_PHYS_ADDR) + IC_REQ1INT; |
457 | off = AU1000_INTC1_INT_BASE; | 492 | off = AU1000_INTC1_INT_BASE; |
458 | } else | 493 | } else |
459 | goto spurious; | 494 | goto spurious; |
460 | 495 | ||
461 | s = au_readl(s); | 496 | s = __raw_readl((void __iomem *)s); |
462 | if (unlikely(!s)) { | 497 | if (unlikely(!s)) { |
463 | spurious: | 498 | spurious: |
464 | spurious_interrupt(); | 499 | spurious_interrupt(); |
@@ -469,48 +504,42 @@ handle: | |||
469 | do_IRQ(off); | 504 | do_IRQ(off); |
470 | } | 505 | } |
471 | 506 | ||
507 | |||
508 | static inline void ic_init(void __iomem *base) | ||
509 | { | ||
510 | /* initialize interrupt controller to a safe state */ | ||
511 | __raw_writel(0xffffffff, base + IC_CFG0CLR); | ||
512 | __raw_writel(0xffffffff, base + IC_CFG1CLR); | ||
513 | __raw_writel(0xffffffff, base + IC_CFG2CLR); | ||
514 | __raw_writel(0xffffffff, base + IC_MASKCLR); | ||
515 | __raw_writel(0xffffffff, base + IC_ASSIGNCLR); | ||
516 | __raw_writel(0xffffffff, base + IC_WAKECLR); | ||
517 | __raw_writel(0xffffffff, base + IC_SRCSET); | ||
518 | __raw_writel(0xffffffff, base + IC_FALLINGCLR); | ||
519 | __raw_writel(0xffffffff, base + IC_RISINGCLR); | ||
520 | __raw_writel(0x00000000, base + IC_TESTBIT); | ||
521 | wmb(); | ||
522 | } | ||
523 | |||
472 | static void __init au1000_init_irq(struct au1xxx_irqmap *map) | 524 | static void __init au1000_init_irq(struct au1xxx_irqmap *map) |
473 | { | 525 | { |
474 | unsigned int bit, irq_nr; | 526 | unsigned int bit, irq_nr; |
475 | int i; | 527 | void __iomem *base; |
476 | |||
477 | /* | ||
478 | * Initialize interrupt controllers to a safe state. | ||
479 | */ | ||
480 | au_writel(0xffffffff, IC0_CFG0CLR); | ||
481 | au_writel(0xffffffff, IC0_CFG1CLR); | ||
482 | au_writel(0xffffffff, IC0_CFG2CLR); | ||
483 | au_writel(0xffffffff, IC0_MASKCLR); | ||
484 | au_writel(0xffffffff, IC0_ASSIGNCLR); | ||
485 | au_writel(0xffffffff, IC0_WAKECLR); | ||
486 | au_writel(0xffffffff, IC0_SRCSET); | ||
487 | au_writel(0xffffffff, IC0_FALLINGCLR); | ||
488 | au_writel(0xffffffff, IC0_RISINGCLR); | ||
489 | au_writel(0x00000000, IC0_TESTBIT); | ||
490 | |||
491 | au_writel(0xffffffff, IC1_CFG0CLR); | ||
492 | au_writel(0xffffffff, IC1_CFG1CLR); | ||
493 | au_writel(0xffffffff, IC1_CFG2CLR); | ||
494 | au_writel(0xffffffff, IC1_MASKCLR); | ||
495 | au_writel(0xffffffff, IC1_ASSIGNCLR); | ||
496 | au_writel(0xffffffff, IC1_WAKECLR); | ||
497 | au_writel(0xffffffff, IC1_SRCSET); | ||
498 | au_writel(0xffffffff, IC1_FALLINGCLR); | ||
499 | au_writel(0xffffffff, IC1_RISINGCLR); | ||
500 | au_writel(0x00000000, IC1_TESTBIT); | ||
501 | 528 | ||
529 | ic_init((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR)); | ||
530 | ic_init((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR)); | ||
502 | mips_cpu_irq_init(); | 531 | mips_cpu_irq_init(); |
503 | 532 | ||
504 | /* register all 64 possible IC0+IC1 irq sources as type "none". | 533 | /* register all 64 possible IC0+IC1 irq sources as type "none". |
505 | * Use set_irq_type() to set edge/level behaviour at runtime. | 534 | * Use set_irq_type() to set edge/level behaviour at runtime. |
506 | */ | 535 | */ |
507 | for (i = AU1000_INTC0_INT_BASE; | 536 | for (irq_nr = AU1000_INTC0_INT_BASE; |
508 | (i < AU1000_INTC0_INT_BASE + 32); i++) | 537 | (irq_nr < AU1000_INTC0_INT_BASE + 32); irq_nr++) |
509 | au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE); | 538 | au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE); |
510 | 539 | ||
511 | for (i = AU1000_INTC1_INT_BASE; | 540 | for (irq_nr = AU1000_INTC1_INT_BASE; |
512 | (i < AU1000_INTC1_INT_BASE + 32); i++) | 541 | (irq_nr < AU1000_INTC1_INT_BASE + 32); irq_nr++) |
513 | au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE); | 542 | au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE); |
514 | 543 | ||
515 | /* | 544 | /* |
516 | * Initialize IC0, which is fixed per processor. | 545 | * Initialize IC0, which is fixed per processor. |
@@ -520,13 +549,13 @@ static void __init au1000_init_irq(struct au1xxx_irqmap *map) | |||
520 | 549 | ||
521 | if (irq_nr >= AU1000_INTC1_INT_BASE) { | 550 | if (irq_nr >= AU1000_INTC1_INT_BASE) { |
522 | bit = irq_nr - AU1000_INTC1_INT_BASE; | 551 | bit = irq_nr - AU1000_INTC1_INT_BASE; |
523 | if (map->im_request) | 552 | base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); |
524 | au_writel(1 << bit, IC1_ASSIGNSET); | ||
525 | } else { | 553 | } else { |
526 | bit = irq_nr - AU1000_INTC0_INT_BASE; | 554 | bit = irq_nr - AU1000_INTC0_INT_BASE; |
527 | if (map->im_request) | 555 | base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); |
528 | au_writel(1 << bit, IC0_ASSIGNSET); | ||
529 | } | 556 | } |
557 | if (map->im_request) | ||
558 | __raw_writel(1 << bit, base + IC_ASSIGNSET); | ||
530 | 559 | ||
531 | au1x_ic_settype(irq_get_irq_data(irq_nr), map->im_type); | 560 | au1x_ic_settype(irq_get_irq_data(irq_nr), map->im_type); |
532 | ++map; | 561 | ++map; |
@@ -556,90 +585,62 @@ void __init arch_init_irq(void) | |||
556 | } | 585 | } |
557 | } | 586 | } |
558 | 587 | ||
559 | struct alchemy_ic_sysdev { | ||
560 | struct sys_device sysdev; | ||
561 | void __iomem *base; | ||
562 | unsigned long pmdata[7]; | ||
563 | }; | ||
564 | 588 | ||
565 | static int alchemy_ic_suspend(struct sys_device *dev, pm_message_t state) | 589 | static unsigned long alchemy_ic_pmdata[7 * 2]; |
566 | { | ||
567 | struct alchemy_ic_sysdev *icdev = | ||
568 | container_of(dev, struct alchemy_ic_sysdev, sysdev); | ||
569 | 590 | ||
570 | icdev->pmdata[0] = __raw_readl(icdev->base + IC_CFG0RD); | 591 | static inline void alchemy_ic_suspend_one(void __iomem *base, unsigned long *d) |
571 | icdev->pmdata[1] = __raw_readl(icdev->base + IC_CFG1RD); | 592 | { |
572 | icdev->pmdata[2] = __raw_readl(icdev->base + IC_CFG2RD); | 593 | d[0] = __raw_readl(base + IC_CFG0RD); |
573 | icdev->pmdata[3] = __raw_readl(icdev->base + IC_SRCRD); | 594 | d[1] = __raw_readl(base + IC_CFG1RD); |
574 | icdev->pmdata[4] = __raw_readl(icdev->base + IC_ASSIGNRD); | 595 | d[2] = __raw_readl(base + IC_CFG2RD); |
575 | icdev->pmdata[5] = __raw_readl(icdev->base + IC_WAKERD); | 596 | d[3] = __raw_readl(base + IC_SRCRD); |
576 | icdev->pmdata[6] = __raw_readl(icdev->base + IC_MASKRD); | 597 | d[4] = __raw_readl(base + IC_ASSIGNRD); |
577 | 598 | d[5] = __raw_readl(base + IC_WAKERD); | |
578 | return 0; | 599 | d[6] = __raw_readl(base + IC_MASKRD); |
600 | ic_init(base); /* shut it up too while at it */ | ||
579 | } | 601 | } |
580 | 602 | ||
581 | static int alchemy_ic_resume(struct sys_device *dev) | 603 | static inline void alchemy_ic_resume_one(void __iomem *base, unsigned long *d) |
582 | { | 604 | { |
583 | struct alchemy_ic_sysdev *icdev = | 605 | ic_init(base); |
584 | container_of(dev, struct alchemy_ic_sysdev, sysdev); | 606 | |
585 | 607 | __raw_writel(d[0], base + IC_CFG0SET); | |
586 | __raw_writel(0xffffffff, icdev->base + IC_MASKCLR); | 608 | __raw_writel(d[1], base + IC_CFG1SET); |
587 | __raw_writel(0xffffffff, icdev->base + IC_CFG0CLR); | 609 | __raw_writel(d[2], base + IC_CFG2SET); |
588 | __raw_writel(0xffffffff, icdev->base + IC_CFG1CLR); | 610 | __raw_writel(d[3], base + IC_SRCSET); |
589 | __raw_writel(0xffffffff, icdev->base + IC_CFG2CLR); | 611 | __raw_writel(d[4], base + IC_ASSIGNSET); |
590 | __raw_writel(0xffffffff, icdev->base + IC_SRCCLR); | 612 | __raw_writel(d[5], base + IC_WAKESET); |
591 | __raw_writel(0xffffffff, icdev->base + IC_ASSIGNCLR); | ||
592 | __raw_writel(0xffffffff, icdev->base + IC_WAKECLR); | ||
593 | __raw_writel(0xffffffff, icdev->base + IC_RISINGCLR); | ||
594 | __raw_writel(0xffffffff, icdev->base + IC_FALLINGCLR); | ||
595 | __raw_writel(0x00000000, icdev->base + IC_TESTBIT); | ||
596 | wmb(); | ||
597 | __raw_writel(icdev->pmdata[0], icdev->base + IC_CFG0SET); | ||
598 | __raw_writel(icdev->pmdata[1], icdev->base + IC_CFG1SET); | ||
599 | __raw_writel(icdev->pmdata[2], icdev->base + IC_CFG2SET); | ||
600 | __raw_writel(icdev->pmdata[3], icdev->base + IC_SRCSET); | ||
601 | __raw_writel(icdev->pmdata[4], icdev->base + IC_ASSIGNSET); | ||
602 | __raw_writel(icdev->pmdata[5], icdev->base + IC_WAKESET); | ||
603 | wmb(); | 613 | wmb(); |
604 | 614 | ||
605 | __raw_writel(icdev->pmdata[6], icdev->base + IC_MASKSET); | 615 | __raw_writel(d[6], base + IC_MASKSET); |
606 | wmb(); | 616 | wmb(); |
617 | } | ||
607 | 618 | ||
619 | static int alchemy_ic_suspend(void) | ||
620 | { | ||
621 | alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR), | ||
622 | alchemy_ic_pmdata); | ||
623 | alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR), | ||
624 | &alchemy_ic_pmdata[7]); | ||
608 | return 0; | 625 | return 0; |
609 | } | 626 | } |
610 | 627 | ||
611 | static struct sysdev_class alchemy_ic_sysdev_class = { | 628 | static void alchemy_ic_resume(void) |
612 | .name = "ic", | 629 | { |
630 | alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR), | ||
631 | &alchemy_ic_pmdata[7]); | ||
632 | alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR), | ||
633 | alchemy_ic_pmdata); | ||
634 | } | ||
635 | |||
636 | static struct syscore_ops alchemy_ic_syscore_ops = { | ||
613 | .suspend = alchemy_ic_suspend, | 637 | .suspend = alchemy_ic_suspend, |
614 | .resume = alchemy_ic_resume, | 638 | .resume = alchemy_ic_resume, |
615 | }; | 639 | }; |
616 | 640 | ||
617 | static int __init alchemy_ic_sysdev_init(void) | 641 | static int __init alchemy_ic_pm_init(void) |
618 | { | 642 | { |
619 | struct alchemy_ic_sysdev *icdev; | 643 | register_syscore_ops(&alchemy_ic_syscore_ops); |
620 | unsigned long icbase[2] = { IC0_PHYS_ADDR, IC1_PHYS_ADDR }; | ||
621 | int err, i; | ||
622 | |||
623 | err = sysdev_class_register(&alchemy_ic_sysdev_class); | ||
624 | if (err) | ||
625 | return err; | ||
626 | |||
627 | for (i = 0; i < 2; i++) { | ||
628 | icdev = kzalloc(sizeof(struct alchemy_ic_sysdev), GFP_KERNEL); | ||
629 | if (!icdev) | ||
630 | return -ENOMEM; | ||
631 | |||
632 | icdev->base = ioremap(icbase[i], 0x1000); | ||
633 | |||
634 | icdev->sysdev.id = i; | ||
635 | icdev->sysdev.cls = &alchemy_ic_sysdev_class; | ||
636 | err = sysdev_register(&icdev->sysdev); | ||
637 | if (err) { | ||
638 | kfree(icdev); | ||
639 | return err; | ||
640 | } | ||
641 | } | ||
642 | |||
643 | return 0; | 644 | return 0; |
644 | } | 645 | } |
645 | device_initcall(alchemy_ic_sysdev_init); | 646 | device_initcall(alchemy_ic_pm_init); |
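The irq.c rework drops the duplicated IC0_*/IC1_* register macros in favour of one set of offsets applied to whichever controller base is in use, with every access becoming __raw_writel() followed by wmb(). A condensed, hypothetical sketch of that mask/unmask idiom follows; the my_ic_* names, offsets and IRQ base are placeholders, not symbols from this commit.

#include <linux/io.h>
#include <linux/irq.h>

#define MY_IC_IRQ_BASE  32      /* first Linux IRQ handled by this chip */
#define MY_IC_MASKSET   0x70    /* write 1 to enable a source  */
#define MY_IC_MASKCLR   0x74    /* write 1 to disable a source */

static void __iomem *my_ic_base;        /* controller registers, mapped at init */

static void my_ic_unmask(struct irq_data *d)
{
        unsigned int bit = d->irq - MY_IC_IRQ_BASE;

        __raw_writel(1 << bit, my_ic_base + MY_IC_MASKSET);
        wmb();                          /* push the write out to the IC */
}

static void my_ic_mask(struct irq_data *d)
{
        unsigned int bit = d->irq - MY_IC_IRQ_BASE;

        __raw_writel(1 << bit, my_ic_base + MY_IC_MASKCLR);
        wmb();
}

static struct irq_chip my_ic_chip = {
        .name           = "my-ic",
        .irq_mask       = my_ic_mask,
        .irq_unmask     = my_ic_unmask,
};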
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c index 9e7814db3d03..3b2c18b14341 100644 --- a/arch/mips/alchemy/common/platform.c +++ b/arch/mips/alchemy/common/platform.c | |||
@@ -13,9 +13,10 @@ | |||
13 | 13 | ||
14 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
15 | #include <linux/etherdevice.h> | 15 | #include <linux/etherdevice.h> |
16 | #include <linux/init.h> | ||
16 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
17 | #include <linux/serial_8250.h> | 18 | #include <linux/serial_8250.h> |
18 | #include <linux/init.h> | 19 | #include <linux/slab.h> |
19 | 20 | ||
20 | #include <asm/mach-au1x00/au1xxx.h> | 21 | #include <asm/mach-au1x00/au1xxx.h> |
21 | #include <asm/mach-au1x00/au1xxx_dbdma.h> | 22 | #include <asm/mach-au1x00/au1xxx_dbdma.h> |
@@ -30,21 +31,12 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state, | |||
30 | #ifdef CONFIG_SERIAL_8250 | 31 | #ifdef CONFIG_SERIAL_8250 |
31 | switch (state) { | 32 | switch (state) { |
32 | case 0: | 33 | case 0: |
33 | if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) { | 34 | alchemy_uart_enable(CPHYSADDR(port->membase)); |
34 | /* power-on sequence as suggested in the databooks */ | ||
35 | __raw_writel(0, port->membase + UART_MOD_CNTRL); | ||
36 | wmb(); | ||
37 | __raw_writel(1, port->membase + UART_MOD_CNTRL); | ||
38 | wmb(); | ||
39 | } | ||
40 | __raw_writel(3, port->membase + UART_MOD_CNTRL); /* full on */ | ||
41 | wmb(); | ||
42 | serial8250_do_pm(port, state, old_state); | 35 | serial8250_do_pm(port, state, old_state); |
43 | break; | 36 | break; |
44 | case 3: /* power off */ | 37 | case 3: /* power off */ |
45 | serial8250_do_pm(port, state, old_state); | 38 | serial8250_do_pm(port, state, old_state); |
46 | __raw_writel(0, port->membase + UART_MOD_CNTRL); | 39 | alchemy_uart_disable(CPHYSADDR(port->membase)); |
47 | wmb(); | ||
48 | break; | 40 | break; |
49 | default: | 41 | default: |
50 | serial8250_do_pm(port, state, old_state); | 42 | serial8250_do_pm(port, state, old_state); |
@@ -65,38 +57,60 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state, | |||
65 | .pm = alchemy_8250_pm, \ | 57 | .pm = alchemy_8250_pm, \ |
66 | } | 58 | } |
67 | 59 | ||
68 | static struct plat_serial8250_port au1x00_uart_data[] = { | 60 | static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = { |
69 | #if defined(CONFIG_SOC_AU1000) | 61 | [ALCHEMY_CPU_AU1000] = { |
70 | PORT(UART0_PHYS_ADDR, AU1000_UART0_INT), | 62 | PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT), |
71 | PORT(UART1_PHYS_ADDR, AU1000_UART1_INT), | 63 | PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT), |
72 | PORT(UART2_PHYS_ADDR, AU1000_UART2_INT), | 64 | PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT), |
73 | PORT(UART3_PHYS_ADDR, AU1000_UART3_INT), | 65 | PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT), |
74 | #elif defined(CONFIG_SOC_AU1500) | 66 | }, |
75 | PORT(UART0_PHYS_ADDR, AU1500_UART0_INT), | 67 | [ALCHEMY_CPU_AU1500] = { |
76 | PORT(UART3_PHYS_ADDR, AU1500_UART3_INT), | 68 | PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT), |
77 | #elif defined(CONFIG_SOC_AU1100) | 69 | PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT), |
78 | PORT(UART0_PHYS_ADDR, AU1100_UART0_INT), | 70 | }, |
79 | PORT(UART1_PHYS_ADDR, AU1100_UART1_INT), | 71 | [ALCHEMY_CPU_AU1100] = { |
80 | PORT(UART3_PHYS_ADDR, AU1100_UART3_INT), | 72 | PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT), |
81 | #elif defined(CONFIG_SOC_AU1550) | 73 | PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT), |
82 | PORT(UART0_PHYS_ADDR, AU1550_UART0_INT), | 74 | PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT), |
83 | PORT(UART1_PHYS_ADDR, AU1550_UART1_INT), | 75 | }, |
84 | PORT(UART3_PHYS_ADDR, AU1550_UART3_INT), | 76 | [ALCHEMY_CPU_AU1550] = { |
85 | #elif defined(CONFIG_SOC_AU1200) | 77 | PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT), |
86 | PORT(UART0_PHYS_ADDR, AU1200_UART0_INT), | 78 | PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT), |
87 | PORT(UART1_PHYS_ADDR, AU1200_UART1_INT), | 79 | PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT), |
88 | #endif | 80 | }, |
89 | { }, | 81 | [ALCHEMY_CPU_AU1200] = { |
82 | PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT), | ||
83 | PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT), | ||
84 | }, | ||
90 | }; | 85 | }; |
91 | 86 | ||
92 | static struct platform_device au1xx0_uart_device = { | 87 | static struct platform_device au1xx0_uart_device = { |
93 | .name = "serial8250", | 88 | .name = "serial8250", |
94 | .id = PLAT8250_DEV_AU1X00, | 89 | .id = PLAT8250_DEV_AU1X00, |
95 | .dev = { | ||
96 | .platform_data = au1x00_uart_data, | ||
97 | }, | ||
98 | }; | 90 | }; |
99 | 91 | ||
92 | static void __init alchemy_setup_uarts(int ctype) | ||
93 | { | ||
94 | unsigned int uartclk = get_au1x00_uart_baud_base() * 16; | ||
95 | int s = sizeof(struct plat_serial8250_port); | ||
96 | int c = alchemy_get_uarts(ctype); | ||
97 | struct plat_serial8250_port *ports; | ||
98 | |||
99 | ports = kzalloc(s * (c + 1), GFP_KERNEL); | ||
100 | if (!ports) { | ||
101 | printk(KERN_INFO "Alchemy: no memory for UART data\n"); | ||
102 | return; | ||
103 | } | ||
104 | memcpy(ports, au1x00_uart_data[ctype], s * c); | ||
105 | au1xx0_uart_device.dev.platform_data = ports; | ||
106 | |||
107 | /* Fill up uartclk. */ | ||
108 | for (s = 0; s < c; s++) | ||
109 | ports[s].uartclk = uartclk; | ||
110 | if (platform_device_register(&au1xx0_uart_device)) | ||
111 | printk(KERN_INFO "Alchemy: failed to register UARTs\n"); | ||
112 | } | ||
113 | |||
100 | /* OHCI (USB full speed host controller) */ | 114 | /* OHCI (USB full speed host controller) */ |
101 | static struct resource au1xxx_usb_ohci_resources[] = { | 115 | static struct resource au1xxx_usb_ohci_resources[] = { |
102 | [0] = { | 116 | [0] = { |
@@ -269,8 +283,8 @@ extern struct au1xmmc_platform_data au1xmmc_platdata[2]; | |||
269 | 283 | ||
270 | static struct resource au1200_mmc0_resources[] = { | 284 | static struct resource au1200_mmc0_resources[] = { |
271 | [0] = { | 285 | [0] = { |
272 | .start = SD0_PHYS_ADDR, | 286 | .start = AU1100_SD0_PHYS_ADDR, |
273 | .end = SD0_PHYS_ADDR + 0x7ffff, | 287 | .end = AU1100_SD0_PHYS_ADDR + 0xfff, |
274 | .flags = IORESOURCE_MEM, | 288 | .flags = IORESOURCE_MEM, |
275 | }, | 289 | }, |
276 | [1] = { | 290 | [1] = { |
@@ -305,8 +319,8 @@ static struct platform_device au1200_mmc0_device = { | |||
305 | #ifndef CONFIG_MIPS_DB1200 | 319 | #ifndef CONFIG_MIPS_DB1200 |
306 | static struct resource au1200_mmc1_resources[] = { | 320 | static struct resource au1200_mmc1_resources[] = { |
307 | [0] = { | 321 | [0] = { |
308 | .start = SD1_PHYS_ADDR, | 322 | .start = AU1100_SD1_PHYS_ADDR, |
309 | .end = SD1_PHYS_ADDR + 0x7ffff, | 323 | .end = AU1100_SD1_PHYS_ADDR + 0xfff, |
310 | .flags = IORESOURCE_MEM, | 324 | .flags = IORESOURCE_MEM, |
311 | }, | 325 | }, |
312 | [1] = { | 326 | [1] = { |
@@ -359,15 +373,16 @@ static struct platform_device pbdb_smbus_device = { | |||
359 | #endif | 373 | #endif |
360 | 374 | ||
361 | /* Macro to help defining the Ethernet MAC resources */ | 375 | /* Macro to help defining the Ethernet MAC resources */ |
376 | #define MAC_RES_COUNT 3 /* MAC regs base, MAC enable reg, MAC INT */ | ||
362 | #define MAC_RES(_base, _enable, _irq) \ | 377 | #define MAC_RES(_base, _enable, _irq) \ |
363 | { \ | 378 | { \ |
364 | .start = CPHYSADDR(_base), \ | 379 | .start = _base, \ |
365 | .end = CPHYSADDR(_base + 0xffff), \ | 380 | .end = _base + 0xffff, \ |
366 | .flags = IORESOURCE_MEM, \ | 381 | .flags = IORESOURCE_MEM, \ |
367 | }, \ | 382 | }, \ |
368 | { \ | 383 | { \ |
369 | .start = CPHYSADDR(_enable), \ | 384 | .start = _enable, \ |
370 | .end = CPHYSADDR(_enable + 0x3), \ | 385 | .end = _enable + 0x3, \ |
371 | .flags = IORESOURCE_MEM, \ | 386 | .flags = IORESOURCE_MEM, \ |
372 | }, \ | 387 | }, \ |
373 | { \ | 388 | { \ |
@@ -376,19 +391,29 @@ static struct platform_device pbdb_smbus_device = { | |||
376 | .flags = IORESOURCE_IRQ \ | 391 | .flags = IORESOURCE_IRQ \ |
377 | } | 392 | } |
378 | 393 | ||
379 | static struct resource au1xxx_eth0_resources[] = { | 394 | static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = { |
380 | #if defined(CONFIG_SOC_AU1000) | 395 | [ALCHEMY_CPU_AU1000] = { |
381 | MAC_RES(AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT), | 396 | MAC_RES(AU1000_MAC0_PHYS_ADDR, |
382 | #elif defined(CONFIG_SOC_AU1100) | 397 | AU1000_MACEN_PHYS_ADDR, |
383 | MAC_RES(AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT), | 398 | AU1000_MAC0_DMA_INT) |
384 | #elif defined(CONFIG_SOC_AU1550) | 399 | }, |
385 | MAC_RES(AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT), | 400 | [ALCHEMY_CPU_AU1500] = { |
386 | #elif defined(CONFIG_SOC_AU1500) | 401 | MAC_RES(AU1500_MAC0_PHYS_ADDR, |
387 | MAC_RES(AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT), | 402 | AU1500_MACEN_PHYS_ADDR, |
388 | #endif | 403 | AU1500_MAC0_DMA_INT) |
404 | }, | ||
405 | [ALCHEMY_CPU_AU1100] = { | ||
406 | MAC_RES(AU1000_MAC0_PHYS_ADDR, | ||
407 | AU1000_MACEN_PHYS_ADDR, | ||
408 | AU1100_MAC0_DMA_INT) | ||
409 | }, | ||
410 | [ALCHEMY_CPU_AU1550] = { | ||
411 | MAC_RES(AU1000_MAC0_PHYS_ADDR, | ||
412 | AU1000_MACEN_PHYS_ADDR, | ||
413 | AU1550_MAC0_DMA_INT) | ||
414 | }, | ||
389 | }; | 415 | }; |
390 | 416 | ||
391 | |||
392 | static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { | 417 | static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { |
393 | .phy1_search_mac0 = 1, | 418 | .phy1_search_mac0 = 1, |
394 | }; | 419 | }; |
@@ -396,20 +421,26 @@ static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { | |||
396 | static struct platform_device au1xxx_eth0_device = { | 421 | static struct platform_device au1xxx_eth0_device = { |
397 | .name = "au1000-eth", | 422 | .name = "au1000-eth", |
398 | .id = 0, | 423 | .id = 0, |
399 | .num_resources = ARRAY_SIZE(au1xxx_eth0_resources), | 424 | .num_resources = MAC_RES_COUNT, |
400 | .resource = au1xxx_eth0_resources, | ||
401 | .dev.platform_data = &au1xxx_eth0_platform_data, | 425 | .dev.platform_data = &au1xxx_eth0_platform_data, |
402 | }; | 426 | }; |
403 | 427 | ||
404 | #ifndef CONFIG_SOC_AU1100 | 428 | static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = { |
405 | static struct resource au1xxx_eth1_resources[] = { | 429 | [ALCHEMY_CPU_AU1000] = { |
406 | #if defined(CONFIG_SOC_AU1000) | 430 | MAC_RES(AU1000_MAC1_PHYS_ADDR, |
407 | MAC_RES(AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT), | 431 | AU1000_MACEN_PHYS_ADDR + 4, |
408 | #elif defined(CONFIG_SOC_AU1550) | 432 | AU1000_MAC1_DMA_INT) |
409 | MAC_RES(AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT), | 433 | }, |
410 | #elif defined(CONFIG_SOC_AU1500) | 434 | [ALCHEMY_CPU_AU1500] = { |
411 | MAC_RES(AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT), | 435 | MAC_RES(AU1500_MAC1_PHYS_ADDR, |
412 | #endif | 436 | AU1500_MACEN_PHYS_ADDR + 4, |
437 | AU1500_MAC1_DMA_INT) | ||
438 | }, | ||
439 | [ALCHEMY_CPU_AU1550] = { | ||
440 | MAC_RES(AU1000_MAC1_PHYS_ADDR, | ||
441 | AU1000_MACEN_PHYS_ADDR + 4, | ||
442 | AU1550_MAC1_DMA_INT) | ||
443 | }, | ||
413 | }; | 444 | }; |
414 | 445 | ||
415 | static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { | 446 | static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { |
@@ -419,11 +450,9 @@ static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { | |||
419 | static struct platform_device au1xxx_eth1_device = { | 450 | static struct platform_device au1xxx_eth1_device = { |
420 | .name = "au1000-eth", | 451 | .name = "au1000-eth", |
421 | .id = 1, | 452 | .id = 1, |
422 | .num_resources = ARRAY_SIZE(au1xxx_eth1_resources), | 453 | .num_resources = MAC_RES_COUNT, |
423 | .resource = au1xxx_eth1_resources, | ||
424 | .dev.platform_data = &au1xxx_eth1_platform_data, | 454 | .dev.platform_data = &au1xxx_eth1_platform_data, |
425 | }; | 455 | }; |
426 | #endif | ||
427 | 456 | ||
428 | void __init au1xxx_override_eth_cfg(unsigned int port, | 457 | void __init au1xxx_override_eth_cfg(unsigned int port, |
429 | struct au1000_eth_platform_data *eth_data) | 458 | struct au1000_eth_platform_data *eth_data) |
@@ -434,15 +463,65 @@ void __init au1xxx_override_eth_cfg(unsigned int port, | |||
434 | if (port == 0) | 463 | if (port == 0) |
435 | memcpy(&au1xxx_eth0_platform_data, eth_data, | 464 | memcpy(&au1xxx_eth0_platform_data, eth_data, |
436 | sizeof(struct au1000_eth_platform_data)); | 465 | sizeof(struct au1000_eth_platform_data)); |
437 | #ifndef CONFIG_SOC_AU1100 | ||
438 | else | 466 | else |
439 | memcpy(&au1xxx_eth1_platform_data, eth_data, | 467 | memcpy(&au1xxx_eth1_platform_data, eth_data, |
440 | sizeof(struct au1000_eth_platform_data)); | 468 | sizeof(struct au1000_eth_platform_data)); |
441 | #endif | 469 | } |
470 | |||
471 | static void __init alchemy_setup_macs(int ctype) | ||
472 | { | ||
473 | int ret, i; | ||
474 | unsigned char ethaddr[6]; | ||
475 | struct resource *macres; | ||
476 | |||
477 | /* Handle 1st MAC */ | ||
478 | if (alchemy_get_macs(ctype) < 1) | ||
479 | return; | ||
480 | |||
481 | macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); | ||
482 | if (!macres) { | ||
483 | printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n"); | ||
484 | return; | ||
485 | } | ||
486 | memcpy(macres, au1xxx_eth0_resources[ctype], | ||
487 | sizeof(struct resource) * MAC_RES_COUNT); | ||
488 | au1xxx_eth0_device.resource = macres; | ||
489 | |||
490 | i = prom_get_ethernet_addr(ethaddr); | ||
491 | if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) | ||
492 | memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); | ||
493 | |||
494 | ret = platform_device_register(&au1xxx_eth0_device); | ||
495 | if (ret) | ||
496 | printk(KERN_INFO "Alchemy: failed to register MAC0\n"); | ||
497 | |||
498 | |||
499 | /* Handle 2nd MAC */ | ||
500 | if (alchemy_get_macs(ctype) < 2) | ||
501 | return; | ||
502 | |||
503 | macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); | ||
504 | if (!macres) { | ||
505 | printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n"); | ||
506 | return; | ||
507 | } | ||
508 | memcpy(macres, au1xxx_eth1_resources[ctype], | ||
509 | sizeof(struct resource) * MAC_RES_COUNT); | ||
510 | au1xxx_eth1_device.resource = macres; | ||
511 | |||
512 | ethaddr[5] += 1; /* next addr for 2nd MAC */ | ||
513 | if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) | ||
514 | memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); | ||
515 | |||
516 | /* Register second MAC if enabled in pinfunc */ | ||
517 | if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) { | ||
518 | ret = platform_device_register(&au1xxx_eth1_device); | ||
519 | if (ret) | ||
520 | printk(KERN_INFO "Alchemy: failed to register MAC1\n"); | ||
521 | } | ||
442 | } | 522 | } |
443 | 523 | ||
444 | static struct platform_device *au1xxx_platform_devices[] __initdata = { | 524 | static struct platform_device *au1xxx_platform_devices[] __initdata = { |
445 | &au1xx0_uart_device, | ||
446 | &au1xxx_usb_ohci_device, | 525 | &au1xxx_usb_ohci_device, |
447 | #ifdef CONFIG_FB_AU1100 | 526 | #ifdef CONFIG_FB_AU1100 |
448 | &au1100_lcd_device, | 527 | &au1100_lcd_device, |
@@ -460,36 +539,17 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = { | |||
460 | #ifdef SMBUS_PSC_BASE | 539 | #ifdef SMBUS_PSC_BASE |
461 | &pbdb_smbus_device, | 540 | &pbdb_smbus_device, |
462 | #endif | 541 | #endif |
463 | &au1xxx_eth0_device, | ||
464 | }; | 542 | }; |
465 | 543 | ||
466 | static int __init au1xxx_platform_init(void) | 544 | static int __init au1xxx_platform_init(void) |
467 | { | 545 | { |
468 | unsigned int uartclk = get_au1x00_uart_baud_base() * 16; | 546 | int err, ctype = alchemy_get_cputype(); |
469 | int err, i; | ||
470 | unsigned char ethaddr[6]; | ||
471 | 547 | ||
472 | /* Fill up uartclk. */ | 548 | alchemy_setup_uarts(ctype); |
473 | for (i = 0; au1x00_uart_data[i].flags; i++) | 549 | alchemy_setup_macs(ctype); |
474 | au1x00_uart_data[i].uartclk = uartclk; | ||
475 | |||
476 | /* use firmware-provided mac addr if available and necessary */ | ||
477 | i = prom_get_ethernet_addr(ethaddr); | ||
478 | if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) | ||
479 | memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); | ||
480 | 550 | ||
481 | err = platform_add_devices(au1xxx_platform_devices, | 551 | err = platform_add_devices(au1xxx_platform_devices, |
482 | ARRAY_SIZE(au1xxx_platform_devices)); | 552 | ARRAY_SIZE(au1xxx_platform_devices)); |
483 | #ifndef CONFIG_SOC_AU1100 | ||
484 | ethaddr[5] += 1; /* next addr for 2nd MAC */ | ||
485 | if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) | ||
486 | memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); | ||
487 | |||
488 | /* Register second MAC if enabled in pinfunc */ | ||
489 | if (!err && !(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) | ||
490 | err = platform_device_register(&au1xxx_eth1_device); | ||
491 | #endif | ||
492 | |||
493 | return err; | 553 | return err; |
494 | } | 554 | } |
495 | 555 | ||
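For context on the error checks in the MAC setup above: platform_device_register() and platform_add_devices() return 0 on success and a negative errno on failure, so a failure is reported when the return value is non-zero (the MAC0 check is corrected accordingly to match the MAC1 path). A minimal illustrative sketch of that pattern, reusing the device name declared earlier in this file:

/* Sketch only: registration/error-check pattern used by the MAC setup. */
static int __init register_mac0_sketch(void)
{
	int ret;

	ret = platform_device_register(&au1xxx_eth0_device);
	if (ret)	/* non-zero return means registration failed */
		printk(KERN_ERR "Alchemy: failed to register MAC0 (%d)\n", ret);
	return ret;
}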
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c index 561e5da2658b..1b887c868417 100644 --- a/arch/mips/alchemy/common/setup.c +++ b/arch/mips/alchemy/common/setup.c | |||
@@ -52,8 +52,6 @@ void __init plat_mem_setup(void) | |||
52 | /* this is faster than wasting cycles trying to approximate it */ | 52 | /* this is faster than wasting cycles trying to approximate it */ |
53 | preset_lpj = (est_freq >> 1) / HZ; | 53 | preset_lpj = (est_freq >> 1) / HZ; |
54 | 54 | ||
55 | board_setup(); /* board specific setup */ | ||
56 | |||
57 | if (au1xxx_cpu_needs_config_od()) | 55 | if (au1xxx_cpu_needs_config_od()) |
58 | /* Various early Au1xx0 errata corrected by this */ | 56 | /* Various early Au1xx0 errata corrected by this */ |
59 | set_c0_config(1 << 19); /* Set Config[OD] */ | 57 | set_c0_config(1 << 19); /* Set Config[OD] */ |
@@ -61,6 +59,8 @@ void __init plat_mem_setup(void) | |||
61 | /* Clear to obtain best system bus performance */ | 59 | /* Clear to obtain best system bus performance */ |
62 | clear_c0_config(1 << 19); /* Clear Config[OD] */ | 60 | clear_c0_config(1 << 19); /* Clear Config[OD] */ |
63 | 61 | ||
62 | board_setup(); /* board specific setup */ | ||
63 | |||
64 | /* IO/MEM resources. */ | 64 | /* IO/MEM resources. */ |
65 | set_io_port_base(0); | 65 | set_io_port_base(0); |
66 | ioport_resource.start = IOPORT_RESOURCE_START; | 66 | ioport_resource.start = IOPORT_RESOURCE_START; |
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c index 4a8980027ecf..1dac4f27d334 100644 --- a/arch/mips/alchemy/devboards/db1200/setup.c +++ b/arch/mips/alchemy/devboards/db1200/setup.c | |||
@@ -23,6 +23,13 @@ void __init board_setup(void) | |||
23 | unsigned long freq0, clksrc, div, pfc; | 23 | unsigned long freq0, clksrc, div, pfc; |
24 | unsigned short whoami; | 24 | unsigned short whoami; |
25 | 25 | ||
26 | /* Set Config[OD] (disable overlapping bus transaction): | ||
27 | * This gets rid of a _lot_ of spurious interrupts (especially | ||
28 | * wrt. IDE); but incurs ~10% performance hit in some | ||
29 | * cpu-bound applications. | ||
30 | */ | ||
31 | set_c0_config(1 << 19); | ||
32 | |||
26 | bcsr_init(DB1200_BCSR_PHYS_ADDR, | 33 | bcsr_init(DB1200_BCSR_PHYS_ADDR, |
27 | DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS); | 34 | DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS); |
28 | 35 | ||
diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c index 05f120ff90f9..5c956fe8760f 100644 --- a/arch/mips/alchemy/devboards/db1x00/board_setup.c +++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c | |||
@@ -127,13 +127,10 @@ const char *get_system_type(void) | |||
127 | void __init board_setup(void) | 127 | void __init board_setup(void) |
128 | { | 128 | { |
129 | unsigned long bcsr1, bcsr2; | 129 | unsigned long bcsr1, bcsr2; |
130 | u32 pin_func; | ||
131 | 130 | ||
132 | bcsr1 = DB1000_BCSR_PHYS_ADDR; | 131 | bcsr1 = DB1000_BCSR_PHYS_ADDR; |
133 | bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; | 132 | bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; |
134 | 133 | ||
135 | pin_func = 0; | ||
136 | |||
137 | #ifdef CONFIG_MIPS_DB1000 | 134 | #ifdef CONFIG_MIPS_DB1000 |
138 | printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); | 135 | printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); |
139 | #endif | 136 | #endif |
@@ -164,12 +161,16 @@ void __init board_setup(void) | |||
164 | /* Not valid for Au1550 */ | 161 | /* Not valid for Au1550 */ |
165 | #if defined(CONFIG_IRDA) && \ | 162 | #if defined(CONFIG_IRDA) && \ |
166 | (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) | 163 | (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) |
167 | /* Set IRFIRSEL instead of GPIO15 */ | 164 | { |
168 | pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; | 165 | u32 pin_func; |
169 | au_writel(pin_func, SYS_PINFUNC); | 166 | |
170 | /* Power off until the driver is in use */ | 167 | /* Set IRFIRSEL instead of GPIO15 */ |
171 | bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, | 168 | pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; |
172 | BCSR_RESETS_IRDA_MODE_OFF); | 169 | au_writel(pin_func, SYS_PINFUNC); |
170 | /* Power off until the driver is in use */ | ||
171 | bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, | ||
172 | BCSR_RESETS_IRDA_MODE_OFF); | ||
173 | } | ||
173 | #endif | 174 | #endif |
174 | bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ | 175 | bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ |
175 | 176 | ||
@@ -177,31 +178,35 @@ void __init board_setup(void) | |||
177 | alchemy_gpio1_input_enable(); | 178 | alchemy_gpio1_input_enable(); |
178 | 179 | ||
179 | #ifdef CONFIG_MIPS_MIRAGE | 180 | #ifdef CONFIG_MIPS_MIRAGE |
180 | /* GPIO[20] is output */ | 181 | { |
181 | alchemy_gpio_direction_output(20, 0); | 182 | u32 pin_func; |
182 | 183 | ||
183 | /* Set GPIO[210:208] instead of SSI_0 */ | 184 | /* GPIO[20] is output */ |
184 | pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; | 185 | alchemy_gpio_direction_output(20, 0); |
185 | 186 | ||
186 | /* Set GPIO[215:211] for LEDs */ | 187 | /* Set GPIO[210:208] instead of SSI_0 */ |
187 | pin_func |= 5 << 2; | 188 | pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; |
188 | 189 | ||
189 | /* Set GPIO[214:213] for more LEDs */ | 190 | /* Set GPIO[215:211] for LEDs */ |
190 | pin_func |= 5 << 12; | 191 | pin_func |= 5 << 2; |
191 | 192 | ||
192 | /* Set GPIO[207:200] instead of PCMCIA/LCD */ | 193 | /* Set GPIO[214:213] for more LEDs */ |
193 | pin_func |= SYS_PF_LCD | SYS_PF_PC; | 194 | pin_func |= 5 << 12; |
194 | au_writel(pin_func, SYS_PINFUNC); | ||
195 | 195 | ||
196 | /* | 196 | /* Set GPIO[207:200] instead of PCMCIA/LCD */ |
197 | * Enable speaker amplifier. This should | 197 | pin_func |= SYS_PF_LCD | SYS_PF_PC; |
198 | * be part of the audio driver. | 198 | au_writel(pin_func, SYS_PINFUNC); |
199 | */ | ||
200 | alchemy_gpio_direction_output(209, 1); | ||
201 | 199 | ||
202 | pm_power_off = mirage_power_off; | 200 | /* |
203 | _machine_halt = mirage_power_off; | 201 | * Enable speaker amplifier. This should |
204 | _machine_restart = (void(*)(char *))mips_softreset; | 202 | * be part of the audio driver. |
203 | */ | ||
204 | alchemy_gpio_direction_output(209, 1); | ||
205 | |||
206 | pm_power_off = mirage_power_off; | ||
207 | _machine_halt = mirage_power_off; | ||
208 | _machine_restart = (void(*)(char *))mips_softreset; | ||
209 | } | ||
205 | #endif | 210 | #endif |
206 | 211 | ||
207 | #ifdef CONFIG_MIPS_BOSPORUS | 212 | #ifdef CONFIG_MIPS_BOSPORUS |
diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c index 2d85c4b5be09..e64fdcbf75d0 100644 --- a/arch/mips/alchemy/devboards/pb1000/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c | |||
@@ -65,7 +65,7 @@ void __init board_setup(void) | |||
65 | 65 | ||
66 | /* Set AUX clock to 12 MHz * 8 = 96 MHz */ | 66 | /* Set AUX clock to 12 MHz * 8 = 96 MHz */ |
67 | au_writel(8, SYS_AUXPLL); | 67 | au_writel(8, SYS_AUXPLL); |
68 | au_writel(0, SYS_PINSTATERD); | 68 | alchemy_gpio1_input_enable(); |
69 | udelay(100); | 69 | udelay(100); |
70 | 70 | ||
71 | #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) | 71 | #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) |
diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c index 83f46215eb0c..3b4fa3206969 100644 --- a/arch/mips/alchemy/devboards/pb1500/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c | |||
@@ -56,7 +56,7 @@ void __init board_setup(void) | |||
56 | sys_clksrc = sys_freqctrl = pin_func = 0; | 56 | sys_clksrc = sys_freqctrl = pin_func = 0; |
57 | /* Set AUX clock to 12 MHz * 8 = 96 MHz */ | 57 | /* Set AUX clock to 12 MHz * 8 = 96 MHz */ |
58 | au_writel(8, SYS_AUXPLL); | 58 | au_writel(8, SYS_AUXPLL); |
59 | au_writel(0, SYS_PINSTATERD); | 59 | alchemy_gpio1_input_enable(); |
60 | udelay(100); | 60 | udelay(100); |
61 | 61 | ||
62 | /* GPIO201 is input for PCMCIA card detect */ | 62 | /* GPIO201 is input for PCMCIA card detect */ |
diff --git a/arch/mips/alchemy/devboards/prom.c b/arch/mips/alchemy/devboards/prom.c index baeb21385058..e5306b56da6d 100644 --- a/arch/mips/alchemy/devboards/prom.c +++ b/arch/mips/alchemy/devboards/prom.c | |||
@@ -62,5 +62,5 @@ void __init prom_init(void) | |||
62 | 62 | ||
63 | void prom_putchar(unsigned char c) | 63 | void prom_putchar(unsigned char c) |
64 | { | 64 | { |
65 | alchemy_uart_putchar(UART0_PHYS_ADDR, c); | 65 | alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); |
66 | } | 66 | } |
diff --git a/arch/mips/alchemy/gpr/board_setup.c b/arch/mips/alchemy/gpr/board_setup.c index ad2e3f137933..5f8f0691ed2d 100644 --- a/arch/mips/alchemy/gpr/board_setup.c +++ b/arch/mips/alchemy/gpr/board_setup.c | |||
@@ -36,9 +36,6 @@ | |||
36 | 36 | ||
37 | #include <prom.h> | 37 | #include <prom.h> |
38 | 38 | ||
39 | #define UART1_ADDR KSEG1ADDR(UART1_PHYS_ADDR) | ||
40 | #define UART3_ADDR KSEG1ADDR(UART3_PHYS_ADDR) | ||
41 | |||
42 | char irq_tab_alchemy[][5] __initdata = { | 39 | char irq_tab_alchemy[][5] __initdata = { |
43 | [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, | 40 | [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, |
44 | }; | 41 | }; |
@@ -67,18 +64,15 @@ static void gpr_power_off(void) | |||
67 | 64 | ||
68 | void __init board_setup(void) | 65 | void __init board_setup(void) |
69 | { | 66 | { |
70 | printk(KERN_INFO "Tarpeze ITS GPR board\n"); | 67 | printk(KERN_INFO "Trapeze ITS GPR board\n"); |
71 | 68 | ||
72 | pm_power_off = gpr_power_off; | 69 | pm_power_off = gpr_power_off; |
73 | _machine_halt = gpr_power_off; | 70 | _machine_halt = gpr_power_off; |
74 | _machine_restart = gpr_reset; | 71 | _machine_restart = gpr_reset; |
75 | 72 | ||
76 | /* Enable UART3 */ | 73 | /* Enable UART1/3 */ |
77 | au_writel(0x1, UART3_ADDR + UART_MOD_CNTRL);/* clock enable (CE) */ | 74 | alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); |
78 | au_writel(0x3, UART3_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ | 75 | alchemy_uart_enable(AU1000_UART1_PHYS_ADDR); |
79 | /* Enable UART1 */ | ||
80 | au_writel(0x1, UART1_ADDR + UART_MOD_CNTRL); /* clock enable (CE) */ | ||
81 | au_writel(0x3, UART1_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ | ||
82 | 76 | ||
83 | /* Take away Reset of UMTS-card */ | 77 | /* Take away Reset of UMTS-card */ |
84 | alchemy_gpio_direction_output(215, 1); | 78 | alchemy_gpio_direction_output(215, 1); |
diff --git a/arch/mips/alchemy/gpr/init.c b/arch/mips/alchemy/gpr/init.c index f044f4c541d7..229aafae680c 100644 --- a/arch/mips/alchemy/gpr/init.c +++ b/arch/mips/alchemy/gpr/init.c | |||
@@ -59,5 +59,5 @@ void __init prom_init(void) | |||
59 | 59 | ||
60 | void prom_putchar(unsigned char c) | 60 | void prom_putchar(unsigned char c) |
61 | { | 61 | { |
62 | alchemy_uart_putchar(UART0_PHYS_ADDR, c); | 62 | alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); |
63 | } | 63 | } |
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c index cf436ab679ae..3ae984cf98cf 100644 --- a/arch/mips/alchemy/mtx-1/board_setup.c +++ b/arch/mips/alchemy/mtx-1/board_setup.c | |||
@@ -87,7 +87,7 @@ void __init board_setup(void) | |||
87 | au_writel(SYS_PF_NI2, SYS_PINFUNC); | 87 | au_writel(SYS_PF_NI2, SYS_PINFUNC); |
88 | 88 | ||
89 | /* Initialize GPIO */ | 89 | /* Initialize GPIO */ |
90 | au_writel(0xFFFFFFFF, SYS_TRIOUTCLR); | 90 | au_writel(~0, KSEG1ADDR(AU1000_SYS_PHYS_ADDR) + SYS_TRIOUTCLR); |
91 | alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ | 91 | alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ |
92 | alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ | 92 | alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ |
93 | alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ | 93 | alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ |
diff --git a/arch/mips/alchemy/mtx-1/init.c b/arch/mips/alchemy/mtx-1/init.c index f8d25575fa05..2e81cc7f3422 100644 --- a/arch/mips/alchemy/mtx-1/init.c +++ b/arch/mips/alchemy/mtx-1/init.c | |||
@@ -62,5 +62,5 @@ void __init prom_init(void) | |||
62 | 62 | ||
63 | void prom_putchar(unsigned char c) | 63 | void prom_putchar(unsigned char c) |
64 | { | 64 | { |
65 | alchemy_uart_putchar(UART0_PHYS_ADDR, c); | 65 | alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); |
66 | } | 66 | } |
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c index 956f946218c5..55628e390fd7 100644 --- a/arch/mips/alchemy/mtx-1/platform.c +++ b/arch/mips/alchemy/mtx-1/platform.c | |||
@@ -53,8 +53,8 @@ static struct platform_device mtx1_button = { | |||
53 | 53 | ||
54 | static struct resource mtx1_wdt_res[] = { | 54 | static struct resource mtx1_wdt_res[] = { |
55 | [0] = { | 55 | [0] = { |
56 | .start = 15, | 56 | .start = 215, |
57 | .end = 15, | 57 | .end = 215, |
58 | .name = "mtx1-wdt-gpio", | 58 | .name = "mtx1-wdt-gpio", |
59 | .flags = IORESOURCE_IRQ, | 59 | .flags = IORESOURCE_IRQ, |
60 | } | 60 | } |
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c index febfb0fb0896..81e57fad07ab 100644 --- a/arch/mips/alchemy/xxs1500/board_setup.c +++ b/arch/mips/alchemy/xxs1500/board_setup.c | |||
@@ -66,13 +66,10 @@ void __init board_setup(void) | |||
66 | au_writel(pin_func, SYS_PINFUNC); | 66 | au_writel(pin_func, SYS_PINFUNC); |
67 | 67 | ||
68 | /* Enable UART */ | 68 | /* Enable UART */ |
69 | au_writel(0x01, UART3_ADDR + UART_MOD_CNTRL); /* clock enable (CE) */ | 69 | alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); |
70 | mdelay(10); | 70 | /* Enable DTR (MCR bit 0) = USB power up */ |
71 | au_writel(0x03, UART3_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ | 71 | __raw_writel(1, (void __iomem *)KSEG1ADDR(AU1000_UART3_PHYS_ADDR + 0x18)); |
72 | mdelay(10); | 72 | wmb(); |
73 | |||
74 | /* Enable DTR = USB power up */ | ||
75 | au_writel(0x01, UART3_ADDR + UART_MCR); /* UART_MCR_DTR is 0x01??? */ | ||
76 | 73 | ||
77 | #ifdef CONFIG_PCI | 74 | #ifdef CONFIG_PCI |
78 | #if defined(__MIPSEB__) | 75 | #if defined(__MIPSEB__) |
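The alchemy_uart_enable() call replaces the open-coded sequence removed above. Judging from the removed lines, the helper presumably performs the same two-step write to the UART module control register; a rough sketch under that assumption (uart_base being a KSEG1-mapped base address, as in the removed code):

/* Sketch: approximates the removed open-coded UART block enable. */
static void uart_module_enable_sketch(unsigned long uart_base)
{
	au_writel(0x1, uart_base + UART_MOD_CNTRL);	/* clock enable (CE) */
	mdelay(10);
	au_writel(0x3, uart_base + UART_MOD_CNTRL);	/* CE and module enable */
	mdelay(10);
}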
diff --git a/arch/mips/alchemy/xxs1500/init.c b/arch/mips/alchemy/xxs1500/init.c index 15125c2fda7d..0ee02cfa989d 100644 --- a/arch/mips/alchemy/xxs1500/init.c +++ b/arch/mips/alchemy/xxs1500/init.c | |||
@@ -51,14 +51,13 @@ void __init prom_init(void) | |||
51 | prom_init_cmdline(); | 51 | prom_init_cmdline(); |
52 | 52 | ||
53 | memsize_str = prom_getenv("memsize"); | 53 | memsize_str = prom_getenv("memsize"); |
54 | if (!memsize_str) | 54 | if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) |
55 | memsize = 0x04000000; | 55 | memsize = 0x04000000; |
56 | else | 56 | |
57 | strict_strtoul(memsize_str, 0, &memsize); | ||
58 | add_memory_region(0, memsize, BOOT_MEM_RAM); | 57 | add_memory_region(0, memsize, BOOT_MEM_RAM); |
59 | } | 58 | } |
60 | 59 | ||
61 | void prom_putchar(unsigned char c) | 60 | void prom_putchar(unsigned char c) |
62 | { | 61 | { |
63 | alchemy_uart_putchar(UART0_PHYS_ADDR, c); | 62 | alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); |
64 | } | 63 | } |
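The memsize change above folds the parse into the fallback test: strict_strtoul() returns 0 on success, so a missing or malformed "memsize" variable now falls back to the 64 MB default in one condition. The same idiom, restated as a sketch:

/* Sketch of the parse-or-default idiom used in prom_init() above. */
unsigned long memsize;
char *str = prom_getenv("memsize");

if (!str || strict_strtoul(str, 0, &memsize))
	memsize = 0x04000000;	/* 64 MB default */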
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c index 425dfa5d6e12..bb571bcdb8f2 100644 --- a/arch/mips/ar7/gpio.c +++ b/arch/mips/ar7/gpio.c | |||
@@ -325,9 +325,7 @@ int __init ar7_gpio_init(void) | |||
325 | size = 0x1f; | 325 | size = 0x1f; |
326 | } | 326 | } |
327 | 327 | ||
328 | gpch->regs = ioremap_nocache(AR7_REGS_GPIO, | 328 | gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size); |
329 | AR7_REGS_GPIO + 0x10); | ||
330 | |||
331 | if (!gpch->regs) { | 329 | if (!gpch->regs) { |
332 | printk(KERN_ERR "%s: failed to ioremap regs\n", | 330 | printk(KERN_ERR "%s: failed to ioremap regs\n", |
333 | gpch->chip.label); | 331 | gpch->chip.label); |
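The ar7 fix above corrects the second argument of ioremap_nocache(), which is a mapping length in bytes rather than an end address (the removed code passed AR7_REGS_GPIO + 0x10 where a length belonged). A minimal sketch of the corrected call, with size as computed just above in the hunk:

/* Sketch: map the GPIO register block; the second argument is a length. */
static void __iomem *map_ar7_gpio_sketch(resource_size_t size)
{
	return ioremap_nocache(AR7_REGS_GPIO, size);	/* NULL on failure */
}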
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c index e5b6615731e5..54db815bc86c 100644 --- a/arch/mips/bcm47xx/nvram.c +++ b/arch/mips/bcm47xx/nvram.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2005 Broadcom Corporation | 4 | * Copyright (C) 2005 Broadcom Corporation |
5 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> | 5 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> |
6 | * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de> | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
@@ -23,7 +24,7 @@ | |||
23 | static char nvram_buf[NVRAM_SPACE]; | 24 | static char nvram_buf[NVRAM_SPACE]; |
24 | 25 | ||
25 | /* Probe for NVRAM header */ | 26 | /* Probe for NVRAM header */ |
26 | static void __init early_nvram_init(void) | 27 | static void early_nvram_init(void) |
27 | { | 28 | { |
28 | struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore; | 29 | struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore; |
29 | struct nvram_header *header; | 30 | struct nvram_header *header; |
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index c95f90bf734c..73b529b57433 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> | 3 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> |
4 | * Copyright (C) 2006 Michael Buesch <mb@bu3sch.de> | 4 | * Copyright (C) 2006 Michael Buesch <mb@bu3sch.de> |
5 | * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org> | 5 | * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org> |
6 | * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de> | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
@@ -57,10 +58,49 @@ static void bcm47xx_machine_halt(void) | |||
57 | } | 58 | } |
58 | 59 | ||
59 | #define READ_FROM_NVRAM(_outvar, name, buf) \ | 60 | #define READ_FROM_NVRAM(_outvar, name, buf) \ |
60 | if (nvram_getenv(name, buf, sizeof(buf)) >= 0)\ | 61 | if (nvram_getprefix(prefix, name, buf, sizeof(buf)) >= 0)\ |
61 | sprom->_outvar = simple_strtoul(buf, NULL, 0); | 62 | sprom->_outvar = simple_strtoul(buf, NULL, 0); |
62 | 63 | ||
63 | static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) | 64 | #define READ_FROM_NVRAM2(_outvar, name1, name2, buf) \ |
65 | if (nvram_getprefix(prefix, name1, buf, sizeof(buf)) >= 0 || \ | ||
66 | nvram_getprefix(prefix, name2, buf, sizeof(buf)) >= 0)\ | ||
67 | sprom->_outvar = simple_strtoul(buf, NULL, 0); | ||
68 | |||
69 | static inline int nvram_getprefix(const char *prefix, char *name, | ||
70 | char *buf, int len) | ||
71 | { | ||
72 | if (prefix) { | ||
73 | char key[100]; | ||
74 | |||
75 | snprintf(key, sizeof(key), "%s%s", prefix, name); | ||
76 | return nvram_getenv(key, buf, len); | ||
77 | } | ||
78 | |||
79 | return nvram_getenv(name, buf, len); | ||
80 | } | ||
81 | |||
82 | static u32 nvram_getu32(const char *name, char *buf, int len) | ||
83 | { | ||
84 | int rv; | ||
85 | char key[100]; | ||
86 | u16 var0, var1; | ||
87 | |||
88 | snprintf(key, sizeof(key), "%s0", name); | ||
89 | rv = nvram_getenv(key, buf, len); | ||
90 | /* return 0 here so this looks like unset */ | ||
91 | if (rv < 0) | ||
92 | return 0; | ||
93 | var0 = simple_strtoul(buf, NULL, 0); | ||
94 | |||
95 | snprintf(key, sizeof(key), "%s1", name); | ||
96 | rv = nvram_getenv(key, buf, len); | ||
97 | if (rv < 0) | ||
98 | return 0; | ||
99 | var1 = simple_strtoul(buf, NULL, 0); | ||
100 | return var1 << 16 | var0; | ||
101 | } | ||
102 | |||
103 | static void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix) | ||
64 | { | 104 | { |
65 | char buf[100]; | 105 | char buf[100]; |
66 | u32 boardflags; | 106 | u32 boardflags; |
@@ -69,11 +109,12 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) | |||
69 | 109 | ||
70 | sprom->revision = 1; /* Fallback: Old hardware does not define this. */ | 110 | sprom->revision = 1; /* Fallback: Old hardware does not define this. */ |
71 | READ_FROM_NVRAM(revision, "sromrev", buf); | 111 | READ_FROM_NVRAM(revision, "sromrev", buf); |
72 | if (nvram_getenv("il0macaddr", buf, sizeof(buf)) >= 0) | 112 | if (nvram_getprefix(prefix, "il0macaddr", buf, sizeof(buf)) >= 0 || |
113 | nvram_getprefix(prefix, "macaddr", buf, sizeof(buf)) >= 0) | ||
73 | nvram_parse_macaddr(buf, sprom->il0mac); | 114 | nvram_parse_macaddr(buf, sprom->il0mac); |
74 | if (nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0) | 115 | if (nvram_getprefix(prefix, "et0macaddr", buf, sizeof(buf)) >= 0) |
75 | nvram_parse_macaddr(buf, sprom->et0mac); | 116 | nvram_parse_macaddr(buf, sprom->et0mac); |
76 | if (nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0) | 117 | if (nvram_getprefix(prefix, "et1macaddr", buf, sizeof(buf)) >= 0) |
77 | nvram_parse_macaddr(buf, sprom->et1mac); | 118 | nvram_parse_macaddr(buf, sprom->et1mac); |
78 | READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf); | 119 | READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf); |
79 | READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf); | 120 | READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf); |
@@ -95,20 +136,36 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) | |||
95 | READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf); | 136 | READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf); |
96 | READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf); | 137 | READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf); |
97 | READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf); | 138 | READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf); |
98 | READ_FROM_NVRAM(gpio0, "wl0gpio0", buf); | 139 | READ_FROM_NVRAM2(gpio0, "ledbh0", "wl0gpio0", buf); |
99 | READ_FROM_NVRAM(gpio1, "wl0gpio1", buf); | 140 | READ_FROM_NVRAM2(gpio1, "ledbh1", "wl0gpio1", buf); |
100 | READ_FROM_NVRAM(gpio2, "wl0gpio2", buf); | 141 | READ_FROM_NVRAM2(gpio2, "ledbh2", "wl0gpio2", buf); |
101 | READ_FROM_NVRAM(gpio3, "wl0gpio3", buf); | 142 | READ_FROM_NVRAM2(gpio3, "ledbh3", "wl0gpio3", buf); |
102 | READ_FROM_NVRAM(maxpwr_bg, "pa0maxpwr", buf); | 143 | READ_FROM_NVRAM2(maxpwr_bg, "maxp2ga0", "pa0maxpwr", buf); |
103 | READ_FROM_NVRAM(maxpwr_al, "pa1lomaxpwr", buf); | 144 | READ_FROM_NVRAM2(maxpwr_al, "maxp5gla0", "pa1lomaxpwr", buf); |
104 | READ_FROM_NVRAM(maxpwr_a, "pa1maxpwr", buf); | 145 | READ_FROM_NVRAM2(maxpwr_a, "maxp5ga0", "pa1maxpwr", buf); |
105 | READ_FROM_NVRAM(maxpwr_ah, "pa1himaxpwr", buf); | 146 | READ_FROM_NVRAM2(maxpwr_ah, "maxp5gha0", "pa1himaxpwr", buf); |
106 | READ_FROM_NVRAM(itssi_a, "pa1itssit", buf); | 147 | READ_FROM_NVRAM2(itssi_bg, "itt5ga0", "pa0itssit", buf); |
107 | READ_FROM_NVRAM(itssi_bg, "pa0itssit", buf); | 148 | READ_FROM_NVRAM2(itssi_a, "itt2ga0", "pa1itssit", buf); |
108 | READ_FROM_NVRAM(tri2g, "tri2g", buf); | 149 | READ_FROM_NVRAM(tri2g, "tri2g", buf); |
109 | READ_FROM_NVRAM(tri5gl, "tri5gl", buf); | 150 | READ_FROM_NVRAM(tri5gl, "tri5gl", buf); |
110 | READ_FROM_NVRAM(tri5g, "tri5g", buf); | 151 | READ_FROM_NVRAM(tri5g, "tri5g", buf); |
111 | READ_FROM_NVRAM(tri5gh, "tri5gh", buf); | 152 | READ_FROM_NVRAM(tri5gh, "tri5gh", buf); |
153 | READ_FROM_NVRAM(txpid2g[0], "txpid2ga0", buf); | ||
154 | READ_FROM_NVRAM(txpid2g[1], "txpid2ga1", buf); | ||
155 | READ_FROM_NVRAM(txpid2g[2], "txpid2ga2", buf); | ||
156 | READ_FROM_NVRAM(txpid2g[3], "txpid2ga3", buf); | ||
157 | READ_FROM_NVRAM(txpid5g[0], "txpid5ga0", buf); | ||
158 | READ_FROM_NVRAM(txpid5g[1], "txpid5ga1", buf); | ||
159 | READ_FROM_NVRAM(txpid5g[2], "txpid5ga2", buf); | ||
160 | READ_FROM_NVRAM(txpid5g[3], "txpid5ga3", buf); | ||
161 | READ_FROM_NVRAM(txpid5gl[0], "txpid5gla0", buf); | ||
162 | READ_FROM_NVRAM(txpid5gl[1], "txpid5gla1", buf); | ||
163 | READ_FROM_NVRAM(txpid5gl[2], "txpid5gla2", buf); | ||
164 | READ_FROM_NVRAM(txpid5gl[3], "txpid5gla3", buf); | ||
165 | READ_FROM_NVRAM(txpid5gh[0], "txpid5gha0", buf); | ||
166 | READ_FROM_NVRAM(txpid5gh[1], "txpid5gha1", buf); | ||
167 | READ_FROM_NVRAM(txpid5gh[2], "txpid5gha2", buf); | ||
168 | READ_FROM_NVRAM(txpid5gh[3], "txpid5gha3", buf); | ||
112 | READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf); | 169 | READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf); |
113 | READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf); | 170 | READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf); |
114 | READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf); | 171 | READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf); |
@@ -120,19 +177,27 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) | |||
120 | READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf); | 177 | READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf); |
121 | READ_FROM_NVRAM(bxa5g, "bxa5g", buf); | 178 | READ_FROM_NVRAM(bxa5g, "bxa5g", buf); |
122 | READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf); | 179 | READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf); |
123 | READ_FROM_NVRAM(ofdm2gpo, "ofdm2gpo", buf); | ||
124 | READ_FROM_NVRAM(ofdm5glpo, "ofdm5glpo", buf); | ||
125 | READ_FROM_NVRAM(ofdm5gpo, "ofdm5gpo", buf); | ||
126 | READ_FROM_NVRAM(ofdm5ghpo, "ofdm5ghpo", buf); | ||
127 | 180 | ||
128 | if (nvram_getenv("boardflags", buf, sizeof(buf)) >= 0) { | 181 | sprom->ofdm2gpo = nvram_getu32("ofdm2gpo", buf, sizeof(buf)); |
182 | sprom->ofdm5glpo = nvram_getu32("ofdm5glpo", buf, sizeof(buf)); | ||
183 | sprom->ofdm5gpo = nvram_getu32("ofdm5gpo", buf, sizeof(buf)); | ||
184 | sprom->ofdm5ghpo = nvram_getu32("ofdm5ghpo", buf, sizeof(buf)); | ||
185 | |||
186 | READ_FROM_NVRAM(antenna_gain.ghz24.a0, "ag0", buf); | ||
187 | READ_FROM_NVRAM(antenna_gain.ghz24.a1, "ag1", buf); | ||
188 | READ_FROM_NVRAM(antenna_gain.ghz24.a2, "ag2", buf); | ||
189 | READ_FROM_NVRAM(antenna_gain.ghz24.a3, "ag3", buf); | ||
190 | memcpy(&sprom->antenna_gain.ghz5, &sprom->antenna_gain.ghz24, | ||
191 | sizeof(sprom->antenna_gain.ghz5)); | ||
192 | |||
193 | if (nvram_getprefix(prefix, "boardflags", buf, sizeof(buf)) >= 0) { | ||
129 | boardflags = simple_strtoul(buf, NULL, 0); | 194 | boardflags = simple_strtoul(buf, NULL, 0); |
130 | if (boardflags) { | 195 | if (boardflags) { |
131 | sprom->boardflags_lo = (boardflags & 0x0000FFFFU); | 196 | sprom->boardflags_lo = (boardflags & 0x0000FFFFU); |
132 | sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16; | 197 | sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16; |
133 | } | 198 | } |
134 | } | 199 | } |
135 | if (nvram_getenv("boardflags2", buf, sizeof(buf)) >= 0) { | 200 | if (nvram_getprefix(prefix, "boardflags2", buf, sizeof(buf)) >= 0) { |
136 | boardflags = simple_strtoul(buf, NULL, 0); | 201 | boardflags = simple_strtoul(buf, NULL, 0); |
137 | if (boardflags) { | 202 | if (boardflags) { |
138 | sprom->boardflags2_lo = (boardflags & 0x0000FFFFU); | 203 | sprom->boardflags2_lo = (boardflags & 0x0000FFFFU); |
@@ -141,6 +206,22 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) | |||
141 | } | 206 | } |
142 | } | 207 | } |
143 | 208 | ||
209 | int bcm47xx_get_sprom(struct ssb_bus *bus, struct ssb_sprom *out) | ||
210 | { | ||
211 | char prefix[10]; | ||
212 | |||
213 | if (bus->bustype == SSB_BUSTYPE_PCI) { | ||
214 | snprintf(prefix, sizeof(prefix), "pci/%u/%u/", | ||
215 | bus->host_pci->bus->number + 1, | ||
216 | PCI_SLOT(bus->host_pci->devfn)); | ||
217 | bcm47xx_fill_sprom(out, prefix); | ||
218 | return 0; | ||
219 | } else { | ||
220 | printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n"); | ||
221 | return -EINVAL; | ||
222 | } | ||
223 | } | ||
224 | |||
144 | static int bcm47xx_get_invariants(struct ssb_bus *bus, | 225 | static int bcm47xx_get_invariants(struct ssb_bus *bus, |
145 | struct ssb_init_invariants *iv) | 226 | struct ssb_init_invariants *iv) |
146 | { | 227 | { |
@@ -158,7 +239,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus, | |||
158 | if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) | 239 | if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) |
159 | iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0); | 240 | iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0); |
160 | 241 | ||
161 | bcm47xx_fill_sprom(&iv->sprom); | 242 | bcm47xx_fill_sprom(&iv->sprom, NULL); |
162 | 243 | ||
163 | if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) | 244 | if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) |
164 | iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); | 245 | iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); |
@@ -172,6 +253,11 @@ void __init plat_mem_setup(void) | |||
172 | char buf[100]; | 253 | char buf[100]; |
173 | struct ssb_mipscore *mcore; | 254 | struct ssb_mipscore *mcore; |
174 | 255 | ||
256 | err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom); | ||
257 | if (err) | ||
258 | printk(KERN_WARNING "bcm47xx: someone else already registered" | ||
259 | " a ssb SPROM callback handler (err %d)\n", err); | ||
260 | |||
175 | err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE, | 261 | err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE, |
176 | bcm47xx_get_invariants); | 262 | bcm47xx_get_invariants); |
177 | if (err) | 263 | if (err) |
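The prefix handling added above builds per-device NVRAM keys by concatenating a bus prefix with the plain variable name, using the "pci/%u/%u/" format from bcm47xx_get_sprom(). A small sketch of such a lookup; the bus and slot numbers here are purely illustrative:

/* Sketch: prefixed NVRAM lookup as done by nvram_getprefix() above. */
static void prefixed_lookup_sketch(struct ssb_sprom *sprom)
{
	char prefix[10], key[100], buf[100];

	snprintf(prefix, sizeof(prefix), "pci/%u/%u/", 1, 1);
	snprintf(key, sizeof(key), "%s%s", prefix, "il0macaddr");
	if (nvram_getenv(key, buf, sizeof(buf)) >= 0)
		nvram_parse_macaddr(buf, sprom->il0mac);
}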
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index 8dba8cfb752f..40b223b603be 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c | |||
@@ -643,6 +643,17 @@ static struct ssb_sprom bcm63xx_sprom = { | |||
643 | .boardflags_lo = 0x2848, | 643 | .boardflags_lo = 0x2848, |
644 | .boardflags_hi = 0x0000, | 644 | .boardflags_hi = 0x0000, |
645 | }; | 645 | }; |
646 | |||
647 | int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out) | ||
648 | { | ||
649 | if (bus->bustype == SSB_BUSTYPE_PCI) { | ||
650 | memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom)); | ||
651 | return 0; | ||
652 | } else { | ||
653 | printk(KERN_ERR PFX "unable to fill SPROM for given bustype.\n"); | ||
654 | return -EINVAL; | ||
655 | } | ||
656 | } | ||
646 | #endif | 657 | #endif |
647 | 658 | ||
648 | /* | 659 | /* |
@@ -793,8 +804,9 @@ void __init board_prom_init(void) | |||
793 | if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { | 804 | if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { |
794 | memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); | 805 | memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); |
795 | memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); | 806 | memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); |
796 | if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0) | 807 | if (ssb_arch_register_fallback_sprom( |
797 | printk(KERN_ERR "failed to register fallback SPROM\n"); | 808 | &bcm63xx_get_fallback_sprom) < 0) |
809 | printk(KERN_ERR PFX "failed to register fallback SPROM\n"); | ||
798 | } | 810 | } |
799 | #endif | 811 | #endif |
800 | } | 812 | } |
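Both the bcm47xx and bcm63xx hunks move from ssb_arch_set_fallback_sprom() to registering a callback that fills the SPROM on demand. A minimal sketch of that pattern; my_sprom and the function names are hypothetical placeholders for a board-provided template:

/* Sketch of the callback-based fallback SPROM interface used above. */
static int my_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
{
	if (bus->bustype != SSB_BUSTYPE_PCI)
		return -EINVAL;			/* only PCI-hosted buses handled */
	memcpy(out, &my_sprom, sizeof(*out));	/* my_sprom: board's template */
	return 0;
}

static int __init my_register_fallback(void)
{
	return ssb_arch_register_fallback_sprom(&my_get_fallback_sprom);
}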
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c index 88c9d963be88..9a6243676e22 100644 --- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c +++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c | |||
@@ -16,8 +16,8 @@ | |||
16 | 16 | ||
17 | int main(int argc, char *argv[]) | 17 | int main(int argc, char *argv[]) |
18 | { | 18 | { |
19 | unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; | ||
19 | struct stat sb; | 20 | struct stat sb; |
20 | uint64_t vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; | ||
21 | 21 | ||
22 | if (argc != 3) { | 22 | if (argc != 3) { |
23 | fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n", | 23 | fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n", |
diff --git a/arch/mips/boot/compressed/uart-alchemy.c b/arch/mips/boot/compressed/uart-alchemy.c index 1bff22fa089b..eb063e6dead9 100644 --- a/arch/mips/boot/compressed/uart-alchemy.c +++ b/arch/mips/boot/compressed/uart-alchemy.c | |||
@@ -3,5 +3,5 @@ | |||
3 | void putc(char c) | 3 | void putc(char c) |
4 | { | 4 | { |
5 | /* all current (Jan. 2010) in-kernel boards */ | 5 | /* all current (Jan. 2010) in-kernel boards */ |
6 | alchemy_uart_putchar(UART0_PHYS_ADDR, c); | 6 | alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); |
7 | } | 7 | } |
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index caae22858163..cad555ebeca3 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig | |||
@@ -1,11 +1,7 @@ | |||
1 | config CAVIUM_OCTEON_SPECIFIC_OPTIONS | 1 | if CPU_CAVIUM_OCTEON |
2 | bool "Enable Octeon specific options" | ||
3 | depends on CPU_CAVIUM_OCTEON | ||
4 | default "y" | ||
5 | 2 | ||
6 | config CAVIUM_CN63XXP1 | 3 | config CAVIUM_CN63XXP1 |
7 | bool "Enable CN63XXP1 errata worarounds" | 4 | bool "Enable CN63XXP1 errata worarounds" |
8 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
9 | default "n" | 5 | default "n" |
10 | help | 6 | help |
11 | The CN63XXP1 chip requires build time workarounds to | 7 | The CN63XXP1 chip requires build time workarounds to |
@@ -16,7 +12,6 @@ config CAVIUM_CN63XXP1 | |||
16 | 12 | ||
17 | config CAVIUM_OCTEON_2ND_KERNEL | 13 | config CAVIUM_OCTEON_2ND_KERNEL |
18 | bool "Build the kernel to be used as a 2nd kernel on the same chip" | 14 | bool "Build the kernel to be used as a 2nd kernel on the same chip" |
19 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
20 | default "n" | 15 | default "n" |
21 | help | 16 | help |
22 | This option configures this kernel to be linked at a different | 17 | This option configures this kernel to be linked at a different |
@@ -26,7 +21,6 @@ config CAVIUM_OCTEON_2ND_KERNEL | |||
26 | 21 | ||
27 | config CAVIUM_OCTEON_HW_FIX_UNALIGNED | 22 | config CAVIUM_OCTEON_HW_FIX_UNALIGNED |
28 | bool "Enable hardware fixups of unaligned loads and stores" | 23 | bool "Enable hardware fixups of unaligned loads and stores" |
29 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
30 | default "y" | 24 | default "y" |
31 | help | 25 | help |
32 | Configure the Octeon hardware to automatically fix unaligned loads | 26 | Configure the Octeon hardware to automatically fix unaligned loads |
@@ -38,7 +32,6 @@ config CAVIUM_OCTEON_HW_FIX_UNALIGNED | |||
38 | 32 | ||
39 | config CAVIUM_OCTEON_CVMSEG_SIZE | 33 | config CAVIUM_OCTEON_CVMSEG_SIZE |
40 | int "Number of L1 cache lines reserved for CVMSEG memory" | 34 | int "Number of L1 cache lines reserved for CVMSEG memory" |
41 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
42 | range 0 54 | 35 | range 0 54 |
43 | default 1 | 36 | default 1 |
44 | help | 37 | help |
@@ -50,7 +43,6 @@ config CAVIUM_OCTEON_CVMSEG_SIZE | |||
50 | 43 | ||
51 | config CAVIUM_OCTEON_LOCK_L2 | 44 | config CAVIUM_OCTEON_LOCK_L2 |
52 | bool "Lock often used kernel code in the L2" | 45 | bool "Lock often used kernel code in the L2" |
53 | depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS | ||
54 | default "y" | 46 | default "y" |
55 | help | 47 | help |
56 | Enable locking parts of the kernel into the L2 cache. | 48 | Enable locking parts of the kernel into the L2 cache. |
@@ -93,7 +85,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY | |||
93 | config ARCH_SPARSEMEM_ENABLE | 85 | config ARCH_SPARSEMEM_ENABLE |
94 | def_bool y | 86 | def_bool y |
95 | select SPARSEMEM_STATIC | 87 | select SPARSEMEM_STATIC |
96 | depends on CPU_CAVIUM_OCTEON | ||
97 | 88 | ||
98 | config CAVIUM_OCTEON_HELPER | 89 | config CAVIUM_OCTEON_HELPER |
99 | def_bool y | 90 | def_bool y |
@@ -107,6 +98,8 @@ config NEED_SG_DMA_LENGTH | |||
107 | 98 | ||
108 | config SWIOTLB | 99 | config SWIOTLB |
109 | def_bool y | 100 | def_bool y |
110 | depends on CPU_CAVIUM_OCTEON | ||
111 | select IOMMU_HELPER | 101 | select IOMMU_HELPER |
112 | select NEED_SG_DMA_LENGTH | 102 | select NEED_SG_DMA_LENGTH |
103 | |||
104 | |||
105 | endif # CPU_CAVIUM_OCTEON | ||
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 0707fae3f0ee..2d9028f1474c 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -288,7 +288,6 @@ void octeon_user_io_init(void) | |||
288 | union octeon_cvmemctl cvmmemctl; | 288 | union octeon_cvmemctl cvmmemctl; |
289 | union cvmx_iob_fau_timeout fau_timeout; | 289 | union cvmx_iob_fau_timeout fau_timeout; |
290 | union cvmx_pow_nw_tim nm_tim; | 290 | union cvmx_pow_nw_tim nm_tim; |
291 | uint64_t cvmctl; | ||
292 | 291 | ||
293 | /* Get the current settings for CP0_CVMMEMCTL_REG */ | 292 | /* Get the current settings for CP0_CVMMEMCTL_REG */ |
294 | cvmmemctl.u64 = read_c0_cvmmemctl(); | 293 | cvmmemctl.u64 = read_c0_cvmmemctl(); |
@@ -392,12 +391,6 @@ void octeon_user_io_init(void) | |||
392 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, | 391 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, |
393 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128); | 392 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128); |
394 | 393 | ||
395 | /* Move the performance counter interrupts to IRQ 6 */ | ||
396 | cvmctl = read_c0_cvmctl(); | ||
397 | cvmctl &= ~(7 << 7); | ||
398 | cvmctl |= 6 << 7; | ||
399 | write_c0_cvmctl(cvmctl); | ||
400 | |||
401 | /* Set a default for the hardware timeouts */ | 394 | /* Set a default for the hardware timeouts */ |
402 | fau_timeout.u64 = 0; | 395 | fau_timeout.u64 = 0; |
403 | fau_timeout.s.tout_val = 0xfff; | 396 | fau_timeout.s.tout_val = 0xfff; |
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 76923eeb58b9..8b606423bbd7 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c | |||
@@ -37,7 +37,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id) | |||
37 | uint64_t action; | 37 | uint64_t action; |
38 | 38 | ||
39 | /* Load the mailbox register to figure out what we're supposed to do */ | 39 | /* Load the mailbox register to figure out what we're supposed to do */ |
40 | action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)); | 40 | action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff; |
41 | 41 | ||
42 | /* Clear the mailbox to clear the interrupt */ | 42 | /* Clear the mailbox to clear the interrupt */ |
43 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); | 43 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); |
@@ -202,16 +202,15 @@ void octeon_prepare_cpus(unsigned int max_cpus) | |||
202 | if (labi->labi_signature != LABI_SIGNATURE) | 202 | if (labi->labi_signature != LABI_SIGNATURE) |
203 | panic("The bootloader version on this board is incorrect."); | 203 | panic("The bootloader version on this board is incorrect."); |
204 | #endif | 204 | #endif |
205 | 205 | /* | |
206 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); | 206 | * Only the low order mailbox bits are used for IPIs, leave |
207 | * the other bits alone. | ||
208 | */ | ||
209 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff); | ||
207 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, | 210 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, |
208 | "mailbox0", mailbox_interrupt)) { | 211 | "SMP-IPI", mailbox_interrupt)) { |
209 | panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); | 212 | panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); |
210 | } | 213 | } |
211 | if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED, | ||
212 | "mailbox1", mailbox_interrupt)) { | ||
213 | panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n"); | ||
214 | } | ||
215 | } | 214 | } |
216 | 215 | ||
217 | /** | 216 | /** |
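The SMP change above confines IPI handling to the low 16 mailbox bits so that other users of the mailbox register are left untouched. A short sketch of the handler-side masking, following the modified mailbox_interrupt():

/* Sketch: act on, and acknowledge, only the low-order IPI bits. */
uint64_t action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;

cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);	/* clear only those bits */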
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig index 167c1d07b809..b6acd2f256b6 100644 --- a/arch/mips/configs/lemote2f_defconfig +++ b/arch/mips/configs/lemote2f_defconfig | |||
@@ -86,8 +86,8 @@ CONFIG_NET_SCHED=y | |||
86 | CONFIG_NET_EMATCH=y | 86 | CONFIG_NET_EMATCH=y |
87 | CONFIG_NET_CLS_ACT=y | 87 | CONFIG_NET_CLS_ACT=y |
88 | CONFIG_BT=m | 88 | CONFIG_BT=m |
89 | CONFIG_BT_L2CAP=m | 89 | CONFIG_BT_L2CAP=y |
90 | CONFIG_BT_SCO=m | 90 | CONFIG_BT_SCO=y |
91 | CONFIG_BT_RFCOMM=m | 91 | CONFIG_BT_RFCOMM=m |
92 | CONFIG_BT_RFCOMM_TTY=y | 92 | CONFIG_BT_RFCOMM_TTY=y |
93 | CONFIG_BT_BNEP=m | 93 | CONFIG_BT_BNEP=m |
@@ -329,7 +329,7 @@ CONFIG_USB_LED=m | |||
329 | CONFIG_USB_GADGET=m | 329 | CONFIG_USB_GADGET=m |
330 | CONFIG_USB_GADGET_M66592=y | 330 | CONFIG_USB_GADGET_M66592=y |
331 | CONFIG_MMC=m | 331 | CONFIG_MMC=m |
332 | CONFIG_LEDS_CLASS=m | 332 | CONFIG_LEDS_CLASS=y |
333 | CONFIG_STAGING=y | 333 | CONFIG_STAGING=y |
334 | # CONFIG_STAGING_EXCLUDE_BUILD is not set | 334 | # CONFIG_STAGING_EXCLUDE_BUILD is not set |
335 | CONFIG_FB_SM7XX=y | 335 | CONFIG_FB_SM7XX=y |
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index 7270f3183bda..5527abbb7dea 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig | |||
@@ -374,7 +374,7 @@ CONFIG_FB_CIRRUS=y | |||
374 | # CONFIG_VGA_CONSOLE is not set | 374 | # CONFIG_VGA_CONSOLE is not set |
375 | CONFIG_FRAMEBUFFER_CONSOLE=y | 375 | CONFIG_FRAMEBUFFER_CONSOLE=y |
376 | CONFIG_HID=m | 376 | CONFIG_HID=m |
377 | CONFIG_LEDS_CLASS=m | 377 | CONFIG_LEDS_CLASS=y |
378 | CONFIG_LEDS_TRIGGER_TIMER=m | 378 | CONFIG_LEDS_TRIGGER_TIMER=m |
379 | CONFIG_LEDS_TRIGGER_IDE_DISK=y | 379 | CONFIG_LEDS_TRIGGER_IDE_DISK=y |
380 | CONFIG_LEDS_TRIGGER_HEARTBEAT=m | 380 | CONFIG_LEDS_TRIGGER_HEARTBEAT=m |
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index a97a42c6b2c8..37862b2ce363 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig | |||
@@ -225,8 +225,8 @@ CONFIG_TOSHIBA_FIR=m | |||
225 | CONFIG_VLSI_FIR=m | 225 | CONFIG_VLSI_FIR=m |
226 | CONFIG_MCS_FIR=m | 226 | CONFIG_MCS_FIR=m |
227 | CONFIG_BT=m | 227 | CONFIG_BT=m |
228 | CONFIG_BT_L2CAP=m | 228 | CONFIG_BT_L2CAP=y |
229 | CONFIG_BT_SCO=m | 229 | CONFIG_BT_SCO=y |
230 | CONFIG_BT_RFCOMM=m | 230 | CONFIG_BT_RFCOMM=m |
231 | CONFIG_BT_RFCOMM_TTY=y | 231 | CONFIG_BT_RFCOMM_TTY=y |
232 | CONFIG_BT_BNEP=m | 232 | CONFIG_BT_BNEP=m |
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig new file mode 100644 index 000000000000..e4b399fdaa61 --- /dev/null +++ b/arch/mips/configs/nlm_xlr_defconfig | |||
@@ -0,0 +1,574 @@ | |||
1 | CONFIG_NLM_XLR_BOARD=y | ||
2 | CONFIG_HIGHMEM=y | ||
3 | CONFIG_KSM=y | ||
4 | CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 | ||
5 | CONFIG_SMP=y | ||
6 | CONFIG_NO_HZ=y | ||
7 | CONFIG_HIGH_RES_TIMERS=y | ||
8 | CONFIG_PREEMPT_VOLUNTARY=y | ||
9 | CONFIG_KEXEC=y | ||
10 | CONFIG_EXPERIMENTAL=y | ||
11 | CONFIG_CROSS_COMPILE="mips64-unknown-linux-gnu-" | ||
12 | # CONFIG_LOCALVERSION_AUTO is not set | ||
13 | CONFIG_SYSVIPC=y | ||
14 | CONFIG_POSIX_MQUEUE=y | ||
15 | CONFIG_BSD_PROCESS_ACCT=y | ||
16 | CONFIG_BSD_PROCESS_ACCT_V3=y | ||
17 | CONFIG_TASKSTATS=y | ||
18 | CONFIG_TASK_DELAY_ACCT=y | ||
19 | CONFIG_TASK_XACCT=y | ||
20 | CONFIG_TASK_IO_ACCOUNTING=y | ||
21 | CONFIG_AUDIT=y | ||
22 | CONFIG_NAMESPACES=y | ||
23 | CONFIG_SCHED_AUTOGROUP=y | ||
24 | CONFIG_BLK_DEV_INITRD=y | ||
25 | CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs" | ||
26 | CONFIG_RD_BZIP2=y | ||
27 | CONFIG_RD_LZMA=y | ||
28 | CONFIG_INITRAMFS_COMPRESSION_GZIP=y | ||
29 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
30 | CONFIG_EXPERT=y | ||
31 | CONFIG_KALLSYMS_ALL=y | ||
32 | # CONFIG_ELF_CORE is not set | ||
33 | # CONFIG_PCSPKR_PLATFORM is not set | ||
34 | # CONFIG_PERF_EVENTS is not set | ||
35 | # CONFIG_COMPAT_BRK is not set | ||
36 | CONFIG_PROFILING=y | ||
37 | CONFIG_MODULES=y | ||
38 | CONFIG_MODULE_UNLOAD=y | ||
39 | CONFIG_MODVERSIONS=y | ||
40 | CONFIG_MODULE_SRCVERSION_ALL=y | ||
41 | CONFIG_BLK_DEV_INTEGRITY=y | ||
42 | CONFIG_BINFMT_MISC=m | ||
43 | CONFIG_PM_RUNTIME=y | ||
44 | CONFIG_PM_DEBUG=y | ||
45 | CONFIG_PACKET=y | ||
46 | CONFIG_UNIX=y | ||
47 | CONFIG_XFRM_USER=m | ||
48 | CONFIG_NET_KEY=m | ||
49 | CONFIG_INET=y | ||
50 | CONFIG_IP_MULTICAST=y | ||
51 | CONFIG_IP_ADVANCED_ROUTER=y | ||
52 | CONFIG_IP_MULTIPLE_TABLES=y | ||
53 | CONFIG_IP_ROUTE_MULTIPATH=y | ||
54 | CONFIG_IP_ROUTE_VERBOSE=y | ||
55 | CONFIG_NET_IPIP=m | ||
56 | CONFIG_IP_MROUTE=y | ||
57 | CONFIG_IP_PIMSM_V1=y | ||
58 | CONFIG_IP_PIMSM_V2=y | ||
59 | CONFIG_SYN_COOKIES=y | ||
60 | CONFIG_INET_AH=m | ||
61 | CONFIG_INET_ESP=m | ||
62 | CONFIG_INET_IPCOMP=m | ||
63 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | ||
64 | CONFIG_INET_XFRM_MODE_TUNNEL=m | ||
65 | CONFIG_INET_XFRM_MODE_BEET=m | ||
66 | CONFIG_TCP_CONG_ADVANCED=y | ||
67 | CONFIG_TCP_CONG_HSTCP=m | ||
68 | CONFIG_TCP_CONG_HYBLA=m | ||
69 | CONFIG_TCP_CONG_SCALABLE=m | ||
70 | CONFIG_TCP_CONG_LP=m | ||
71 | CONFIG_TCP_CONG_VENO=m | ||
72 | CONFIG_TCP_CONG_YEAH=m | ||
73 | CONFIG_TCP_CONG_ILLINOIS=m | ||
74 | CONFIG_TCP_MD5SIG=y | ||
75 | CONFIG_IPV6=y | ||
76 | CONFIG_IPV6_PRIVACY=y | ||
77 | CONFIG_INET6_AH=m | ||
78 | CONFIG_INET6_ESP=m | ||
79 | CONFIG_INET6_IPCOMP=m | ||
80 | CONFIG_INET6_XFRM_MODE_TRANSPORT=m | ||
81 | CONFIG_INET6_XFRM_MODE_TUNNEL=m | ||
82 | CONFIG_INET6_XFRM_MODE_BEET=m | ||
83 | CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m | ||
84 | CONFIG_IPV6_SIT=m | ||
85 | CONFIG_IPV6_TUNNEL=m | ||
86 | CONFIG_IPV6_MULTIPLE_TABLES=y | ||
87 | CONFIG_NETLABEL=y | ||
88 | CONFIG_NETFILTER=y | ||
89 | CONFIG_NF_CONNTRACK=m | ||
90 | CONFIG_NF_CONNTRACK_SECMARK=y | ||
91 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
92 | CONFIG_NF_CT_PROTO_UDPLITE=m | ||
93 | CONFIG_NF_CONNTRACK_AMANDA=m | ||
94 | CONFIG_NF_CONNTRACK_FTP=m | ||
95 | CONFIG_NF_CONNTRACK_H323=m | ||
96 | CONFIG_NF_CONNTRACK_IRC=m | ||
97 | CONFIG_NF_CONNTRACK_NETBIOS_NS=m | ||
98 | CONFIG_NF_CONNTRACK_PPTP=m | ||
99 | CONFIG_NF_CONNTRACK_SANE=m | ||
100 | CONFIG_NF_CONNTRACK_SIP=m | ||
101 | CONFIG_NF_CONNTRACK_TFTP=m | ||
102 | CONFIG_NF_CT_NETLINK=m | ||
103 | CONFIG_NETFILTER_TPROXY=m | ||
104 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | ||
105 | CONFIG_NETFILTER_XT_TARGET_CONNMARK=m | ||
106 | CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m | ||
107 | CONFIG_NETFILTER_XT_TARGET_DSCP=m | ||
108 | CONFIG_NETFILTER_XT_TARGET_MARK=m | ||
109 | CONFIG_NETFILTER_XT_TARGET_NFLOG=m | ||
110 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | ||
111 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | ||
112 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
113 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | ||
114 | CONFIG_NETFILTER_XT_TARGET_SECMARK=m | ||
115 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | ||
116 | CONFIG_NETFILTER_XT_MATCH_CLUSTER=m | ||
117 | CONFIG_NETFILTER_XT_MATCH_COMMENT=m | ||
118 | CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m | ||
119 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m | ||
120 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=m | ||
121 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m | ||
122 | CONFIG_NETFILTER_XT_MATCH_DSCP=m | ||
123 | CONFIG_NETFILTER_XT_MATCH_ESP=m | ||
124 | CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m | ||
125 | CONFIG_NETFILTER_XT_MATCH_HELPER=m | ||
126 | CONFIG_NETFILTER_XT_MATCH_IPRANGE=m | ||
127 | CONFIG_NETFILTER_XT_MATCH_LENGTH=m | ||
128 | CONFIG_NETFILTER_XT_MATCH_LIMIT=m | ||
129 | CONFIG_NETFILTER_XT_MATCH_MAC=m | ||
130 | CONFIG_NETFILTER_XT_MATCH_MARK=m | ||
131 | CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m | ||
132 | CONFIG_NETFILTER_XT_MATCH_OSF=m | ||
133 | CONFIG_NETFILTER_XT_MATCH_OWNER=m | ||
134 | CONFIG_NETFILTER_XT_MATCH_POLICY=m | ||
135 | CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m | ||
136 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m | ||
137 | CONFIG_NETFILTER_XT_MATCH_QUOTA=m | ||
138 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | ||
139 | CONFIG_NETFILTER_XT_MATCH_REALM=m | ||
140 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | ||
141 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
142 | CONFIG_NETFILTER_XT_MATCH_STATE=m | ||
143 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | ||
144 | CONFIG_NETFILTER_XT_MATCH_STRING=m | ||
145 | CONFIG_NETFILTER_XT_MATCH_TCPMSS=m | ||
146 | CONFIG_NETFILTER_XT_MATCH_TIME=m | ||
147 | CONFIG_NETFILTER_XT_MATCH_U32=m | ||
148 | CONFIG_IP_VS=m | ||
149 | CONFIG_IP_VS_IPV6=y | ||
150 | CONFIG_IP_VS_PROTO_TCP=y | ||
151 | CONFIG_IP_VS_PROTO_UDP=y | ||
152 | CONFIG_IP_VS_PROTO_ESP=y | ||
153 | CONFIG_IP_VS_PROTO_AH=y | ||
154 | CONFIG_IP_VS_RR=m | ||
155 | CONFIG_IP_VS_WRR=m | ||
156 | CONFIG_IP_VS_LC=m | ||
157 | CONFIG_IP_VS_WLC=m | ||
158 | CONFIG_IP_VS_LBLC=m | ||
159 | CONFIG_IP_VS_LBLCR=m | ||
160 | CONFIG_IP_VS_DH=m | ||
161 | CONFIG_IP_VS_SH=m | ||
162 | CONFIG_IP_VS_SED=m | ||
163 | CONFIG_IP_VS_NQ=m | ||
164 | CONFIG_IP_VS_FTP=m | ||
165 | CONFIG_NF_CONNTRACK_IPV4=m | ||
166 | CONFIG_IP_NF_QUEUE=m | ||
167 | CONFIG_IP_NF_IPTABLES=m | ||
168 | CONFIG_IP_NF_MATCH_AH=m | ||
169 | CONFIG_IP_NF_MATCH_ECN=m | ||
170 | CONFIG_IP_NF_MATCH_TTL=m | ||
171 | CONFIG_IP_NF_FILTER=m | ||
172 | CONFIG_IP_NF_TARGET_REJECT=m | ||
173 | CONFIG_IP_NF_TARGET_LOG=m | ||
174 | CONFIG_IP_NF_TARGET_ULOG=m | ||
175 | CONFIG_NF_NAT=m | ||
176 | CONFIG_IP_NF_TARGET_MASQUERADE=m | ||
177 | CONFIG_IP_NF_TARGET_NETMAP=m | ||
178 | CONFIG_IP_NF_TARGET_REDIRECT=m | ||
179 | CONFIG_IP_NF_MANGLE=m | ||
180 | CONFIG_IP_NF_TARGET_CLUSTERIP=m | ||
181 | CONFIG_IP_NF_TARGET_ECN=m | ||
182 | CONFIG_IP_NF_TARGET_TTL=m | ||
183 | CONFIG_IP_NF_RAW=m | ||
184 | CONFIG_IP_NF_SECURITY=m | ||
185 | CONFIG_IP_NF_ARPTABLES=m | ||
186 | CONFIG_IP_NF_ARPFILTER=m | ||
187 | CONFIG_IP_NF_ARP_MANGLE=m | ||
188 | CONFIG_NF_CONNTRACK_IPV6=m | ||
189 | CONFIG_IP6_NF_QUEUE=m | ||
190 | CONFIG_IP6_NF_IPTABLES=m | ||
191 | CONFIG_IP6_NF_MATCH_AH=m | ||
192 | CONFIG_IP6_NF_MATCH_EUI64=m | ||
193 | CONFIG_IP6_NF_MATCH_FRAG=m | ||
194 | CONFIG_IP6_NF_MATCH_OPTS=m | ||
195 | CONFIG_IP6_NF_MATCH_HL=m | ||
196 | CONFIG_IP6_NF_MATCH_IPV6HEADER=m | ||
197 | CONFIG_IP6_NF_MATCH_MH=m | ||
198 | CONFIG_IP6_NF_MATCH_RT=m | ||
199 | CONFIG_IP6_NF_TARGET_HL=m | ||
200 | CONFIG_IP6_NF_TARGET_LOG=m | ||
201 | CONFIG_IP6_NF_FILTER=m | ||
202 | CONFIG_IP6_NF_TARGET_REJECT=m | ||
203 | CONFIG_IP6_NF_MANGLE=m | ||
204 | CONFIG_IP6_NF_RAW=m | ||
205 | CONFIG_IP6_NF_SECURITY=m | ||
206 | CONFIG_DECNET_NF_GRABULATOR=m | ||
207 | CONFIG_BRIDGE_NF_EBTABLES=m | ||
208 | CONFIG_BRIDGE_EBT_BROUTE=m | ||
209 | CONFIG_BRIDGE_EBT_T_FILTER=m | ||
210 | CONFIG_BRIDGE_EBT_T_NAT=m | ||
211 | CONFIG_BRIDGE_EBT_802_3=m | ||
212 | CONFIG_BRIDGE_EBT_AMONG=m | ||
213 | CONFIG_BRIDGE_EBT_ARP=m | ||
214 | CONFIG_BRIDGE_EBT_IP=m | ||
215 | CONFIG_BRIDGE_EBT_IP6=m | ||
216 | CONFIG_BRIDGE_EBT_LIMIT=m | ||
217 | CONFIG_BRIDGE_EBT_MARK=m | ||
218 | CONFIG_BRIDGE_EBT_PKTTYPE=m | ||
219 | CONFIG_BRIDGE_EBT_STP=m | ||
220 | CONFIG_BRIDGE_EBT_VLAN=m | ||
221 | CONFIG_BRIDGE_EBT_ARPREPLY=m | ||
222 | CONFIG_BRIDGE_EBT_DNAT=m | ||
223 | CONFIG_BRIDGE_EBT_MARK_T=m | ||
224 | CONFIG_BRIDGE_EBT_REDIRECT=m | ||
225 | CONFIG_BRIDGE_EBT_SNAT=m | ||
226 | CONFIG_BRIDGE_EBT_LOG=m | ||
227 | CONFIG_BRIDGE_EBT_ULOG=m | ||
228 | CONFIG_BRIDGE_EBT_NFLOG=m | ||
229 | CONFIG_IP_DCCP=m | ||
230 | CONFIG_RDS=m | ||
231 | CONFIG_RDS_TCP=m | ||
232 | CONFIG_TIPC=m | ||
233 | CONFIG_ATM=m | ||
234 | CONFIG_ATM_CLIP=m | ||
235 | CONFIG_ATM_LANE=m | ||
236 | CONFIG_ATM_MPOA=m | ||
237 | CONFIG_ATM_BR2684=m | ||
238 | CONFIG_BRIDGE=m | ||
239 | CONFIG_VLAN_8021Q=m | ||
240 | CONFIG_VLAN_8021Q_GVRP=y | ||
241 | CONFIG_DECNET=m | ||
242 | CONFIG_LLC2=m | ||
243 | CONFIG_IPX=m | ||
244 | CONFIG_ATALK=m | ||
245 | CONFIG_DEV_APPLETALK=m | ||
246 | CONFIG_IPDDP=m | ||
247 | CONFIG_IPDDP_ENCAP=y | ||
248 | CONFIG_IPDDP_DECAP=y | ||
249 | CONFIG_X25=m | ||
250 | CONFIG_LAPB=m | ||
251 | CONFIG_ECONET=m | ||
252 | CONFIG_ECONET_AUNUDP=y | ||
253 | CONFIG_ECONET_NATIVE=y | ||
254 | CONFIG_WAN_ROUTER=m | ||
255 | CONFIG_PHONET=m | ||
256 | CONFIG_IEEE802154=m | ||
257 | CONFIG_NET_SCHED=y | ||
258 | CONFIG_NET_SCH_CBQ=m | ||
259 | CONFIG_NET_SCH_HTB=m | ||
260 | CONFIG_NET_SCH_HFSC=m | ||
261 | CONFIG_NET_SCH_ATM=m | ||
262 | CONFIG_NET_SCH_PRIO=m | ||
263 | CONFIG_NET_SCH_MULTIQ=m | ||
264 | CONFIG_NET_SCH_RED=m | ||
265 | CONFIG_NET_SCH_SFQ=m | ||
266 | CONFIG_NET_SCH_TEQL=m | ||
267 | CONFIG_NET_SCH_TBF=m | ||
268 | CONFIG_NET_SCH_GRED=m | ||
269 | CONFIG_NET_SCH_DSMARK=m | ||
270 | CONFIG_NET_SCH_NETEM=m | ||
271 | CONFIG_NET_SCH_DRR=m | ||
272 | CONFIG_NET_SCH_INGRESS=m | ||
273 | CONFIG_NET_CLS_BASIC=m | ||
274 | CONFIG_NET_CLS_TCINDEX=m | ||
275 | CONFIG_NET_CLS_ROUTE4=m | ||
276 | CONFIG_NET_CLS_FW=m | ||
277 | CONFIG_NET_CLS_U32=m | ||
278 | CONFIG_CLS_U32_MARK=y | ||
279 | CONFIG_NET_CLS_RSVP=m | ||
280 | CONFIG_NET_CLS_RSVP6=m | ||
281 | CONFIG_NET_CLS_FLOW=m | ||
282 | CONFIG_NET_EMATCH=y | ||
283 | CONFIG_NET_EMATCH_CMP=m | ||
284 | CONFIG_NET_EMATCH_NBYTE=m | ||
285 | CONFIG_NET_EMATCH_U32=m | ||
286 | CONFIG_NET_EMATCH_META=m | ||
287 | CONFIG_NET_EMATCH_TEXT=m | ||
288 | CONFIG_NET_CLS_ACT=y | ||
289 | CONFIG_NET_ACT_POLICE=m | ||
290 | CONFIG_NET_ACT_GACT=m | ||
291 | CONFIG_GACT_PROB=y | ||
292 | CONFIG_NET_ACT_MIRRED=m | ||
293 | CONFIG_NET_ACT_IPT=m | ||
294 | CONFIG_NET_ACT_NAT=m | ||
295 | CONFIG_NET_ACT_PEDIT=m | ||
296 | CONFIG_NET_ACT_SIMP=m | ||
297 | CONFIG_NET_ACT_SKBEDIT=m | ||
298 | CONFIG_DCB=y | ||
299 | CONFIG_NET_PKTGEN=m | ||
300 | # CONFIG_WIRELESS is not set | ||
301 | CONFIG_DEVTMPFS=y | ||
302 | CONFIG_DEVTMPFS_MOUNT=y | ||
303 | # CONFIG_STANDALONE is not set | ||
304 | CONFIG_CONNECTOR=y | ||
305 | CONFIG_MTD=m | ||
306 | CONFIG_BLK_DEV_LOOP=y | ||
307 | CONFIG_BLK_DEV_CRYPTOLOOP=m | ||
308 | CONFIG_BLK_DEV_NBD=m | ||
309 | CONFIG_BLK_DEV_OSD=m | ||
310 | CONFIG_BLK_DEV_RAM=y | ||
311 | CONFIG_BLK_DEV_RAM_SIZE=65536 | ||
312 | CONFIG_CDROM_PKTCDVD=y | ||
313 | CONFIG_MISC_DEVICES=y | ||
314 | CONFIG_RAID_ATTRS=m | ||
315 | CONFIG_SCSI=y | ||
316 | CONFIG_SCSI_TGT=m | ||
317 | CONFIG_BLK_DEV_SD=y | ||
318 | CONFIG_CHR_DEV_ST=m | ||
319 | CONFIG_CHR_DEV_OSST=m | ||
320 | CONFIG_BLK_DEV_SR=y | ||
321 | CONFIG_CHR_DEV_SG=y | ||
322 | CONFIG_CHR_DEV_SCH=m | ||
323 | CONFIG_SCSI_MULTI_LUN=y | ||
324 | CONFIG_SCSI_CONSTANTS=y | ||
325 | CONFIG_SCSI_LOGGING=y | ||
326 | CONFIG_SCSI_SCAN_ASYNC=y | ||
327 | CONFIG_SCSI_SPI_ATTRS=m | ||
328 | CONFIG_SCSI_FC_TGT_ATTRS=y | ||
329 | CONFIG_SCSI_SAS_LIBSAS=m | ||
330 | CONFIG_SCSI_SRP_ATTRS=m | ||
331 | CONFIG_SCSI_SRP_TGT_ATTRS=y | ||
332 | CONFIG_ISCSI_TCP=m | ||
333 | CONFIG_LIBFCOE=m | ||
334 | CONFIG_SCSI_DEBUG=m | ||
335 | CONFIG_SCSI_DH=y | ||
336 | CONFIG_SCSI_DH_RDAC=m | ||
337 | CONFIG_SCSI_DH_HP_SW=m | ||
338 | CONFIG_SCSI_DH_EMC=m | ||
339 | CONFIG_SCSI_DH_ALUA=m | ||
340 | CONFIG_SCSI_OSD_INITIATOR=m | ||
341 | CONFIG_SCSI_OSD_ULD=m | ||
342 | # CONFIG_INPUT_MOUSEDEV is not set | ||
343 | CONFIG_INPUT_EVDEV=y | ||
344 | CONFIG_INPUT_EVBUG=m | ||
345 | # CONFIG_INPUT_KEYBOARD is not set | ||
346 | # CONFIG_INPUT_MOUSE is not set | ||
347 | # CONFIG_SERIO_I8042 is not set | ||
348 | CONFIG_SERIO_SERPORT=m | ||
349 | CONFIG_SERIO_LIBPS2=y | ||
350 | CONFIG_SERIO_RAW=m | ||
351 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
352 | CONFIG_DEVPTS_MULTIPLE_INSTANCES=y | ||
353 | CONFIG_LEGACY_PTY_COUNT=0 | ||
354 | CONFIG_SERIAL_NONSTANDARD=y | ||
355 | CONFIG_N_HDLC=m | ||
356 | # CONFIG_DEVKMEM is not set | ||
357 | CONFIG_STALDRV=y | ||
358 | CONFIG_SERIAL_8250=y | ||
359 | CONFIG_SERIAL_8250_CONSOLE=y | ||
360 | CONFIG_SERIAL_8250_NR_UARTS=48 | ||
361 | CONFIG_SERIAL_8250_EXTENDED=y | ||
362 | CONFIG_SERIAL_8250_MANY_PORTS=y | ||
363 | CONFIG_SERIAL_8250_SHARE_IRQ=y | ||
364 | CONFIG_SERIAL_8250_RSA=y | ||
365 | CONFIG_HW_RANDOM=y | ||
366 | CONFIG_HW_RANDOM_TIMERIOMEM=m | ||
367 | CONFIG_RAW_DRIVER=m | ||
368 | # CONFIG_HWMON is not set | ||
369 | # CONFIG_VGA_CONSOLE is not set | ||
370 | # CONFIG_HID_SUPPORT is not set | ||
371 | # CONFIG_USB_SUPPORT is not set | ||
372 | CONFIG_UIO=y | ||
373 | CONFIG_UIO_PDRV=m | ||
374 | CONFIG_UIO_PDRV_GENIRQ=m | ||
375 | CONFIG_EXT2_FS=y | ||
376 | CONFIG_EXT2_FS_XATTR=y | ||
377 | CONFIG_EXT2_FS_POSIX_ACL=y | ||
378 | CONFIG_EXT2_FS_SECURITY=y | ||
379 | CONFIG_EXT3_FS=y | ||
380 | CONFIG_EXT3_FS_POSIX_ACL=y | ||
381 | CONFIG_EXT3_FS_SECURITY=y | ||
382 | CONFIG_EXT4_FS=y | ||
383 | CONFIG_EXT4_FS_POSIX_ACL=y | ||
384 | CONFIG_EXT4_FS_SECURITY=y | ||
385 | CONFIG_GFS2_FS=m | ||
386 | CONFIG_GFS2_FS_LOCKING_DLM=y | ||
387 | CONFIG_OCFS2_FS=m | ||
388 | CONFIG_BTRFS_FS=m | ||
389 | CONFIG_BTRFS_FS_POSIX_ACL=y | ||
390 | CONFIG_NILFS2_FS=m | ||
391 | CONFIG_QUOTA_NETLINK_INTERFACE=y | ||
392 | # CONFIG_PRINT_QUOTA_WARNING is not set | ||
393 | CONFIG_QFMT_V1=m | ||
394 | CONFIG_QFMT_V2=m | ||
395 | CONFIG_AUTOFS4_FS=m | ||
396 | CONFIG_FUSE_FS=y | ||
397 | CONFIG_CUSE=m | ||
398 | CONFIG_FSCACHE=m | ||
399 | CONFIG_FSCACHE_STATS=y | ||
400 | CONFIG_FSCACHE_HISTOGRAM=y | ||
401 | CONFIG_CACHEFILES=m | ||
402 | CONFIG_ISO9660_FS=m | ||
403 | CONFIG_JOLIET=y | ||
404 | CONFIG_ZISOFS=y | ||
405 | CONFIG_UDF_FS=m | ||
406 | CONFIG_MSDOS_FS=m | ||
407 | CONFIG_VFAT_FS=m | ||
408 | CONFIG_NTFS_FS=m | ||
409 | CONFIG_PROC_KCORE=y | ||
410 | CONFIG_TMPFS=y | ||
411 | CONFIG_TMPFS_POSIX_ACL=y | ||
412 | CONFIG_CONFIGFS_FS=y | ||
413 | CONFIG_ADFS_FS=m | ||
414 | CONFIG_AFFS_FS=m | ||
415 | CONFIG_ECRYPT_FS=y | ||
416 | CONFIG_HFS_FS=m | ||
417 | CONFIG_HFSPLUS_FS=m | ||
418 | CONFIG_BEFS_FS=m | ||
419 | CONFIG_BFS_FS=m | ||
420 | CONFIG_EFS_FS=m | ||
421 | CONFIG_CRAMFS=m | ||
422 | CONFIG_SQUASHFS=m | ||
423 | CONFIG_VXFS_FS=m | ||
424 | CONFIG_MINIX_FS=m | ||
425 | CONFIG_OMFS_FS=m | ||
426 | CONFIG_HPFS_FS=m | ||
427 | CONFIG_QNX4FS_FS=m | ||
428 | CONFIG_ROMFS_FS=m | ||
429 | CONFIG_SYSV_FS=m | ||
430 | CONFIG_UFS_FS=m | ||
431 | CONFIG_EXOFS_FS=m | ||
432 | CONFIG_NFS_FS=m | ||
433 | CONFIG_NFS_V3=y | ||
434 | CONFIG_NFS_V3_ACL=y | ||
435 | CONFIG_NFS_V4=y | ||
436 | CONFIG_NFS_FSCACHE=y | ||
437 | CONFIG_NFSD=m | ||
438 | CONFIG_NFSD_V3_ACL=y | ||
439 | CONFIG_NFSD_V4=y | ||
440 | CONFIG_CIFS=m | ||
441 | CONFIG_CIFS_WEAK_PW_HASH=y | ||
442 | CONFIG_CIFS_UPCALL=y | ||
443 | CONFIG_CIFS_XATTR=y | ||
444 | CONFIG_CIFS_POSIX=y | ||
445 | CONFIG_CIFS_DFS_UPCALL=y | ||
446 | CONFIG_CIFS_EXPERIMENTAL=y | ||
447 | CONFIG_NCP_FS=m | ||
448 | CONFIG_NCPFS_PACKET_SIGNING=y | ||
449 | CONFIG_NCPFS_IOCTL_LOCKING=y | ||
450 | CONFIG_NCPFS_STRONG=y | ||
451 | CONFIG_NCPFS_NFS_NS=y | ||
452 | CONFIG_NCPFS_OS2_NS=y | ||
453 | CONFIG_NCPFS_NLS=y | ||
454 | CONFIG_NCPFS_EXTRAS=y | ||
455 | CONFIG_CODA_FS=m | ||
456 | CONFIG_AFS_FS=m | ||
457 | CONFIG_PARTITION_ADVANCED=y | ||
458 | CONFIG_ACORN_PARTITION=y | ||
459 | CONFIG_ACORN_PARTITION_ICS=y | ||
460 | CONFIG_ACORN_PARTITION_RISCIX=y | ||
461 | CONFIG_OSF_PARTITION=y | ||
462 | CONFIG_AMIGA_PARTITION=y | ||
463 | CONFIG_ATARI_PARTITION=y | ||
464 | CONFIG_MAC_PARTITION=y | ||
465 | CONFIG_BSD_DISKLABEL=y | ||
466 | CONFIG_MINIX_SUBPARTITION=y | ||
467 | CONFIG_SOLARIS_X86_PARTITION=y | ||
468 | CONFIG_UNIXWARE_DISKLABEL=y | ||
469 | CONFIG_LDM_PARTITION=y | ||
470 | CONFIG_SGI_PARTITION=y | ||
471 | CONFIG_ULTRIX_PARTITION=y | ||
472 | CONFIG_SUN_PARTITION=y | ||
473 | CONFIG_KARMA_PARTITION=y | ||
474 | CONFIG_EFI_PARTITION=y | ||
475 | CONFIG_SYSV68_PARTITION=y | ||
476 | CONFIG_NLS=y | ||
477 | CONFIG_NLS_DEFAULT="cp437" | ||
478 | CONFIG_NLS_CODEPAGE_437=m | ||
479 | CONFIG_NLS_CODEPAGE_737=m | ||
480 | CONFIG_NLS_CODEPAGE_775=m | ||
481 | CONFIG_NLS_CODEPAGE_850=m | ||
482 | CONFIG_NLS_CODEPAGE_852=m | ||
483 | CONFIG_NLS_CODEPAGE_855=m | ||
484 | CONFIG_NLS_CODEPAGE_857=m | ||
485 | CONFIG_NLS_CODEPAGE_860=m | ||
486 | CONFIG_NLS_CODEPAGE_861=m | ||
487 | CONFIG_NLS_CODEPAGE_862=m | ||
488 | CONFIG_NLS_CODEPAGE_863=m | ||
489 | CONFIG_NLS_CODEPAGE_864=m | ||
490 | CONFIG_NLS_CODEPAGE_865=m | ||
491 | CONFIG_NLS_CODEPAGE_866=m | ||
492 | CONFIG_NLS_CODEPAGE_869=m | ||
493 | CONFIG_NLS_CODEPAGE_936=m | ||
494 | CONFIG_NLS_CODEPAGE_950=m | ||
495 | CONFIG_NLS_CODEPAGE_932=m | ||
496 | CONFIG_NLS_CODEPAGE_949=m | ||
497 | CONFIG_NLS_CODEPAGE_874=m | ||
498 | CONFIG_NLS_ISO8859_8=m | ||
499 | CONFIG_NLS_CODEPAGE_1250=m | ||
500 | CONFIG_NLS_CODEPAGE_1251=m | ||
501 | CONFIG_NLS_ASCII=m | ||
502 | CONFIG_NLS_ISO8859_1=m | ||
503 | CONFIG_NLS_ISO8859_2=m | ||
504 | CONFIG_NLS_ISO8859_3=m | ||
505 | CONFIG_NLS_ISO8859_4=m | ||
506 | CONFIG_NLS_ISO8859_5=m | ||
507 | CONFIG_NLS_ISO8859_6=m | ||
508 | CONFIG_NLS_ISO8859_7=m | ||
509 | CONFIG_NLS_ISO8859_9=m | ||
510 | CONFIG_NLS_ISO8859_13=m | ||
511 | CONFIG_NLS_ISO8859_14=m | ||
512 | CONFIG_NLS_ISO8859_15=m | ||
513 | CONFIG_NLS_KOI8_R=m | ||
514 | CONFIG_NLS_KOI8_U=m | ||
515 | CONFIG_PRINTK_TIME=y | ||
516 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | ||
517 | # CONFIG_ENABLE_MUST_CHECK is not set | ||
518 | CONFIG_UNUSED_SYMBOLS=y | ||
519 | CONFIG_DEBUG_KERNEL=y | ||
520 | CONFIG_DETECT_HUNG_TASK=y | ||
521 | CONFIG_SCHEDSTATS=y | ||
522 | CONFIG_TIMER_STATS=y | ||
523 | CONFIG_DEBUG_INFO=y | ||
524 | CONFIG_DEBUG_MEMORY_INIT=y | ||
525 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
526 | CONFIG_SCHED_TRACER=y | ||
527 | CONFIG_BLK_DEV_IO_TRACE=y | ||
528 | CONFIG_KGDB=y | ||
529 | CONFIG_SECURITY=y | ||
530 | CONFIG_SECURITY_NETWORK=y | ||
531 | CONFIG_LSM_MMAP_MIN_ADDR=0 | ||
532 | CONFIG_SECURITY_SELINUX=y | ||
533 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y | ||
534 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 | ||
535 | CONFIG_SECURITY_SELINUX_DISABLE=y | ||
536 | CONFIG_SECURITY_SMACK=y | ||
537 | CONFIG_SECURITY_TOMOYO=y | ||
538 | CONFIG_CRYPTO_NULL=m | ||
539 | CONFIG_CRYPTO_CRYPTD=m | ||
540 | CONFIG_CRYPTO_TEST=m | ||
541 | CONFIG_CRYPTO_CCM=m | ||
542 | CONFIG_CRYPTO_GCM=m | ||
543 | CONFIG_CRYPTO_CTS=m | ||
544 | CONFIG_CRYPTO_LRW=m | ||
545 | CONFIG_CRYPTO_PCBC=m | ||
546 | CONFIG_CRYPTO_XTS=m | ||
547 | CONFIG_CRYPTO_HMAC=y | ||
548 | CONFIG_CRYPTO_XCBC=m | ||
549 | CONFIG_CRYPTO_VMAC=m | ||
550 | CONFIG_CRYPTO_MICHAEL_MIC=m | ||
551 | CONFIG_CRYPTO_RMD128=m | ||
552 | CONFIG_CRYPTO_RMD160=m | ||
553 | CONFIG_CRYPTO_RMD256=m | ||
554 | CONFIG_CRYPTO_RMD320=m | ||
555 | CONFIG_CRYPTO_SHA256=m | ||
556 | CONFIG_CRYPTO_SHA512=m | ||
557 | CONFIG_CRYPTO_TGR192=m | ||
558 | CONFIG_CRYPTO_WP512=m | ||
559 | CONFIG_CRYPTO_ANUBIS=m | ||
560 | CONFIG_CRYPTO_BLOWFISH=m | ||
561 | CONFIG_CRYPTO_CAMELLIA=m | ||
562 | CONFIG_CRYPTO_CAST5=m | ||
563 | CONFIG_CRYPTO_CAST6=m | ||
564 | CONFIG_CRYPTO_FCRYPT=m | ||
565 | CONFIG_CRYPTO_KHAZAD=m | ||
566 | CONFIG_CRYPTO_SALSA20=m | ||
567 | CONFIG_CRYPTO_SEED=m | ||
568 | CONFIG_CRYPTO_SERPENT=m | ||
569 | CONFIG_CRYPTO_TEA=m | ||
570 | CONFIG_CRYPTO_TWOFISH=m | ||
571 | CONFIG_CRYPTO_ZLIB=m | ||
572 | CONFIG_CRYPTO_LZO=m | ||
573 | CONFIG_CRC_CCITT=m | ||
574 | CONFIG_CRC7=m | ||
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h index 650ac9ba734c..b4db69fbc40c 100644 --- a/arch/mips/include/asm/cache.h +++ b/arch/mips/include/asm/cache.h | |||
@@ -17,6 +17,6 @@ | |||
17 | #define SMP_CACHE_SHIFT L1_CACHE_SHIFT | 17 | #define SMP_CACHE_SHIFT L1_CACHE_SHIFT |
18 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | 18 | #define SMP_CACHE_BYTES L1_CACHE_BYTES |
19 | 19 | ||
20 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | 20 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) |
21 | 21 | ||
22 | #endif /* _ASM_CACHE_H */ | 22 | #endif /* _ASM_CACHE_H */ |
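Note: the section rename above (".data.read_mostly" -> ".data..read_mostly") follows the kernel-wide double-dot convention, presumably so kernel-defined sections cannot collide with compiler-generated ".data.<symbol>" sections (e.g. under -fdata-sections); that rationale is an inference, the hunk itself only changes the name. A minimal user-space sketch of the attribute the macro expands to:

#include <stdio.h>

/* Illustrative only: place a variable in a named ELF section, as the
 * __read_mostly macro does.  The default linker script typically folds
 * ".data..read_mostly" into .data through its ".data.*" wildcard. */
static int mostly_read_value
        __attribute__((__section__(".data..read_mostly"))) = 42;

int main(void)
{
        /* "objdump -t" on the binary shows which section the symbol landed in */
        printf("%d\n", mostly_read_value);
        return 0;
}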
diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h index fa4328f9124f..65f9bdd02f1f 100644 --- a/arch/mips/include/asm/cevt-r4k.h +++ b/arch/mips/include/asm/cevt-r4k.h | |||
@@ -14,6 +14,9 @@ | |||
14 | #ifndef __ASM_CEVT_R4K_H | 14 | #ifndef __ASM_CEVT_R4K_H |
15 | #define __ASM_CEVT_R4K_H | 15 | #define __ASM_CEVT_R4K_H |
16 | 16 | ||
17 | #include <linux/clockchips.h> | ||
18 | #include <asm/time.h> | ||
19 | |||
17 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); | 20 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
18 | 21 | ||
19 | void mips_event_handler(struct clock_event_device *dev); | 22 | void mips_event_handler(struct clock_event_device *dev); |
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 86877539c6e8..34c0d3cb116f 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #define PRID_COMP_TOSHIBA 0x070000 | 33 | #define PRID_COMP_TOSHIBA 0x070000 |
34 | #define PRID_COMP_LSI 0x080000 | 34 | #define PRID_COMP_LSI 0x080000 |
35 | #define PRID_COMP_LEXRA 0x0b0000 | 35 | #define PRID_COMP_LEXRA 0x0b0000 |
36 | #define PRID_COMP_NETLOGIC 0x0c0000 | ||
36 | #define PRID_COMP_CAVIUM 0x0d0000 | 37 | #define PRID_COMP_CAVIUM 0x0d0000 |
37 | #define PRID_COMP_INGENIC 0xd00000 | 38 | #define PRID_COMP_INGENIC 0xd00000 |
38 | 39 | ||
@@ -142,6 +143,31 @@ | |||
142 | #define PRID_IMP_JZRISC 0x0200 | 143 | #define PRID_IMP_JZRISC 0x0200 |
143 | 144 | ||
144 | /* | 145 | /* |
146 | * These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC | ||
147 | */ | ||
148 | #define PRID_IMP_NETLOGIC_XLR732 0x0000 | ||
149 | #define PRID_IMP_NETLOGIC_XLR716 0x0200 | ||
150 | #define PRID_IMP_NETLOGIC_XLR532 0x0900 | ||
151 | #define PRID_IMP_NETLOGIC_XLR308 0x0600 | ||
152 | #define PRID_IMP_NETLOGIC_XLR532C 0x0800 | ||
153 | #define PRID_IMP_NETLOGIC_XLR516C 0x0a00 | ||
154 | #define PRID_IMP_NETLOGIC_XLR508C 0x0b00 | ||
155 | #define PRID_IMP_NETLOGIC_XLR308C 0x0f00 | ||
156 | #define PRID_IMP_NETLOGIC_XLS608 0x8000 | ||
157 | #define PRID_IMP_NETLOGIC_XLS408 0x8800 | ||
158 | #define PRID_IMP_NETLOGIC_XLS404 0x8c00 | ||
159 | #define PRID_IMP_NETLOGIC_XLS208 0x8e00 | ||
160 | #define PRID_IMP_NETLOGIC_XLS204 0x8f00 | ||
161 | #define PRID_IMP_NETLOGIC_XLS108 0xce00 | ||
162 | #define PRID_IMP_NETLOGIC_XLS104 0xcf00 | ||
163 | #define PRID_IMP_NETLOGIC_XLS616B 0x4000 | ||
164 | #define PRID_IMP_NETLOGIC_XLS608B 0x4a00 | ||
165 | #define PRID_IMP_NETLOGIC_XLS416B 0x4400 | ||
166 | #define PRID_IMP_NETLOGIC_XLS412B 0x4c00 | ||
167 | #define PRID_IMP_NETLOGIC_XLS408B 0x4e00 | ||
168 | #define PRID_IMP_NETLOGIC_XLS404B 0x4f00 | ||
169 | |||
170 | /* | ||
145 | * Definitions for 7:0 on legacy processors | 171 | * Definitions for 7:0 on legacy processors |
146 | */ | 172 | */ |
147 | 173 | ||
@@ -234,6 +260,7 @@ enum cpu_type_enum { | |||
234 | */ | 260 | */ |
235 | CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, | 261 | CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, |
236 | CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, | 262 | CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, |
263 | CPU_XLR, | ||
237 | 264 | ||
238 | CPU_LAST | 265 | CPU_LAST |
239 | }; | 266 | }; |
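Note: with the PRID_COMP_NETLOGIC / PRID_IMP_NETLOGIC_* values added above, cpu-probe code can recognise NetLogic XLR/XLS parts from the coprocessor-0 PRId register. A stand-alone sketch of the field split the header comments describe (bits 23:16 company, 15:8 implementation, 7:0 revision); the masks and the sample PRId value are defined locally for illustration:

#include <stdio.h>

#define PRID_COMP_MASK            0xff0000
#define PRID_IMP_MASK             0x00ff00
#define PRID_REV_MASK             0x0000ff

#define PRID_COMP_NETLOGIC        0x0c0000
#define PRID_IMP_NETLOGIC_XLR732  0x0000

int main(void)
{
        /* made-up PRId: NetLogic company, XLR732 implementation, revision 4 */
        unsigned int prid = PRID_COMP_NETLOGIC | PRID_IMP_NETLOGIC_XLR732 | 0x04;

        if ((prid & PRID_COMP_MASK) == PRID_COMP_NETLOGIC &&
            (prid & PRID_IMP_MASK) == PRID_IMP_NETLOGIC_XLR732)
                printf("NetLogic XLR732, revision %u\n", prid & PRID_REV_MASK);
        return 0;
}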
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 655f849bd08d..7aa37ddfca4b 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h | |||
@@ -5,7 +5,9 @@ | |||
5 | #include <asm/cache.h> | 5 | #include <asm/cache.h> |
6 | #include <asm-generic/dma-coherent.h> | 6 | #include <asm-generic/dma-coherent.h> |
7 | 7 | ||
8 | #ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */ | ||
8 | #include <dma-coherence.h> | 9 | #include <dma-coherence.h> |
10 | #endif | ||
9 | 11 | ||
10 | extern struct dma_map_ops *mips_dma_map_ops; | 12 | extern struct dma_map_ops *mips_dma_map_ops; |
11 | 13 | ||
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index f5e856015329..c565b7c3f0b5 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h | |||
@@ -70,6 +70,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | |||
70 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | 70 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, |
71 | unsigned long addr, pte_t *ptep) | 71 | unsigned long addr, pte_t *ptep) |
72 | { | 72 | { |
73 | flush_tlb_mm(vma->vm_mm); | ||
73 | } | 74 | } |
74 | 75 | ||
75 | static inline int huge_pte_none(pte_t pte) | 76 | static inline int huge_pte_none(pte_t pte) |
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index 7622ccf75076..1881b316ca45 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h | |||
@@ -20,16 +20,18 @@ | |||
20 | #define WORD_INSN ".word" | 20 | #define WORD_INSN ".word" |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #define JUMP_LABEL(key, label) \ | 23 | static __always_inline bool arch_static_branch(struct jump_label_key *key) |
24 | do { \ | 24 | { |
25 | asm goto("1:\tnop\n\t" \ | 25 | asm goto("1:\tnop\n\t" |
26 | "nop\n\t" \ | 26 | "nop\n\t" |
27 | ".pushsection __jump_table, \"a\"\n\t" \ | 27 | ".pushsection __jump_table, \"aw\"\n\t" |
28 | WORD_INSN " 1b, %l[" #label "], %0\n\t" \ | 28 | WORD_INSN " 1b, %l[l_yes], %0\n\t" |
29 | ".popsection\n\t" \ | 29 | ".popsection\n\t" |
30 | : : "i" (key) : : label); \ | 30 | : : "i" (key) : : l_yes); |
31 | } while (0) | 31 | return false; |
32 | 32 | l_yes: | |
33 | return true; | ||
34 | } | ||
33 | 35 | ||
34 | #endif /* __KERNEL__ */ | 36 | #endif /* __KERNEL__ */ |
35 | 37 | ||
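Note: the hunk above turns the old JUMP_LABEL() statement macro into a C-callable arch_static_branch() returning bool, and marks __jump_table as allocatable and writable ("aw"), presumably so the table can be sorted and updated at run time. A user-space sketch of the bare asm-goto shape it relies on (a GCC/Clang extension); no __jump_table entry is emitted here and the nop is never patched, so this sketch always takes the false path, unlike the real jump-label machinery:

#include <stdbool.h>
#include <stdio.h>

static bool sketch_static_branch(void)
{
        asm goto("nop"                  /* the patch site in the kernel version */
                 : : : : l_yes);        /* l_yes is advertised as a branch target */
        return false;
l_yes:
        return true;
}

int main(void)
{
        printf("slow path taken: %d\n", sketch_static_branch());
        return 0;
}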
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h index a6976619160a..f260ebed713b 100644 --- a/arch/mips/include/asm/mach-au1x00/au1000.h +++ b/arch/mips/include/asm/mach-au1x00/au1000.h | |||
@@ -161,6 +161,45 @@ static inline int alchemy_get_cputype(void) | |||
161 | return ALCHEMY_CPU_UNKNOWN; | 161 | return ALCHEMY_CPU_UNKNOWN; |
162 | } | 162 | } |
163 | 163 | ||
164 | /* return number of uarts on a given cputype */ | ||
165 | static inline int alchemy_get_uarts(int type) | ||
166 | { | ||
167 | switch (type) { | ||
168 | case ALCHEMY_CPU_AU1000: | ||
169 | return 4; | ||
170 | case ALCHEMY_CPU_AU1500: | ||
171 | case ALCHEMY_CPU_AU1200: | ||
172 | return 2; | ||
173 | case ALCHEMY_CPU_AU1100: | ||
174 | case ALCHEMY_CPU_AU1550: | ||
175 | return 3; | ||
176 | } | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | /* enable an UART block if it isn't already */ | ||
181 | static inline void alchemy_uart_enable(u32 uart_phys) | ||
182 | { | ||
183 | void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys); | ||
184 | |||
185 | /* reset, enable clock, deassert reset */ | ||
186 | if ((__raw_readl(addr + 0x100) & 3) != 3) { | ||
187 | __raw_writel(0, addr + 0x100); | ||
188 | wmb(); | ||
189 | __raw_writel(1, addr + 0x100); | ||
190 | wmb(); | ||
191 | } | ||
192 | __raw_writel(3, addr + 0x100); | ||
193 | wmb(); | ||
194 | } | ||
195 | |||
196 | static inline void alchemy_uart_disable(u32 uart_phys) | ||
197 | { | ||
198 | void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys); | ||
199 | __raw_writel(0, addr + 0x100); /* UART_MOD_CNTRL */ | ||
200 | wmb(); | ||
201 | } | ||
202 | |||
164 | static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) | 203 | static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) |
165 | { | 204 | { |
166 | void __iomem *base = (void __iomem *)KSEG1ADDR(uart_phys); | 205 | void __iomem *base = (void __iomem *)KSEG1ADDR(uart_phys); |
@@ -180,6 +219,20 @@ static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) | |||
180 | wmb(); | 219 | wmb(); |
181 | } | 220 | } |
182 | 221 | ||
222 | /* return number of ethernet MACs on a given cputype */ | ||
223 | static inline int alchemy_get_macs(int type) | ||
224 | { | ||
225 | switch (type) { | ||
226 | case ALCHEMY_CPU_AU1000: | ||
227 | case ALCHEMY_CPU_AU1500: | ||
228 | case ALCHEMY_CPU_AU1550: | ||
229 | return 2; | ||
230 | case ALCHEMY_CPU_AU1100: | ||
231 | return 1; | ||
232 | } | ||
233 | return 0; | ||
234 | } | ||
235 | |||
183 | /* arch/mips/au1000/common/clocks.c */ | 236 | /* arch/mips/au1000/common/clocks.c */ |
184 | extern void set_au1x00_speed(unsigned int new_freq); | 237 | extern void set_au1x00_speed(unsigned int new_freq); |
185 | extern unsigned int get_au1x00_speed(void); | 238 | extern unsigned int get_au1x00_speed(void); |
@@ -630,38 +683,42 @@ enum soc_au1200_ints { | |||
630 | 683 | ||
631 | /* | 684 | /* |
632 | * Physical base addresses for integrated peripherals | 685 | * Physical base addresses for integrated peripherals |
686 | * 0..au1000 1..au1500 2..au1100 3..au1550 4..au1200 | ||
633 | */ | 687 | */ |
634 | 688 | ||
689 | #define AU1000_AC97_PHYS_ADDR 0x10000000 /* 012 */ | ||
690 | #define AU1000_USBD_PHYS_ADDR 0x10200000 /* 0123 */ | ||
691 | #define AU1000_IC0_PHYS_ADDR 0x10400000 /* 01234 */ | ||
692 | #define AU1000_MAC0_PHYS_ADDR 0x10500000 /* 023 */ | ||
693 | #define AU1000_MAC1_PHYS_ADDR 0x10510000 /* 023 */ | ||
694 | #define AU1000_MACEN_PHYS_ADDR 0x10520000 /* 023 */ | ||
695 | #define AU1100_SD0_PHYS_ADDR 0x10600000 /* 24 */ | ||
696 | #define AU1100_SD1_PHYS_ADDR 0x10680000 /* 24 */ | ||
697 | #define AU1000_I2S_PHYS_ADDR 0x11000000 /* 02 */ | ||
698 | #define AU1500_MAC0_PHYS_ADDR 0x11500000 /* 1 */ | ||
699 | #define AU1500_MAC1_PHYS_ADDR 0x11510000 /* 1 */ | ||
700 | #define AU1500_MACEN_PHYS_ADDR 0x11520000 /* 1 */ | ||
701 | #define AU1000_UART0_PHYS_ADDR 0x11100000 /* 01234 */ | ||
702 | #define AU1000_UART1_PHYS_ADDR 0x11200000 /* 0234 */ | ||
703 | #define AU1000_UART2_PHYS_ADDR 0x11300000 /* 0 */ | ||
704 | #define AU1000_UART3_PHYS_ADDR 0x11400000 /* 0123 */ | ||
705 | #define AU1500_GPIO2_PHYS_ADDR 0x11700000 /* 1234 */ | ||
706 | #define AU1000_IC1_PHYS_ADDR 0x11800000 /* 01234 */ | ||
707 | #define AU1000_SYS_PHYS_ADDR 0x11900000 /* 01234 */ | ||
708 | #define AU1000_DMA_PHYS_ADDR 0x14002000 /* 012 */ | ||
709 | #define AU1550_DBDMA_PHYS_ADDR 0x14002000 /* 34 */ | ||
710 | #define AU1550_DBDMA_CONF_PHYS_ADDR 0x14003000 /* 34 */ | ||
711 | #define AU1000_MACDMA0_PHYS_ADDR 0x14004000 /* 0123 */ | ||
712 | #define AU1000_MACDMA1_PHYS_ADDR 0x14004200 /* 0123 */ | ||
713 | |||
714 | |||
635 | #ifdef CONFIG_SOC_AU1000 | 715 | #ifdef CONFIG_SOC_AU1000 |
636 | #define MEM_PHYS_ADDR 0x14000000 | 716 | #define MEM_PHYS_ADDR 0x14000000 |
637 | #define STATIC_MEM_PHYS_ADDR 0x14001000 | 717 | #define STATIC_MEM_PHYS_ADDR 0x14001000 |
638 | #define DMA0_PHYS_ADDR 0x14002000 | ||
639 | #define DMA1_PHYS_ADDR 0x14002100 | ||
640 | #define DMA2_PHYS_ADDR 0x14002200 | ||
641 | #define DMA3_PHYS_ADDR 0x14002300 | ||
642 | #define DMA4_PHYS_ADDR 0x14002400 | ||
643 | #define DMA5_PHYS_ADDR 0x14002500 | ||
644 | #define DMA6_PHYS_ADDR 0x14002600 | ||
645 | #define DMA7_PHYS_ADDR 0x14002700 | ||
646 | #define IC0_PHYS_ADDR 0x10400000 | ||
647 | #define IC1_PHYS_ADDR 0x11800000 | ||
648 | #define AC97_PHYS_ADDR 0x10000000 | ||
649 | #define USBH_PHYS_ADDR 0x10100000 | 718 | #define USBH_PHYS_ADDR 0x10100000 |
650 | #define USBD_PHYS_ADDR 0x10200000 | ||
651 | #define IRDA_PHYS_ADDR 0x10300000 | 719 | #define IRDA_PHYS_ADDR 0x10300000 |
652 | #define MAC0_PHYS_ADDR 0x10500000 | ||
653 | #define MAC1_PHYS_ADDR 0x10510000 | ||
654 | #define MACEN_PHYS_ADDR 0x10520000 | ||
655 | #define MACDMA0_PHYS_ADDR 0x14004000 | ||
656 | #define MACDMA1_PHYS_ADDR 0x14004200 | ||
657 | #define I2S_PHYS_ADDR 0x11000000 | ||
658 | #define UART0_PHYS_ADDR 0x11100000 | ||
659 | #define UART1_PHYS_ADDR 0x11200000 | ||
660 | #define UART2_PHYS_ADDR 0x11300000 | ||
661 | #define UART3_PHYS_ADDR 0x11400000 | ||
662 | #define SSI0_PHYS_ADDR 0x11600000 | 720 | #define SSI0_PHYS_ADDR 0x11600000 |
663 | #define SSI1_PHYS_ADDR 0x11680000 | 721 | #define SSI1_PHYS_ADDR 0x11680000 |
664 | #define SYS_PHYS_ADDR 0x11900000 | ||
665 | #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL | 722 | #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL |
666 | #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL | 723 | #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL |
667 | #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL | 724 | #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL |
@@ -672,30 +729,8 @@ enum soc_au1200_ints { | |||
672 | #ifdef CONFIG_SOC_AU1500 | 729 | #ifdef CONFIG_SOC_AU1500 |
673 | #define MEM_PHYS_ADDR 0x14000000 | 730 | #define MEM_PHYS_ADDR 0x14000000 |
674 | #define STATIC_MEM_PHYS_ADDR 0x14001000 | 731 | #define STATIC_MEM_PHYS_ADDR 0x14001000 |
675 | #define DMA0_PHYS_ADDR 0x14002000 | ||
676 | #define DMA1_PHYS_ADDR 0x14002100 | ||
677 | #define DMA2_PHYS_ADDR 0x14002200 | ||
678 | #define DMA3_PHYS_ADDR 0x14002300 | ||
679 | #define DMA4_PHYS_ADDR 0x14002400 | ||
680 | #define DMA5_PHYS_ADDR 0x14002500 | ||
681 | #define DMA6_PHYS_ADDR 0x14002600 | ||
682 | #define DMA7_PHYS_ADDR 0x14002700 | ||
683 | #define IC0_PHYS_ADDR 0x10400000 | ||
684 | #define IC1_PHYS_ADDR 0x11800000 | ||
685 | #define AC97_PHYS_ADDR 0x10000000 | ||
686 | #define USBH_PHYS_ADDR 0x10100000 | 732 | #define USBH_PHYS_ADDR 0x10100000 |
687 | #define USBD_PHYS_ADDR 0x10200000 | ||
688 | #define PCI_PHYS_ADDR 0x14005000 | 733 | #define PCI_PHYS_ADDR 0x14005000 |
689 | #define MAC0_PHYS_ADDR 0x11500000 | ||
690 | #define MAC1_PHYS_ADDR 0x11510000 | ||
691 | #define MACEN_PHYS_ADDR 0x11520000 | ||
692 | #define MACDMA0_PHYS_ADDR 0x14004000 | ||
693 | #define MACDMA1_PHYS_ADDR 0x14004200 | ||
694 | #define I2S_PHYS_ADDR 0x11000000 | ||
695 | #define UART0_PHYS_ADDR 0x11100000 | ||
696 | #define UART3_PHYS_ADDR 0x11400000 | ||
697 | #define GPIO2_PHYS_ADDR 0x11700000 | ||
698 | #define SYS_PHYS_ADDR 0x11900000 | ||
699 | #define PCI_MEM_PHYS_ADDR 0x400000000ULL | 734 | #define PCI_MEM_PHYS_ADDR 0x400000000ULL |
700 | #define PCI_IO_PHYS_ADDR 0x500000000ULL | 735 | #define PCI_IO_PHYS_ADDR 0x500000000ULL |
701 | #define PCI_CONFIG0_PHYS_ADDR 0x600000000ULL | 736 | #define PCI_CONFIG0_PHYS_ADDR 0x600000000ULL |
@@ -710,34 +745,10 @@ enum soc_au1200_ints { | |||
710 | #ifdef CONFIG_SOC_AU1100 | 745 | #ifdef CONFIG_SOC_AU1100 |
711 | #define MEM_PHYS_ADDR 0x14000000 | 746 | #define MEM_PHYS_ADDR 0x14000000 |
712 | #define STATIC_MEM_PHYS_ADDR 0x14001000 | 747 | #define STATIC_MEM_PHYS_ADDR 0x14001000 |
713 | #define DMA0_PHYS_ADDR 0x14002000 | ||
714 | #define DMA1_PHYS_ADDR 0x14002100 | ||
715 | #define DMA2_PHYS_ADDR 0x14002200 | ||
716 | #define DMA3_PHYS_ADDR 0x14002300 | ||
717 | #define DMA4_PHYS_ADDR 0x14002400 | ||
718 | #define DMA5_PHYS_ADDR 0x14002500 | ||
719 | #define DMA6_PHYS_ADDR 0x14002600 | ||
720 | #define DMA7_PHYS_ADDR 0x14002700 | ||
721 | #define IC0_PHYS_ADDR 0x10400000 | ||
722 | #define SD0_PHYS_ADDR 0x10600000 | ||
723 | #define SD1_PHYS_ADDR 0x10680000 | ||
724 | #define IC1_PHYS_ADDR 0x11800000 | ||
725 | #define AC97_PHYS_ADDR 0x10000000 | ||
726 | #define USBH_PHYS_ADDR 0x10100000 | 748 | #define USBH_PHYS_ADDR 0x10100000 |
727 | #define USBD_PHYS_ADDR 0x10200000 | ||
728 | #define IRDA_PHYS_ADDR 0x10300000 | 749 | #define IRDA_PHYS_ADDR 0x10300000 |
729 | #define MAC0_PHYS_ADDR 0x10500000 | ||
730 | #define MACEN_PHYS_ADDR 0x10520000 | ||
731 | #define MACDMA0_PHYS_ADDR 0x14004000 | ||
732 | #define MACDMA1_PHYS_ADDR 0x14004200 | ||
733 | #define I2S_PHYS_ADDR 0x11000000 | ||
734 | #define UART0_PHYS_ADDR 0x11100000 | ||
735 | #define UART1_PHYS_ADDR 0x11200000 | ||
736 | #define UART3_PHYS_ADDR 0x11400000 | ||
737 | #define SSI0_PHYS_ADDR 0x11600000 | 750 | #define SSI0_PHYS_ADDR 0x11600000 |
738 | #define SSI1_PHYS_ADDR 0x11680000 | 751 | #define SSI1_PHYS_ADDR 0x11680000 |
739 | #define GPIO2_PHYS_ADDR 0x11700000 | ||
740 | #define SYS_PHYS_ADDR 0x11900000 | ||
741 | #define LCD_PHYS_ADDR 0x15000000 | 752 | #define LCD_PHYS_ADDR 0x15000000 |
742 | #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL | 753 | #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL |
743 | #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL | 754 | #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL |
@@ -749,22 +760,8 @@ enum soc_au1200_ints { | |||
749 | #ifdef CONFIG_SOC_AU1550 | 760 | #ifdef CONFIG_SOC_AU1550 |
750 | #define MEM_PHYS_ADDR 0x14000000 | 761 | #define MEM_PHYS_ADDR 0x14000000 |
751 | #define STATIC_MEM_PHYS_ADDR 0x14001000 | 762 | #define STATIC_MEM_PHYS_ADDR 0x14001000 |
752 | #define IC0_PHYS_ADDR 0x10400000 | ||
753 | #define IC1_PHYS_ADDR 0x11800000 | ||
754 | #define USBH_PHYS_ADDR 0x14020000 | 763 | #define USBH_PHYS_ADDR 0x14020000 |
755 | #define USBD_PHYS_ADDR 0x10200000 | ||
756 | #define PCI_PHYS_ADDR 0x14005000 | 764 | #define PCI_PHYS_ADDR 0x14005000 |
757 | #define MAC0_PHYS_ADDR 0x10500000 | ||
758 | #define MAC1_PHYS_ADDR 0x10510000 | ||
759 | #define MACEN_PHYS_ADDR 0x10520000 | ||
760 | #define MACDMA0_PHYS_ADDR 0x14004000 | ||
761 | #define MACDMA1_PHYS_ADDR 0x14004200 | ||
762 | #define UART0_PHYS_ADDR 0x11100000 | ||
763 | #define UART1_PHYS_ADDR 0x11200000 | ||
764 | #define UART3_PHYS_ADDR 0x11400000 | ||
765 | #define GPIO2_PHYS_ADDR 0x11700000 | ||
766 | #define SYS_PHYS_ADDR 0x11900000 | ||
767 | #define DDMA_PHYS_ADDR 0x14002000 | ||
768 | #define PE_PHYS_ADDR 0x14008000 | 765 | #define PE_PHYS_ADDR 0x14008000 |
769 | #define PSC0_PHYS_ADDR 0x11A00000 | 766 | #define PSC0_PHYS_ADDR 0x11A00000 |
770 | #define PSC1_PHYS_ADDR 0x11B00000 | 767 | #define PSC1_PHYS_ADDR 0x11B00000 |
@@ -786,19 +783,10 @@ enum soc_au1200_ints { | |||
786 | #define STATIC_MEM_PHYS_ADDR 0x14001000 | 783 | #define STATIC_MEM_PHYS_ADDR 0x14001000 |
787 | #define AES_PHYS_ADDR 0x10300000 | 784 | #define AES_PHYS_ADDR 0x10300000 |
788 | #define CIM_PHYS_ADDR 0x14004000 | 785 | #define CIM_PHYS_ADDR 0x14004000 |
789 | #define IC0_PHYS_ADDR 0x10400000 | ||
790 | #define IC1_PHYS_ADDR 0x11800000 | ||
791 | #define USBM_PHYS_ADDR 0x14020000 | 786 | #define USBM_PHYS_ADDR 0x14020000 |
792 | #define USBH_PHYS_ADDR 0x14020100 | 787 | #define USBH_PHYS_ADDR 0x14020100 |
793 | #define UART0_PHYS_ADDR 0x11100000 | ||
794 | #define UART1_PHYS_ADDR 0x11200000 | ||
795 | #define GPIO2_PHYS_ADDR 0x11700000 | ||
796 | #define SYS_PHYS_ADDR 0x11900000 | ||
797 | #define DDMA_PHYS_ADDR 0x14002000 | ||
798 | #define PSC0_PHYS_ADDR 0x11A00000 | 788 | #define PSC0_PHYS_ADDR 0x11A00000 |
799 | #define PSC1_PHYS_ADDR 0x11B00000 | 789 | #define PSC1_PHYS_ADDR 0x11B00000 |
800 | #define SD0_PHYS_ADDR 0x10600000 | ||
801 | #define SD1_PHYS_ADDR 0x10680000 | ||
802 | #define LCD_PHYS_ADDR 0x15000000 | 790 | #define LCD_PHYS_ADDR 0x15000000 |
803 | #define SWCNT_PHYS_ADDR 0x1110010C | 791 | #define SWCNT_PHYS_ADDR 0x1110010C |
804 | #define MAEFE_PHYS_ADDR 0x14012000 | 792 | #define MAEFE_PHYS_ADDR 0x14012000 |
@@ -835,183 +823,43 @@ enum soc_au1200_ints { | |||
835 | #endif | 823 | #endif |
836 | 824 | ||
837 | 825 | ||
838 | /* Interrupt Controller register offsets */ | ||
839 | #define IC_CFG0RD 0x40 | ||
840 | #define IC_CFG0SET 0x40 | ||
841 | #define IC_CFG0CLR 0x44 | ||
842 | #define IC_CFG1RD 0x48 | ||
843 | #define IC_CFG1SET 0x48 | ||
844 | #define IC_CFG1CLR 0x4C | ||
845 | #define IC_CFG2RD 0x50 | ||
846 | #define IC_CFG2SET 0x50 | ||
847 | #define IC_CFG2CLR 0x54 | ||
848 | #define IC_REQ0INT 0x54 | ||
849 | #define IC_SRCRD 0x58 | ||
850 | #define IC_SRCSET 0x58 | ||
851 | #define IC_SRCCLR 0x5C | ||
852 | #define IC_REQ1INT 0x5C | ||
853 | #define IC_ASSIGNRD 0x60 | ||
854 | #define IC_ASSIGNSET 0x60 | ||
855 | #define IC_ASSIGNCLR 0x64 | ||
856 | #define IC_WAKERD 0x68 | ||
857 | #define IC_WAKESET 0x68 | ||
858 | #define IC_WAKECLR 0x6C | ||
859 | #define IC_MASKRD 0x70 | ||
860 | #define IC_MASKSET 0x70 | ||
861 | #define IC_MASKCLR 0x74 | ||
862 | #define IC_RISINGRD 0x78 | ||
863 | #define IC_RISINGCLR 0x78 | ||
864 | #define IC_FALLINGRD 0x7C | ||
865 | #define IC_FALLINGCLR 0x7C | ||
866 | #define IC_TESTBIT 0x80 | ||
867 | |||
868 | |||
869 | /* Interrupt Controller 0 */ | ||
870 | #define IC0_CFG0RD 0xB0400040 | ||
871 | #define IC0_CFG0SET 0xB0400040 | ||
872 | #define IC0_CFG0CLR 0xB0400044 | ||
873 | |||
874 | #define IC0_CFG1RD 0xB0400048 | ||
875 | #define IC0_CFG1SET 0xB0400048 | ||
876 | #define IC0_CFG1CLR 0xB040004C | ||
877 | |||
878 | #define IC0_CFG2RD 0xB0400050 | ||
879 | #define IC0_CFG2SET 0xB0400050 | ||
880 | #define IC0_CFG2CLR 0xB0400054 | ||
881 | |||
882 | #define IC0_REQ0INT 0xB0400054 | ||
883 | #define IC0_SRCRD 0xB0400058 | ||
884 | #define IC0_SRCSET 0xB0400058 | ||
885 | #define IC0_SRCCLR 0xB040005C | ||
886 | #define IC0_REQ1INT 0xB040005C | ||
887 | |||
888 | #define IC0_ASSIGNRD 0xB0400060 | ||
889 | #define IC0_ASSIGNSET 0xB0400060 | ||
890 | #define IC0_ASSIGNCLR 0xB0400064 | ||
891 | |||
892 | #define IC0_WAKERD 0xB0400068 | ||
893 | #define IC0_WAKESET 0xB0400068 | ||
894 | #define IC0_WAKECLR 0xB040006C | ||
895 | |||
896 | #define IC0_MASKRD 0xB0400070 | ||
897 | #define IC0_MASKSET 0xB0400070 | ||
898 | #define IC0_MASKCLR 0xB0400074 | ||
899 | |||
900 | #define IC0_RISINGRD 0xB0400078 | ||
901 | #define IC0_RISINGCLR 0xB0400078 | ||
902 | #define IC0_FALLINGRD 0xB040007C | ||
903 | #define IC0_FALLINGCLR 0xB040007C | ||
904 | |||
905 | #define IC0_TESTBIT 0xB0400080 | ||
906 | |||
907 | /* Interrupt Controller 1 */ | ||
908 | #define IC1_CFG0RD 0xB1800040 | ||
909 | #define IC1_CFG0SET 0xB1800040 | ||
910 | #define IC1_CFG0CLR 0xB1800044 | ||
911 | |||
912 | #define IC1_CFG1RD 0xB1800048 | ||
913 | #define IC1_CFG1SET 0xB1800048 | ||
914 | #define IC1_CFG1CLR 0xB180004C | ||
915 | |||
916 | #define IC1_CFG2RD 0xB1800050 | ||
917 | #define IC1_CFG2SET 0xB1800050 | ||
918 | #define IC1_CFG2CLR 0xB1800054 | ||
919 | |||
920 | #define IC1_REQ0INT 0xB1800054 | ||
921 | #define IC1_SRCRD 0xB1800058 | ||
922 | #define IC1_SRCSET 0xB1800058 | ||
923 | #define IC1_SRCCLR 0xB180005C | ||
924 | #define IC1_REQ1INT 0xB180005C | ||
925 | |||
926 | #define IC1_ASSIGNRD 0xB1800060 | ||
927 | #define IC1_ASSIGNSET 0xB1800060 | ||
928 | #define IC1_ASSIGNCLR 0xB1800064 | ||
929 | |||
930 | #define IC1_WAKERD 0xB1800068 | ||
931 | #define IC1_WAKESET 0xB1800068 | ||
932 | #define IC1_WAKECLR 0xB180006C | ||
933 | |||
934 | #define IC1_MASKRD 0xB1800070 | ||
935 | #define IC1_MASKSET 0xB1800070 | ||
936 | #define IC1_MASKCLR 0xB1800074 | ||
937 | |||
938 | #define IC1_RISINGRD 0xB1800078 | ||
939 | #define IC1_RISINGCLR 0xB1800078 | ||
940 | #define IC1_FALLINGRD 0xB180007C | ||
941 | #define IC1_FALLINGCLR 0xB180007C | ||
942 | |||
943 | #define IC1_TESTBIT 0xB1800080 | ||
944 | 826 | ||
945 | 827 | ||
946 | /* Au1000 */ | 828 | /* Au1000 */ |
947 | #ifdef CONFIG_SOC_AU1000 | 829 | #ifdef CONFIG_SOC_AU1000 |
948 | 830 | ||
949 | #define UART0_ADDR 0xB1100000 | ||
950 | #define UART3_ADDR 0xB1400000 | ||
951 | |||
952 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ | 831 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ |
953 | #define USB_HOST_CONFIG 0xB017FFFC | 832 | #define USB_HOST_CONFIG 0xB017FFFC |
954 | #define FOR_PLATFORM_C_USB_HOST_INT AU1000_USB_HOST_INT | 833 | #define FOR_PLATFORM_C_USB_HOST_INT AU1000_USB_HOST_INT |
955 | |||
956 | #define AU1000_ETH0_BASE 0xB0500000 | ||
957 | #define AU1000_ETH1_BASE 0xB0510000 | ||
958 | #define AU1000_MAC0_ENABLE 0xB0520000 | ||
959 | #define AU1000_MAC1_ENABLE 0xB0520004 | ||
960 | #define NUM_ETH_INTERFACES 2 | ||
961 | #endif /* CONFIG_SOC_AU1000 */ | 834 | #endif /* CONFIG_SOC_AU1000 */ |
962 | 835 | ||
963 | /* Au1500 */ | 836 | /* Au1500 */ |
964 | #ifdef CONFIG_SOC_AU1500 | 837 | #ifdef CONFIG_SOC_AU1500 |
965 | 838 | ||
966 | #define UART0_ADDR 0xB1100000 | ||
967 | #define UART3_ADDR 0xB1400000 | ||
968 | |||
969 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ | 839 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ |
970 | #define USB_HOST_CONFIG 0xB017fffc | 840 | #define USB_HOST_CONFIG 0xB017fffc |
971 | #define FOR_PLATFORM_C_USB_HOST_INT AU1500_USB_HOST_INT | 841 | #define FOR_PLATFORM_C_USB_HOST_INT AU1500_USB_HOST_INT |
972 | |||
973 | #define AU1500_ETH0_BASE 0xB1500000 | ||
974 | #define AU1500_ETH1_BASE 0xB1510000 | ||
975 | #define AU1500_MAC0_ENABLE 0xB1520000 | ||
976 | #define AU1500_MAC1_ENABLE 0xB1520004 | ||
977 | #define NUM_ETH_INTERFACES 2 | ||
978 | #endif /* CONFIG_SOC_AU1500 */ | 842 | #endif /* CONFIG_SOC_AU1500 */ |
979 | 843 | ||
980 | /* Au1100 */ | 844 | /* Au1100 */ |
981 | #ifdef CONFIG_SOC_AU1100 | 845 | #ifdef CONFIG_SOC_AU1100 |
982 | 846 | ||
983 | #define UART0_ADDR 0xB1100000 | ||
984 | #define UART3_ADDR 0xB1400000 | ||
985 | |||
986 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ | 847 | #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ |
987 | #define USB_HOST_CONFIG 0xB017FFFC | 848 | #define USB_HOST_CONFIG 0xB017FFFC |
988 | #define FOR_PLATFORM_C_USB_HOST_INT AU1100_USB_HOST_INT | 849 | #define FOR_PLATFORM_C_USB_HOST_INT AU1100_USB_HOST_INT |
989 | |||
990 | #define AU1100_ETH0_BASE 0xB0500000 | ||
991 | #define AU1100_MAC0_ENABLE 0xB0520000 | ||
992 | #define NUM_ETH_INTERFACES 1 | ||
993 | #endif /* CONFIG_SOC_AU1100 */ | 850 | #endif /* CONFIG_SOC_AU1100 */ |
994 | 851 | ||
995 | #ifdef CONFIG_SOC_AU1550 | 852 | #ifdef CONFIG_SOC_AU1550 |
996 | #define UART0_ADDR 0xB1100000 | ||
997 | 853 | ||
998 | #define USB_OHCI_BASE 0x14020000 /* phys addr for ioremap */ | 854 | #define USB_OHCI_BASE 0x14020000 /* phys addr for ioremap */ |
999 | #define USB_OHCI_LEN 0x00060000 | 855 | #define USB_OHCI_LEN 0x00060000 |
1000 | #define USB_HOST_CONFIG 0xB4027ffc | 856 | #define USB_HOST_CONFIG 0xB4027ffc |
1001 | #define FOR_PLATFORM_C_USB_HOST_INT AU1550_USB_HOST_INT | 857 | #define FOR_PLATFORM_C_USB_HOST_INT AU1550_USB_HOST_INT |
1002 | |||
1003 | #define AU1550_ETH0_BASE 0xB0500000 | ||
1004 | #define AU1550_ETH1_BASE 0xB0510000 | ||
1005 | #define AU1550_MAC0_ENABLE 0xB0520000 | ||
1006 | #define AU1550_MAC1_ENABLE 0xB0520004 | ||
1007 | #define NUM_ETH_INTERFACES 2 | ||
1008 | #endif /* CONFIG_SOC_AU1550 */ | 858 | #endif /* CONFIG_SOC_AU1550 */ |
1009 | 859 | ||
1010 | 860 | ||
1011 | #ifdef CONFIG_SOC_AU1200 | 861 | #ifdef CONFIG_SOC_AU1200 |
1012 | 862 | ||
1013 | #define UART0_ADDR 0xB1100000 | ||
1014 | |||
1015 | #define USB_UOC_BASE 0x14020020 | 863 | #define USB_UOC_BASE 0x14020020 |
1016 | #define USB_UOC_LEN 0x20 | 864 | #define USB_UOC_LEN 0x20 |
1017 | #define USB_OHCI_BASE 0x14020100 | 865 | #define USB_OHCI_BASE 0x14020100 |
@@ -1504,22 +1352,6 @@ enum soc_au1200_ints { | |||
1504 | #define SYS_PINFUNC_S1B (1 << 2) | 1352 | #define SYS_PINFUNC_S1B (1 << 2) |
1505 | #endif | 1353 | #endif |
1506 | 1354 | ||
1507 | #define SYS_TRIOUTRD 0xB1900100 | ||
1508 | #define SYS_TRIOUTCLR 0xB1900100 | ||
1509 | #define SYS_OUTPUTRD 0xB1900108 | ||
1510 | #define SYS_OUTPUTSET 0xB1900108 | ||
1511 | #define SYS_OUTPUTCLR 0xB190010C | ||
1512 | #define SYS_PINSTATERD 0xB1900110 | ||
1513 | #define SYS_PININPUTEN 0xB1900110 | ||
1514 | |||
1515 | /* GPIO2, Au1500, Au1550 only */ | ||
1516 | #define GPIO2_BASE 0xB1700000 | ||
1517 | #define GPIO2_DIR (GPIO2_BASE + 0) | ||
1518 | #define GPIO2_OUTPUT (GPIO2_BASE + 8) | ||
1519 | #define GPIO2_PINSTATE (GPIO2_BASE + 0xC) | ||
1520 | #define GPIO2_INTENABLE (GPIO2_BASE + 0x10) | ||
1521 | #define GPIO2_ENABLE (GPIO2_BASE + 0x14) | ||
1522 | |||
1523 | /* Power Management */ | 1355 | /* Power Management */ |
1524 | #define SYS_SCRATCH0 0xB1900018 | 1356 | #define SYS_SCRATCH0 0xB1900018 |
1525 | #define SYS_SCRATCH1 0xB190001C | 1357 | #define SYS_SCRATCH1 0xB190001C |
@@ -1635,12 +1467,6 @@ enum soc_au1200_ints { | |||
1635 | # define AC97C_RS (1 << 1) | 1467 | # define AC97C_RS (1 << 1) |
1636 | # define AC97C_CE (1 << 0) | 1468 | # define AC97C_CE (1 << 0) |
1637 | 1469 | ||
1638 | /* Secure Digital (SD) Controller */ | ||
1639 | #define SD0_XMIT_FIFO 0xB0600000 | ||
1640 | #define SD0_RECV_FIFO 0xB0600004 | ||
1641 | #define SD1_XMIT_FIFO 0xB0680000 | ||
1642 | #define SD1_RECV_FIFO 0xB0680004 | ||
1643 | |||
1644 | #if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550) | 1470 | #if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550) |
1645 | /* Au1500 PCI Controller */ | 1471 | /* Au1500 PCI Controller */ |
1646 | #define Au1500_CFG_BASE 0xB4005000 /* virtual, KSEG1 addr */ | 1472 | #define Au1500_CFG_BASE 0xB4005000 /* virtual, KSEG1 addr */ |
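Note: the au1000.h changes above fold the per-SoC *_PHYS_ADDR duplicates into one shared table and add small helpers (alchemy_get_uarts(), alchemy_uart_enable()/disable(), alchemy_get_macs()) so platform code can key off the cputype it was handed rather than compile-time SoC selection. A stand-alone sketch of the same lookup logic; the enum values are invented for illustration and do not match the kernel's ALCHEMY_CPU_* numbering:

#include <stdio.h>

enum { CPU_AU1000, CPU_AU1500, CPU_AU1100, CPU_AU1550, CPU_AU1200 };

static int uarts_for(int type)
{
        switch (type) {
        case CPU_AU1000:
                return 4;
        case CPU_AU1500:
        case CPU_AU1200:
                return 2;
        case CPU_AU1100:
        case CPU_AU1550:
                return 3;
        }
        return 0;
}

static int macs_for(int type)
{
        switch (type) {
        case CPU_AU1000:
        case CPU_AU1500:
        case CPU_AU1550:
                return 2;
        case CPU_AU1100:
                return 1;
        }
        return 0;       /* Au1200: no on-chip MAC, matching the header's switch */
}

int main(void)
{
        printf("Au1100: %d UARTs, %d MAC(s)\n",
               uarts_for(CPU_AU1100), macs_for(CPU_AU1100));
        return 0;
}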
diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h index c333b4e1cd44..59f5b55b2200 100644 --- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h +++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h | |||
@@ -37,10 +37,6 @@ | |||
37 | 37 | ||
38 | #define NUM_AU1000_DMA_CHANNELS 8 | 38 | #define NUM_AU1000_DMA_CHANNELS 8 |
39 | 39 | ||
40 | /* DMA Channel Base Addresses */ | ||
41 | #define DMA_CHANNEL_BASE 0xB4002000 | ||
42 | #define DMA_CHANNEL_LEN 0x00000100 | ||
43 | |||
44 | /* DMA Channel Register Offsets */ | 40 | /* DMA Channel Register Offsets */ |
45 | #define DMA_MODE_SET 0x00000000 | 41 | #define DMA_MODE_SET 0x00000000 |
46 | #define DMA_MODE_READ DMA_MODE_SET | 42 | #define DMA_MODE_READ DMA_MODE_SET |
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h index c8a553a36ba4..2fdacfe85e23 100644 --- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h +++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h | |||
@@ -37,14 +37,6 @@ | |||
37 | 37 | ||
38 | #ifndef _LANGUAGE_ASSEMBLY | 38 | #ifndef _LANGUAGE_ASSEMBLY |
39 | 39 | ||
40 | /* | ||
41 | * The DMA base addresses. | ||
42 | * The channels are every 256 bytes (0x0100) from the channel 0 base. | ||
43 | * Interrupt status/enable is bits 15:0 for channels 15 to zero. | ||
44 | */ | ||
45 | #define DDMA_GLOBAL_BASE 0xb4003000 | ||
46 | #define DDMA_CHANNEL_BASE 0xb4002000 | ||
47 | |||
48 | typedef volatile struct dbdma_global { | 40 | typedef volatile struct dbdma_global { |
49 | u32 ddma_config; | 41 | u32 ddma_config; |
50 | u32 ddma_intstat; | 42 | u32 ddma_intstat; |
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h index 62d2f136d941..1f41a522906d 100644 --- a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h +++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h | |||
@@ -24,6 +24,23 @@ | |||
24 | 24 | ||
25 | #define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off)) | 25 | #define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off)) |
26 | 26 | ||
27 | /* GPIO1 registers within SYS_ area */ | ||
28 | #define SYS_TRIOUTRD 0x100 | ||
29 | #define SYS_TRIOUTCLR 0x100 | ||
30 | #define SYS_OUTPUTRD 0x108 | ||
31 | #define SYS_OUTPUTSET 0x108 | ||
32 | #define SYS_OUTPUTCLR 0x10C | ||
33 | #define SYS_PINSTATERD 0x110 | ||
34 | #define SYS_PININPUTEN 0x110 | ||
35 | |||
36 | /* register offsets within GPIO2 block */ | ||
37 | #define GPIO2_DIR 0x00 | ||
38 | #define GPIO2_OUTPUT 0x08 | ||
39 | #define GPIO2_PINSTATE 0x0C | ||
40 | #define GPIO2_INTENABLE 0x10 | ||
41 | #define GPIO2_ENABLE 0x14 | ||
42 | |||
43 | struct gpio; | ||
27 | 44 | ||
28 | static inline int au1000_gpio1_to_irq(int gpio) | 45 | static inline int au1000_gpio1_to_irq(int gpio) |
29 | { | 46 | { |
@@ -200,23 +217,26 @@ static inline int au1200_irq_to_gpio(int irq) | |||
200 | */ | 217 | */ |
201 | static inline void alchemy_gpio1_set_value(int gpio, int v) | 218 | static inline void alchemy_gpio1_set_value(int gpio, int v) |
202 | { | 219 | { |
220 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); | ||
203 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); | 221 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); |
204 | unsigned long r = v ? SYS_OUTPUTSET : SYS_OUTPUTCLR; | 222 | unsigned long r = v ? SYS_OUTPUTSET : SYS_OUTPUTCLR; |
205 | au_writel(mask, r); | 223 | __raw_writel(mask, base + r); |
206 | au_sync(); | 224 | wmb(); |
207 | } | 225 | } |
208 | 226 | ||
209 | static inline int alchemy_gpio1_get_value(int gpio) | 227 | static inline int alchemy_gpio1_get_value(int gpio) |
210 | { | 228 | { |
229 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); | ||
211 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); | 230 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); |
212 | return au_readl(SYS_PINSTATERD) & mask; | 231 | return __raw_readl(base + SYS_PINSTATERD) & mask; |
213 | } | 232 | } |
214 | 233 | ||
215 | static inline int alchemy_gpio1_direction_input(int gpio) | 234 | static inline int alchemy_gpio1_direction_input(int gpio) |
216 | { | 235 | { |
236 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); | ||
217 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); | 237 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); |
218 | au_writel(mask, SYS_TRIOUTCLR); | 238 | __raw_writel(mask, base + SYS_TRIOUTCLR); |
219 | au_sync(); | 239 | wmb(); |
220 | return 0; | 240 | return 0; |
221 | } | 241 | } |
222 | 242 | ||
@@ -257,27 +277,31 @@ static inline int alchemy_gpio1_to_irq(int gpio) | |||
257 | */ | 277 | */ |
258 | static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out) | 278 | static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out) |
259 | { | 279 | { |
280 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); | ||
260 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE); | 281 | unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE); |
261 | unsigned long d = au_readl(GPIO2_DIR); | 282 | unsigned long d = __raw_readl(base + GPIO2_DIR); |
283 | |||
262 | if (to_out) | 284 | if (to_out) |
263 | d |= mask; | 285 | d |= mask; |
264 | else | 286 | else |
265 | d &= ~mask; | 287 | d &= ~mask; |
266 | au_writel(d, GPIO2_DIR); | 288 | __raw_writel(d, base + GPIO2_DIR); |
267 | au_sync(); | 289 | wmb(); |
268 | } | 290 | } |
269 | 291 | ||
270 | static inline void alchemy_gpio2_set_value(int gpio, int v) | 292 | static inline void alchemy_gpio2_set_value(int gpio, int v) |
271 | { | 293 | { |
294 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); | ||
272 | unsigned long mask; | 295 | unsigned long mask; |
273 | mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE); | 296 | mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE); |
274 | au_writel(mask, GPIO2_OUTPUT); | 297 | __raw_writel(mask, base + GPIO2_OUTPUT); |
275 | au_sync(); | 298 | wmb(); |
276 | } | 299 | } |
277 | 300 | ||
278 | static inline int alchemy_gpio2_get_value(int gpio) | 301 | static inline int alchemy_gpio2_get_value(int gpio) |
279 | { | 302 | { |
280 | return au_readl(GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); | 303 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); |
304 | return __raw_readl(base + GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); | ||
281 | } | 305 | } |
282 | 306 | ||
283 | static inline int alchemy_gpio2_direction_input(int gpio) | 307 | static inline int alchemy_gpio2_direction_input(int gpio) |
@@ -329,21 +353,23 @@ static inline int alchemy_gpio2_to_irq(int gpio) | |||
329 | */ | 353 | */ |
330 | static inline void alchemy_gpio1_input_enable(void) | 354 | static inline void alchemy_gpio1_input_enable(void) |
331 | { | 355 | { |
332 | au_writel(0, SYS_PININPUTEN); /* the write op is key */ | 356 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); |
333 | au_sync(); | 357 | __raw_writel(0, base + SYS_PININPUTEN); /* the write op is key */ |
358 | wmb(); | ||
334 | } | 359 | } |
335 | 360 | ||
336 | /* GPIO2 shared interrupts and control */ | 361 | /* GPIO2 shared interrupts and control */ |
337 | 362 | ||
338 | static inline void __alchemy_gpio2_mod_int(int gpio2, int en) | 363 | static inline void __alchemy_gpio2_mod_int(int gpio2, int en) |
339 | { | 364 | { |
340 | unsigned long r = au_readl(GPIO2_INTENABLE); | 365 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); |
366 | unsigned long r = __raw_readl(base + GPIO2_INTENABLE); | ||
341 | if (en) | 367 | if (en) |
342 | r |= 1 << gpio2; | 368 | r |= 1 << gpio2; |
343 | else | 369 | else |
344 | r &= ~(1 << gpio2); | 370 | r &= ~(1 << gpio2); |
345 | au_writel(r, GPIO2_INTENABLE); | 371 | __raw_writel(r, base + GPIO2_INTENABLE); |
346 | au_sync(); | 372 | wmb(); |
347 | } | 373 | } |
348 | 374 | ||
349 | /** | 375 | /** |
@@ -418,10 +444,11 @@ static inline void alchemy_gpio2_disable_int(int gpio2) | |||
418 | */ | 444 | */ |
419 | static inline void alchemy_gpio2_enable(void) | 445 | static inline void alchemy_gpio2_enable(void) |
420 | { | 446 | { |
421 | au_writel(3, GPIO2_ENABLE); /* reset, clock enabled */ | 447 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); |
422 | au_sync(); | 448 | __raw_writel(3, base + GPIO2_ENABLE); /* reset, clock enabled */ |
423 | au_writel(1, GPIO2_ENABLE); /* clock enabled */ | 449 | wmb(); |
424 | au_sync(); | 450 | __raw_writel(1, base + GPIO2_ENABLE); /* clock enabled */ |
451 | wmb(); | ||
425 | } | 452 | } |
426 | 453 | ||
427 | /** | 454 | /** |
@@ -431,8 +458,9 @@ static inline void alchemy_gpio2_enable(void) | |||
431 | */ | 458 | */ |
432 | static inline void alchemy_gpio2_disable(void) | 459 | static inline void alchemy_gpio2_disable(void) |
433 | { | 460 | { |
434 | au_writel(2, GPIO2_ENABLE); /* reset, clock disabled */ | 461 | void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); |
435 | au_sync(); | 462 | __raw_writel(2, base + GPIO2_ENABLE); /* reset, clock disabled */ |
463 | wmb(); | ||
436 | } | 464 | } |
437 | 465 | ||
438 | /**********************************************************************/ | 466 | /**********************************************************************/ |
@@ -556,6 +584,16 @@ static inline void gpio_set_value(int gpio, int v) | |||
556 | alchemy_gpio_set_value(gpio, v); | 584 | alchemy_gpio_set_value(gpio, v); |
557 | } | 585 | } |
558 | 586 | ||
587 | static inline int gpio_get_value_cansleep(unsigned gpio) | ||
588 | { | ||
589 | return gpio_get_value(gpio); | ||
590 | } | ||
591 | |||
592 | static inline void gpio_set_value_cansleep(unsigned gpio, int value) | ||
593 | { | ||
594 | gpio_set_value(gpio, value); | ||
595 | } | ||
596 | |||
559 | static inline int gpio_is_valid(int gpio) | 597 | static inline int gpio_is_valid(int gpio) |
560 | { | 598 | { |
561 | return alchemy_gpio_is_valid(gpio); | 599 | return alchemy_gpio_is_valid(gpio); |
@@ -581,10 +619,50 @@ static inline int gpio_request(unsigned gpio, const char *label) | |||
581 | return 0; | 619 | return 0; |
582 | } | 620 | } |
583 | 621 | ||
622 | static inline int gpio_request_one(unsigned gpio, | ||
623 | unsigned long flags, const char *label) | ||
624 | { | ||
625 | return 0; | ||
626 | } | ||
627 | |||
628 | static inline int gpio_request_array(struct gpio *array, size_t num) | ||
629 | { | ||
630 | return 0; | ||
631 | } | ||
632 | |||
584 | static inline void gpio_free(unsigned gpio) | 633 | static inline void gpio_free(unsigned gpio) |
585 | { | 634 | { |
586 | } | 635 | } |
587 | 636 | ||
637 | static inline void gpio_free_array(struct gpio *array, size_t num) | ||
638 | { | ||
639 | } | ||
640 | |||
641 | static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) | ||
642 | { | ||
643 | return -ENOSYS; | ||
644 | } | ||
645 | |||
646 | static inline int gpio_export(unsigned gpio, bool direction_may_change) | ||
647 | { | ||
648 | return -ENOSYS; | ||
649 | } | ||
650 | |||
651 | static inline int gpio_export_link(struct device *dev, const char *name, | ||
652 | unsigned gpio) | ||
653 | { | ||
654 | return -ENOSYS; | ||
655 | } | ||
656 | |||
657 | static inline int gpio_sysfs_set_active_low(unsigned gpio, int value) | ||
658 | { | ||
659 | return -ENOSYS; | ||
660 | } | ||
661 | |||
662 | static inline void gpio_unexport(unsigned gpio) | ||
663 | { | ||
664 | } | ||
665 | |||
588 | #endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ | 666 | #endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ |
589 | 667 | ||
590 | 668 | ||
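Note: the gpio-au1000.h changes above replace au_readl()/au_writel()/au_sync() with __raw_readl()/__raw_writel() plus wmb() against a KSEG1-mapped base, and turn the SYS_* and GPIO2_* constants from absolute KSEG1 addresses into offsets. The GPIO2 output register keeps its write-only encoding: the upper halfword selects which pin the write affects and the lower halfword carries the new level, so no read-modify-write is needed. A small arithmetic-only sketch of that mask (the GPIO2 base number below is an assumption, not the kernel's value):

#include <stdio.h>

#define ALCHEMY_GPIO2_BASE 200          /* assumed value, for illustration only */

static unsigned long gpio2_output_mask(int gpio, int value)
{
        return (value ? 0x00010001UL : 0x00010000UL)
                        << (gpio - ALCHEMY_GPIO2_BASE);
}

int main(void)
{
        printf("drive pin 203 high: 0x%08lx\n", gpio2_output_mask(203, 1));
        printf("drive pin 203 low : 0x%08lx\n", gpio2_output_mask(203, 0));
        return 0;
}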
diff --git a/arch/mips/include/asm/mach-bcm47xx/nvram.h b/arch/mips/include/asm/mach-bcm47xx/nvram.h index 9759588ba3cf..184d5ecb5f51 100644 --- a/arch/mips/include/asm/mach-bcm47xx/nvram.h +++ b/arch/mips/include/asm/mach-bcm47xx/nvram.h | |||
@@ -39,8 +39,16 @@ extern int nvram_getenv(char *name, char *val, size_t val_len); | |||
39 | 39 | ||
40 | static inline void nvram_parse_macaddr(char *buf, u8 *macaddr) | 40 | static inline void nvram_parse_macaddr(char *buf, u8 *macaddr) |
41 | { | 41 | { |
42 | sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], &macaddr[1], | 42 | if (strchr(buf, ':')) |
43 | &macaddr[2], &macaddr[3], &macaddr[4], &macaddr[5]); | 43 | sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], |
44 | &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], | ||
45 | &macaddr[5]); | ||
46 | else if (strchr(buf, '-')) | ||
47 | sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0], | ||
48 | &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], | ||
49 | &macaddr[5]); | ||
50 | else | ||
51 | printk(KERN_WARNING "Can not parse mac address: %s\n", buf); | ||
44 | } | 52 | } |
45 | 53 | ||
46 | #endif | 54 | #endif |
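Note: nvram_parse_macaddr() above now accepts both colon- and dash-separated MAC strings and warns on anything else. A user-space copy of the same parsing logic for quick testing, with printk() swapped for fprintf():

#include <stdio.h>
#include <string.h>

static void parse_macaddr(const char *buf, unsigned char *m)
{
        if (strchr(buf, ':'))
                sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
                       &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]);
        else if (strchr(buf, '-'))
                sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx",
                       &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]);
        else
                fprintf(stderr, "Can not parse mac address: %s\n", buf);
}

int main(void)
{
        unsigned char mac[6] = { 0 };

        parse_macaddr("00-11-22-33-44-55", mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}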
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h index 32978d32561a..ed72e6a26b73 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h | |||
@@ -88,7 +88,7 @@ struct bcm_tag { | |||
88 | char kernel_crc[CRC_LEN]; | 88 | char kernel_crc[CRC_LEN]; |
89 | /* 228-235: Unused at present */ | 89 | /* 228-235: Unused at present */ |
90 | char reserved1[8]; | 90 | char reserved1[8]; |
91 | /* 236-239: CRC32 of header excluding tagVersion */ | 91 | /* 236-239: CRC32 of header excluding last 20 bytes */ |
92 | char header_crc[CRC_LEN]; | 92 | char header_crc[CRC_LEN]; |
93 | /* 240-255: Unused at present */ | 93 | /* 240-255: Unused at present */ |
94 | char reserved2[16]; | 94 | char reserved2[16]; |
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index 0b2b5eb22e9b..dedef7d2b01f 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h | |||
@@ -63,6 +63,11 @@ | |||
63 | # CN30XX Disable instruction prefetching | 63 | # CN30XX Disable instruction prefetching |
64 | or v0, v0, 0x2000 | 64 | or v0, v0, 0x2000 |
65 | skip: | 65 | skip: |
66 | # First clear off CvmCtl[IPPCI] bit and move the performance | ||
67 | # counters interrupt to IRQ 6 | ||
68 | li v1, ~(7 << 7) | ||
69 | and v0, v0, v1 | ||
70 | ori v0, v0, (6 << 7) | ||
66 | # Write the cavium control register | 71 | # Write the cavium control register |
67 | dmtc0 v0, CP0_CVMCTL_REG | 72 | dmtc0 v0, CP0_CVMCTL_REG |
68 | sync | 73 | sync |
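Note: the assembly added above clears the three-bit field at CvmCtl bits 9:7 (the CvmCtl[IPPCI] field named in the comment) and then writes 6 into it, routing the performance-counter interrupt to IRQ 6. The same bit manipulation rendered in C, on a made-up sample value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cvmctl = 0x2385;               /* sample register value */

        cvmctl &= ~(uint64_t)(7 << 7);          /* li v1, ~(7 << 7); and v0, v0, v1 */
        cvmctl |= 6 << 7;                       /* ori v0, v0, (6 << 7) */

        printf("CvmCtl -> 0x%llx\n", (unsigned long long)cvmctl);       /* 0x2305 */
        return 0;
}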
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h new file mode 100644 index 000000000000..ce2f02929d22 --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/lantiq.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | #ifndef _LANTIQ_H__ | ||
9 | #define _LANTIQ_H__ | ||
10 | |||
11 | #include <linux/irq.h> | ||
12 | |||
13 | /* generic reg access functions */ | ||
14 | #define ltq_r32(reg) __raw_readl(reg) | ||
15 | #define ltq_w32(val, reg) __raw_writel(val, reg) | ||
16 | #define ltq_w32_mask(clear, set, reg) \ | ||
17 | ltq_w32((ltq_r32(reg) & ~(clear)) | (set), reg) | ||
18 | #define ltq_r8(reg) __raw_readb(reg) | ||
19 | #define ltq_w8(val, reg) __raw_writeb(val, reg) | ||
20 | |||
21 | /* register access macros for EBU and CGU */ | ||
22 | #define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y)) | ||
23 | #define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x)) | ||
24 | #define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y)) | ||
25 | #define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x)) | ||
26 | |||
27 | extern __iomem void *ltq_ebu_membase; | ||
28 | extern __iomem void *ltq_cgu_membase; | ||
29 | |||
30 | extern unsigned int ltq_get_cpu_ver(void); | ||
31 | extern unsigned int ltq_get_soc_type(void); | ||
32 | |||
33 | /* clock speeds */ | ||
34 | #define CLOCK_60M 60000000 | ||
35 | #define CLOCK_83M 83333333 | ||
36 | #define CLOCK_111M 111111111 | ||
37 | #define CLOCK_133M 133333333 | ||
38 | #define CLOCK_167M 166666667 | ||
39 | #define CLOCK_200M 200000000 | ||
40 | #define CLOCK_266M 266666666 | ||
41 | #define CLOCK_333M 333333333 | ||
42 | #define CLOCK_400M 400000000 | ||
43 | |||
44 | /* spinlock all ebu i/o */ | ||
45 | extern spinlock_t ebu_lock; | ||
46 | |||
47 | /* some irq helpers */ | ||
48 | extern void ltq_disable_irq(struct irq_data *data); | ||
49 | extern void ltq_mask_and_ack_irq(struct irq_data *data); | ||
50 | extern void ltq_enable_irq(struct irq_data *data); | ||
51 | |||
52 | /* find out what caused the last cpu reset */ | ||
53 | extern int ltq_reset_cause(void); | ||
54 | #define LTQ_RST_CAUSE_WDTRST 0x20 | ||
55 | |||
56 | #define IOPORT_RESOURCE_START 0x10000000 | ||
57 | #define IOPORT_RESOURCE_END 0xffffffff | ||
58 | #define IOMEM_RESOURCE_START 0x10000000 | ||
59 | #define IOMEM_RESOURCE_END 0xffffffff | ||
60 | #define LTQ_FLASH_START 0x10000000 | ||
61 | #define LTQ_FLASH_MAX 0x04000000 | ||
62 | |||
63 | #endif | ||
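Note: lantiq.h above wraps __raw_readl()/__raw_writel() in ltq_r32()/ltq_w32() and builds a read-modify-write helper, ltq_w32_mask(clear, set, reg), on top of them. The same macro shape exercised on a plain word instead of a device register, to show the clear-then-set order:

#include <stdio.h>
#include <stdint.h>

#define r32(reg)                (*(reg))
#define w32(val, reg)           (*(reg) = (val))
#define w32_mask(clear, set, reg) \
        w32((r32(reg) & ~(clear)) | (set), reg)

int main(void)
{
        uint32_t reg = 0xffff0000;

        w32_mask(0x00f00000, 0x0000000a, &reg); /* clear bits 23:20, then set 0xa */
        printf("0x%08x\n", (unsigned int)reg);  /* prints 0xff0f000a */
        return 0;
}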
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h new file mode 100644 index 000000000000..a305f1d0259e --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LANTIQ_PLATFORM_H__ | ||
10 | #define _LANTIQ_PLATFORM_H__ | ||
11 | |||
12 | #include <linux/mtd/partitions.h> | ||
13 | #include <linux/socket.h> | ||
14 | |||
15 | /* struct used to pass info to the pci core */ | ||
16 | enum { | ||
17 | PCI_CLOCK_INT = 0, | ||
18 | PCI_CLOCK_EXT | ||
19 | }; | ||
20 | |||
21 | #define PCI_EXIN0 0x0001 | ||
22 | #define PCI_EXIN1 0x0002 | ||
23 | #define PCI_EXIN2 0x0004 | ||
24 | #define PCI_EXIN3 0x0008 | ||
25 | #define PCI_EXIN4 0x0010 | ||
26 | #define PCI_EXIN5 0x0020 | ||
27 | #define PCI_EXIN_MAX 6 | ||
28 | |||
29 | #define PCI_GNT1 0x0040 | ||
30 | #define PCI_GNT2 0x0080 | ||
31 | #define PCI_GNT3 0x0100 | ||
32 | #define PCI_GNT4 0x0200 | ||
33 | |||
34 | #define PCI_REQ1 0x0400 | ||
35 | #define PCI_REQ2 0x0800 | ||
36 | #define PCI_REQ3 0x1000 | ||
37 | #define PCI_REQ4 0x2000 | ||
38 | #define PCI_REQ_SHIFT 10 | ||
39 | #define PCI_REQ_MASK 0xf | ||
40 | |||
41 | struct ltq_pci_data { | ||
42 | int clock; | ||
43 | int gpio; | ||
44 | int irq[16]; | ||
45 | }; | ||
46 | |||
47 | /* struct used to pass info to network drivers */ | ||
48 | struct ltq_eth_data { | ||
49 | struct sockaddr mac; | ||
50 | int mii_mode; | ||
51 | }; | ||
52 | |||
53 | #endif | ||
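Note: in lantiq_platform.h above, the PCI pin routing is passed as a single bitmask (ltq_pci_data.gpio) built from the PCI_EXIN*/PCI_GNT*/PCI_REQ* bits, with PCI_REQ_SHIFT/PCI_REQ_MASK describing where the four REQ bits sit. The consumer of that field is not part of this hunk, so the extraction below is only a hypothetical illustration of how the shift and mask line up with the bit definitions:

#include <stdio.h>

#define PCI_GNT1        0x0040
#define PCI_REQ1        0x0400
#define PCI_REQ2        0x0800
#define PCI_REQ_SHIFT   10
#define PCI_REQ_MASK    0xf

int main(void)
{
        int gpio = PCI_GNT1 | PCI_REQ1 | PCI_REQ2;      /* sample routing mask */
        unsigned int req = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK;

        printf("REQ lines enabled: 0x%x\n", req);       /* prints 0x3 */
        return 0;
}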
diff --git a/arch/mips/include/asm/mach-lantiq/war.h b/arch/mips/include/asm/mach-lantiq/war.h new file mode 100644 index 000000000000..01b08ef368d1 --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/war.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | */ | ||
7 | #ifndef __ASM_MIPS_MACH_LANTIQ_WAR_H | ||
8 | #define __ASM_MIPS_MACH_LANTIQ_WAR_H | ||
9 | |||
10 | #define R4600_V1_INDEX_ICACHEOP_WAR 0 | ||
11 | #define R4600_V1_HIT_CACHEOP_WAR 0 | ||
12 | #define R4600_V2_HIT_CACHEOP_WAR 0 | ||
13 | #define R5432_CP0_INTERRUPT_WAR 0 | ||
14 | #define BCM1250_M3_WAR 0 | ||
15 | #define SIBYTE_1956_WAR 0 | ||
16 | #define MIPS4K_ICACHE_REFILL_WAR 0 | ||
17 | #define MIPS_CACHE_SYNC_WAR 0 | ||
18 | #define TX49XX_ICACHE_INDEX_INV_WAR 0 | ||
19 | #define RM9000_CDEX_SMP_WAR 0 | ||
20 | #define ICACHE_REFILLS_WORKAROUND_WAR 0 | ||
21 | #define R10000_LLSC_WAR 0 | ||
22 | #define MIPS34K_MISSED_ITLB_WAR 0 | ||
23 | |||
24 | #endif | ||
diff --git a/arch/mips/include/asm/mach-lantiq/xway/irq.h b/arch/mips/include/asm/mach-lantiq/xway/irq.h new file mode 100644 index 000000000000..a1471d2dd0d2 --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/xway/irq.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef __LANTIQ_IRQ_H | ||
10 | #define __LANTIQ_IRQ_H | ||
11 | |||
12 | #include <lantiq_irq.h> | ||
13 | |||
14 | #define NR_IRQS 256 | ||
15 | |||
16 | #include_next <irq.h> | ||
17 | |||
18 | #endif | ||
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h new file mode 100644 index 000000000000..b4465a888e20 --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LANTIQ_XWAY_IRQ_H__ | ||
10 | #define _LANTIQ_XWAY_IRQ_H__ | ||
11 | |||
12 | #define INT_NUM_IRQ0 8 | ||
13 | #define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0) | ||
14 | #define INT_NUM_IM1_IRL0 (INT_NUM_IRQ0 + 32) | ||
15 | #define INT_NUM_IM2_IRL0 (INT_NUM_IRQ0 + 64) | ||
16 | #define INT_NUM_IM3_IRL0 (INT_NUM_IRQ0 + 96) | ||
17 | #define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128) | ||
18 | #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) | ||
19 | |||
20 | #define LTQ_ASC_TIR(x) (INT_NUM_IM3_IRL0 + (x * 8)) | ||
21 | #define LTQ_ASC_RIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 1) | ||
22 | #define LTQ_ASC_EIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 2) | ||
23 | |||
24 | #define LTQ_ASC_ASE_TIR INT_NUM_IM2_IRL0 | ||
25 | #define LTQ_ASC_ASE_RIR (INT_NUM_IM2_IRL0 + 2) | ||
26 | #define LTQ_ASC_ASE_EIR (INT_NUM_IM2_IRL0 + 3) | ||
27 | |||
28 | #define LTQ_SSC_TIR (INT_NUM_IM0_IRL0 + 15) | ||
29 | #define LTQ_SSC_RIR (INT_NUM_IM0_IRL0 + 14) | ||
30 | #define LTQ_SSC_EIR (INT_NUM_IM0_IRL0 + 16) | ||
31 | |||
32 | #define LTQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21) | ||
33 | #define LTQ_MEI_INT (INT_NUM_IM1_IRL0 + 23) | ||
34 | |||
35 | #define LTQ_TIMER6_INT (INT_NUM_IM1_IRL0 + 23) | ||
36 | #define LTQ_USB_INT (INT_NUM_IM1_IRL0 + 22) | ||
37 | #define LTQ_USB_OC_INT (INT_NUM_IM4_IRL0 + 23) | ||
38 | |||
39 | #define MIPS_CPU_TIMER_IRQ 7 | ||
40 | |||
41 | #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) | ||
42 | #define LTQ_DMA_CH1_INT (INT_NUM_IM2_IRL0 + 1) | ||
43 | #define LTQ_DMA_CH2_INT (INT_NUM_IM2_IRL0 + 2) | ||
44 | #define LTQ_DMA_CH3_INT (INT_NUM_IM2_IRL0 + 3) | ||
45 | #define LTQ_DMA_CH4_INT (INT_NUM_IM2_IRL0 + 4) | ||
46 | #define LTQ_DMA_CH5_INT (INT_NUM_IM2_IRL0 + 5) | ||
47 | #define LTQ_DMA_CH6_INT (INT_NUM_IM2_IRL0 + 6) | ||
48 | #define LTQ_DMA_CH7_INT (INT_NUM_IM2_IRL0 + 7) | ||
49 | #define LTQ_DMA_CH8_INT (INT_NUM_IM2_IRL0 + 8) | ||
50 | #define LTQ_DMA_CH9_INT (INT_NUM_IM2_IRL0 + 9) | ||
51 | #define LTQ_DMA_CH10_INT (INT_NUM_IM2_IRL0 + 10) | ||
52 | #define LTQ_DMA_CH11_INT (INT_NUM_IM2_IRL0 + 11) | ||
53 | #define LTQ_DMA_CH12_INT (INT_NUM_IM2_IRL0 + 25) | ||
54 | #define LTQ_DMA_CH13_INT (INT_NUM_IM2_IRL0 + 26) | ||
55 | #define LTQ_DMA_CH14_INT (INT_NUM_IM2_IRL0 + 27) | ||
56 | #define LTQ_DMA_CH15_INT (INT_NUM_IM2_IRL0 + 28) | ||
57 | #define LTQ_DMA_CH16_INT (INT_NUM_IM2_IRL0 + 29) | ||
58 | #define LTQ_DMA_CH17_INT (INT_NUM_IM2_IRL0 + 30) | ||
59 | #define LTQ_DMA_CH18_INT (INT_NUM_IM2_IRL0 + 16) | ||
60 | #define LTQ_DMA_CH19_INT (INT_NUM_IM2_IRL0 + 21) | ||
61 | |||
62 | #define LTQ_PPE_MBOX_INT (INT_NUM_IM2_IRL0 + 24) | ||
63 | |||
64 | #define INT_NUM_IM4_IRL14 (INT_NUM_IM4_IRL0 + 14) | ||
65 | |||
66 | #endif | ||
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h new file mode 100644 index 000000000000..8a3c6be669d2 --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_XWAY_H__ | ||
10 | #define _LTQ_XWAY_H__ | ||
11 | |||
12 | #ifdef CONFIG_SOC_TYPE_XWAY | ||
13 | |||
14 | #include <lantiq.h> | ||
15 | |||
16 | /* Chip IDs */ | ||
17 | #define SOC_ID_DANUBE1 0x129 | ||
18 | #define SOC_ID_DANUBE2 0x12B | ||
19 | #define SOC_ID_TWINPASS 0x12D | ||
20 | #define SOC_ID_AMAZON_SE 0x152 | ||
21 | #define SOC_ID_ARX188 0x16C | ||
22 | #define SOC_ID_ARX168 0x16D | ||
23 | #define SOC_ID_ARX182 0x16F | ||
24 | |||
25 | /* SoC Types */ | ||
26 | #define SOC_TYPE_DANUBE 0x01 | ||
27 | #define SOC_TYPE_TWINPASS 0x02 | ||
28 | #define SOC_TYPE_AR9 0x03 | ||
29 | #define SOC_TYPE_VR9 0x04 | ||
30 | #define SOC_TYPE_AMAZON_SE 0x05 | ||
31 | |||
32 | /* ASC0/1 - serial port */ | ||
33 | #define LTQ_ASC0_BASE_ADDR 0x1E100400 | ||
34 | #define LTQ_ASC1_BASE_ADDR 0x1E100C00 | ||
35 | #define LTQ_ASC_SIZE 0x400 | ||
36 | |||
37 | /* RCU - reset control unit */ | ||
38 | #define LTQ_RCU_BASE_ADDR 0x1F203000 | ||
39 | #define LTQ_RCU_SIZE 0x1000 | ||
40 | |||
41 | /* GPTU - general purpose timer unit */ | ||
42 | #define LTQ_GPTU_BASE_ADDR 0x18000300 | ||
43 | #define LTQ_GPTU_SIZE 0x100 | ||
44 | |||
45 | /* EBU - external bus unit */ | ||
46 | #define LTQ_EBU_GPIO_START 0x14000000 | ||
47 | #define LTQ_EBU_GPIO_SIZE 0x1000 | ||
48 | |||
49 | #define LTQ_EBU_BASE_ADDR 0x1E105300 | ||
50 | #define LTQ_EBU_SIZE 0x100 | ||
51 | |||
52 | #define LTQ_EBU_BUSCON0 0x0060 | ||
53 | #define LTQ_EBU_PCC_CON 0x0090 | ||
54 | #define LTQ_EBU_PCC_IEN 0x00A4 | ||
55 | #define LTQ_EBU_PCC_ISTAT 0x00A0 | ||
56 | #define LTQ_EBU_BUSCON1 0x0064 | ||
57 | #define LTQ_EBU_ADDRSEL1 0x0024 | ||
58 | #define EBU_WRDIS 0x80000000 | ||
59 | |||
60 | /* CGU - clock generation unit */ | ||
61 | #define LTQ_CGU_BASE_ADDR 0x1F103000 | ||
62 | #define LTQ_CGU_SIZE 0x1000 | ||
63 | |||
64 | /* ICU - interrupt control unit */ | ||
65 | #define LTQ_ICU_BASE_ADDR 0x1F880200 | ||
66 | #define LTQ_ICU_SIZE 0x100 | ||
67 | |||
68 | /* EIU - external interrupt unit */ | ||
69 | #define LTQ_EIU_BASE_ADDR 0x1F101000 | ||
70 | #define LTQ_EIU_SIZE 0x1000 | ||
71 | |||
72 | /* PMU - power management unit */ | ||
73 | #define LTQ_PMU_BASE_ADDR 0x1F102000 | ||
74 | #define LTQ_PMU_SIZE 0x1000 | ||
75 | |||
76 | #define PMU_DMA 0x0020 | ||
77 | #define PMU_USB 0x8041 | ||
78 | #define PMU_LED 0x0800 | ||
79 | #define PMU_GPT 0x1000 | ||
80 | #define PMU_PPE 0x2000 | ||
81 | #define PMU_FPI 0x4000 | ||
82 | #define PMU_SWITCH 0x10000000 | ||
83 | |||
84 | /* ETOP - ethernet */ | ||
85 | #define LTQ_ETOP_BASE_ADDR 0x1E180000 | ||
86 | #define LTQ_ETOP_SIZE 0x40000 | ||
87 | |||
88 | /* DMA */ | ||
89 | #define LTQ_DMA_BASE_ADDR 0x1E104100 | ||
90 | #define LTQ_DMA_SIZE 0x800 | ||
91 | |||
92 | /* PCI */ | ||
93 | #define PCI_CR_BASE_ADDR 0x1E105400 | ||
94 | #define PCI_CR_SIZE 0x400 | ||
95 | |||
96 | /* WDT */ | ||
97 | #define LTQ_WDT_BASE_ADDR 0x1F8803F0 | ||
98 | #define LTQ_WDT_SIZE 0x10 | ||
99 | |||
100 | /* STP - serial to parallel conversion unit */ | ||
101 | #define LTQ_STP_BASE_ADDR 0x1E100BB0 | ||
102 | #define LTQ_STP_SIZE 0x40 | ||
103 | |||
104 | /* GPIO */ | ||
105 | #define LTQ_GPIO0_BASE_ADDR 0x1E100B10 | ||
106 | #define LTQ_GPIO1_BASE_ADDR 0x1E100B40 | ||
107 | #define LTQ_GPIO2_BASE_ADDR 0x1E100B70 | ||
108 | #define LTQ_GPIO_SIZE 0x30 | ||
109 | |||
110 | /* SSC */ | ||
111 | #define LTQ_SSC_BASE_ADDR 0x1e100800 | ||
112 | #define LTQ_SSC_SIZE 0x100 | ||
113 | |||
114 | /* MEI - dsl core */ | ||
115 | #define LTQ_MEI_BASE_ADDR 0x1E116000 | ||
116 | |||
117 | /* DEU - data encryption unit */ | ||
118 | #define LTQ_DEU_BASE_ADDR 0x1E103100 | ||
119 | |||
120 | /* MPS - multi processor unit (voice) */ | ||
121 | #define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000) | ||
122 | #define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344)) | ||
123 | |||
124 | /* request a non-gpio and set the PIO config */ | ||
125 | extern int ltq_gpio_request(unsigned int pin, unsigned int alt0, | ||
126 | unsigned int alt1, unsigned int dir, const char *name); | ||
127 | extern void ltq_pmu_enable(unsigned int module); | ||
128 | extern void ltq_pmu_disable(unsigned int module); | ||
129 | |||
130 | static inline int ltq_is_ar9(void) | ||
131 | { | ||
132 | return (ltq_get_soc_type() == SOC_TYPE_AR9); | ||
133 | } | ||
134 | |||
135 | static inline int ltq_is_vr9(void) | ||
136 | { | ||
137 | return (ltq_get_soc_type() == SOC_TYPE_VR9); | ||
138 | } | ||
139 | |||
140 | #endif /* CONFIG_SOC_TYPE_XWAY */ | ||
141 | #endif /* _LTQ_XWAY_H__ */ | ||
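The helpers declared at the end of this header are the board-level API: ltq_pmu_enable()/ltq_pmu_disable() gate power to the blocks named by the PMU_* bits, and ltq_gpio_request() claims a multiplexed pin together with its alternate-function and direction settings. A hedged usage sketch, where the pin number, direction encoding and label are illustrative assumptions:

#include <linux/init.h>
#include <lantiq_soc.h>

static int __init example_board_setup(void)
{
	/* power up the DMA engine before any driver touches it */
	ltq_pmu_enable(PMU_DMA);

	/* claim pin 42, both alt bits clear, direction 1 = output (assumed) */
	return ltq_gpio_request(42, 0, 0, 1, "example-led");
}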
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h new file mode 100644 index 000000000000..872943a4b90e --- /dev/null +++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. | ||
14 | * | ||
15 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
16 | */ | ||
17 | |||
18 | #ifndef LTQ_DMA_H__ | ||
19 | #define LTQ_DMA_H__ | ||
20 | |||
21 | #define LTQ_DESC_SIZE 0x08 /* each descriptor is 64bit */ | ||
22 | #define LTQ_DESC_NUM 0x40 /* 64 descriptors / channel */ | ||
23 | |||
24 | #define LTQ_DMA_OWN BIT(31) /* owner bit */ | ||
25 | #define LTQ_DMA_C BIT(30) /* complete bit */ | ||
26 | #define LTQ_DMA_SOP BIT(29) /* start of packet */ | ||
27 | #define LTQ_DMA_EOP BIT(28) /* end of packet */ | ||
28 | #define LTQ_DMA_TX_OFFSET(x) ((x & 0x1f) << 23) /* data bytes offset */ | ||
29 | #define LTQ_DMA_RX_OFFSET(x) ((x & 0x7) << 23) /* data bytes offset */ | ||
30 | #define LTQ_DMA_SIZE_MASK (0xffff) /* the size field is 16 bit */ | ||
31 | |||
32 | struct ltq_dma_desc { | ||
33 | u32 ctl; | ||
34 | u32 addr; | ||
35 | }; | ||
36 | |||
37 | struct ltq_dma_channel { | ||
38 | int nr; /* the channel number */ | ||
39 | int irq; /* the mapped irq */ | ||
40 | int desc; /* the current descriptor */ | ||
41 | struct ltq_dma_desc *desc_base; /* the descriptor base */ | ||
42 | int phys; /* physical addr */ | ||
43 | }; | ||
44 | |||
45 | enum { | ||
46 | DMA_PORT_ETOP = 0, | ||
47 | DMA_PORT_DEU, | ||
48 | }; | ||
49 | |||
50 | extern void ltq_dma_enable_irq(struct ltq_dma_channel *ch); | ||
51 | extern void ltq_dma_disable_irq(struct ltq_dma_channel *ch); | ||
52 | extern void ltq_dma_ack_irq(struct ltq_dma_channel *ch); | ||
53 | extern void ltq_dma_open(struct ltq_dma_channel *ch); | ||
54 | extern void ltq_dma_close(struct ltq_dma_channel *ch); | ||
55 | extern void ltq_dma_alloc_tx(struct ltq_dma_channel *ch); | ||
56 | extern void ltq_dma_alloc_rx(struct ltq_dma_channel *ch); | ||
57 | extern void ltq_dma_free(struct ltq_dma_channel *ch); | ||
58 | extern void ltq_dma_init_port(int p); | ||
59 | |||
60 | #endif | ||
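A buffer is handed to the engine by filling a descriptor's address and then writing an OR of the control bits above: LTQ_DMA_OWN passes ownership to the hardware, and LTQ_DMA_SOP/LTQ_DMA_EOP mark a single-buffer packet. A hedged sketch of a TX submission, assuming the buffer is already DMA-mapped and the channel was prepared with ltq_dma_alloc_tx() and ltq_dma_open():

#include <linux/types.h>
#include <xway_dma.h>

/* sketch only: queue one mapped buffer on a TX channel */
static void example_dma_tx(struct ltq_dma_channel *ch, u32 phys, int len)
{
	struct ltq_dma_desc *desc = &ch->desc_base[ch->desc];

	desc->addr = phys;
	/* real drivers insert a write barrier before flipping the OWN bit */
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		    LTQ_DMA_TX_OFFSET(phys & 0x1f) |	/* byte offset, assumed alignment */
		    (len & LTQ_DMA_SIZE_MASK);

	ch->desc = (ch->desc + 1) % LTQ_DESC_NUM;
}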
diff --git a/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h new file mode 100644 index 000000000000..3b728275b9b0 --- /dev/null +++ b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 Netlogic Microsystems | ||
7 | * Copyright (C) 2003 Ralf Baechle | ||
8 | */ | ||
9 | #ifndef __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H | ||
10 | #define __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H | ||
11 | |||
12 | #define cpu_has_4kex 1 | ||
13 | #define cpu_has_4k_cache 1 | ||
14 | #define cpu_has_watch 1 | ||
15 | #define cpu_has_mips16 0 | ||
16 | #define cpu_has_counter 1 | ||
17 | #define cpu_has_divec 1 | ||
18 | #define cpu_has_vce 0 | ||
19 | #define cpu_has_cache_cdex_p 0 | ||
20 | #define cpu_has_cache_cdex_s 0 | ||
21 | #define cpu_has_prefetch 1 | ||
22 | #define cpu_has_mcheck 1 | ||
23 | #define cpu_has_ejtag 1 | ||
24 | |||
25 | #define cpu_has_llsc 1 | ||
26 | #define cpu_has_vtag_icache 0 | ||
27 | #define cpu_has_dc_aliases 0 | ||
28 | #define cpu_has_ic_fills_f_dc 0 | ||
29 | #define cpu_has_dsp 0 | ||
30 | #define cpu_has_mipsmt 0 | ||
31 | #define cpu_has_userlocal 0 | ||
32 | #define cpu_icache_snoops_remote_store 0 | ||
33 | |||
34 | #define cpu_has_nofpuex 0 | ||
35 | #define cpu_has_64bits 1 | ||
36 | |||
37 | #define cpu_has_mips32r1 1 | ||
38 | #define cpu_has_mips32r2 0 | ||
39 | #define cpu_has_mips64r1 1 | ||
40 | #define cpu_has_mips64r2 0 | ||
41 | |||
42 | #define cpu_has_inclusive_pcaches 0 | ||
43 | |||
44 | #define cpu_dcache_line_size() 32 | ||
45 | #define cpu_icache_line_size() 32 | ||
46 | |||
47 | #endif /* __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H */ | ||
diff --git a/arch/mips/include/asm/mach-netlogic/irq.h b/arch/mips/include/asm/mach-netlogic/irq.h new file mode 100644 index 000000000000..b5902458e7c1 --- /dev/null +++ b/arch/mips/include/asm/mach-netlogic/irq.h | |||
@@ -0,0 +1,14 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 Netlogic Microsystems. | ||
7 | */ | ||
8 | #ifndef __ASM_NETLOGIC_IRQ_H | ||
9 | #define __ASM_NETLOGIC_IRQ_H | ||
10 | |||
11 | #define NR_IRQS 64 | ||
12 | #define MIPS_CPU_IRQ_BASE 0 | ||
13 | |||
14 | #endif /* __ASM_NETLOGIC_IRQ_H */ | ||
diff --git a/arch/mips/include/asm/mach-netlogic/war.h b/arch/mips/include/asm/mach-netlogic/war.h new file mode 100644 index 000000000000..22da89327352 --- /dev/null +++ b/arch/mips/include/asm/mach-netlogic/war.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 Netlogic Microsystems. | ||
7 | * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> | ||
8 | */ | ||
9 | #ifndef __ASM_MIPS_MACH_NLM_WAR_H | ||
10 | #define __ASM_MIPS_MACH_NLM_WAR_H | ||
11 | |||
12 | #define R4600_V1_INDEX_ICACHEOP_WAR 0 | ||
13 | #define R4600_V1_HIT_CACHEOP_WAR 0 | ||
14 | #define R4600_V2_HIT_CACHEOP_WAR 0 | ||
15 | #define R5432_CP0_INTERRUPT_WAR 0 | ||
16 | #define BCM1250_M3_WAR 0 | ||
17 | #define SIBYTE_1956_WAR 0 | ||
18 | #define MIPS4K_ICACHE_REFILL_WAR 0 | ||
19 | #define MIPS_CACHE_SYNC_WAR 0 | ||
20 | #define TX49XX_ICACHE_INDEX_INV_WAR 0 | ||
21 | #define RM9000_CDEX_SMP_WAR 0 | ||
22 | #define ICACHE_REFILLS_WORKAROUND_WAR 0 | ||
23 | #define R10000_LLSC_WAR 0 | ||
24 | #define MIPS34K_MISSED_ITLB_WAR 0 | ||
25 | |||
26 | #endif /* __ASM_MIPS_MACH_NLM_WAR_H */ | ||
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index d94085a3eafb..bc01a02cacd8 100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h | |||
@@ -118,6 +118,8 @@ search_module_dbetables(unsigned long addr) | |||
118 | #define MODULE_PROC_FAMILY "LOONGSON2 " | 118 | #define MODULE_PROC_FAMILY "LOONGSON2 " |
119 | #elif defined CONFIG_CPU_CAVIUM_OCTEON | 119 | #elif defined CONFIG_CPU_CAVIUM_OCTEON |
120 | #define MODULE_PROC_FAMILY "OCTEON " | 120 | #define MODULE_PROC_FAMILY "OCTEON " |
121 | #elif defined CONFIG_CPU_XLR | ||
122 | #define MODULE_PROC_FAMILY "XLR " | ||
121 | #else | 123 | #else |
122 | #error MODULE_PROC_FAMILY undefined for your processor configuration | 124 | #error MODULE_PROC_FAMILY undefined for your processor configuration |
123 | #endif | 125 | #endif |
diff --git a/arch/mips/include/asm/netlogic/interrupt.h b/arch/mips/include/asm/netlogic/interrupt.h new file mode 100644 index 000000000000..a85aadb6cfd7 --- /dev/null +++ b/arch/mips/include/asm/netlogic/interrupt.h | |||
@@ -0,0 +1,45 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_INTERRUPT_H | ||
36 | #define _ASM_NLM_INTERRUPT_H | ||
37 | |||
38 | /* Defines for the IRQ numbers */ | ||
39 | |||
40 | #define IRQ_IPI_SMP_FUNCTION 3 | ||
41 | #define IRQ_IPI_SMP_RESCHEDULE 4 | ||
42 | #define IRQ_MSGRING 6 | ||
43 | #define IRQ_TIMER 7 | ||
44 | |||
45 | #endif | ||
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h new file mode 100644 index 000000000000..8c53d0ba4bf2 --- /dev/null +++ b/arch/mips/include/asm/netlogic/mips-extns.h | |||
@@ -0,0 +1,76 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_MIPS_EXTS_H | ||
36 | #define _ASM_NLM_MIPS_EXTS_H | ||
37 | |||
38 | /* | ||
39 | * XLR and XLP interrupt request and interrupt mask registers | ||
40 | */ | ||
41 | #define read_c0_eirr() __read_64bit_c0_register($9, 6) | ||
42 | #define read_c0_eimr() __read_64bit_c0_register($9, 7) | ||
43 | #define write_c0_eirr(val) __write_64bit_c0_register($9, 6, val) | ||
44 | |||
45 | /* | ||
46 | * Writing EIMR in 32-bit mode is a special case: the lower 8 bits of the | ||
47 | * EIMR are shadowed in the status register, so we cannot simply save and | ||
48 | * restore the status register around the split access. | ||
49 | */ | ||
50 | #define write_c0_eimr(val) \ | ||
51 | do { \ | ||
52 | if (sizeof(unsigned long) == 4) { \ | ||
53 | unsigned long __flags; \ | ||
54 | \ | ||
55 | local_irq_save(__flags); \ | ||
56 | __asm__ __volatile__( \ | ||
57 | ".set\tmips64\n\t" \ | ||
58 | "dsll\t%L0, %L0, 32\n\t" \ | ||
59 | "dsrl\t%L0, %L0, 32\n\t" \ | ||
60 | "dsll\t%M0, %M0, 32\n\t" \ | ||
61 | "or\t%L0, %L0, %M0\n\t" \ | ||
62 | "dmtc0\t%L0, $9, 7\n\t" \ | ||
63 | ".set\tmips0" \ | ||
64 | : : "r" (val)); \ | ||
65 | __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\ | ||
66 | local_irq_restore(__flags); \ | ||
67 | } else \ | ||
68 | __write_64bit_c0_register($9, 7, (val)); \ | ||
69 | } while (0) | ||
70 | |||
71 | static inline int hard_smp_processor_id(void) | ||
72 | { | ||
73 | return __read_32bit_c0_register($15, 1) & 0x3ff; | ||
74 | } | ||
75 | |||
76 | #endif /*_ASM_NLM_MIPS_EXTS_H */ | ||
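The write_c0_eimr() sequence above is the subtle part: on a 32-bit kernel the 64-bit EIMR value is assembled from two register halves, and because its low 8 bits shadow into the CP0 status register, the saved flags are patched with the new low byte before local_irq_restore() so the restore does not undo the write. Callers simply go through the macro; a minimal sketch (helper name is an assumption):

/* sketch only: unmask one extra external interrupt source */
static inline void example_eimr_unmask(int bit)
{
	write_c0_eimr(read_c0_eimr() | (1ULL << bit));
}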
diff --git a/arch/mips/include/asm/netlogic/psb-bootinfo.h b/arch/mips/include/asm/netlogic/psb-bootinfo.h new file mode 100644 index 000000000000..6878307f0ee6 --- /dev/null +++ b/arch/mips/include/asm/netlogic/psb-bootinfo.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NETLOGIC_BOOTINFO_H | ||
36 | #define _ASM_NETLOGIC_BOOTINFO_H | ||
37 | |||
38 | struct psb_info { | ||
39 | uint64_t boot_level; | ||
40 | uint64_t io_base; | ||
41 | uint64_t output_device; | ||
42 | uint64_t uart_print; | ||
43 | uint64_t led_output; | ||
44 | uint64_t init; | ||
45 | uint64_t exit; | ||
46 | uint64_t warm_reset; | ||
47 | uint64_t wakeup; | ||
48 | uint64_t online_cpu_map; | ||
49 | uint64_t master_reentry_sp; | ||
50 | uint64_t master_reentry_gp; | ||
51 | uint64_t master_reentry_fn; | ||
52 | uint64_t slave_reentry_fn; | ||
53 | uint64_t magic_dword; | ||
54 | uint64_t uart_putchar; | ||
55 | uint64_t size; | ||
56 | uint64_t uart_getchar; | ||
57 | uint64_t nmi_handler; | ||
58 | uint64_t psb_version; | ||
59 | uint64_t mac_addr; | ||
60 | uint64_t cpu_frequency; | ||
61 | uint64_t board_version; | ||
62 | uint64_t malloc; | ||
63 | uint64_t free; | ||
64 | uint64_t global_shmem_addr; | ||
65 | uint64_t global_shmem_size; | ||
66 | uint64_t psb_os_cpu_map; | ||
67 | uint64_t userapp_cpu_map; | ||
68 | uint64_t wakeup_os; | ||
69 | uint64_t psb_mem_map; | ||
70 | uint64_t board_major_version; | ||
71 | uint64_t board_minor_version; | ||
72 | uint64_t board_manf_revision; | ||
73 | uint64_t board_serial_number; | ||
74 | uint64_t psb_physaddr_map; | ||
75 | uint64_t xlr_loaderip_config; | ||
76 | uint64_t bldr_envp; | ||
77 | uint64_t avail_mem_map; | ||
78 | }; | ||
79 | |||
80 | enum { | ||
81 | NETLOGIC_IO_SPACE = 0x10, | ||
82 | PCIX_IO_SPACE, | ||
83 | PCIX_CFG_SPACE, | ||
84 | PCIX_MEMORY_SPACE, | ||
85 | HT_IO_SPACE, | ||
86 | HT_CFG_SPACE, | ||
87 | HT_MEMORY_SPACE, | ||
88 | SRAM_SPACE, | ||
89 | FLASH_CONTROLLER_SPACE | ||
90 | }; | ||
91 | |||
92 | #define NLM_MAX_ARGS 64 | ||
93 | #define NLM_MAX_ENVS 32 | ||
94 | |||
95 | /* This is what netlboot passes; the Linux boot_mem_map layout is subtly different */ | ||
96 | #define NLM_BOOT_MEM_MAP_MAX 32 | ||
97 | struct nlm_boot_mem_map { | ||
98 | int nr_map; | ||
99 | struct nlm_boot_mem_map_entry { | ||
100 | uint64_t addr; /* start of memory segment */ | ||
101 | uint64_t size; /* size of memory segment */ | ||
102 | uint32_t type; /* type of memory segment */ | ||
103 | } map[NLM_BOOT_MEM_MAP_MAX]; | ||
104 | }; | ||
105 | |||
106 | /* Pointer to saved boot loader info */ | ||
107 | extern struct psb_info nlm_prom_info; | ||
108 | |||
109 | #endif | ||
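Platform setup code is expected to walk the firmware map and re-register each usable segment with the kernel. A hedged sketch; the type encoding is assumed here to line up with BOOT_MEM_RAM, which is exactly the "subtly different" caveat noted above and may need translation for real netlboot values:

#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/netlogic/psb-bootinfo.h>

/* sketch only: hand the firmware-provided RAM segments to the kernel */
static void __init example_register_bootmem(struct nlm_boot_mem_map *map)
{
	int i;

	for (i = 0; i < map->nr_map; i++) {
		if (map->map[i].type != BOOT_MEM_RAM)	/* assumed encoding */
			continue;
		add_memory_region(map->map[i].addr, map->map[i].size,
				  BOOT_MEM_RAM);
	}
}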
diff --git a/arch/mips/include/asm/netlogic/xlr/gpio.h b/arch/mips/include/asm/netlogic/xlr/gpio.h new file mode 100644 index 000000000000..51f6ad4aeb14 --- /dev/null +++ b/arch/mips/include/asm/netlogic/xlr/gpio.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_GPIO_H | ||
36 | #define _ASM_NLM_GPIO_H | ||
37 | |||
38 | #define NETLOGIC_GPIO_INT_EN_REG 0 | ||
39 | #define NETLOGIC_GPIO_INPUT_INVERSION_REG 1 | ||
40 | #define NETLOGIC_GPIO_IO_DIR_REG 2 | ||
41 | #define NETLOGIC_GPIO_IO_DATA_WR_REG 3 | ||
42 | #define NETLOGIC_GPIO_IO_DATA_RD_REG 4 | ||
43 | |||
44 | #define NETLOGIC_GPIO_SWRESET_REG 8 | ||
45 | #define NETLOGIC_GPIO_DRAM1_CNTRL_REG 9 | ||
46 | #define NETLOGIC_GPIO_DRAM1_RATIO_REG 10 | ||
47 | #define NETLOGIC_GPIO_DRAM1_RESET_REG 11 | ||
48 | #define NETLOGIC_GPIO_DRAM1_STATUS_REG 12 | ||
49 | #define NETLOGIC_GPIO_DRAM2_CNTRL_REG 13 | ||
50 | #define NETLOGIC_GPIO_DRAM2_RATIO_REG 14 | ||
51 | #define NETLOGIC_GPIO_DRAM2_RESET_REG 15 | ||
52 | #define NETLOGIC_GPIO_DRAM2_STATUS_REG 16 | ||
53 | |||
54 | #define NETLOGIC_GPIO_PWRON_RESET_CFG_REG 21 | ||
55 | #define NETLOGIC_GPIO_BIST_ALL_GO_STATUS_REG 24 | ||
56 | #define NETLOGIC_GPIO_BIST_CPU_GO_STATUS_REG 25 | ||
57 | #define NETLOGIC_GPIO_BIST_DEV_GO_STATUS_REG 26 | ||
58 | |||
59 | #define NETLOGIC_GPIO_FUSE_BANK_REG 35 | ||
60 | #define NETLOGIC_GPIO_CPU_RESET_REG 40 | ||
61 | #define NETLOGIC_GPIO_RNG_REG 43 | ||
62 | |||
63 | #define NETLOGIC_PWRON_RESET_PCMCIA_BOOT 17 | ||
64 | #define NETLOGIC_GPIO_LED_BITMAP 0x1700000 | ||
65 | #define NETLOGIC_GPIO_LED_0_SHIFT 20 | ||
66 | #define NETLOGIC_GPIO_LED_1_SHIFT 24 | ||
67 | |||
68 | #define NETLOGIC_GPIO_LED_OUTPUT_CODE_RESET 0x01 | ||
69 | #define NETLOGIC_GPIO_LED_OUTPUT_CODE_HARD_RESET 0x02 | ||
70 | #define NETLOGIC_GPIO_LED_OUTPUT_CODE_SOFT_RESET 0x03 | ||
71 | #define NETLOGIC_GPIO_LED_OUTPUT_CODE_MAIN 0x04 | ||
72 | |||
73 | #endif | ||
diff --git a/arch/mips/include/asm/netlogic/xlr/iomap.h b/arch/mips/include/asm/netlogic/xlr/iomap.h new file mode 100644 index 000000000000..2e3a4dd53045 --- /dev/null +++ b/arch/mips/include/asm/netlogic/xlr/iomap.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_IOMAP_H | ||
36 | #define _ASM_NLM_IOMAP_H | ||
37 | |||
38 | #define DEFAULT_NETLOGIC_IO_BASE CKSEG1ADDR(0x1ef00000) | ||
39 | #define NETLOGIC_IO_DDR2_CHN0_OFFSET 0x01000 | ||
40 | #define NETLOGIC_IO_DDR2_CHN1_OFFSET 0x02000 | ||
41 | #define NETLOGIC_IO_DDR2_CHN2_OFFSET 0x03000 | ||
42 | #define NETLOGIC_IO_DDR2_CHN3_OFFSET 0x04000 | ||
43 | #define NETLOGIC_IO_PIC_OFFSET 0x08000 | ||
44 | #define NETLOGIC_IO_UART_0_OFFSET 0x14000 | ||
45 | #define NETLOGIC_IO_UART_1_OFFSET 0x15100 | ||
46 | |||
47 | #define NETLOGIC_IO_SIZE 0x1000 | ||
48 | |||
49 | #define NETLOGIC_IO_BRIDGE_OFFSET 0x00000 | ||
50 | |||
51 | #define NETLOGIC_IO_RLD2_CHN0_OFFSET 0x05000 | ||
52 | #define NETLOGIC_IO_RLD2_CHN1_OFFSET 0x06000 | ||
53 | |||
54 | #define NETLOGIC_IO_SRAM_OFFSET 0x07000 | ||
55 | |||
56 | #define NETLOGIC_IO_PCIX_OFFSET 0x09000 | ||
57 | #define NETLOGIC_IO_HT_OFFSET 0x0A000 | ||
58 | |||
59 | #define NETLOGIC_IO_SECURITY_OFFSET 0x0B000 | ||
60 | |||
61 | #define NETLOGIC_IO_GMAC_0_OFFSET 0x0C000 | ||
62 | #define NETLOGIC_IO_GMAC_1_OFFSET 0x0D000 | ||
63 | #define NETLOGIC_IO_GMAC_2_OFFSET 0x0E000 | ||
64 | #define NETLOGIC_IO_GMAC_3_OFFSET 0x0F000 | ||
65 | |||
66 | /* XLS devices */ | ||
67 | #define NETLOGIC_IO_GMAC_4_OFFSET 0x20000 | ||
68 | #define NETLOGIC_IO_GMAC_5_OFFSET 0x21000 | ||
69 | #define NETLOGIC_IO_GMAC_6_OFFSET 0x22000 | ||
70 | #define NETLOGIC_IO_GMAC_7_OFFSET 0x23000 | ||
71 | |||
72 | #define NETLOGIC_IO_PCIE_0_OFFSET 0x1E000 | ||
73 | #define NETLOGIC_IO_PCIE_1_OFFSET 0x1F000 | ||
74 | #define NETLOGIC_IO_SRIO_0_OFFSET 0x1E000 | ||
75 | #define NETLOGIC_IO_SRIO_1_OFFSET 0x1F000 | ||
76 | |||
77 | #define NETLOGIC_IO_USB_0_OFFSET 0x24000 | ||
78 | #define NETLOGIC_IO_USB_1_OFFSET 0x25000 | ||
79 | |||
80 | #define NETLOGIC_IO_COMP_OFFSET 0x1D000 | ||
81 | /* end XLS devices */ | ||
82 | |||
83 | /* XLR devices */ | ||
84 | #define NETLOGIC_IO_SPI4_0_OFFSET 0x10000 | ||
85 | #define NETLOGIC_IO_XGMAC_0_OFFSET 0x11000 | ||
86 | #define NETLOGIC_IO_SPI4_1_OFFSET 0x12000 | ||
87 | #define NETLOGIC_IO_XGMAC_1_OFFSET 0x13000 | ||
88 | /* end XLR devices */ | ||
89 | |||
90 | #define NETLOGIC_IO_I2C_0_OFFSET 0x16000 | ||
91 | #define NETLOGIC_IO_I2C_1_OFFSET 0x17000 | ||
92 | |||
93 | #define NETLOGIC_IO_GPIO_OFFSET 0x18000 | ||
94 | #define NETLOGIC_IO_FLASH_OFFSET 0x19000 | ||
95 | #define NETLOGIC_IO_TB_OFFSET 0x1C000 | ||
96 | |||
97 | #define NETLOGIC_CPLD_OFFSET KSEG1ADDR(0x1d840000) | ||
98 | |||
99 | /* | ||
100 | * Base Address (Virtual) of the PCI Config address space | ||
101 | * For now, choose 256M phys in kseg1 = 0xA0000000 + (1<<28) | ||
102 | * Config space spans 256 buses * 256 device/function combinations * 256 bytes, | ||
103 | * i.e. 1<<24 = 16M | ||
104 | */ | ||
105 | #define DEFAULT_PCI_CONFIG_BASE 0x18000000 | ||
106 | #define DEFAULT_HT_TYPE0_CFG_BASE 0x16000000 | ||
107 | #define DEFAULT_HT_TYPE1_CFG_BASE 0x17000000 | ||
108 | |||
109 | #ifndef __ASSEMBLY__ | ||
110 | #include <linux/types.h> | ||
111 | #include <asm/byteorder.h> | ||
112 | |||
113 | typedef volatile __u32 nlm_reg_t; | ||
114 | extern unsigned long netlogic_io_base; | ||
115 | |||
116 | /* FIXME read once in write_reg */ | ||
117 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
118 | #define netlogic_read_reg(base, offset) ((base)[(offset)]) | ||
119 | #define netlogic_write_reg(base, offset, value) ((base)[(offset)] = (value)) | ||
120 | #else | ||
121 | #define netlogic_read_reg(base, offset) (be32_to_cpu((base)[(offset)])) | ||
122 | #define netlogic_write_reg(base, offset, value) \ | ||
123 | ((base)[(offset)] = cpu_to_be32((value))) | ||
124 | #endif | ||
125 | |||
126 | #define netlogic_read_reg_le32(base, offset) (le32_to_cpu((base)[(offset)])) | ||
127 | #define netlogic_write_reg_le32(base, offset, value) \ | ||
128 | ((base)[(offset)] = cpu_to_le32((value))) | ||
129 | #define netlogic_io_mmio(offset) ((nlm_reg_t *)(netlogic_io_base+(offset))) | ||
130 | #endif /* __ASSEMBLY__ */ | ||
131 | #endif | ||
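The accessors above take a register index into a nlm_reg_t (u32) array rather than a byte offset, and on big-endian kernels they byte-swap so the little-endian device registers keep their natural layout; the 16 MB PCI config window likewise follows from 256 buses * 256 device/function combinations * 256 bytes = 1 << 24. A hedged usage sketch against the GPIO block, using register names from the gpio.h header added above:

#include <asm/netlogic/xlr/gpio.h>
#include <asm/netlogic/xlr/iomap.h>

/* sketch only: set output bits in the GPIO data-write register */
static void example_gpio_set_bits(u32 bits)
{
	nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET);
	u32 val = netlogic_read_reg(mmio, NETLOGIC_GPIO_IO_DATA_WR_REG);

	netlogic_write_reg(mmio, NETLOGIC_GPIO_IO_DATA_WR_REG, val | bits);
}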
diff --git a/arch/mips/include/asm/netlogic/xlr/pic.h b/arch/mips/include/asm/netlogic/xlr/pic.h new file mode 100644 index 000000000000..5cceb746f080 --- /dev/null +++ b/arch/mips/include/asm/netlogic/xlr/pic.h | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_XLR_PIC_H | ||
36 | #define _ASM_NLM_XLR_PIC_H | ||
37 | |||
38 | #define PIC_CLKS_PER_SEC 66666666ULL | ||
39 | /* PIC hardware interrupt numbers */ | ||
40 | #define PIC_IRT_WD_INDEX 0 | ||
41 | #define PIC_IRT_TIMER_0_INDEX 1 | ||
42 | #define PIC_IRT_TIMER_1_INDEX 2 | ||
43 | #define PIC_IRT_TIMER_2_INDEX 3 | ||
44 | #define PIC_IRT_TIMER_3_INDEX 4 | ||
45 | #define PIC_IRT_TIMER_4_INDEX 5 | ||
46 | #define PIC_IRT_TIMER_5_INDEX 6 | ||
47 | #define PIC_IRT_TIMER_6_INDEX 7 | ||
48 | #define PIC_IRT_TIMER_7_INDEX 8 | ||
49 | #define PIC_IRT_CLOCK_INDEX PIC_IRT_TIMER_7_INDEX | ||
50 | #define PIC_IRT_UART_0_INDEX 9 | ||
51 | #define PIC_IRT_UART_1_INDEX 10 | ||
52 | #define PIC_IRT_I2C_0_INDEX 11 | ||
53 | #define PIC_IRT_I2C_1_INDEX 12 | ||
54 | #define PIC_IRT_PCMCIA_INDEX 13 | ||
55 | #define PIC_IRT_GPIO_INDEX 14 | ||
56 | #define PIC_IRT_HYPER_INDEX 15 | ||
57 | #define PIC_IRT_PCIX_INDEX 16 | ||
58 | /* XLS */ | ||
59 | #define PIC_IRT_CDE_INDEX 15 | ||
60 | #define PIC_IRT_BRIDGE_TB_XLS_INDEX 16 | ||
61 | /* XLS */ | ||
62 | #define PIC_IRT_GMAC0_INDEX 17 | ||
63 | #define PIC_IRT_GMAC1_INDEX 18 | ||
64 | #define PIC_IRT_GMAC2_INDEX 19 | ||
65 | #define PIC_IRT_GMAC3_INDEX 20 | ||
66 | #define PIC_IRT_XGS0_INDEX 21 | ||
67 | #define PIC_IRT_XGS1_INDEX 22 | ||
68 | #define PIC_IRT_HYPER_FATAL_INDEX 23 | ||
69 | #define PIC_IRT_PCIX_FATAL_INDEX 24 | ||
70 | #define PIC_IRT_BRIDGE_AERR_INDEX 25 | ||
71 | #define PIC_IRT_BRIDGE_BERR_INDEX 26 | ||
72 | #define PIC_IRT_BRIDGE_TB_XLR_INDEX 27 | ||
73 | #define PIC_IRT_BRIDGE_AERR_NMI_INDEX 28 | ||
74 | /* XLS */ | ||
75 | #define PIC_IRT_GMAC4_INDEX 21 | ||
76 | #define PIC_IRT_GMAC5_INDEX 22 | ||
77 | #define PIC_IRT_GMAC6_INDEX 23 | ||
78 | #define PIC_IRT_GMAC7_INDEX 24 | ||
79 | #define PIC_IRT_BRIDGE_ERR_INDEX 25 | ||
80 | #define PIC_IRT_PCIE_LINK0_INDEX 26 | ||
81 | #define PIC_IRT_PCIE_LINK1_INDEX 27 | ||
82 | #define PIC_IRT_PCIE_LINK2_INDEX 23 | ||
83 | #define PIC_IRT_PCIE_LINK3_INDEX 24 | ||
84 | #define PIC_IRT_PCIE_XLSB0_LINK2_INDEX 28 | ||
85 | #define PIC_IRT_PCIE_XLSB0_LINK3_INDEX 29 | ||
86 | #define PIC_IRT_SRIO_LINK0_INDEX 26 | ||
87 | #define PIC_IRT_SRIO_LINK1_INDEX 27 | ||
88 | #define PIC_IRT_SRIO_LINK2_INDEX 28 | ||
89 | #define PIC_IRT_SRIO_LINK3_INDEX 29 | ||
90 | #define PIC_IRT_PCIE_INT_INDEX 28 | ||
91 | #define PIC_IRT_PCIE_FATAL_INDEX 29 | ||
92 | #define PIC_IRT_GPIO_B_INDEX 30 | ||
93 | #define PIC_IRT_USB_INDEX 31 | ||
94 | /* XLS */ | ||
95 | #define PIC_NUM_IRTS 32 | ||
96 | |||
97 | |||
98 | #define PIC_CLOCK_TIMER 7 | ||
99 | |||
100 | /* PIC Registers */ | ||
101 | #define PIC_CTRL 0x00 | ||
102 | #define PIC_IPI 0x04 | ||
103 | #define PIC_INT_ACK 0x06 | ||
104 | |||
105 | #define WD_MAX_VAL_0 0x08 | ||
106 | #define WD_MAX_VAL_1 0x09 | ||
107 | #define WD_MASK_0 0x0a | ||
108 | #define WD_MASK_1 0x0b | ||
109 | #define WD_HEARBEAT_0 0x0c | ||
110 | #define WD_HEARBEAT_1 0x0d | ||
111 | |||
112 | #define PIC_IRT_0_BASE 0x40 | ||
113 | #define PIC_IRT_1_BASE 0x80 | ||
114 | #define PIC_TIMER_MAXVAL_0_BASE 0x100 | ||
115 | #define PIC_TIMER_MAXVAL_1_BASE 0x110 | ||
116 | #define PIC_TIMER_COUNT_0_BASE 0x120 | ||
117 | #define PIC_TIMER_COUNT_1_BASE 0x130 | ||
118 | |||
119 | #define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr)) | ||
120 | #define PIC_IRT_1(picintr) (PIC_IRT_1_BASE + (picintr)) | ||
121 | |||
122 | #define PIC_TIMER_MAXVAL_0(i) (PIC_TIMER_MAXVAL_0_BASE + (i)) | ||
123 | #define PIC_TIMER_MAXVAL_1(i) (PIC_TIMER_MAXVAL_1_BASE + (i)) | ||
124 | #define PIC_TIMER_COUNT_0(i) (PIC_TIMER_COUNT_0_BASE + (i)) | ||
125 | #define PIC_TIMER_COUNT_1(i) (PIC_TIMER_COUNT_1_BASE + (i)) | ||
126 | |||
127 | /* | ||
128 | * Mapping between hardware interrupt numbers and IRQs on CPU | ||
129 | * we use a simple scheme to map PIC interrupts 0-31 to IRQs | ||
130 | * 8-39. This leaves the IRQ 0-7 for cpu interrupts like | ||
131 | * count/compare and FMN | ||
132 | */ | ||
133 | #define PIC_IRQ_BASE 8 | ||
134 | #define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i)) | ||
135 | #define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE) | ||
136 | |||
137 | #define PIC_IRT_FIRST_IRQ PIC_IRQ_BASE | ||
138 | #define PIC_WD_IRQ PIC_INTR_TO_IRQ(PIC_IRT_WD_INDEX) | ||
139 | #define PIC_TIMER_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_0_INDEX) | ||
140 | #define PIC_TIMER_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_1_INDEX) | ||
141 | #define PIC_TIMER_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_2_INDEX) | ||
142 | #define PIC_TIMER_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_3_INDEX) | ||
143 | #define PIC_TIMER_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_4_INDEX) | ||
144 | #define PIC_TIMER_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_5_INDEX) | ||
145 | #define PIC_TIMER_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_6_INDEX) | ||
146 | #define PIC_TIMER_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_7_INDEX) | ||
147 | #define PIC_CLOCK_IRQ (PIC_TIMER_7_IRQ) | ||
148 | #define PIC_UART_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_0_INDEX) | ||
149 | #define PIC_UART_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_1_INDEX) | ||
150 | #define PIC_I2C_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_0_INDEX) | ||
151 | #define PIC_I2C_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_1_INDEX) | ||
152 | #define PIC_PCMCIA_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCMCIA_INDEX) | ||
153 | #define PIC_GPIO_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_INDEX) | ||
154 | #define PIC_HYPER_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_INDEX) | ||
155 | #define PIC_PCIX_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_INDEX) | ||
156 | /* XLS */ | ||
157 | #define PIC_CDE_IRQ PIC_INTR_TO_IRQ(PIC_IRT_CDE_INDEX) | ||
158 | #define PIC_BRIDGE_TB_XLS_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLS_INDEX) | ||
159 | /* end XLS */ | ||
160 | #define PIC_GMAC_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC0_INDEX) | ||
161 | #define PIC_GMAC_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC1_INDEX) | ||
162 | #define PIC_GMAC_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC2_INDEX) | ||
163 | #define PIC_GMAC_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC3_INDEX) | ||
164 | #define PIC_XGS_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS0_INDEX) | ||
165 | #define PIC_XGS_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS1_INDEX) | ||
166 | #define PIC_HYPER_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_FATAL_INDEX) | ||
167 | #define PIC_PCIX_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_FATAL_INDEX) | ||
168 | #define PIC_BRIDGE_AERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_INDEX) | ||
169 | #define PIC_BRIDGE_BERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_BERR_INDEX) | ||
170 | #define PIC_BRIDGE_TB_XLR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLR_INDEX) | ||
171 | #define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX) | ||
172 | /* XLS defines */ | ||
173 | #define PIC_GMAC_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC4_INDEX) | ||
174 | #define PIC_GMAC_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC5_INDEX) | ||
175 | #define PIC_GMAC_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC6_INDEX) | ||
176 | #define PIC_GMAC_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC7_INDEX) | ||
177 | #define PIC_BRIDGE_ERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_ERR_INDEX) | ||
178 | #define PIC_PCIE_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK0_INDEX) | ||
179 | #define PIC_PCIE_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK1_INDEX) | ||
180 | #define PIC_PCIE_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK2_INDEX) | ||
181 | #define PIC_PCIE_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK3_INDEX) | ||
182 | #define PIC_PCIE_XLSB0_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK2_INDEX) | ||
183 | #define PIC_PCIE_XLSB0_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK3_INDEX) | ||
184 | #define PIC_SRIO_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK0_INDEX) | ||
185 | #define PIC_SRIO_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK1_INDEX) | ||
186 | #define PIC_SRIO_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK2_INDEX) | ||
187 | #define PIC_SRIO_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK3_INDEX) | ||
188 | #define PIC_PCIE_INT_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_INT_INDEX) | ||
189 | #define PIC_PCIE_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_FATAL_INDEX) | ||
190 | #define PIC_GPIO_B_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_B_INDEX) | ||
191 | #define PIC_USB_IRQ PIC_INTR_TO_IRQ(PIC_IRT_USB_INDEX) | ||
192 | #define PIC_IRT_LAST_IRQ PIC_USB_IRQ | ||
193 | /* end XLS */ | ||
194 | |||
195 | #ifndef __ASSEMBLY__ | ||
196 | static inline void pic_send_ipi(u32 ipi) | ||
197 | { | ||
198 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
199 | |||
200 | netlogic_write_reg(mmio, PIC_IPI, ipi); | ||
201 | } | ||
202 | |||
203 | static inline u32 pic_read_control(void) | ||
204 | { | ||
205 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
206 | |||
207 | return netlogic_read_reg(mmio, PIC_CTRL); | ||
208 | } | ||
209 | |||
210 | static inline void pic_write_control(u32 control) | ||
211 | { | ||
212 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
213 | |||
214 | netlogic_write_reg(mmio, PIC_CTRL, control); | ||
215 | } | ||
216 | |||
217 | static inline void pic_update_control(u32 control) | ||
218 | { | ||
219 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
220 | |||
221 | netlogic_write_reg(mmio, PIC_CTRL, | ||
222 | (control | netlogic_read_reg(mmio, PIC_CTRL))); | ||
223 | } | ||
224 | |||
225 | #define PIC_IRQ_IS_EDGE_TRIGGERED(irq) (((irq) >= PIC_TIMER_0_IRQ) && \ | ||
226 | ((irq) <= PIC_TIMER_7_IRQ)) | ||
227 | #define PIC_IRQ_IS_IRT(irq) (((irq) >= PIC_IRT_FIRST_IRQ) && \ | ||
228 | ((irq) <= PIC_IRT_LAST_IRQ)) | ||
229 | #endif | ||
230 | |||
231 | #endif /* _ASM_NLM_XLR_PIC_H */ | ||
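With PIC_IRQ_BASE at 8, PIC input n maps to Linux IRQ n + 8: UART0, for instance, sits at IRT index 9 and therefore arrives as IRQ 17, while IRQs 0-7 stay reserved for the CPU's count/compare and FMN interrupts. A hedged sketch of an ack path built on the helpers above; treating PIC_INT_ACK as a bitmask write is an assumption of the sketch:

/* sketch only: ack a level-triggered IRT source; edge (timer) sources skip it */
static void example_pic_ack(int irq)
{
	nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET);

	if (PIC_IRQ_IS_IRT(irq) && !PIC_IRQ_IS_EDGE_TRIGGERED(irq))
		netlogic_write_reg(mmio, PIC_INT_ACK,
				   1 << PIC_IRQ_TO_INTR(irq));
}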
diff --git a/arch/mips/include/asm/netlogic/xlr/xlr.h b/arch/mips/include/asm/netlogic/xlr/xlr.h new file mode 100644 index 000000000000..3e6372692a04 --- /dev/null +++ b/arch/mips/include/asm/netlogic/xlr/xlr.h | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _ASM_NLM_XLR_H | ||
36 | #define _ASM_NLM_XLR_H | ||
37 | |||
38 | /* Platform UART functions */ | ||
39 | struct uart_port; | ||
40 | unsigned int nlm_xlr_uart_in(struct uart_port *, int); | ||
41 | void nlm_xlr_uart_out(struct uart_port *, int, int); | ||
42 | |||
43 | /* SMP support functions */ | ||
44 | struct irq_desc; | ||
45 | void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc); | ||
46 | void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc); | ||
47 | int nlm_wakeup_secondary_cpus(u32 wakeup_mask); | ||
48 | void nlm_smp_irq_init(void); | ||
49 | void nlm_boot_smp_nmi(void); | ||
50 | void prom_pre_boot_secondary_cpus(void); | ||
51 | |||
52 | extern struct plat_smp_ops nlm_smp_ops; | ||
53 | extern unsigned long nlm_common_ebase; | ||
54 | |||
55 | /* XLS B silicon "Rook" */ | ||
56 | static inline unsigned int nlm_chip_is_xls_b(void) | ||
57 | { | ||
58 | uint32_t prid = read_c0_prid(); | ||
59 | |||
60 | return ((prid & 0xf000) == 0x4000); | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * XLR chip types | ||
65 | */ | ||
66 | /* The XLS product line has chip versions with PrId bits [15:12] of 4, 8 or c */ | ||
67 | static inline unsigned int nlm_chip_is_xls(void) | ||
68 | { | ||
69 | uint32_t prid = read_c0_prid(); | ||
70 | |||
71 | return ((prid & 0xf000) == 0x8000 || (prid & 0xf000) == 0x4000 || | ||
72 | (prid & 0xf000) == 0xc000); | ||
73 | } | ||
74 | |||
75 | #endif /* _ASM_NLM_XLR_H */ | ||
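Both helpers key off bits [15:12] of the CP0 PrId register: a hypothetical part with (prid & 0xf000) == 0x8000 makes nlm_chip_is_xls() return 1 while nlm_chip_is_xls_b() returns 0, since only the 0x4 variants are the "Rook" B-step silicon; any value outside 4, 8 and c in that nibble is treated as a plain XLR.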
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index 9f1b8dba2c81..de39b1f343ea 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h | |||
@@ -141,7 +141,8 @@ extern int ptrace_set_watch_regs(struct task_struct *child, | |||
141 | #define instruction_pointer(regs) ((regs)->cp0_epc) | 141 | #define instruction_pointer(regs) ((regs)->cp0_epc) |
142 | #define profile_pc(regs) instruction_pointer(regs) | 142 | #define profile_pc(regs) instruction_pointer(regs) |
143 | 143 | ||
144 | extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); | 144 | extern asmlinkage void syscall_trace_enter(struct pt_regs *regs); |
145 | extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); | ||
145 | 146 | ||
146 | extern NORET_TYPE void die(const char *, struct pt_regs *) ATTRIB_NORET; | 147 | extern NORET_TYPE void die(const char *, struct pt_regs *) ATTRIB_NORET; |
147 | 148 | ||
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index d71160de4d10..97f8bf6639e7 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
@@ -149,6 +149,9 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
149 | #define _TIF_FPUBOUND (1<<TIF_FPUBOUND) | 149 | #define _TIF_FPUBOUND (1<<TIF_FPUBOUND) |
150 | #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) | 150 | #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) |
151 | 151 | ||
152 | /* work to do in syscall_trace_leave() */ | ||
153 | #define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT) | ||
154 | |||
152 | /* work to do on interrupt/exception return */ | 155 | /* work to do on interrupt/exception return */ |
153 | #define _TIF_WORK_MASK (0x0000ffef & \ | 156 | #define _TIF_WORK_MASK (0x0000ffef & \ |
154 | ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) | 157 | ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) |
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index 9ce9f64cb76f..2d8e447cb828 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c | |||
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(vdma_free); | |||
211 | */ | 211 | */ |
212 | int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) | 212 | int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) |
213 | { | 213 | { |
214 | int first, pages, npages; | 214 | int first, pages; |
215 | 215 | ||
216 | if (laddr > 0xffffff) { | 216 | if (laddr > 0xffffff) { |
217 | if (vdma_debug) | 217 | if (vdma_debug) |
@@ -228,8 +228,7 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) | |||
228 | return -EINVAL; /* invalid physical address */ | 228 | return -EINVAL; /* invalid physical address */ |
229 | } | 229 | } |
230 | 230 | ||
231 | npages = pages = | 231 | pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; |
232 | (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; | ||
233 | first = laddr >> 12; | 232 | first = laddr >> 12; |
234 | if (vdma_debug) | 233 | if (vdma_debug) |
235 | printk("vdma_remap: first=%x, pages=%x\n", first, pages); | 234 | printk("vdma_remap: first=%x, pages=%x\n", first, pages); |
diff --git a/arch/mips/jz4740/dma.c b/arch/mips/jz4740/dma.c index 5ebe75a68350..d7feb898692c 100644 --- a/arch/mips/jz4740/dma.c +++ b/arch/mips/jz4740/dma.c | |||
@@ -242,9 +242,7 @@ EXPORT_SYMBOL_GPL(jz4740_dma_get_residue); | |||
242 | 242 | ||
243 | static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) | 243 | static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) |
244 | { | 244 | { |
245 | uint32_t status; | 245 | (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); |
246 | |||
247 | status = jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); | ||
248 | 246 | ||
249 | jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, | 247 | jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, |
250 | JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); | 248 | JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); |
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c index 6a9e14dab91e..d97cfbf882f5 100644 --- a/arch/mips/jz4740/setup.c +++ b/arch/mips/jz4740/setup.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> | 2 | * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> |
3 | * Copyright (C) 2011, Maarten ter Huurne <maarten@treewalker.org> | ||
3 | * JZ4740 setup code | 4 | * JZ4740 setup code |
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
@@ -14,13 +15,44 @@ | |||
14 | */ | 15 | */ |
15 | 16 | ||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/io.h> | ||
17 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
18 | 20 | ||
21 | #include <asm/bootinfo.h> | ||
22 | |||
23 | #include <asm/mach-jz4740/base.h> | ||
24 | |||
19 | #include "reset.h" | 25 | #include "reset.h" |
20 | 26 | ||
27 | |||
28 | #define JZ4740_EMC_SDRAM_CTRL 0x80 | ||
29 | |||
30 | |||
31 | static void __init jz4740_detect_mem(void) | ||
32 | { | ||
33 | void __iomem *jz_emc_base; | ||
34 | u32 ctrl, bus, bank, rows, cols; | ||
35 | phys_t size; | ||
36 | |||
37 | jz_emc_base = ioremap(JZ4740_EMC_BASE_ADDR, 0x100); | ||
38 | ctrl = readl(jz_emc_base + JZ4740_EMC_SDRAM_CTRL); | ||
39 | bus = 2 - ((ctrl >> 31) & 1); | ||
40 | bank = 1 + ((ctrl >> 19) & 1); | ||
41 | cols = 8 + ((ctrl >> 26) & 7); | ||
42 | rows = 11 + ((ctrl >> 20) & 3); | ||
43 | printk(KERN_DEBUG | ||
44 | "SDRAM preconfigured: bus:%u bank:%u rows:%u cols:%u\n", | ||
45 | bus, bank, rows, cols); | ||
46 | iounmap(jz_emc_base); | ||
47 | |||
48 | size = 1 << (bus + bank + cols + rows); | ||
49 | add_memory_region(0, size, BOOT_MEM_RAM); | ||
50 | } | ||
51 | |||
21 | void __init plat_mem_setup(void) | 52 | void __init plat_mem_setup(void) |
22 | { | 53 | { |
23 | jz4740_reset_init(); | 54 | jz4740_reset_init(); |
55 | jz4740_detect_mem(); | ||
24 | } | 56 | } |
25 | 57 | ||
26 | const char *get_system_type(void) | 58 | const char *get_system_type(void) |
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c index fe01678d94fd..eaa853a54af6 100644 --- a/arch/mips/jz4740/time.c +++ b/arch/mips/jz4740/time.c | |||
@@ -89,7 +89,7 @@ static int jz4740_clockevent_set_next(unsigned long evt, | |||
89 | 89 | ||
90 | static struct clock_event_device jz4740_clockevent = { | 90 | static struct clock_event_device jz4740_clockevent = { |
91 | .name = "jz4740-timer", | 91 | .name = "jz4740-timer", |
92 | .features = CLOCK_EVT_FEAT_PERIODIC, | 92 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
93 | .set_next_event = jz4740_clockevent_set_next, | 93 | .set_next_event = jz4740_clockevent_set_next, |
94 | .set_mode = jz4740_clockevent_set_mode, | 94 | .set_mode = jz4740_clockevent_set_mode, |
95 | .rating = 200, | 95 | .rating = 200, |
diff --git a/arch/mips/jz4740/timer.c b/arch/mips/jz4740/timer.c index b2c015129055..654d5c3900b6 100644 --- a/arch/mips/jz4740/timer.c +++ b/arch/mips/jz4740/timer.c | |||
@@ -27,11 +27,13 @@ void jz4740_timer_enable_watchdog(void) | |||
27 | { | 27 | { |
28 | writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); | 28 | writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); |
29 | } | 29 | } |
30 | EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog); | ||
30 | 31 | ||
31 | void jz4740_timer_disable_watchdog(void) | 32 | void jz4740_timer_disable_watchdog(void) |
32 | { | 33 | { |
33 | writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); | 34 | writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); |
34 | } | 35 | } |
36 | EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog); | ||
35 | 37 | ||
36 | void __init jz4740_timer_init(void) | 38 | void __init jz4740_timer_init(void) |
37 | { | 39 | { |
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index cedee2bcbd18..83bba332bbfc 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -52,6 +52,7 @@ obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o | |||
52 | obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o | 52 | obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o |
53 | obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o | 53 | obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o |
54 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o | 54 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o |
55 | obj-$(CONFIG_CPU_XLR) += r4k_fpu.o r4k_switch.o | ||
55 | 56 | ||
56 | obj-$(CONFIG_SMP) += smp.o | 57 | obj-$(CONFIG_SMP) += smp.o |
57 | obj-$(CONFIG_SMP_UP) += smp-up.o | 58 | obj-$(CONFIG_SMP_UP) += smp-up.o |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index f65d4c8c65a6..bb133d10b145 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -291,6 +291,12 @@ static inline int cpu_has_confreg(void) | |||
291 | #endif | 291 | #endif |
292 | } | 292 | } |
293 | 293 | ||
294 | static inline void set_elf_platform(int cpu, const char *plat) | ||
295 | { | ||
296 | if (cpu == 0) | ||
297 | __elf_platform = plat; | ||
298 | } | ||
299 | |||
294 | /* | 300 | /* |
295 | * Get the FPU Implementation/Revision. | 301 | * Get the FPU Implementation/Revision. |
296 | */ | 302 | */ |
@@ -614,6 +620,16 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) | |||
614 | case PRID_IMP_LOONGSON2: | 620 | case PRID_IMP_LOONGSON2: |
615 | c->cputype = CPU_LOONGSON2; | 621 | c->cputype = CPU_LOONGSON2; |
616 | __cpu_name[cpu] = "ICT Loongson-2"; | 622 | __cpu_name[cpu] = "ICT Loongson-2"; |
623 | |||
624 | switch (c->processor_id & PRID_REV_MASK) { | ||
625 | case PRID_REV_LOONGSON2E: | ||
626 | set_elf_platform(cpu, "loongson2e"); | ||
627 | break; | ||
628 | case PRID_REV_LOONGSON2F: | ||
629 | set_elf_platform(cpu, "loongson2f"); | ||
630 | break; | ||
631 | } | ||
632 | |||
617 | c->isa_level = MIPS_CPU_ISA_III; | 633 | c->isa_level = MIPS_CPU_ISA_III; |
618 | c->options = R4K_OPTS | | 634 | c->options = R4K_OPTS | |
619 | MIPS_CPU_FPU | MIPS_CPU_LLSC | | 635 | MIPS_CPU_FPU | MIPS_CPU_LLSC | |
@@ -911,12 +927,14 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) | |||
911 | case PRID_IMP_BMIPS32_REV8: | 927 | case PRID_IMP_BMIPS32_REV8: |
912 | c->cputype = CPU_BMIPS32; | 928 | c->cputype = CPU_BMIPS32; |
913 | __cpu_name[cpu] = "Broadcom BMIPS32"; | 929 | __cpu_name[cpu] = "Broadcom BMIPS32"; |
930 | set_elf_platform(cpu, "bmips32"); | ||
914 | break; | 931 | break; |
915 | case PRID_IMP_BMIPS3300: | 932 | case PRID_IMP_BMIPS3300: |
916 | case PRID_IMP_BMIPS3300_ALT: | 933 | case PRID_IMP_BMIPS3300_ALT: |
917 | case PRID_IMP_BMIPS3300_BUG: | 934 | case PRID_IMP_BMIPS3300_BUG: |
918 | c->cputype = CPU_BMIPS3300; | 935 | c->cputype = CPU_BMIPS3300; |
919 | __cpu_name[cpu] = "Broadcom BMIPS3300"; | 936 | __cpu_name[cpu] = "Broadcom BMIPS3300"; |
937 | set_elf_platform(cpu, "bmips3300"); | ||
920 | break; | 938 | break; |
921 | case PRID_IMP_BMIPS43XX: { | 939 | case PRID_IMP_BMIPS43XX: { |
922 | int rev = c->processor_id & 0xff; | 940 | int rev = c->processor_id & 0xff; |
@@ -925,15 +943,18 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) | |||
925 | rev <= PRID_REV_BMIPS4380_HI) { | 943 | rev <= PRID_REV_BMIPS4380_HI) { |
926 | c->cputype = CPU_BMIPS4380; | 944 | c->cputype = CPU_BMIPS4380; |
927 | __cpu_name[cpu] = "Broadcom BMIPS4380"; | 945 | __cpu_name[cpu] = "Broadcom BMIPS4380"; |
946 | set_elf_platform(cpu, "bmips4380"); | ||
928 | } else { | 947 | } else { |
929 | c->cputype = CPU_BMIPS4350; | 948 | c->cputype = CPU_BMIPS4350; |
930 | __cpu_name[cpu] = "Broadcom BMIPS4350"; | 949 | __cpu_name[cpu] = "Broadcom BMIPS4350"; |
950 | set_elf_platform(cpu, "bmips4350"); | ||
931 | } | 951 | } |
932 | break; | 952 | break; |
933 | } | 953 | } |
934 | case PRID_IMP_BMIPS5000: | 954 | case PRID_IMP_BMIPS5000: |
935 | c->cputype = CPU_BMIPS5000; | 955 | c->cputype = CPU_BMIPS5000; |
936 | __cpu_name[cpu] = "Broadcom BMIPS5000"; | 956 | __cpu_name[cpu] = "Broadcom BMIPS5000"; |
957 | set_elf_platform(cpu, "bmips5000"); | ||
937 | c->options |= MIPS_CPU_ULRI; | 958 | c->options |= MIPS_CPU_ULRI; |
938 | break; | 959 | break; |
939 | } | 960 | } |
@@ -956,14 +977,12 @@ static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) | |||
956 | c->cputype = CPU_CAVIUM_OCTEON_PLUS; | 977 | c->cputype = CPU_CAVIUM_OCTEON_PLUS; |
957 | __cpu_name[cpu] = "Cavium Octeon+"; | 978 | __cpu_name[cpu] = "Cavium Octeon+"; |
958 | platform: | 979 | platform: |
959 | if (cpu == 0) | 980 | set_elf_platform(cpu, "octeon"); |
960 | __elf_platform = "octeon"; | ||
961 | break; | 981 | break; |
962 | case PRID_IMP_CAVIUM_CN63XX: | 982 | case PRID_IMP_CAVIUM_CN63XX: |
963 | c->cputype = CPU_CAVIUM_OCTEON2; | 983 | c->cputype = CPU_CAVIUM_OCTEON2; |
964 | __cpu_name[cpu] = "Cavium Octeon II"; | 984 | __cpu_name[cpu] = "Cavium Octeon II"; |
965 | if (cpu == 0) | 985 | set_elf_platform(cpu, "octeon2"); |
966 | __elf_platform = "octeon2"; | ||
967 | break; | 986 | break; |
968 | default: | 987 | default: |
969 | printk(KERN_INFO "Unknown Octeon chip!\n"); | 988 | printk(KERN_INFO "Unknown Octeon chip!\n"); |
@@ -988,6 +1007,59 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) | |||
988 | } | 1007 | } |
989 | } | 1008 | } |
990 | 1009 | ||
1010 | static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) | ||
1011 | { | ||
1012 | decode_configs(c); | ||
1013 | |||
1014 | c->options = (MIPS_CPU_TLB | | ||
1015 | MIPS_CPU_4KEX | | ||
1016 | MIPS_CPU_COUNTER | | ||
1017 | MIPS_CPU_DIVEC | | ||
1018 | MIPS_CPU_WATCH | | ||
1019 | MIPS_CPU_EJTAG | | ||
1020 | MIPS_CPU_LLSC); | ||
1021 | |||
1022 | switch (c->processor_id & 0xff00) { | ||
1023 | case PRID_IMP_NETLOGIC_XLR732: | ||
1024 | case PRID_IMP_NETLOGIC_XLR716: | ||
1025 | case PRID_IMP_NETLOGIC_XLR532: | ||
1026 | case PRID_IMP_NETLOGIC_XLR308: | ||
1027 | case PRID_IMP_NETLOGIC_XLR532C: | ||
1028 | case PRID_IMP_NETLOGIC_XLR516C: | ||
1029 | case PRID_IMP_NETLOGIC_XLR508C: | ||
1030 | case PRID_IMP_NETLOGIC_XLR308C: | ||
1031 | c->cputype = CPU_XLR; | ||
1032 | __cpu_name[cpu] = "Netlogic XLR"; | ||
1033 | break; | ||
1034 | |||
1035 | case PRID_IMP_NETLOGIC_XLS608: | ||
1036 | case PRID_IMP_NETLOGIC_XLS408: | ||
1037 | case PRID_IMP_NETLOGIC_XLS404: | ||
1038 | case PRID_IMP_NETLOGIC_XLS208: | ||
1039 | case PRID_IMP_NETLOGIC_XLS204: | ||
1040 | case PRID_IMP_NETLOGIC_XLS108: | ||
1041 | case PRID_IMP_NETLOGIC_XLS104: | ||
1042 | case PRID_IMP_NETLOGIC_XLS616B: | ||
1043 | case PRID_IMP_NETLOGIC_XLS608B: | ||
1044 | case PRID_IMP_NETLOGIC_XLS416B: | ||
1045 | case PRID_IMP_NETLOGIC_XLS412B: | ||
1046 | case PRID_IMP_NETLOGIC_XLS408B: | ||
1047 | case PRID_IMP_NETLOGIC_XLS404B: | ||
1048 | c->cputype = CPU_XLR; | ||
1049 | __cpu_name[cpu] = "Netlogic XLS"; | ||
1050 | break; | ||
1051 | |||
1052 | default: | ||
1053 | printk(KERN_INFO "Unknown Netlogic chip id [%02x]!\n", | ||
1054 | c->processor_id); | ||
1055 | c->cputype = CPU_XLR; | ||
1056 | break; | ||
1057 | } | ||
1058 | |||
1059 | c->isa_level = MIPS_CPU_ISA_M64R1; | ||
1060 | c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; | ||
1061 | } | ||
1062 | |||
991 | #ifdef CONFIG_64BIT | 1063 | #ifdef CONFIG_64BIT |
992 | /* For use by uaccess.h */ | 1064 | /* For use by uaccess.h */ |
993 | u64 __ua_limit; | 1065 | u64 __ua_limit; |
@@ -1035,6 +1107,9 @@ __cpuinit void cpu_probe(void) | |||
1035 | case PRID_COMP_INGENIC: | 1107 | case PRID_COMP_INGENIC: |
1036 | cpu_probe_ingenic(c, cpu); | 1108 | cpu_probe_ingenic(c, cpu); |
1037 | break; | 1109 | break; |
1110 | case PRID_COMP_NETLOGIC: | ||
1111 | cpu_probe_netlogic(c, cpu); | ||
1112 | break; | ||
1038 | } | 1113 | } |
1039 | 1114 | ||
1040 | BUG_ON(!__cpu_name[cpu]); | 1115 | BUG_ON(!__cpu_name[cpu]); |
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index ffa331029e08..37acfa036d44 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S | |||
@@ -167,14 +167,13 @@ work_notifysig: # deal with pending signals and | |||
167 | FEXPORT(syscall_exit_work_partial) | 167 | FEXPORT(syscall_exit_work_partial) |
168 | SAVE_STATIC | 168 | SAVE_STATIC |
169 | syscall_exit_work: | 169 | syscall_exit_work: |
170 | li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | 170 | li t0, _TIF_WORK_SYSCALL_EXIT |
171 | and t0, a2 # a2 is preloaded with TI_FLAGS | 171 | and t0, a2 # a2 is preloaded with TI_FLAGS |
172 | beqz t0, work_pending # trace bit set? | 172 | beqz t0, work_pending # trace bit set? |
173 | local_irq_enable # could let do_syscall_trace() | 173 | local_irq_enable # could let syscall_trace_leave() |
174 | # call schedule() instead | 174 | # call schedule() instead |
175 | move a0, sp | 175 | move a0, sp |
176 | li a1, 1 | 176 | jal syscall_trace_leave |
177 | jal do_syscall_trace | ||
178 | b resume_userspace | 177 | b resume_userspace |
179 | 178 | ||
180 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) | 179 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) |
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 94ca2b018af7..feb8021a305f 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ | 24 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ |
25 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ | 25 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ |
26 | #define JUMP_RANGE_MASK ((1UL << 28) - 1) | ||
26 | 27 | ||
27 | #define INSN_NOP 0x00000000 /* nop */ | 28 | #define INSN_NOP 0x00000000 /* nop */ |
28 | #define INSN_JAL(addr) \ | 29 | #define INSN_JAL(addr) \ |
@@ -44,12 +45,12 @@ static inline void ftrace_dyn_arch_init_insns(void) | |||
44 | 45 | ||
45 | /* jal (ftrace_caller + 8), jump over the first two instructions */ | 46 | /* jal (ftrace_caller + 8), jump over the first two instructions */ |
46 | buf = (u32 *)&insn_jal_ftrace_caller; | 47 | buf = (u32 *)&insn_jal_ftrace_caller; |
47 | uasm_i_jal(&buf, (FTRACE_ADDR + 8)); | 48 | uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK); |
48 | 49 | ||
49 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 50 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
50 | /* j ftrace_graph_caller */ | 51 | /* j ftrace_graph_caller */ |
51 | buf = (u32 *)&insn_j_ftrace_graph_caller; | 52 | buf = (u32 *)&insn_j_ftrace_graph_caller; |
52 | uasm_i_j(&buf, (unsigned long)ftrace_graph_caller); | 53 | uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK); |
53 | #endif | 54 | #endif |
54 | } | 55 | } |
55 | 56 | ||
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index d21c388c0116..4e6ea1ffad46 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
@@ -533,15 +533,10 @@ static inline int audit_arch(void) | |||
533 | * Notification of system call entry/exit | 533 | * Notification of system call entry/exit |
534 | * - triggered by current->work.syscall_trace | 534 | * - triggered by current->work.syscall_trace |
535 | */ | 535 | */ |
536 | asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) | 536 | asmlinkage void syscall_trace_enter(struct pt_regs *regs) |
537 | { | 537 | { |
538 | /* do the secure computing check first */ | 538 | /* do the secure computing check first */ |
539 | if (!entryexit) | 539 | secure_computing(regs->regs[2]); |
540 | secure_computing(regs->regs[2]); | ||
541 | |||
542 | if (unlikely(current->audit_context) && entryexit) | ||
543 | audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), | ||
544 | regs->regs[2]); | ||
545 | 540 | ||
546 | if (!(current->ptrace & PT_PTRACED)) | 541 | if (!(current->ptrace & PT_PTRACED)) |
547 | goto out; | 542 | goto out; |
@@ -565,8 +560,40 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) | |||
565 | } | 560 | } |
566 | 561 | ||
567 | out: | 562 | out: |
568 | if (unlikely(current->audit_context) && !entryexit) | 563 | if (unlikely(current->audit_context)) |
569 | audit_syscall_entry(audit_arch(), regs->regs[2], | 564 | audit_syscall_entry(audit_arch(), regs->regs[2], |
570 | regs->regs[4], regs->regs[5], | 565 | regs->regs[4], regs->regs[5], |
571 | regs->regs[6], regs->regs[7]); | 566 | regs->regs[6], regs->regs[7]); |
572 | } | 567 | } |
568 | |||
569 | /* | ||
570 | * Notification of system call entry/exit | ||
571 | * - triggered by current->work.syscall_trace | ||
572 | */ | ||
573 | asmlinkage void syscall_trace_leave(struct pt_regs *regs) | ||
574 | { | ||
575 | if (unlikely(current->audit_context)) | ||
576 | audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]), | ||
577 | -regs->regs[2]); | ||
578 | |||
579 | if (!(current->ptrace & PT_PTRACED)) | ||
580 | return; | ||
581 | |||
582 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
583 | return; | ||
584 | |||
585 | /* The 0x80 provides a way for the tracing parent to distinguish | ||
586 | between a syscall stop and SIGTRAP delivery */ | ||
587 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? | ||
588 | 0x80 : 0)); | ||
589 | |||
590 | /* | ||
591 | * this isn't the same as continuing with a signal, but it will do | ||
592 | * for normal use. strace only continues with a signal if the | ||
593 | * stopping signal is not SIGTRAP. -brl | ||
594 | */ | ||
595 | if (current->exit_code) { | ||
596 | send_sig(current->exit_code, current, 1); | ||
597 | current->exit_code = 0; | ||
598 | } | ||
599 | } | ||
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 7f5468b38d4c..7a8e1dd7f6f2 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -88,8 +88,7 @@ syscall_trace_entry: | |||
88 | SAVE_STATIC | 88 | SAVE_STATIC |
89 | move s0, t2 | 89 | move s0, t2 |
90 | move a0, sp | 90 | move a0, sp |
91 | li a1, 0 | 91 | jal syscall_trace_enter |
92 | jal do_syscall_trace | ||
93 | 92 | ||
94 | move t0, s0 | 93 | move t0, s0 |
95 | RESTORE_STATIC | 94 | RESTORE_STATIC |
@@ -565,7 +564,7 @@ einval: li v0, -ENOSYS | |||
565 | sys sys_ioprio_get 2 /* 4315 */ | 564 | sys sys_ioprio_get 2 /* 4315 */ |
566 | sys sys_utimensat 4 | 565 | sys sys_utimensat 4 |
567 | sys sys_signalfd 3 | 566 | sys sys_signalfd 3 |
568 | sys sys_ni_syscall 0 | 567 | sys sys_ni_syscall 0 /* was timerfd */ |
569 | sys sys_eventfd 1 | 568 | sys sys_eventfd 1 |
570 | sys sys_fallocate 6 /* 4320 */ | 569 | sys sys_fallocate 6 /* 4320 */ |
571 | sys sys_timerfd_create 2 | 570 | sys sys_timerfd_create 2 |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a2e1fcbc41dc..2d31c83224f9 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -91,8 +91,7 @@ syscall_trace_entry: | |||
91 | SAVE_STATIC | 91 | SAVE_STATIC |
92 | move s0, t2 | 92 | move s0, t2 |
93 | move a0, sp | 93 | move a0, sp |
94 | li a1, 0 | 94 | jal syscall_trace_enter |
95 | jal do_syscall_trace | ||
96 | 95 | ||
97 | move t0, s0 | 96 | move t0, s0 |
98 | RESTORE_STATIC | 97 | RESTORE_STATIC |
@@ -404,7 +403,7 @@ sys_call_table: | |||
404 | PTR sys_ioprio_get | 403 | PTR sys_ioprio_get |
405 | PTR sys_utimensat /* 5275 */ | 404 | PTR sys_utimensat /* 5275 */ |
406 | PTR sys_signalfd | 405 | PTR sys_signalfd |
407 | PTR sys_ni_syscall | 406 | PTR sys_ni_syscall /* was timerfd */ |
408 | PTR sys_eventfd | 407 | PTR sys_eventfd |
409 | PTR sys_fallocate | 408 | PTR sys_fallocate |
410 | PTR sys_timerfd_create /* 5280 */ | 409 | PTR sys_timerfd_create /* 5280 */ |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index b2c7624995b8..38a0503b9a4a 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -89,8 +89,7 @@ n32_syscall_trace_entry: | |||
89 | SAVE_STATIC | 89 | SAVE_STATIC |
90 | move s0, t2 | 90 | move s0, t2 |
91 | move a0, sp | 91 | move a0, sp |
92 | li a1, 0 | 92 | jal syscall_trace_enter |
93 | jal do_syscall_trace | ||
94 | 93 | ||
95 | move t0, s0 | 94 | move t0, s0 |
96 | RESTORE_STATIC | 95 | RESTORE_STATIC |
@@ -403,7 +402,7 @@ EXPORT(sysn32_call_table) | |||
403 | PTR sys_ioprio_get | 402 | PTR sys_ioprio_get |
404 | PTR compat_sys_utimensat | 403 | PTR compat_sys_utimensat |
405 | PTR compat_sys_signalfd /* 6280 */ | 404 | PTR compat_sys_signalfd /* 6280 */ |
406 | PTR sys_ni_syscall | 405 | PTR sys_ni_syscall /* was timerfd */ |
407 | PTR sys_eventfd | 406 | PTR sys_eventfd |
408 | PTR sys_fallocate | 407 | PTR sys_fallocate |
409 | PTR sys_timerfd_create | 408 | PTR sys_timerfd_create |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 049a9c8c49a0..91ea5e4041dd 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -123,8 +123,7 @@ trace_a_syscall: | |||
123 | 123 | ||
124 | move s0, t2 # Save syscall pointer | 124 | move s0, t2 # Save syscall pointer |
125 | move a0, sp | 125 | move a0, sp |
126 | li a1, 0 | 126 | jal syscall_trace_enter |
127 | jal do_syscall_trace | ||
128 | 127 | ||
129 | move t0, s0 | 128 | move t0, s0 |
130 | RESTORE_STATIC | 129 | RESTORE_STATIC |
@@ -522,7 +521,7 @@ sys_call_table: | |||
522 | PTR sys_ioprio_get /* 4315 */ | 521 | PTR sys_ioprio_get /* 4315 */ |
523 | PTR compat_sys_utimensat | 522 | PTR compat_sys_utimensat |
524 | PTR compat_sys_signalfd | 523 | PTR compat_sys_signalfd |
525 | PTR sys_ni_syscall | 524 | PTR sys_ni_syscall /* was timerfd */ |
526 | PTR sys_eventfd | 525 | PTR sys_eventfd |
527 | PTR sys32_fallocate /* 4320 */ | 526 | PTR sys32_fallocate /* 4320 */ |
528 | PTR sys_timerfd_create | 527 | PTR sys_timerfd_create |
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 58beabf50b3c..d02765708ddb 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -10,12 +10,9 @@ | |||
10 | #include <linux/capability.h> | 10 | #include <linux/capability.h> |
11 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
13 | #include <linux/mm.h> | ||
14 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
15 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
16 | #include <linux/mman.h> | ||
17 | #include <linux/ptrace.h> | 15 | #include <linux/ptrace.h> |
18 | #include <linux/sched.h> | ||
19 | #include <linux/string.h> | 16 | #include <linux/string.h> |
20 | #include <linux/syscalls.h> | 17 | #include <linux/syscalls.h> |
21 | #include <linux/file.h> | 18 | #include <linux/file.h> |
@@ -25,11 +22,9 @@ | |||
25 | #include <linux/msg.h> | 22 | #include <linux/msg.h> |
26 | #include <linux/shm.h> | 23 | #include <linux/shm.h> |
27 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
28 | #include <linux/module.h> | ||
29 | #include <linux/ipc.h> | 25 | #include <linux/ipc.h> |
30 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
31 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
32 | #include <linux/random.h> | ||
33 | #include <linux/elf.h> | 28 | #include <linux/elf.h> |
34 | 29 | ||
35 | #include <asm/asm.h> | 30 | #include <asm/asm.h> |
@@ -66,121 +61,6 @@ out: | |||
66 | return res; | 61 | return res; |
67 | } | 62 | } |
68 | 63 | ||
69 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ | ||
70 | |||
71 | EXPORT_SYMBOL(shm_align_mask); | ||
72 | |||
73 | #define COLOUR_ALIGN(addr,pgoff) \ | ||
74 | ((((addr) + shm_align_mask) & ~shm_align_mask) + \ | ||
75 | (((pgoff) << PAGE_SHIFT) & shm_align_mask)) | ||
76 | |||
77 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
78 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
79 | { | ||
80 | struct vm_area_struct * vmm; | ||
81 | int do_color_align; | ||
82 | unsigned long task_size; | ||
83 | |||
84 | #ifdef CONFIG_32BIT | ||
85 | task_size = TASK_SIZE; | ||
86 | #else /* Must be CONFIG_64BIT*/ | ||
87 | task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE; | ||
88 | #endif | ||
89 | |||
90 | if (len > task_size) | ||
91 | return -ENOMEM; | ||
92 | |||
93 | if (flags & MAP_FIXED) { | ||
94 | /* Even MAP_FIXED mappings must reside within task_size. */ | ||
95 | if (task_size - len < addr) | ||
96 | return -EINVAL; | ||
97 | |||
98 | /* | ||
99 | * We do not accept a shared mapping if it would violate | ||
100 | * cache aliasing constraints. | ||
101 | */ | ||
102 | if ((flags & MAP_SHARED) && | ||
103 | ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) | ||
104 | return -EINVAL; | ||
105 | return addr; | ||
106 | } | ||
107 | |||
108 | do_color_align = 0; | ||
109 | if (filp || (flags & MAP_SHARED)) | ||
110 | do_color_align = 1; | ||
111 | if (addr) { | ||
112 | if (do_color_align) | ||
113 | addr = COLOUR_ALIGN(addr, pgoff); | ||
114 | else | ||
115 | addr = PAGE_ALIGN(addr); | ||
116 | vmm = find_vma(current->mm, addr); | ||
117 | if (task_size - len >= addr && | ||
118 | (!vmm || addr + len <= vmm->vm_start)) | ||
119 | return addr; | ||
120 | } | ||
121 | addr = current->mm->mmap_base; | ||
122 | if (do_color_align) | ||
123 | addr = COLOUR_ALIGN(addr, pgoff); | ||
124 | else | ||
125 | addr = PAGE_ALIGN(addr); | ||
126 | |||
127 | for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { | ||
128 | /* At this point: (!vmm || addr < vmm->vm_end). */ | ||
129 | if (task_size - len < addr) | ||
130 | return -ENOMEM; | ||
131 | if (!vmm || addr + len <= vmm->vm_start) | ||
132 | return addr; | ||
133 | addr = vmm->vm_end; | ||
134 | if (do_color_align) | ||
135 | addr = COLOUR_ALIGN(addr, pgoff); | ||
136 | } | ||
137 | } | ||
138 | |||
139 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
140 | { | ||
141 | unsigned long random_factor = 0UL; | ||
142 | |||
143 | if (current->flags & PF_RANDOMIZE) { | ||
144 | random_factor = get_random_int(); | ||
145 | random_factor = random_factor << PAGE_SHIFT; | ||
146 | if (TASK_IS_32BIT_ADDR) | ||
147 | random_factor &= 0xfffffful; | ||
148 | else | ||
149 | random_factor &= 0xffffffful; | ||
150 | } | ||
151 | |||
152 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | ||
153 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
154 | mm->unmap_area = arch_unmap_area; | ||
155 | } | ||
156 | |||
157 | static inline unsigned long brk_rnd(void) | ||
158 | { | ||
159 | unsigned long rnd = get_random_int(); | ||
160 | |||
161 | rnd = rnd << PAGE_SHIFT; | ||
162 | /* 8MB for 32bit, 256MB for 64bit */ | ||
163 | if (TASK_IS_32BIT_ADDR) | ||
164 | rnd = rnd & 0x7ffffful; | ||
165 | else | ||
166 | rnd = rnd & 0xffffffful; | ||
167 | |||
168 | return rnd; | ||
169 | } | ||
170 | |||
171 | unsigned long arch_randomize_brk(struct mm_struct *mm) | ||
172 | { | ||
173 | unsigned long base = mm->brk; | ||
174 | unsigned long ret; | ||
175 | |||
176 | ret = PAGE_ALIGN(base + brk_rnd()); | ||
177 | |||
178 | if (ret < mm->brk) | ||
179 | return mm->brk; | ||
180 | |||
181 | return ret; | ||
182 | } | ||
183 | |||
184 | SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, | 64 | SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, |
185 | unsigned long, prot, unsigned long, flags, unsigned long, | 65 | unsigned long, prot, unsigned long, flags, unsigned long, |
186 | fd, off_t, offset) | 66 | fd, off_t, offset) |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 71350f7f2d88..e9b3af27d844 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -374,7 +374,8 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
374 | unsigned long dvpret = dvpe(); | 374 | unsigned long dvpret = dvpe(); |
375 | #endif /* CONFIG_MIPS_MT_SMTC */ | 375 | #endif /* CONFIG_MIPS_MT_SMTC */ |
376 | 376 | ||
377 | notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); | 377 | if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) |
378 | sig = 0; | ||
378 | 379 | ||
379 | console_verbose(); | 380 | console_verbose(); |
380 | spin_lock_irq(&die_lock); | 381 | spin_lock_irq(&die_lock); |
@@ -383,9 +384,6 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
383 | mips_mt_regdump(dvpret); | 384 | mips_mt_regdump(dvpret); |
384 | #endif /* CONFIG_MIPS_MT_SMTC */ | 385 | #endif /* CONFIG_MIPS_MT_SMTC */ |
385 | 386 | ||
386 | if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) | ||
387 | sig = 0; | ||
388 | |||
389 | printk("%s[#%d]:\n", str, ++die_counter); | 387 | printk("%s[#%d]:\n", str, ++die_counter); |
390 | show_registers(regs); | 388 | show_registers(regs); |
391 | add_taint(TAINT_DIE); | 389 | add_taint(TAINT_DIE); |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 832afbb87588..cd2ca544454b 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -68,12 +68,14 @@ SECTIONS | |||
68 | RODATA | 68 | RODATA |
69 | 69 | ||
70 | /* writeable */ | 70 | /* writeable */ |
71 | _sdata = .; /* Start of data section */ | ||
71 | .data : { /* Data */ | 72 | .data : { /* Data */ |
72 | . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ | 73 | . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ |
73 | 74 | ||
74 | INIT_TASK_DATA(PAGE_SIZE) | 75 | INIT_TASK_DATA(PAGE_SIZE) |
75 | NOSAVE_DATA | 76 | NOSAVE_DATA |
76 | CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) | 77 | CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) |
78 | READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) | ||
77 | DATA_DATA | 79 | DATA_DATA |
78 | CONSTRUCTORS | 80 | CONSTRUCTORS |
79 | } | 81 | } |
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig new file mode 100644 index 000000000000..3fccf2104513 --- /dev/null +++ b/arch/mips/lantiq/Kconfig | |||
@@ -0,0 +1,23 @@ | |||
1 | if LANTIQ | ||
2 | |||
3 | config SOC_TYPE_XWAY | ||
4 | bool | ||
5 | default n | ||
6 | |||
7 | choice | ||
8 | prompt "SoC Type" | ||
9 | default SOC_XWAY | ||
10 | |||
11 | config SOC_AMAZON_SE | ||
12 | bool "Amazon SE" | ||
13 | select SOC_TYPE_XWAY | ||
14 | |||
15 | config SOC_XWAY | ||
16 | bool "XWAY" | ||
17 | select SOC_TYPE_XWAY | ||
18 | select HW_HAS_PCI | ||
19 | endchoice | ||
20 | |||
21 | source "arch/mips/lantiq/xway/Kconfig" | ||
22 | |||
23 | endif | ||
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile new file mode 100644 index 000000000000..e5dae0e24b00 --- /dev/null +++ b/arch/mips/lantiq/Makefile | |||
@@ -0,0 +1,11 @@ | |||
1 | # Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
2 | # | ||
3 | # This program is free software; you can redistribute it and/or modify it | ||
4 | # under the terms of the GNU General Public License version 2 as published | ||
5 | # by the Free Software Foundation. | ||
6 | |||
7 | obj-y := irq.o setup.o clk.o prom.o devices.o | ||
8 | |||
9 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | ||
10 | |||
11 | obj-$(CONFIG_SOC_TYPE_XWAY) += xway/ | ||
diff --git a/arch/mips/lantiq/Platform b/arch/mips/lantiq/Platform new file mode 100644 index 000000000000..f3dff05722de --- /dev/null +++ b/arch/mips/lantiq/Platform | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Lantiq | ||
3 | # | ||
4 | |||
5 | platform-$(CONFIG_LANTIQ) += lantiq/ | ||
6 | cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq | ||
7 | load-$(CONFIG_LANTIQ) = 0xffffffff80002000 | ||
8 | cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway | ||
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c new file mode 100644 index 000000000000..94560899d13e --- /dev/null +++ b/arch/mips/lantiq/clk.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com> | ||
7 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
8 | */ | ||
9 | #include <linux/io.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/list.h> | ||
17 | |||
18 | #include <asm/time.h> | ||
19 | #include <asm/irq.h> | ||
20 | #include <asm/div64.h> | ||
21 | |||
22 | #include <lantiq_soc.h> | ||
23 | |||
24 | #include "clk.h" | ||
25 | |||
26 | struct clk { | ||
27 | const char *name; | ||
28 | unsigned long rate; | ||
29 | unsigned long (*get_rate) (void); | ||
30 | }; | ||
31 | |||
32 | static struct clk *cpu_clk; | ||
33 | static int cpu_clk_cnt; | ||
34 | |||
35 | /* lantiq socs have 3 static clocks */ | ||
36 | static struct clk cpu_clk_generic[] = { | ||
37 | { | ||
38 | .name = "cpu", | ||
39 | .get_rate = ltq_get_cpu_hz, | ||
40 | }, { | ||
41 | .name = "fpi", | ||
42 | .get_rate = ltq_get_fpi_hz, | ||
43 | }, { | ||
44 | .name = "io", | ||
45 | .get_rate = ltq_get_io_region_clock, | ||
46 | }, | ||
47 | }; | ||
48 | |||
49 | static struct resource ltq_cgu_resource = { | ||
50 | .name = "cgu", | ||
51 | .start = LTQ_CGU_BASE_ADDR, | ||
52 | .end = LTQ_CGU_BASE_ADDR + LTQ_CGU_SIZE - 1, | ||
53 | .flags = IORESOURCE_MEM, | ||
54 | }; | ||
55 | |||
56 | /* remapped clock register range */ | ||
57 | void __iomem *ltq_cgu_membase; | ||
58 | |||
59 | void clk_init(void) | ||
60 | { | ||
61 | cpu_clk = cpu_clk_generic; | ||
62 | cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic); | ||
63 | } | ||
64 | |||
65 | static inline int clk_good(struct clk *clk) | ||
66 | { | ||
67 | return clk && !IS_ERR(clk); | ||
68 | } | ||
69 | |||
70 | unsigned long clk_get_rate(struct clk *clk) | ||
71 | { | ||
72 | if (unlikely(!clk_good(clk))) | ||
73 | return 0; | ||
74 | |||
75 | if (clk->rate != 0) | ||
76 | return clk->rate; | ||
77 | |||
78 | if (clk->get_rate != NULL) | ||
79 | return clk->get_rate(); | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | EXPORT_SYMBOL(clk_get_rate); | ||
84 | |||
85 | struct clk *clk_get(struct device *dev, const char *id) | ||
86 | { | ||
87 | int i; | ||
88 | |||
89 | for (i = 0; i < cpu_clk_cnt; i++) | ||
90 | if (!strcmp(id, cpu_clk[i].name)) | ||
91 | return &cpu_clk[i]; | ||
92 | BUG(); | ||
93 | return ERR_PTR(-ENOENT); | ||
94 | } | ||
95 | EXPORT_SYMBOL(clk_get); | ||
96 | |||
97 | void clk_put(struct clk *clk) | ||
98 | { | ||
99 | /* not used */ | ||
100 | } | ||
101 | EXPORT_SYMBOL(clk_put); | ||
102 | |||
103 | static inline u32 ltq_get_counter_resolution(void) | ||
104 | { | ||
105 | u32 res; | ||
106 | |||
107 | __asm__ __volatile__( | ||
108 | ".set push\n" | ||
109 | ".set mips32r2\n" | ||
110 | "rdhwr %0, $3\n" | ||
111 | ".set pop\n" | ||
112 | : "=&r" (res) | ||
113 | : /* no input */ | ||
114 | : "memory"); | ||
115 | |||
116 | return res; | ||
117 | } | ||
118 | |||
119 | void __init plat_time_init(void) | ||
120 | { | ||
121 | struct clk *clk; | ||
122 | |||
123 | if (insert_resource(&iomem_resource, &ltq_cgu_resource) < 0) | ||
124 | panic("Failed to insert cgu memory\n"); | ||
125 | |||
126 | if (request_mem_region(ltq_cgu_resource.start, | ||
127 | resource_size(&ltq_cgu_resource), "cgu") < 0) | ||
128 | panic("Failed to request cgu memory\n"); | ||
129 | |||
130 | ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start, | ||
131 | resource_size(&ltq_cgu_resource)); | ||
132 | if (!ltq_cgu_membase) { | ||
133 | pr_err("Failed to remap cgu memory\n"); | ||
134 | unreachable(); | ||
135 | } | ||
136 | clk = clk_get(0, "cpu"); | ||
137 | mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution(); | ||
138 | write_c0_compare(read_c0_count()); | ||
139 | clk_put(clk); | ||
140 | } | ||
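A minimal consumer sketch of the clock API added in clk.c above (not part of the patch); the helper name is hypothetical, clk_get() in this implementation resolves only the three static clocks "cpu", "fpi" and "io", and clk_put() is a no-op:

        /* sketch: query the FPI bus clock registered by clk.c (hypothetical caller) */
        #include <linux/clk.h>
        #include <linux/kernel.h>

        static void ltq_print_fpi_rate(void)
        {
                struct clk *clk = clk_get(NULL, "fpi");   /* returns &cpu_clk[1] */
                unsigned long rate = clk_get_rate(clk);   /* calls ltq_get_fpi_hz() */

                pr_info("FPI bus clock: %lu Hz\n", rate);
                clk_put(clk);                             /* no-op here */
        }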
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h new file mode 100644 index 000000000000..3328925f2c3f --- /dev/null +++ b/arch/mips/lantiq/clk.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_CLK_H__ | ||
10 | #define _LTQ_CLK_H__ | ||
11 | |||
12 | extern void clk_init(void); | ||
13 | |||
14 | extern unsigned long ltq_get_cpu_hz(void); | ||
15 | extern unsigned long ltq_get_fpi_hz(void); | ||
16 | extern unsigned long ltq_get_io_region_clock(void); | ||
17 | |||
18 | #endif | ||
diff --git a/arch/mips/lantiq/devices.c b/arch/mips/lantiq/devices.c new file mode 100644 index 000000000000..7b82c34cb169 --- /dev/null +++ b/arch/mips/lantiq/devices.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/reboot.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/leds.h> | ||
17 | #include <linux/etherdevice.h> | ||
18 | #include <linux/reboot.h> | ||
19 | #include <linux/time.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/gpio.h> | ||
22 | #include <linux/leds.h> | ||
23 | |||
24 | #include <asm/bootinfo.h> | ||
25 | #include <asm/irq.h> | ||
26 | |||
27 | #include <lantiq_soc.h> | ||
28 | |||
29 | #include "devices.h" | ||
30 | |||
31 | /* nor flash */ | ||
32 | static struct resource ltq_nor_resource = { | ||
33 | .name = "nor", | ||
34 | .start = LTQ_FLASH_START, | ||
35 | .end = LTQ_FLASH_START + LTQ_FLASH_MAX - 1, | ||
36 | .flags = IORESOURCE_MEM, | ||
37 | }; | ||
38 | |||
39 | static struct platform_device ltq_nor = { | ||
40 | .name = "ltq_nor", | ||
41 | .resource = &ltq_nor_resource, | ||
42 | .num_resources = 1, | ||
43 | }; | ||
44 | |||
45 | void __init ltq_register_nor(struct physmap_flash_data *data) | ||
46 | { | ||
47 | ltq_nor.dev.platform_data = data; | ||
48 | platform_device_register(&ltq_nor); | ||
49 | } | ||
50 | |||
51 | /* watchdog */ | ||
52 | static struct resource ltq_wdt_resource = { | ||
53 | .name = "watchdog", | ||
54 | .start = LTQ_WDT_BASE_ADDR, | ||
55 | .end = LTQ_WDT_BASE_ADDR + LTQ_WDT_SIZE - 1, | ||
56 | .flags = IORESOURCE_MEM, | ||
57 | }; | ||
58 | |||
59 | void __init ltq_register_wdt(void) | ||
60 | { | ||
61 | platform_device_register_simple("ltq_wdt", 0, <q_wdt_resource, 1); | ||
62 | } | ||
63 | |||
64 | /* asc ports */ | ||
65 | static struct resource ltq_asc0_resources[] = { | ||
66 | { | ||
67 | .name = "asc0", | ||
68 | .start = LTQ_ASC0_BASE_ADDR, | ||
69 | .end = LTQ_ASC0_BASE_ADDR + LTQ_ASC_SIZE - 1, | ||
70 | .flags = IORESOURCE_MEM, | ||
71 | }, | ||
72 | IRQ_RES(tx, LTQ_ASC_TIR(0)), | ||
73 | IRQ_RES(rx, LTQ_ASC_RIR(0)), | ||
74 | IRQ_RES(err, LTQ_ASC_EIR(0)), | ||
75 | }; | ||
76 | |||
77 | static struct resource ltq_asc1_resources[] = { | ||
78 | { | ||
79 | .name = "asc1", | ||
80 | .start = LTQ_ASC1_BASE_ADDR, | ||
81 | .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1, | ||
82 | .flags = IORESOURCE_MEM, | ||
83 | }, | ||
84 | IRQ_RES(tx, LTQ_ASC_TIR(1)), | ||
85 | IRQ_RES(rx, LTQ_ASC_RIR(1)), | ||
86 | IRQ_RES(err, LTQ_ASC_EIR(1)), | ||
87 | }; | ||
88 | |||
89 | void __init ltq_register_asc(int port) | ||
90 | { | ||
91 | switch (port) { | ||
92 | case 0: | ||
93 | platform_device_register_simple("ltq_asc", 0, | ||
94 | ltq_asc0_resources, ARRAY_SIZE(ltq_asc0_resources)); | ||
95 | break; | ||
96 | case 1: | ||
97 | platform_device_register_simple("ltq_asc", 1, | ||
98 | ltq_asc1_resources, ARRAY_SIZE(ltq_asc1_resources)); | ||
99 | break; | ||
100 | default: | ||
101 | break; | ||
102 | } | ||
103 | } | ||
104 | |||
105 | #ifdef CONFIG_PCI | ||
106 | /* pci */ | ||
107 | static struct platform_device ltq_pci = { | ||
108 | .name = "ltq_pci", | ||
109 | .num_resources = 0, | ||
110 | }; | ||
111 | |||
112 | void __init ltq_register_pci(struct ltq_pci_data *data) | ||
113 | { | ||
114 | ltq_pci.dev.platform_data = data; | ||
115 | platform_device_register(&ltq_pci); | ||
116 | } | ||
117 | #else | ||
118 | void __init ltq_register_pci(struct ltq_pci_data *data) | ||
119 | { | ||
120 | pr_err("kernel is compiled without PCI support\n"); | ||
121 | } | ||
122 | #endif | ||
diff --git a/arch/mips/lantiq/devices.h b/arch/mips/lantiq/devices.h new file mode 100644 index 000000000000..2947bb19a528 --- /dev/null +++ b/arch/mips/lantiq/devices.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_DEVICES_H__ | ||
10 | #define _LTQ_DEVICES_H__ | ||
11 | |||
12 | #include <lantiq_platform.h> | ||
13 | #include <linux/mtd/physmap.h> | ||
14 | |||
15 | #define IRQ_RES(resname, irq) \ | ||
16 | {.name = #resname, .start = (irq), .flags = IORESOURCE_IRQ} | ||
17 | |||
18 | extern void ltq_register_nor(struct physmap_flash_data *data); | ||
19 | extern void ltq_register_wdt(void); | ||
20 | extern void ltq_register_asc(int port); | ||
21 | extern void ltq_register_pci(struct ltq_pci_data *data); | ||
22 | |||
23 | #endif | ||
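The registration helpers declared above are intended to be called from board setup code; a hedged sketch of such a caller follows (the board init name, the flash bus width and the choice of asc1 as console are illustrative assumptions, not taken from this patch):

        /* sketch: a hypothetical board init wiring up the devices declared above */
        #include <linux/init.h>
        #include <linux/mtd/physmap.h>
        #include "devices.h"

        static struct physmap_flash_data board_flash_data = {
                .width = 2,                     /* 16-bit NOR bus, example value */
        };

        static void __init board_init(void)
        {
                ltq_register_asc(1);            /* console UART on asc1 */
                ltq_register_wdt();
                ltq_register_nor(&board_flash_data);
        }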
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c new file mode 100644 index 000000000000..972e05f87631 --- /dev/null +++ b/arch/mips/lantiq/early_printk.c | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/cpu.h> | ||
11 | |||
12 | #include <lantiq.h> | ||
13 | #include <lantiq_soc.h> | ||
14 | |||
15 | /* no ioremap possible at this early stage, let's use KSEG1 instead */ | ||
16 | #define LTQ_ASC_BASE KSEG1ADDR(LTQ_ASC1_BASE_ADDR) | ||
17 | #define ASC_BUF 1024 | ||
18 | #define LTQ_ASC_FSTAT ((u32 *)(LTQ_ASC_BASE + 0x0048)) | ||
19 | #define LTQ_ASC_TBUF ((u32 *)(LTQ_ASC_BASE + 0x0020)) | ||
20 | #define TXMASK 0x3F00 | ||
21 | #define TXOFFSET 8 | ||
22 | |||
23 | void prom_putchar(char c) | ||
24 | { | ||
25 | unsigned long flags; | ||
26 | |||
27 | local_irq_save(flags); | ||
28 | do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET); | ||
29 | if (c == '\n') | ||
30 | ltq_w32('\r', LTQ_ASC_TBUF); | ||
31 | ltq_w32(c, LTQ_ASC_TBUF); | ||
32 | local_irq_restore(flags); | ||
33 | } | ||
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c new file mode 100644 index 000000000000..fc89795cafdb --- /dev/null +++ b/arch/mips/lantiq/irq.c | |||
@@ -0,0 +1,326 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/ioport.h> | ||
12 | |||
13 | #include <asm/bootinfo.h> | ||
14 | #include <asm/irq_cpu.h> | ||
15 | |||
16 | #include <lantiq_soc.h> | ||
17 | #include <irq.h> | ||
18 | |||
19 | /* register definitions */ | ||
20 | #define LTQ_ICU_IM0_ISR 0x0000 | ||
21 | #define LTQ_ICU_IM0_IER 0x0008 | ||
22 | #define LTQ_ICU_IM0_IOSR 0x0010 | ||
23 | #define LTQ_ICU_IM0_IRSR 0x0018 | ||
24 | #define LTQ_ICU_IM0_IMR 0x0020 | ||
25 | #define LTQ_ICU_IM1_ISR 0x0028 | ||
26 | #define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR) | ||
27 | |||
28 | #define LTQ_EIU_EXIN_C 0x0000 | ||
29 | #define LTQ_EIU_EXIN_INIC 0x0004 | ||
30 | #define LTQ_EIU_EXIN_INEN 0x000C | ||
31 | |||
32 | /* irq numbers used by the external interrupt unit (EIU) */ | ||
33 | #define LTQ_EIU_IR0 (INT_NUM_IM4_IRL0 + 30) | ||
34 | #define LTQ_EIU_IR1 (INT_NUM_IM3_IRL0 + 31) | ||
35 | #define LTQ_EIU_IR2 (INT_NUM_IM1_IRL0 + 26) | ||
36 | #define LTQ_EIU_IR3 INT_NUM_IM1_IRL0 | ||
37 | #define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1) | ||
38 | #define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2) | ||
39 | #define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30) | ||
40 | |||
41 | #define MAX_EIU 6 | ||
42 | |||
43 | /* irqs generated by devices attached to the EBU need to be acked in | ||
44 | * a special manner | ||
45 | */ | ||
46 | #define LTQ_ICU_EBU_IRQ 22 | ||
47 | |||
48 | #define ltq_icu_w32(x, y) ltq_w32((x), ltq_icu_membase + (y)) | ||
49 | #define ltq_icu_r32(x) ltq_r32(ltq_icu_membase + (x)) | ||
50 | |||
51 | #define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y)) | ||
52 | #define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x)) | ||
53 | |||
54 | static unsigned short ltq_eiu_irq[MAX_EIU] = { | ||
55 | LTQ_EIU_IR0, | ||
56 | LTQ_EIU_IR1, | ||
57 | LTQ_EIU_IR2, | ||
58 | LTQ_EIU_IR3, | ||
59 | LTQ_EIU_IR4, | ||
60 | LTQ_EIU_IR5, | ||
61 | }; | ||
62 | |||
63 | static struct resource ltq_icu_resource = { | ||
64 | .name = "icu", | ||
65 | .start = LTQ_ICU_BASE_ADDR, | ||
66 | .end = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1, | ||
67 | .flags = IORESOURCE_MEM, | ||
68 | }; | ||
69 | |||
70 | static struct resource ltq_eiu_resource = { | ||
71 | .name = "eiu", | ||
72 | .start = LTQ_EIU_BASE_ADDR, | ||
73 | .end = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1, | ||
74 | .flags = IORESOURCE_MEM, | ||
75 | }; | ||
76 | |||
77 | static void __iomem *ltq_icu_membase; | ||
78 | static void __iomem *ltq_eiu_membase; | ||
79 | |||
80 | void ltq_disable_irq(struct irq_data *d) | ||
81 | { | ||
82 | u32 ier = LTQ_ICU_IM0_IER; | ||
83 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
84 | |||
85 | ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | ||
86 | irq_nr %= INT_NUM_IM_OFFSET; | ||
87 | ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); | ||
88 | } | ||
89 | |||
90 | void ltq_mask_and_ack_irq(struct irq_data *d) | ||
91 | { | ||
92 | u32 ier = LTQ_ICU_IM0_IER; | ||
93 | u32 isr = LTQ_ICU_IM0_ISR; | ||
94 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
95 | |||
96 | ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | ||
97 | isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | ||
98 | irq_nr %= INT_NUM_IM_OFFSET; | ||
99 | ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); | ||
100 | ltq_icu_w32((1 << irq_nr), isr); | ||
101 | } | ||
102 | |||
103 | static void ltq_ack_irq(struct irq_data *d) | ||
104 | { | ||
105 | u32 isr = LTQ_ICU_IM0_ISR; | ||
106 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
107 | |||
108 | isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | ||
109 | irq_nr %= INT_NUM_IM_OFFSET; | ||
110 | ltq_icu_w32((1 << irq_nr), isr); | ||
111 | } | ||
112 | |||
113 | void ltq_enable_irq(struct irq_data *d) | ||
114 | { | ||
115 | u32 ier = LTQ_ICU_IM0_IER; | ||
116 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
117 | |||
118 | ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); | ||
119 | irq_nr %= INT_NUM_IM_OFFSET; | ||
120 | ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier); | ||
121 | } | ||
122 | |||
123 | static unsigned int ltq_startup_eiu_irq(struct irq_data *d) | ||
124 | { | ||
125 | int i; | ||
126 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
127 | |||
128 | ltq_enable_irq(d); | ||
129 | for (i = 0; i < MAX_EIU; i++) { | ||
130 | if (irq_nr == ltq_eiu_irq[i]) { | ||
131 | /* low level - we should really handle set_type */ | ||
132 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | | ||
133 | (0x6 << (i * 4)), LTQ_EIU_EXIN_C); | ||
134 | /* clear all pending */ | ||
135 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i), | ||
136 | LTQ_EIU_EXIN_INIC); | ||
137 | /* enable */ | ||
138 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i), | ||
139 | LTQ_EIU_EXIN_INEN); | ||
140 | break; | ||
141 | } | ||
142 | } | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static void ltq_shutdown_eiu_irq(struct irq_data *d) | ||
148 | { | ||
149 | int i; | ||
150 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
151 | |||
152 | ltq_disable_irq(d); | ||
153 | for (i = 0; i < MAX_EIU; i++) { | ||
154 | if (irq_nr == ltq_eiu_irq[i]) { | ||
155 | /* disable */ | ||
156 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), | ||
157 | LTQ_EIU_EXIN_INEN); | ||
158 | break; | ||
159 | } | ||
160 | } | ||
161 | } | ||
162 | |||
163 | static struct irq_chip ltq_irq_type = { | ||
164 | "icu", | ||
165 | .irq_enable = ltq_enable_irq, | ||
166 | .irq_disable = ltq_disable_irq, | ||
167 | .irq_unmask = ltq_enable_irq, | ||
168 | .irq_ack = ltq_ack_irq, | ||
169 | .irq_mask = ltq_disable_irq, | ||
170 | .irq_mask_ack = ltq_mask_and_ack_irq, | ||
171 | }; | ||
172 | |||
173 | static struct irq_chip ltq_eiu_type = { | ||
174 | "eiu", | ||
175 | .irq_startup = ltq_startup_eiu_irq, | ||
176 | .irq_shutdown = ltq_shutdown_eiu_irq, | ||
177 | .irq_enable = ltq_enable_irq, | ||
178 | .irq_disable = ltq_disable_irq, | ||
179 | .irq_unmask = ltq_enable_irq, | ||
180 | .irq_ack = ltq_ack_irq, | ||
181 | .irq_mask = ltq_disable_irq, | ||
182 | .irq_mask_ack = ltq_mask_and_ack_irq, | ||
183 | }; | ||
184 | |||
185 | static void ltq_hw_irqdispatch(int module) | ||
186 | { | ||
187 | u32 irq; | ||
188 | |||
189 | irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET)); | ||
190 | if (irq == 0) | ||
191 | return; | ||
192 | |||
193 | /* silicon bug: only the most significant set bit is valid; all | ||
194 | * other bits might be bogus | ||
195 | */ | ||
196 | irq = __fls(irq); | ||
197 | do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module)); | ||
198 | |||
199 | /* if this is an EBU irq, we need to ack it or get a deadlock */ | ||
200 | if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0)) | ||
201 | ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, | ||
202 | LTQ_EBU_PCC_ISTAT); | ||
203 | } | ||
204 | |||
205 | #define DEFINE_HWx_IRQDISPATCH(x) \ | ||
206 | static void ltq_hw ## x ## _irqdispatch(void) \ | ||
207 | { \ | ||
208 | ltq_hw_irqdispatch(x); \ | ||
209 | } | ||
210 | DEFINE_HWx_IRQDISPATCH(0) | ||
211 | DEFINE_HWx_IRQDISPATCH(1) | ||
212 | DEFINE_HWx_IRQDISPATCH(2) | ||
213 | DEFINE_HWx_IRQDISPATCH(3) | ||
214 | DEFINE_HWx_IRQDISPATCH(4) | ||
215 | |||
216 | static void ltq_hw5_irqdispatch(void) | ||
217 | { | ||
218 | do_IRQ(MIPS_CPU_TIMER_IRQ); | ||
219 | } | ||
220 | |||
221 | asmlinkage void plat_irq_dispatch(void) | ||
222 | { | ||
223 | unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; | ||
224 | unsigned int i; | ||
225 | |||
226 | if (pending & CAUSEF_IP7) { | ||
227 | do_IRQ(MIPS_CPU_TIMER_IRQ); | ||
228 | goto out; | ||
229 | } else { | ||
230 | for (i = 0; i < 5; i++) { | ||
231 | if (pending & (CAUSEF_IP2 << i)) { | ||
232 | ltq_hw_irqdispatch(i); | ||
233 | goto out; | ||
234 | } | ||
235 | } | ||
236 | } | ||
237 | pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status()); | ||
238 | |||
239 | out: | ||
240 | return; | ||
241 | } | ||
242 | |||
243 | static struct irqaction cascade = { | ||
244 | .handler = no_action, | ||
245 | .flags = IRQF_DISABLED, | ||
246 | .name = "cascade", | ||
247 | }; | ||
248 | |||
249 | void __init arch_init_irq(void) | ||
250 | { | ||
251 | int i; | ||
252 | |||
253 | if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0) | ||
254 | panic("Failed to insert icu memory\n"); | ||
255 | |||
256 | if (request_mem_region(ltq_icu_resource.start, | ||
257 | resource_size(&ltq_icu_resource), "icu") < 0) | ||
258 | panic("Failed to request icu memory\n"); | ||
259 | |||
260 | ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start, | ||
261 | resource_size(&ltq_icu_resource)); | ||
262 | if (!ltq_icu_membase) | ||
263 | panic("Failed to remap icu memory\n"); | ||
264 | |||
265 | if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0) | ||
266 | panic("Failed to insert eiu memory\n"); | ||
267 | |||
268 | if (request_mem_region(ltq_eiu_resource.start, | ||
269 | resource_size(&ltq_eiu_resource), "eiu") < 0) | ||
270 | panic("Failed to request eiu memory\n"); | ||
271 | |||
272 | ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start, | ||
273 | resource_size(&ltq_eiu_resource)); | ||
274 | if (!ltq_eiu_membase) | ||
275 | panic("Failed to remap eiu memory\n"); | ||
276 | |||
277 | /* make sure all irqs are turned off by default */ | ||
278 | for (i = 0; i < 5; i++) | ||
279 | ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET)); | ||
280 | |||
281 | /* clear all possibly pending interrupts */ | ||
282 | ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET)); | ||
283 | |||
284 | mips_cpu_irq_init(); | ||
285 | |||
286 | for (i = 2; i <= 6; i++) | ||
287 | setup_irq(i, &cascade); | ||
288 | |||
289 | if (cpu_has_vint) { | ||
290 | pr_info("Setting up vectored interrupts\n"); | ||
291 | set_vi_handler(2, ltq_hw0_irqdispatch); | ||
292 | set_vi_handler(3, ltq_hw1_irqdispatch); | ||
293 | set_vi_handler(4, ltq_hw2_irqdispatch); | ||
294 | set_vi_handler(5, ltq_hw3_irqdispatch); | ||
295 | set_vi_handler(6, ltq_hw4_irqdispatch); | ||
296 | set_vi_handler(7, ltq_hw5_irqdispatch); | ||
297 | } | ||
298 | |||
299 | for (i = INT_NUM_IRQ0; | ||
300 | i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++) | ||
301 | if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) || | ||
302 | (i == LTQ_EIU_IR2)) | ||
303 | irq_set_chip_and_handler(i, &ltq_eiu_type, | ||
304 | handle_level_irq); | ||
305 | /* EIU3-5 only exist on ar9 and vr9 */ | ||
306 | else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) || | ||
307 | (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9())) | ||
308 | irq_set_chip_and_handler(i, &ltq_eiu_type, | ||
309 | handle_level_irq); | ||
310 | else | ||
311 | irq_set_chip_and_handler(i, &ltq_irq_type, | ||
312 | handle_level_irq); | ||
313 | |||
314 | #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) | ||
315 | set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | | ||
316 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||
317 | #else | ||
318 | set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | | ||
319 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||
320 | #endif | ||
321 | } | ||
322 | |||
323 | unsigned int __cpuinit get_c0_compare_int(void) | ||
324 | { | ||
325 | return CP0_LEGACY_COMPARE_IRQ; | ||
326 | } | ||
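A worked example of the ICU bank arithmetic shared by the enable/disable/ack helpers above; the concrete value INT_NUM_IM_OFFSET == 32 (32 interrupts per IM bank) is an assumption taken from the platform headers, not visible in this diff:

        /* sketch: locate the IER bit for hwirq 40, assuming INT_NUM_IM_OFFSET == 32 */
        int irq_nr = 40;                                /* d->irq - INT_NUM_IRQ0  */
        u32 ier = LTQ_ICU_IM0_IER                       /* 0x0008                 */
                + LTQ_ICU_OFFSET * (irq_nr / 32);       /* + 0x28 -> IM1's IER    */
        int bit = irq_nr % 32;                          /* bit 8 within that bank */

        ltq_icu_w32(ltq_icu_r32(ier) | (1 << bit), ier);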
diff --git a/arch/mips/lantiq/machtypes.h b/arch/mips/lantiq/machtypes.h new file mode 100644 index 000000000000..7e01b8c484eb --- /dev/null +++ b/arch/mips/lantiq/machtypes.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LANTIQ_MACH_H__ | ||
10 | #define _LANTIQ_MACH_H__ | ||
11 | |||
12 | #include <asm/mips_machine.h> | ||
13 | |||
14 | enum lantiq_mach_type { | ||
15 | LTQ_MACH_GENERIC = 0, | ||
16 | LTQ_MACH_EASY50712, /* Danube evaluation board */ | ||
17 | LTQ_MACH_EASY50601, /* Amazon SE evaluation board */ | ||
18 | }; | ||
19 | |||
20 | #endif | ||
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c new file mode 100644 index 000000000000..56ba007bf1e5 --- /dev/null +++ b/arch/mips/lantiq/prom.c | |||
@@ -0,0 +1,71 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/clk.h> | ||
11 | #include <asm/bootinfo.h> | ||
12 | #include <asm/time.h> | ||
13 | |||
14 | #include <lantiq.h> | ||
15 | |||
16 | #include "prom.h" | ||
17 | #include "clk.h" | ||
18 | |||
19 | static struct ltq_soc_info soc_info; | ||
20 | |||
21 | unsigned int ltq_get_cpu_ver(void) | ||
22 | { | ||
23 | return soc_info.rev; | ||
24 | } | ||
25 | EXPORT_SYMBOL(ltq_get_cpu_ver); | ||
26 | |||
27 | unsigned int ltq_get_soc_type(void) | ||
28 | { | ||
29 | return soc_info.type; | ||
30 | } | ||
31 | EXPORT_SYMBOL(ltq_get_soc_type); | ||
32 | |||
33 | const char *get_system_type(void) | ||
34 | { | ||
35 | return soc_info.sys_type; | ||
36 | } | ||
37 | |||
38 | void prom_free_prom_memory(void) | ||
39 | { | ||
40 | } | ||
41 | |||
42 | static void __init prom_init_cmdline(void) | ||
43 | { | ||
44 | int argc = fw_arg0; | ||
45 | char **argv = (char **) KSEG1ADDR(fw_arg1); | ||
46 | int i; | ||
47 | |||
48 | for (i = 0; i < argc; i++) { | ||
49 | char *p = (char *) KSEG1ADDR(argv[i]); | ||
50 | |||
51 | if (p && *p) { | ||
52 | strlcat(arcs_cmdline, p, sizeof(arcs_cmdline)); | ||
53 | strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline)); | ||
54 | } | ||
55 | } | ||
56 | } | ||
57 | |||
58 | void __init prom_init(void) | ||
59 | { | ||
60 | struct clk *clk; | ||
61 | |||
62 | ltq_soc_detect(&soc_info); | ||
63 | clk_init(); | ||
64 | clk = clk_get(0, "cpu"); | ||
65 | snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d", | ||
66 | soc_info.name, soc_info.rev); | ||
67 | clk_put(clk); | ||
68 | soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0'; | ||
69 | pr_info("SoC: %s\n", soc_info.sys_type); | ||
70 | prom_init_cmdline(); | ||
71 | } | ||
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h new file mode 100644 index 000000000000..b4229d94280f --- /dev/null +++ b/arch/mips/lantiq/prom.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_PROM_H__ | ||
10 | #define _LTQ_PROM_H__ | ||
11 | |||
12 | #define LTQ_SYS_TYPE_LEN 0x100 | ||
13 | |||
14 | struct ltq_soc_info { | ||
15 | unsigned char *name; | ||
16 | unsigned int rev; | ||
17 | unsigned int partnum; | ||
18 | unsigned int type; | ||
19 | unsigned char sys_type[LTQ_SYS_TYPE_LEN]; | ||
20 | }; | ||
21 | |||
22 | extern void ltq_soc_detect(struct ltq_soc_info *i); | ||
23 | extern void ltq_soc_setup(void); | ||
24 | |||
25 | #endif | ||
diff --git a/arch/mips/lantiq/setup.c b/arch/mips/lantiq/setup.c new file mode 100644 index 000000000000..9b8af77ed0f9 --- /dev/null +++ b/arch/mips/lantiq/setup.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/io.h> | ||
12 | #include <linux/ioport.h> | ||
13 | #include <asm/bootinfo.h> | ||
14 | |||
15 | #include <lantiq_soc.h> | ||
16 | |||
17 | #include "machtypes.h" | ||
18 | #include "devices.h" | ||
19 | #include "prom.h" | ||
20 | |||
21 | void __init plat_mem_setup(void) | ||
22 | { | ||
23 | /* assume 16M as default in case uboot fails to pass a proper ramsize */ | ||
24 | unsigned long memsize = 16; | ||
25 | char **envp = (char **) KSEG1ADDR(fw_arg2); | ||
26 | |||
27 | ioport_resource.start = IOPORT_RESOURCE_START; | ||
28 | ioport_resource.end = IOPORT_RESOURCE_END; | ||
29 | iomem_resource.start = IOMEM_RESOURCE_START; | ||
30 | iomem_resource.end = IOMEM_RESOURCE_END; | ||
31 | |||
32 | set_io_port_base((unsigned long) KSEG1); | ||
33 | |||
34 | while (*envp) { | ||
35 | char *e = (char *)KSEG1ADDR(*envp); | ||
36 | if (!strncmp(e, "memsize=", 8)) { | ||
37 | e += 8; | ||
38 | if (strict_strtoul(e, 0, &memsize)) | ||
39 | pr_warn("bad memsize specified\n"); | ||
40 | } | ||
41 | envp++; | ||
42 | } | ||
43 | memsize *= 1024 * 1024; | ||
44 | add_memory_region(0x00000000, memsize, BOOT_MEM_RAM); | ||
45 | } | ||
46 | |||
47 | static int __init | ||
48 | lantiq_setup(void) | ||
49 | { | ||
50 | ltq_soc_setup(); | ||
51 | mips_machine_setup(); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | arch_initcall(lantiq_setup); | ||
56 | |||
57 | static void __init | ||
58 | lantiq_generic_init(void) | ||
59 | { | ||
60 | /* Nothing to do */ | ||
61 | } | ||
62 | |||
63 | MIPS_MACHINE(LTQ_MACH_GENERIC, | ||
64 | "Generic", | ||
65 | "Generic Lantiq based board", | ||
66 | lantiq_generic_init); | ||
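plat_mem_setup() above falls back to 16 MB and otherwise scales the memsize= value from megabytes to bytes before registering the RAM region. The following user-space sketch mirrors that parsing with a hypothetical environment array (strtoul stands in for the kernel's strict_strtoul):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* hypothetical environment array as passed via fw_arg2 */
	const char *envp[] = { "memsize=32", "console=ttyLTQ0", NULL };
	unsigned long memsize = 16;	/* same 16 MB fallback as above */
	unsigned int i;

	for (i = 0; envp[i]; i++)
		if (!strncmp(envp[i], "memsize=", 8))
			memsize = strtoul(envp[i] + 8, NULL, 0);

	/* prints: RAM: 33554432 bytes */
	printf("RAM: %lu bytes\n", memsize * 1024 * 1024);
	return 0;
}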
diff --git a/arch/mips/lantiq/xway/Kconfig b/arch/mips/lantiq/xway/Kconfig new file mode 100644 index 000000000000..2b857de36620 --- /dev/null +++ b/arch/mips/lantiq/xway/Kconfig | |||
@@ -0,0 +1,23 @@ | |||
1 | if SOC_XWAY | ||
2 | |||
3 | menu "MIPS Machine" | ||
4 | |||
5 | config LANTIQ_MACH_EASY50712 | ||
6 | bool "Easy50712 - Danube" | ||
7 | default y | ||
8 | |||
9 | endmenu | ||
10 | |||
11 | endif | ||
12 | |||
13 | if SOC_AMAZON_SE | ||
14 | |||
15 | menu "MIPS Machine" | ||
16 | |||
17 | config LANTIQ_MACH_EASY50601 | ||
18 | bool "Easy50601 - Amazon SE" | ||
19 | default y | ||
20 | |||
21 | endmenu | ||
22 | |||
23 | endif | ||
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile new file mode 100644 index 000000000000..c517f2e77563 --- /dev/null +++ b/arch/mips/lantiq/xway/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | obj-y := pmu.o ebu.o reset.o gpio.o gpio_stp.o gpio_ebu.o devices.o dma.o | ||
2 | |||
3 | obj-$(CONFIG_SOC_XWAY) += clk-xway.o prom-xway.o setup-xway.o | ||
4 | obj-$(CONFIG_SOC_AMAZON_SE) += clk-ase.o prom-ase.o setup-ase.o | ||
5 | |||
6 | obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o | ||
7 | obj-$(CONFIG_LANTIQ_MACH_EASY50601) += mach-easy50601.o | ||
diff --git a/arch/mips/lantiq/xway/clk-ase.c b/arch/mips/lantiq/xway/clk-ase.c new file mode 100644 index 000000000000..22d823acd536 --- /dev/null +++ b/arch/mips/lantiq/xway/clk-ase.c | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/io.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/clk.h> | ||
13 | |||
14 | #include <asm/time.h> | ||
15 | #include <asm/irq.h> | ||
16 | #include <asm/div64.h> | ||
17 | |||
18 | #include <lantiq_soc.h> | ||
19 | |||
20 | /* cgu registers */ | ||
21 | #define LTQ_CGU_SYS 0x0010 | ||
22 | |||
23 | unsigned int ltq_get_io_region_clock(void) | ||
24 | { | ||
25 | return CLOCK_133M; | ||
26 | } | ||
27 | EXPORT_SYMBOL(ltq_get_io_region_clock); | ||
28 | |||
29 | unsigned int ltq_get_fpi_bus_clock(int fpi) | ||
30 | { | ||
31 | return CLOCK_133M; | ||
32 | } | ||
33 | EXPORT_SYMBOL(ltq_get_fpi_bus_clock); | ||
34 | |||
35 | unsigned int ltq_get_cpu_hz(void) | ||
36 | { | ||
37 | if (ltq_cgu_r32(LTQ_CGU_SYS) & (1 << 5)) | ||
38 | return CLOCK_266M; | ||
39 | else | ||
40 | return CLOCK_133M; | ||
41 | } | ||
42 | EXPORT_SYMBOL(ltq_get_cpu_hz); | ||
43 | |||
44 | unsigned int ltq_get_fpi_hz(void) | ||
45 | { | ||
46 | return CLOCK_133M; | ||
47 | } | ||
48 | EXPORT_SYMBOL(ltq_get_fpi_hz); | ||
diff --git a/arch/mips/lantiq/xway/clk-xway.c b/arch/mips/lantiq/xway/clk-xway.c new file mode 100644 index 000000000000..ddd39593c581 --- /dev/null +++ b/arch/mips/lantiq/xway/clk-xway.c | |||
@@ -0,0 +1,223 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/io.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/clk.h> | ||
13 | |||
14 | #include <asm/time.h> | ||
15 | #include <asm/irq.h> | ||
16 | #include <asm/div64.h> | ||
17 | |||
18 | #include <lantiq_soc.h> | ||
19 | |||
20 | static unsigned int ltq_ram_clocks[] = { | ||
21 | CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M }; | ||
22 | #define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3] | ||
23 | |||
24 | #define BASIC_FREQUENCY_1 35328000 | ||
25 | #define BASIC_FREQUENCY_2 36000000 | ||
26 | #define BASIS_REQUENCY_USB 12000000 | ||
27 | |||
28 | #define GET_BITS(x, msb, lsb) \ | ||
29 | (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb)) | ||
30 | |||
31 | #define LTQ_CGU_PLL0_CFG 0x0004 | ||
32 | #define LTQ_CGU_PLL1_CFG 0x0008 | ||
33 | #define LTQ_CGU_PLL2_CFG 0x000C | ||
34 | #define LTQ_CGU_SYS 0x0010 | ||
35 | #define LTQ_CGU_UPDATE 0x0014 | ||
36 | #define LTQ_CGU_IF_CLK 0x0018 | ||
37 | #define LTQ_CGU_OSC_CON 0x001C | ||
38 | #define LTQ_CGU_SMD 0x0020 | ||
39 | #define LTQ_CGU_CT1SR 0x0028 | ||
40 | #define LTQ_CGU_CT2SR 0x002C | ||
41 | #define LTQ_CGU_PCMCR 0x0030 | ||
42 | #define LTQ_CGU_PCI_CR 0x0034 | ||
43 | #define LTQ_CGU_PD_PC 0x0038 | ||
44 | #define LTQ_CGU_FMR 0x003C | ||
45 | |||
46 | #define CGU_PLL0_PHASE_DIVIDER_ENABLE \ | ||
47 | (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31)) | ||
48 | #define CGU_PLL0_BYPASS \ | ||
49 | (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30)) | ||
50 | #define CGU_PLL0_CFG_DSMSEL \ | ||
51 | (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28)) | ||
52 | #define CGU_PLL0_CFG_FRAC_EN \ | ||
53 | (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27)) | ||
54 | #define CGU_PLL1_SRC \ | ||
55 | (ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31)) | ||
56 | #define CGU_PLL2_PHASE_DIVIDER_ENABLE \ | ||
57 | (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20)) | ||
58 | #define CGU_SYS_FPI_SEL (1 << 6) | ||
59 | #define CGU_SYS_DDR_SEL 0x3 | ||
60 | #define CGU_PLL0_SRC (1 << 29) | ||
61 | |||
62 | #define CGU_PLL0_CFG_PLLK GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17) | ||
63 | #define CGU_PLL0_CFG_PLLN GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6) | ||
64 | #define CGU_PLL0_CFG_PLLM GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2) | ||
65 | #define CGU_PLL2_SRC GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17) | ||
66 | #define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13) | ||
67 | |||
68 | static unsigned int ltq_get_pll0_fdiv(void); | ||
69 | |||
70 | static inline unsigned int get_input_clock(int pll) | ||
71 | { | ||
72 | switch (pll) { | ||
73 | case 0: | ||
74 | if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC) | ||
75 | return BASIS_REQUENCY_USB; | ||
76 | else if (CGU_PLL0_PHASE_DIVIDER_ENABLE) | ||
77 | return BASIC_FREQUENCY_1; | ||
78 | else | ||
79 | return BASIC_FREQUENCY_2; | ||
80 | case 1: | ||
81 | if (CGU_PLL1_SRC) | ||
82 | return BASIS_REQUENCY_USB; | ||
83 | else if (CGU_PLL0_PHASE_DIVIDER_ENABLE) | ||
84 | return BASIC_FREQUENCY_1; | ||
85 | else | ||
86 | return BASIC_FREQUENCY_2; | ||
87 | case 2: | ||
88 | switch (CGU_PLL2_SRC) { | ||
89 | case 0: | ||
90 | return ltq_get_pll0_fdiv(); | ||
91 | case 1: | ||
92 | return CGU_PLL2_PHASE_DIVIDER_ENABLE ? | ||
93 | BASIC_FREQUENCY_1 : | ||
94 | BASIC_FREQUENCY_2; | ||
95 | case 2: | ||
96 | return BASIS_REQUENCY_USB; | ||
97 | } | ||
98 | default: | ||
99 | return 0; | ||
100 | } | ||
101 | } | ||
102 | |||
103 | static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den) | ||
104 | { | ||
105 | u64 res, clock = get_input_clock(pll); | ||
106 | |||
107 | res = num * clock; | ||
108 | do_div(res, den); | ||
109 | return res; | ||
110 | } | ||
111 | |||
112 | static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N, | ||
113 | unsigned int K) | ||
114 | { | ||
115 | unsigned int num = ((N + 1) << 10) + K; | ||
116 | unsigned int den = (M + 1) << 10; | ||
117 | |||
118 | return cal_dsm(pll, num, den); | ||
119 | } | ||
120 | |||
121 | static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N, | ||
122 | unsigned int K) | ||
123 | { | ||
124 | unsigned int num = ((N + 1) << 11) + K + 512; | ||
125 | unsigned int den = (M + 1) << 11; | ||
126 | |||
127 | return cal_dsm(pll, num, den); | ||
128 | } | ||
129 | |||
130 | static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N, | ||
131 | unsigned int K) | ||
132 | { | ||
133 | unsigned int num = K >= 512 ? | ||
134 | ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584; | ||
135 | unsigned int den = (M + 1) << 12; | ||
136 | |||
137 | return cal_dsm(pll, num, den); | ||
138 | } | ||
139 | |||
140 | static inline unsigned int dsm(int pll, unsigned int M, unsigned int N, | ||
141 | unsigned int K, unsigned int dsmsel, unsigned int phase_div_en) | ||
142 | { | ||
143 | if (!dsmsel) | ||
144 | return mash_dsm(pll, M, N, K); | ||
145 | else if (!phase_div_en) | ||
146 | return mash_dsm(pll, M, N, K); | ||
147 | else | ||
148 | return ssff_dsm_2(pll, M, N, K); | ||
149 | } | ||
150 | |||
151 | static inline unsigned int ltq_get_pll0_fosc(void) | ||
152 | { | ||
153 | if (CGU_PLL0_BYPASS) | ||
154 | return get_input_clock(0); | ||
155 | else | ||
156 | return !CGU_PLL0_CFG_FRAC_EN | ||
157 | ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0, | ||
158 | CGU_PLL0_CFG_DSMSEL, | ||
159 | CGU_PLL0_PHASE_DIVIDER_ENABLE) | ||
160 | : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, | ||
161 | CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL, | ||
162 | CGU_PLL0_PHASE_DIVIDER_ENABLE); | ||
163 | } | ||
164 | |||
165 | static unsigned int ltq_get_pll0_fdiv(void) | ||
166 | { | ||
167 | unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1; | ||
168 | |||
169 | return (ltq_get_pll0_fosc() + (div >> 1)) / div; | ||
170 | } | ||
171 | |||
172 | unsigned int ltq_get_io_region_clock(void) | ||
173 | { | ||
174 | unsigned int ret = ltq_get_pll0_fosc(); | ||
175 | |||
176 | switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) { | ||
177 | default: | ||
178 | case 0: | ||
179 | return (ret + 1) / 2; | ||
180 | case 1: | ||
181 | return (ret * 2 + 2) / 5; | ||
182 | case 2: | ||
183 | return (ret + 1) / 3; | ||
184 | case 3: | ||
185 | return (ret + 2) / 4; | ||
186 | } | ||
187 | } | ||
188 | EXPORT_SYMBOL(ltq_get_io_region_clock); | ||
189 | |||
190 | unsigned int ltq_get_fpi_bus_clock(int fpi) | ||
191 | { | ||
192 | unsigned int ret = ltq_get_io_region_clock(); | ||
193 | |||
194 | if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL)) | ||
195 | ret >>= 1; | ||
196 | return ret; | ||
197 | } | ||
198 | EXPORT_SYMBOL(ltq_get_fpi_bus_clock); | ||
199 | |||
200 | unsigned int ltq_get_cpu_hz(void) | ||
201 | { | ||
202 | switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) { | ||
203 | case 0: | ||
204 | return CLOCK_333M; | ||
205 | case 4: | ||
206 | return DDR_HZ; | ||
207 | case 8: | ||
208 | return DDR_HZ << 1; | ||
209 | default: | ||
210 | return DDR_HZ >> 1; | ||
211 | } | ||
212 | } | ||
213 | EXPORT_SYMBOL(ltq_get_cpu_hz); | ||
214 | |||
215 | unsigned int ltq_get_fpi_hz(void) | ||
216 | { | ||
217 | unsigned int ddr_clock = DDR_HZ; | ||
218 | |||
219 | if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40) | ||
220 | return ddr_clock >> 1; | ||
221 | return ddr_clock; | ||
222 | } | ||
223 | EXPORT_SYMBOL(ltq_get_fpi_hz); | ||
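For the MASH DSM case, ltq_get_pll0_fosc() above boils down to fout = fin * ((N + 1) * 1024 + K) / ((M + 1) * 1024). A standalone sketch with made-up M/N/K values and the 36 MHz BASIC_FREQUENCY_2 input reproduces that arithmetic:

#include <stdio.h>
#include <stdint.h>

/* fout = fin * ((N + 1) * 1024 + K) / ((M + 1) * 1024), as in mash_dsm()/cal_dsm() */
static uint64_t mash_dsm(uint64_t fin, unsigned int m, unsigned int n, unsigned int k)
{
	uint64_t num = ((uint64_t)(n + 1) << 10) + k;
	uint64_t den = (uint64_t)(m + 1) << 10;

	return fin * num / den;
}

int main(void)
{
	/* hypothetical divider settings: M = 0, N = 8, K = 0, 36 MHz input */
	printf("%llu Hz\n", (unsigned long long)mash_dsm(36000000, 0, 8, 0));
	return 0;	/* prints: 324000000 Hz */
}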
diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c new file mode 100644 index 000000000000..e09e789dfc27 --- /dev/null +++ b/arch/mips/lantiq/xway/devices.c | |||
@@ -0,0 +1,121 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/mtd/physmap.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/reboot.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/leds.h> | ||
18 | #include <linux/etherdevice.h> | ||
19 | #include <linux/reboot.h> | ||
20 | #include <linux/time.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/gpio.h> | ||
23 | #include <linux/leds.h> | ||
24 | |||
25 | #include <asm/bootinfo.h> | ||
26 | #include <asm/irq.h> | ||
27 | |||
28 | #include <lantiq_soc.h> | ||
29 | #include <lantiq_irq.h> | ||
30 | #include <lantiq_platform.h> | ||
31 | |||
32 | #include "devices.h" | ||
33 | |||
34 | /* gpio */ | ||
35 | static struct resource ltq_gpio_resource[] = { | ||
36 | { | ||
37 | .name = "gpio0", | ||
38 | .start = LTQ_GPIO0_BASE_ADDR, | ||
39 | .end = LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1, | ||
40 | .flags = IORESOURCE_MEM, | ||
41 | }, { | ||
42 | .name = "gpio1", | ||
43 | .start = LTQ_GPIO1_BASE_ADDR, | ||
44 | .end = LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1, | ||
45 | .flags = IORESOURCE_MEM, | ||
46 | }, { | ||
47 | .name = "gpio2", | ||
48 | .start = LTQ_GPIO2_BASE_ADDR, | ||
49 | .end = LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1, | ||
50 | .flags = IORESOURCE_MEM, | ||
51 | } | ||
52 | }; | ||
53 | |||
54 | void __init ltq_register_gpio(void) | ||
55 | { | ||
56 | platform_device_register_simple("ltq_gpio", 0, | ||
57 | <q_gpio_resource[0], 1); | ||
58 | platform_device_register_simple("ltq_gpio", 1, | ||
59 | <q_gpio_resource[1], 1); | ||
60 | |||
61 | /* AR9 and VR9 have an extra gpio block */ | ||
62 | if (ltq_is_ar9() || ltq_is_vr9()) { | ||
63 | platform_device_register_simple("ltq_gpio", 2, | ||
64 | <q_gpio_resource[2], 1); | ||
65 | } | ||
66 | } | ||
67 | |||
68 | /* serial to parallel conversion */ | ||
69 | static struct resource ltq_stp_resource = { | ||
70 | .name = "stp", | ||
71 | .start = LTQ_STP_BASE_ADDR, | ||
72 | .end = LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1, | ||
73 | .flags = IORESOURCE_MEM, | ||
74 | }; | ||
75 | |||
76 | void __init ltq_register_gpio_stp(void) | ||
77 | { | ||
78 | platform_device_register_simple("ltq_stp", 0, <q_stp_resource, 1); | ||
79 | } | ||
80 | |||
81 | /* asc ports - amazon se has its own serial mapping */ | ||
82 | static struct resource ltq_ase_asc_resources[] = { | ||
83 | { | ||
84 | .name = "asc0", | ||
85 | .start = LTQ_ASC1_BASE_ADDR, | ||
86 | .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1, | ||
87 | .flags = IORESOURCE_MEM, | ||
88 | }, | ||
89 | IRQ_RES(tx, LTQ_ASC_ASE_TIR), | ||
90 | IRQ_RES(rx, LTQ_ASC_ASE_RIR), | ||
91 | IRQ_RES(err, LTQ_ASC_ASE_EIR), | ||
92 | }; | ||
93 | |||
94 | void __init ltq_register_ase_asc(void) | ||
95 | { | ||
96 | platform_device_register_simple("ltq_asc", 0, | ||
97 | ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources)); | ||
98 | } | ||
99 | |||
100 | /* ethernet */ | ||
101 | static struct resource ltq_etop_resources = { | ||
102 | .name = "etop", | ||
103 | .start = LTQ_ETOP_BASE_ADDR, | ||
104 | .end = LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1, | ||
105 | .flags = IORESOURCE_MEM, | ||
106 | }; | ||
107 | |||
108 | static struct platform_device ltq_etop = { | ||
109 | .name = "ltq_etop", | ||
110 | .resource = <q_etop_resources, | ||
111 | .num_resources = 1, | ||
112 | }; | ||
113 | |||
114 | void __init | ||
115 | ltq_register_etop(struct ltq_eth_data *eth) | ||
116 | { | ||
117 | if (eth) { | ||
118 | ltq_etop.dev.platform_data = eth; | ||
119 | platform_device_register(<q_etop); | ||
120 | } | ||
121 | } | ||
diff --git a/arch/mips/lantiq/xway/devices.h b/arch/mips/lantiq/xway/devices.h new file mode 100644 index 000000000000..e90493471bc1 --- /dev/null +++ b/arch/mips/lantiq/xway/devices.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_DEVICES_XWAY_H__ | ||
10 | #define _LTQ_DEVICES_XWAY_H__ | ||
11 | |||
12 | #include "../devices.h" | ||
13 | #include <linux/phy.h> | ||
14 | |||
15 | extern void ltq_register_gpio(void); | ||
16 | extern void ltq_register_gpio_stp(void); | ||
17 | extern void ltq_register_ase_asc(void); | ||
18 | extern void ltq_register_etop(struct ltq_eth_data *eth); | ||
19 | |||
20 | #endif | ||
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c new file mode 100644 index 000000000000..4278a459d6c4 --- /dev/null +++ b/arch/mips/lantiq/xway/dma.c | |||
@@ -0,0 +1,253 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. | ||
14 | * | ||
15 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
16 | */ | ||
17 | |||
18 | #include <linux/init.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | |||
23 | #include <lantiq_soc.h> | ||
24 | #include <xway_dma.h> | ||
25 | |||
26 | #define LTQ_DMA_CTRL 0x10 | ||
27 | #define LTQ_DMA_CPOLL 0x14 | ||
28 | #define LTQ_DMA_CS 0x18 | ||
29 | #define LTQ_DMA_CCTRL 0x1C | ||
30 | #define LTQ_DMA_CDBA 0x20 | ||
31 | #define LTQ_DMA_CDLEN 0x24 | ||
32 | #define LTQ_DMA_CIS 0x28 | ||
33 | #define LTQ_DMA_CIE 0x2C | ||
34 | #define LTQ_DMA_PS 0x40 | ||
35 | #define LTQ_DMA_PCTRL 0x44 | ||
36 | #define LTQ_DMA_IRNEN 0xf4 | ||
37 | |||
38 | #define DMA_DESCPT BIT(3) /* descriptor complete irq */ | ||
39 | #define DMA_TX BIT(8) /* TX channel direction */ | ||
40 | #define DMA_CHAN_ON BIT(0) /* channel on / off bit */ | ||
41 | #define DMA_PDEN BIT(6) /* enable packet drop */ | ||
42 | #define DMA_CHAN_RST BIT(1) /* channel reset bit */ | ||
43 | #define DMA_RESET BIT(0) /* global software reset */ | ||
44 | #define DMA_IRQ_ACK 0x7e /* mask used to ack the channel irq status */ | ||
45 | #define DMA_POLL BIT(31) /* turn on channel polling */ | ||
46 | #define DMA_CLK_DIV4 BIT(6) /* polling clock divider */ | ||
47 | #define DMA_2W_BURST BIT(1) /* 2 word burst length */ | ||
48 | #define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */ | ||
49 | #define DMA_ETOP_ENDIANESS (0xf << 8) /* endianness swap for the etop channels */ | ||
50 | #define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel weight */ | ||
51 | |||
52 | #define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x)) | ||
53 | #define ltq_dma_w32(x, y) ltq_w32(x, ltq_dma_membase + (y)) | ||
54 | #define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \ | ||
55 | ltq_dma_membase + (z)) | ||
56 | |||
57 | static struct resource ltq_dma_resource = { | ||
58 | .name = "dma", | ||
59 | .start = LTQ_DMA_BASE_ADDR, | ||
60 | .end = LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1, | ||
61 | .flags = IORESOURCE_MEM, | ||
62 | }; | ||
63 | |||
64 | static void __iomem *ltq_dma_membase; | ||
65 | |||
66 | void | ||
67 | ltq_dma_enable_irq(struct ltq_dma_channel *ch) | ||
68 | { | ||
69 | unsigned long flags; | ||
70 | |||
71 | local_irq_save(flags); | ||
72 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
73 | ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); | ||
74 | local_irq_restore(flags); | ||
75 | } | ||
76 | EXPORT_SYMBOL_GPL(ltq_dma_enable_irq); | ||
77 | |||
78 | void | ||
79 | ltq_dma_disable_irq(struct ltq_dma_channel *ch) | ||
80 | { | ||
81 | unsigned long flags; | ||
82 | |||
83 | local_irq_save(flags); | ||
84 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
85 | ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN); | ||
86 | local_irq_restore(flags); | ||
87 | } | ||
88 | EXPORT_SYMBOL_GPL(ltq_dma_disable_irq); | ||
89 | |||
90 | void | ||
91 | ltq_dma_ack_irq(struct ltq_dma_channel *ch) | ||
92 | { | ||
93 | unsigned long flags; | ||
94 | |||
95 | local_irq_save(flags); | ||
96 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
97 | ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS); | ||
98 | local_irq_restore(flags); | ||
99 | } | ||
100 | EXPORT_SYMBOL_GPL(ltq_dma_ack_irq); | ||
101 | |||
102 | void | ||
103 | ltq_dma_open(struct ltq_dma_channel *ch) | ||
104 | { | ||
105 | unsigned long flag; | ||
106 | |||
107 | local_irq_save(flag); | ||
108 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
109 | ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL); | ||
110 | ltq_dma_enable_irq(ch); | ||
111 | local_irq_restore(flag); | ||
112 | } | ||
113 | EXPORT_SYMBOL_GPL(ltq_dma_open); | ||
114 | |||
115 | void | ||
116 | ltq_dma_close(struct ltq_dma_channel *ch) | ||
117 | { | ||
118 | unsigned long flag; | ||
119 | |||
120 | local_irq_save(flag); | ||
121 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
122 | ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); | ||
123 | ltq_dma_disable_irq(ch); | ||
124 | local_irq_restore(flag); | ||
125 | } | ||
126 | EXPORT_SYMBOL_GPL(ltq_dma_close); | ||
127 | |||
128 | static void | ||
129 | ltq_dma_alloc(struct ltq_dma_channel *ch) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | |||
133 | ch->desc = 0; | ||
134 | ch->desc_base = dma_alloc_coherent(NULL, | ||
135 | LTQ_DESC_NUM * LTQ_DESC_SIZE, | ||
136 | &ch->phys, GFP_ATOMIC); | ||
137 | memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE); | ||
138 | |||
139 | local_irq_save(flags); | ||
140 | ltq_dma_w32(ch->nr, LTQ_DMA_CS); | ||
141 | ltq_dma_w32(ch->phys, LTQ_DMA_CDBA); | ||
142 | ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN); | ||
143 | ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); | ||
144 | wmb(); | ||
145 | ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL); | ||
146 | while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST) | ||
147 | ; | ||
148 | local_irq_restore(flags); | ||
149 | } | ||
150 | |||
151 | void | ||
152 | ltq_dma_alloc_tx(struct ltq_dma_channel *ch) | ||
153 | { | ||
154 | unsigned long flags; | ||
155 | |||
156 | ltq_dma_alloc(ch); | ||
157 | |||
158 | local_irq_save(flags); | ||
159 | ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); | ||
160 | ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); | ||
161 | ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL); | ||
162 | local_irq_restore(flags); | ||
163 | } | ||
164 | EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx); | ||
165 | |||
166 | void | ||
167 | ltq_dma_alloc_rx(struct ltq_dma_channel *ch) | ||
168 | { | ||
169 | unsigned long flags; | ||
170 | |||
171 | ltq_dma_alloc(ch); | ||
172 | |||
173 | local_irq_save(flags); | ||
174 | ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); | ||
175 | ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); | ||
176 | ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL); | ||
177 | local_irq_restore(flags); | ||
178 | } | ||
179 | EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx); | ||
180 | |||
181 | void | ||
182 | ltq_dma_free(struct ltq_dma_channel *ch) | ||
183 | { | ||
184 | if (!ch->desc_base) | ||
185 | return; | ||
186 | ltq_dma_close(ch); | ||
187 | dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE, | ||
188 | ch->desc_base, ch->phys); | ||
189 | } | ||
190 | EXPORT_SYMBOL_GPL(ltq_dma_free); | ||
191 | |||
192 | void | ||
193 | ltq_dma_init_port(int p) | ||
194 | { | ||
195 | ltq_dma_w32(p, LTQ_DMA_PS); | ||
196 | switch (p) { | ||
197 | case DMA_PORT_ETOP: | ||
198 | /* | ||
199 | * Tell the DMA engine to swap the endianness of data frames and | ||
200 | * drop packets if the channel arbitration fails. | ||
201 | */ | ||
202 | ltq_dma_w32_mask(0, DMA_ETOP_ENDIANESS | DMA_PDEN, | ||
203 | LTQ_DMA_PCTRL); | ||
204 | break; | ||
205 | |||
206 | case DMA_PORT_DEU: | ||
207 | ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2), | ||
208 | LTQ_DMA_PCTRL); | ||
209 | break; | ||
210 | |||
211 | default: | ||
212 | break; | ||
213 | } | ||
214 | } | ||
215 | EXPORT_SYMBOL_GPL(ltq_dma_init_port); | ||
216 | |||
217 | int __init | ||
218 | ltq_dma_init(void) | ||
219 | { | ||
220 | int i; | ||
221 | |||
222 | /* insert and request the memory region */ | ||
223 | if (insert_resource(&iomem_resource, <q_dma_resource) < 0) | ||
224 | panic("Failed to insert dma memory\n"); | ||
225 | |||
226 | if (request_mem_region(ltq_dma_resource.start, | ||
227 | resource_size(<q_dma_resource), "dma") < 0) | ||
228 | panic("Failed to request dma memory\n"); | ||
229 | |||
230 | /* remap dma register range */ | ||
231 | ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start, | ||
232 | resource_size(<q_dma_resource)); | ||
233 | if (!ltq_dma_membase) | ||
234 | panic("Failed to remap dma memory\n"); | ||
235 | |||
236 | /* power up and reset the dma engine */ | ||
237 | ltq_pmu_enable(PMU_DMA); | ||
238 | ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL); | ||
239 | |||
240 | /* disable all interrupts */ | ||
241 | ltq_dma_w32(0, LTQ_DMA_IRNEN); | ||
242 | |||
243 | /* reset/configure each channel */ | ||
244 | for (i = 0; i < DMA_MAX_CHANNEL; i++) { | ||
245 | ltq_dma_w32(i, LTQ_DMA_CS); | ||
246 | ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL); | ||
247 | ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); | ||
248 | ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); | ||
249 | } | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | postcore_initcall(ltq_dma_init); | ||
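A client of this DMA core is expected to pick a channel, allocate its descriptor ring and then switch it on; the sketch below shows that sequence for a hypothetical RX channel. The channel number and the struct ltq_dma_channel layout come from xway_dma.h, which is not part of this hunk, so treat those details as assumptions:

#include <linux/interrupt.h>
#include <xway_dma.h>

static struct ltq_dma_channel my_rx_ch;

static void my_driver_dma_start(void)
{
	ltq_dma_init_port(DMA_PORT_ETOP);	/* configure the port once */

	my_rx_ch.nr = 6;			/* channel number is driver specific (assumed) */
	ltq_dma_alloc_rx(&my_rx_ch);		/* descriptor ring + per-channel irq enable */
	/* ... fill the rx descriptors here ... */
	ltq_dma_open(&my_rx_ch);		/* switch the channel on */
}

static irqreturn_t my_driver_dma_irq(int irq, void *priv)
{
	ltq_dma_ack_irq(&my_rx_ch);		/* clear the channel interrupt status */
	/* ... process completed descriptors ... */
	return IRQ_HANDLED;
}

static void my_driver_dma_stop(void)
{
	ltq_dma_free(&my_rx_ch);		/* closes the channel and frees the ring */
}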
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c new file mode 100644 index 000000000000..66eb52fa50a1 --- /dev/null +++ b/arch/mips/lantiq/xway/ebu.c | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * EBU - the external bus unit attaches PCI, NOR and NAND | ||
7 | * | ||
8 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/version.h> | ||
14 | #include <linux/ioport.h> | ||
15 | |||
16 | #include <lantiq_soc.h> | ||
17 | |||
18 | /* all access to the ebu must be locked */ | ||
19 | DEFINE_SPINLOCK(ebu_lock); | ||
20 | EXPORT_SYMBOL_GPL(ebu_lock); | ||
21 | |||
22 | static struct resource ltq_ebu_resource = { | ||
23 | .name = "ebu", | ||
24 | .start = LTQ_EBU_BASE_ADDR, | ||
25 | .end = LTQ_EBU_BASE_ADDR + LTQ_EBU_SIZE - 1, | ||
26 | .flags = IORESOURCE_MEM, | ||
27 | }; | ||
28 | |||
29 | /* remapped base addr of the external bus unit */ | ||
30 | void __iomem *ltq_ebu_membase; | ||
31 | |||
32 | static int __init lantiq_ebu_init(void) | ||
33 | { | ||
34 | /* insert and request the memory region */ | ||
35 | if (insert_resource(&iomem_resource, <q_ebu_resource) < 0) | ||
36 | panic("Failed to insert ebu memory\n"); | ||
37 | |||
38 | if (request_mem_region(ltq_ebu_resource.start, | ||
39 | resource_size(<q_ebu_resource), "ebu") < 0) | ||
40 | panic("Failed to request ebu memory\n"); | ||
41 | |||
42 | /* remap ebu register range */ | ||
43 | ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start, | ||
44 | resource_size(<q_ebu_resource)); | ||
45 | if (!ltq_ebu_membase) | ||
46 | panic("Failed to remap ebu memory\n"); | ||
47 | |||
48 | /* make sure to unprotect the memory region where flash is located */ | ||
49 | ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | postcore_initcall(lantiq_ebu_init); | ||
diff --git a/arch/mips/lantiq/xway/gpio.c b/arch/mips/lantiq/xway/gpio.c new file mode 100644 index 000000000000..a321451a5455 --- /dev/null +++ b/arch/mips/lantiq/xway/gpio.c | |||
@@ -0,0 +1,195 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/slab.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/platform_device.h> | ||
12 | #include <linux/gpio.h> | ||
13 | #include <linux/ioport.h> | ||
14 | #include <linux/io.h> | ||
15 | |||
16 | #include <lantiq_soc.h> | ||
17 | |||
18 | #define LTQ_GPIO_OUT 0x00 | ||
19 | #define LTQ_GPIO_IN 0x04 | ||
20 | #define LTQ_GPIO_DIR 0x08 | ||
21 | #define LTQ_GPIO_ALTSEL0 0x0C | ||
22 | #define LTQ_GPIO_ALTSEL1 0x10 | ||
23 | #define LTQ_GPIO_OD 0x14 | ||
24 | |||
25 | #define PINS_PER_PORT 16 | ||
26 | #define MAX_PORTS 3 | ||
27 | |||
28 | #define ltq_gpio_getbit(m, r, p) (!!(ltq_r32(m + r) & (1 << p))) | ||
29 | #define ltq_gpio_setbit(m, r, p) ltq_w32_mask(0, (1 << p), m + r) | ||
30 | #define ltq_gpio_clearbit(m, r, p) ltq_w32_mask((1 << p), 0, m + r) | ||
31 | |||
32 | struct ltq_gpio { | ||
33 | void __iomem *membase; | ||
34 | struct gpio_chip chip; | ||
35 | }; | ||
36 | |||
37 | static struct ltq_gpio ltq_gpio_port[MAX_PORTS]; | ||
38 | |||
39 | int gpio_to_irq(unsigned int gpio) | ||
40 | { | ||
41 | return -EINVAL; | ||
42 | } | ||
43 | EXPORT_SYMBOL(gpio_to_irq); | ||
44 | |||
45 | int irq_to_gpio(unsigned int irq) | ||
46 | { | ||
47 | return -EINVAL; | ||
48 | } | ||
49 | EXPORT_SYMBOL(irq_to_gpio); | ||
50 | |||
51 | int ltq_gpio_request(unsigned int pin, unsigned int alt0, | ||
52 | unsigned int alt1, unsigned int dir, const char *name) | ||
53 | { | ||
54 | int id = 0; | ||
55 | |||
56 | if (pin >= (MAX_PORTS * PINS_PER_PORT)) | ||
57 | return -EINVAL; | ||
58 | if (gpio_request(pin, name)) { | ||
59 | pr_err("failed to setup lantiq gpio: %s\n", name); | ||
60 | return -EBUSY; | ||
61 | } | ||
62 | if (dir) | ||
63 | gpio_direction_output(pin, 1); | ||
64 | else | ||
65 | gpio_direction_input(pin); | ||
66 | while (pin >= PINS_PER_PORT) { | ||
67 | pin -= PINS_PER_PORT; | ||
68 | id++; | ||
69 | } | ||
70 | if (alt0) | ||
71 | ltq_gpio_setbit(ltq_gpio_port[id].membase, | ||
72 | LTQ_GPIO_ALTSEL0, pin); | ||
73 | else | ||
74 | ltq_gpio_clearbit(ltq_gpio_port[id].membase, | ||
75 | LTQ_GPIO_ALTSEL0, pin); | ||
76 | if (alt1) | ||
77 | ltq_gpio_setbit(ltq_gpio_port[id].membase, | ||
78 | LTQ_GPIO_ALTSEL1, pin); | ||
79 | else | ||
80 | ltq_gpio_clearbit(ltq_gpio_port[id].membase, | ||
81 | LTQ_GPIO_ALTSEL1, pin); | ||
82 | return 0; | ||
83 | } | ||
84 | EXPORT_SYMBOL(ltq_gpio_request); | ||
85 | |||
86 | static void ltq_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) | ||
87 | { | ||
88 | struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); | ||
89 | |||
90 | if (value) | ||
91 | ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset); | ||
92 | else | ||
93 | ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset); | ||
94 | } | ||
95 | |||
96 | static int ltq_gpio_get(struct gpio_chip *chip, unsigned int offset) | ||
97 | { | ||
98 | struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); | ||
99 | |||
100 | return ltq_gpio_getbit(ltq_gpio->membase, LTQ_GPIO_IN, offset); | ||
101 | } | ||
102 | |||
103 | static int ltq_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) | ||
104 | { | ||
105 | struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); | ||
106 | |||
107 | ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OD, offset); | ||
108 | ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset); | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static int ltq_gpio_direction_output(struct gpio_chip *chip, | ||
114 | unsigned int offset, int value) | ||
115 | { | ||
116 | struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); | ||
117 | |||
118 | ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OD, offset); | ||
119 | ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset); | ||
120 | ltq_gpio_set(chip, offset, value); | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static int ltq_gpio_req(struct gpio_chip *chip, unsigned offset) | ||
126 | { | ||
127 | struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); | ||
128 | |||
129 | ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL0, offset); | ||
130 | ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL1, offset); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static int ltq_gpio_probe(struct platform_device *pdev) | ||
135 | { | ||
136 | struct resource *res; | ||
137 | |||
138 | if (pdev->id >= MAX_PORTS) { | ||
139 | dev_err(&pdev->dev, "invalid gpio port %d\n", | ||
140 | pdev->id); | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
144 | if (!res) { | ||
145 | dev_err(&pdev->dev, "failed to get memory for gpio port %d\n", | ||
146 | pdev->id); | ||
147 | return -ENOENT; | ||
148 | } | ||
149 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
150 | resource_size(res), dev_name(&pdev->dev)); | ||
151 | if (!res) { | ||
152 | dev_err(&pdev->dev, | ||
153 | "failed to request memory for gpio port %d\n", | ||
154 | pdev->id); | ||
155 | return -EBUSY; | ||
156 | } | ||
157 | ltq_gpio_port[pdev->id].membase = devm_ioremap_nocache(&pdev->dev, | ||
158 | res->start, resource_size(res)); | ||
159 | if (!ltq_gpio_port[pdev->id].membase) { | ||
160 | dev_err(&pdev->dev, "failed to remap memory for gpio port %d\n", | ||
161 | pdev->id); | ||
162 | return -ENOMEM; | ||
163 | } | ||
164 | ltq_gpio_port[pdev->id].chip.label = "ltq_gpio"; | ||
165 | ltq_gpio_port[pdev->id].chip.direction_input = ltq_gpio_direction_input; | ||
166 | ltq_gpio_port[pdev->id].chip.direction_output = | ||
167 | ltq_gpio_direction_output; | ||
168 | ltq_gpio_port[pdev->id].chip.get = ltq_gpio_get; | ||
169 | ltq_gpio_port[pdev->id].chip.set = ltq_gpio_set; | ||
170 | ltq_gpio_port[pdev->id].chip.request = ltq_gpio_req; | ||
171 | ltq_gpio_port[pdev->id].chip.base = PINS_PER_PORT * pdev->id; | ||
172 | ltq_gpio_port[pdev->id].chip.ngpio = PINS_PER_PORT; | ||
173 | platform_set_drvdata(pdev, <q_gpio_port[pdev->id]); | ||
174 | return gpiochip_add(<q_gpio_port[pdev->id].chip); | ||
175 | } | ||
176 | |||
177 | static struct platform_driver | ||
178 | ltq_gpio_driver = { | ||
179 | .probe = ltq_gpio_probe, | ||
180 | .driver = { | ||
181 | .name = "ltq_gpio", | ||
182 | .owner = THIS_MODULE, | ||
183 | }, | ||
184 | }; | ||
185 | |||
186 | int __init ltq_gpio_init(void) | ||
187 | { | ||
188 | int ret = platform_driver_register(<q_gpio_driver); | ||
189 | |||
190 | if (ret) | ||
191 | pr_info("ltq_gpio : Error registering platfom driver!"); | ||
192 | return ret; | ||
193 | } | ||
194 | |||
195 | postcore_initcall(ltq_gpio_init); | ||
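ltq_gpio_request() above takes a linear pin number, splits it into a port and a per-port offset (16 pins per port) and then programs the two ALTSEL bits. A hypothetical board-level call claiming pin 19 (port 1, offset 3) as an alternate-function output might look like this; the "my-cs" label and its purpose are made up:

/* pin 19 = port 1, offset 3; alt0 = 1, alt1 = 0, direction = output */
ltq_gpio_request(19, 1, 0, 1, "my-cs");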
diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c new file mode 100644 index 000000000000..a479355abdb9 --- /dev/null +++ b/arch/mips/lantiq/xway/gpio_ebu.c | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/platform_device.h> | ||
13 | #include <linux/mutex.h> | ||
14 | #include <linux/gpio.h> | ||
15 | #include <linux/io.h> | ||
16 | |||
17 | #include <lantiq_soc.h> | ||
18 | |||
19 | /* | ||
20 | * By attaching hardware latches to the EBU it is possible to create output | ||
21 | * only gpios. This driver configures a special memory address, which when | ||
22 | * written to, outputs 16 bits to the latches. | ||
23 | */ | ||
24 | |||
25 | #define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */ | ||
26 | #define LTQ_EBU_WP 0x80000000 /* write protect bit */ | ||
27 | |||
28 | /* we keep a shadow copy of the last value written to the ebu */ | ||
29 | static int ltq_ebu_gpio_shadow = 0x0; | ||
30 | static void __iomem *ltq_ebu_gpio_membase; | ||
31 | |||
32 | static void ltq_ebu_apply(void) | ||
33 | { | ||
34 | unsigned long flags; | ||
35 | |||
36 | spin_lock_irqsave(&ebu_lock, flags); | ||
37 | ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1); | ||
38 | *((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow; | ||
39 | ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1); | ||
40 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
41 | } | ||
42 | |||
43 | static void ltq_ebu_set(struct gpio_chip *chip, unsigned offset, int value) | ||
44 | { | ||
45 | if (value) | ||
46 | ltq_ebu_gpio_shadow |= (1 << offset); | ||
47 | else | ||
48 | ltq_ebu_gpio_shadow &= ~(1 << offset); | ||
49 | ltq_ebu_apply(); | ||
50 | } | ||
51 | |||
52 | static int ltq_ebu_direction_output(struct gpio_chip *chip, unsigned offset, | ||
53 | int value) | ||
54 | { | ||
55 | ltq_ebu_set(chip, offset, value); | ||
56 | |||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static struct gpio_chip ltq_ebu_chip = { | ||
61 | .label = "ltq_ebu", | ||
62 | .direction_output = ltq_ebu_direction_output, | ||
63 | .set = ltq_ebu_set, | ||
64 | .base = 72, | ||
65 | .ngpio = 16, | ||
66 | .can_sleep = 1, | ||
67 | .owner = THIS_MODULE, | ||
68 | }; | ||
69 | |||
70 | static int ltq_ebu_probe(struct platform_device *pdev) | ||
71 | { | ||
72 | int ret = 0; | ||
73 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
74 | |||
75 | if (!res) { | ||
76 | dev_err(&pdev->dev, "failed to get memory resource\n"); | ||
77 | return -ENOENT; | ||
78 | } | ||
79 | |||
80 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
81 | resource_size(res), dev_name(&pdev->dev)); | ||
82 | if (!res) { | ||
83 | dev_err(&pdev->dev, "failed to request memory resource\n"); | ||
84 | return -EBUSY; | ||
85 | } | ||
86 | |||
87 | ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start, | ||
88 | resource_size(res)); | ||
89 | if (!ltq_ebu_gpio_membase) { | ||
90 | dev_err(&pdev->dev, "Failed to ioremap mem region\n"); | ||
91 | return -ENOMEM; | ||
92 | } | ||
93 | |||
94 | /* grab the default shadow value passed from the platform code */ | ||
95 | ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data; | ||
96 | |||
97 | /* tell the ebu controller which memory address we will be using */ | ||
98 | ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1); | ||
99 | |||
100 | /* write protect the region */ | ||
101 | ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1); | ||
102 | |||
103 | ret = gpiochip_add(<q_ebu_chip); | ||
104 | if (!ret) | ||
105 | ltq_ebu_apply(); | ||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | static struct platform_driver ltq_ebu_driver = { | ||
110 | .probe = ltq_ebu_probe, | ||
111 | .driver = { | ||
112 | .name = "ltq_ebu", | ||
113 | .owner = THIS_MODULE, | ||
114 | }, | ||
115 | }; | ||
116 | |||
117 | static int __init ltq_ebu_init(void) | ||
118 | { | ||
119 | int ret = platform_driver_register(<q_ebu_driver); | ||
120 | |||
121 | if (ret) | ||
122 | pr_info("ltq_ebu : Error registering platfom driver!"); | ||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | postcore_initcall(ltq_ebu_init); | ||
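Because the EBU latch chip registers itself with gpiolib at base 72, consumers use the plain gpio API; only output operations are implemented. A hypothetical consumer (name and purpose made up) could be:

#include <linux/gpio.h>
#include <linux/init.h>

#define MY_EBU_LED	72	/* ltq_ebu_chip.base + offset 0 */

static int __init my_board_ebu_led_init(void)
{
	int err = gpio_request(MY_EBU_LED, "my-ebu-led");

	if (err)
		return err;
	/* output only: the chip implements direction_output and set */
	return gpio_direction_output(MY_EBU_LED, 1);
}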
diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c new file mode 100644 index 000000000000..67d59d690340 --- /dev/null +++ b/arch/mips/lantiq/xway/gpio_stp.c | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2007 John Crispin <blogic@openwrt.org> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <linux/slab.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/mutex.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/gpio.h> | ||
18 | |||
19 | #include <lantiq_soc.h> | ||
20 | |||
21 | #define LTQ_STP_CON0 0x00 | ||
22 | #define LTQ_STP_CON1 0x04 | ||
23 | #define LTQ_STP_CPU0 0x08 | ||
24 | #define LTQ_STP_CPU1 0x0C | ||
25 | #define LTQ_STP_AR 0x10 | ||
26 | |||
27 | #define LTQ_STP_CON_SWU (1 << 31) | ||
28 | #define LTQ_STP_2HZ 0 | ||
29 | #define LTQ_STP_4HZ (1 << 23) | ||
30 | #define LTQ_STP_8HZ (2 << 23) | ||
31 | #define LTQ_STP_10HZ (3 << 23) | ||
32 | #define LTQ_STP_SPEED_MASK (0xf << 23) | ||
33 | #define LTQ_STP_UPD_FPI (1 << 31) | ||
34 | #define LTQ_STP_UPD_MASK (3 << 30) | ||
35 | #define LTQ_STP_ADSL_SRC (3 << 24) | ||
36 | |||
37 | #define LTQ_STP_GROUP0 (1 << 0) | ||
38 | |||
39 | #define LTQ_STP_RISING 0 | ||
40 | #define LTQ_STP_FALLING (1 << 26) | ||
41 | #define LTQ_STP_EDGE_MASK (1 << 26) | ||
42 | |||
43 | #define ltq_stp_r32(reg) __raw_readl(ltq_stp_membase + reg) | ||
44 | #define ltq_stp_w32(val, reg) __raw_writel(val, ltq_stp_membase + reg) | ||
45 | #define ltq_stp_w32_mask(clear, set, reg) \ | ||
46 | ltq_w32((ltq_r32(ltq_stp_membase + reg) & ~(clear)) | (set), \ | ||
47 | ltq_stp_membase + (reg)) | ||
48 | |||
49 | static int ltq_stp_shadow = 0xffff; | ||
50 | static void __iomem *ltq_stp_membase; | ||
51 | |||
52 | static void ltq_stp_set(struct gpio_chip *chip, unsigned offset, int value) | ||
53 | { | ||
54 | if (value) | ||
55 | ltq_stp_shadow |= (1 << offset); | ||
56 | else | ||
57 | ltq_stp_shadow &= ~(1 << offset); | ||
58 | ltq_stp_w32(ltq_stp_shadow, LTQ_STP_CPU0); | ||
59 | } | ||
60 | |||
61 | static int ltq_stp_direction_output(struct gpio_chip *chip, unsigned offset, | ||
62 | int value) | ||
63 | { | ||
64 | ltq_stp_set(chip, offset, value); | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static struct gpio_chip ltq_stp_chip = { | ||
70 | .label = "ltq_stp", | ||
71 | .direction_output = ltq_stp_direction_output, | ||
72 | .set = ltq_stp_set, | ||
73 | .base = 48, | ||
74 | .ngpio = 24, | ||
75 | .can_sleep = 1, | ||
76 | .owner = THIS_MODULE, | ||
77 | }; | ||
78 | |||
79 | static int ltq_stp_hw_init(void) | ||
80 | { | ||
81 | /* the 3 pins used to control the external stp */ | ||
82 | ltq_gpio_request(4, 1, 0, 1, "stp-st"); | ||
83 | ltq_gpio_request(5, 1, 0, 1, "stp-d"); | ||
84 | ltq_gpio_request(6, 1, 0, 1, "stp-sh"); | ||
85 | |||
86 | /* sane defaults */ | ||
87 | ltq_stp_w32(0, LTQ_STP_AR); | ||
88 | ltq_stp_w32(0, LTQ_STP_CPU0); | ||
89 | ltq_stp_w32(0, LTQ_STP_CPU1); | ||
90 | ltq_stp_w32(LTQ_STP_CON_SWU, LTQ_STP_CON0); | ||
91 | ltq_stp_w32(0, LTQ_STP_CON1); | ||
92 | |||
93 | /* rising or falling edge */ | ||
94 | ltq_stp_w32_mask(LTQ_STP_EDGE_MASK, LTQ_STP_FALLING, LTQ_STP_CON0); | ||
95 | |||
96 | /* by default, stp outputs 15-0 are enabled */ | ||
97 | ltq_stp_w32_mask(0, LTQ_STP_GROUP0, LTQ_STP_CON1); | ||
98 | |||
99 | /* the stp outputs are updated periodically by the FPI bus */ | ||
100 | ltq_stp_w32_mask(LTQ_STP_UPD_MASK, LTQ_STP_UPD_FPI, LTQ_STP_CON1); | ||
101 | |||
102 | /* set stp update speed */ | ||
103 | ltq_stp_w32_mask(LTQ_STP_SPEED_MASK, LTQ_STP_8HZ, LTQ_STP_CON1); | ||
104 | |||
105 | /* tell the hardware that pins (leds) 0 and 1 are controlled | ||
106 | * by the dsl arc | ||
107 | */ | ||
108 | ltq_stp_w32_mask(0, LTQ_STP_ADSL_SRC, LTQ_STP_CON0); | ||
109 | |||
110 | ltq_pmu_enable(PMU_LED); | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static int __devinit ltq_stp_probe(struct platform_device *pdev) | ||
115 | { | ||
116 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
117 | int ret = 0; | ||
118 | |||
119 | if (!res) | ||
120 | return -ENOENT; | ||
121 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
122 | resource_size(res), dev_name(&pdev->dev)); | ||
123 | if (!res) { | ||
124 | dev_err(&pdev->dev, "failed to request STP memory\n"); | ||
125 | return -EBUSY; | ||
126 | } | ||
127 | ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start, | ||
128 | resource_size(res)); | ||
129 | if (!ltq_stp_membase) { | ||
130 | dev_err(&pdev->dev, "failed to remap STP memory\n"); | ||
131 | return -ENOMEM; | ||
132 | } | ||
133 | ret = gpiochip_add(<q_stp_chip); | ||
134 | if (!ret) | ||
135 | ret = ltq_stp_hw_init(); | ||
136 | |||
137 | return ret; | ||
138 | } | ||
139 | |||
140 | static struct platform_driver ltq_stp_driver = { | ||
141 | .probe = ltq_stp_probe, | ||
142 | .driver = { | ||
143 | .name = "ltq_stp", | ||
144 | .owner = THIS_MODULE, | ||
145 | }, | ||
146 | }; | ||
147 | |||
148 | int __init ltq_stp_init(void) | ||
149 | { | ||
150 | int ret = platform_driver_register(<q_stp_driver); | ||
151 | |||
152 | if (ret) | ||
153 | pr_info("ltq_stp: error registering platfom driver"); | ||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | postcore_initcall(ltq_stp_init); | ||
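The STP shift-register outputs appear as gpios 48-71, so a board can hand them straight to the generic leds-gpio driver. A hypothetical hookup for the first output might be the following; the device would then be registered with platform_device_register() from the board init code:

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

static struct gpio_led my_stp_leds[] = {
	{ .name = "soc:green:status", .gpio = 48 },	/* first STP output */
};

static struct gpio_led_platform_data my_stp_led_data = {
	.num_leds	= ARRAY_SIZE(my_stp_leds),
	.leds		= my_stp_leds,
};

static struct platform_device my_stp_led_dev = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= { .platform_data = &my_stp_led_data },
};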
diff --git a/arch/mips/lantiq/xway/mach-easy50601.c b/arch/mips/lantiq/xway/mach-easy50601.c new file mode 100644 index 000000000000..d5aaf637ab19 --- /dev/null +++ b/arch/mips/lantiq/xway/mach-easy50601.c | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/platform_device.h> | ||
11 | #include <linux/mtd/mtd.h> | ||
12 | #include <linux/mtd/partitions.h> | ||
13 | #include <linux/mtd/physmap.h> | ||
14 | #include <linux/input.h> | ||
15 | |||
16 | #include <lantiq.h> | ||
17 | |||
18 | #include "../machtypes.h" | ||
19 | #include "devices.h" | ||
20 | |||
21 | static struct mtd_partition easy50601_partitions[] = { | ||
22 | { | ||
23 | .name = "uboot", | ||
24 | .offset = 0x0, | ||
25 | .size = 0x10000, | ||
26 | }, | ||
27 | { | ||
28 | .name = "uboot_env", | ||
29 | .offset = 0x10000, | ||
30 | .size = 0x10000, | ||
31 | }, | ||
32 | { | ||
33 | .name = "linux", | ||
34 | .offset = 0x20000, | ||
35 | .size = 0xE0000, | ||
36 | }, | ||
37 | { | ||
38 | .name = "rootfs", | ||
39 | .offset = 0x100000, | ||
40 | .size = 0x300000, | ||
41 | }, | ||
42 | }; | ||
43 | |||
44 | static struct physmap_flash_data easy50601_flash_data = { | ||
45 | .nr_parts = ARRAY_SIZE(easy50601_partitions), | ||
46 | .parts = easy50601_partitions, | ||
47 | }; | ||
48 | |||
49 | static void __init easy50601_init(void) | ||
50 | { | ||
51 | ltq_register_nor(&easy50601_flash_data); | ||
52 | } | ||
53 | |||
54 | MIPS_MACHINE(LTQ_MACH_EASY50601, | ||
55 | "EASY50601", | ||
56 | "EASY50601 Eval Board", | ||
57 | easy50601_init); | ||
diff --git a/arch/mips/lantiq/xway/mach-easy50712.c b/arch/mips/lantiq/xway/mach-easy50712.c new file mode 100644 index 000000000000..ea5027b3239d --- /dev/null +++ b/arch/mips/lantiq/xway/mach-easy50712.c | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/platform_device.h> | ||
11 | #include <linux/mtd/mtd.h> | ||
12 | #include <linux/mtd/partitions.h> | ||
13 | #include <linux/mtd/physmap.h> | ||
14 | #include <linux/input.h> | ||
15 | #include <linux/phy.h> | ||
16 | |||
17 | #include <lantiq_soc.h> | ||
18 | #include <irq.h> | ||
19 | |||
20 | #include "../machtypes.h" | ||
21 | #include "devices.h" | ||
22 | |||
23 | static struct mtd_partition easy50712_partitions[] = { | ||
24 | { | ||
25 | .name = "uboot", | ||
26 | .offset = 0x0, | ||
27 | .size = 0x10000, | ||
28 | }, | ||
29 | { | ||
30 | .name = "uboot_env", | ||
31 | .offset = 0x10000, | ||
32 | .size = 0x10000, | ||
33 | }, | ||
34 | { | ||
35 | .name = "linux", | ||
36 | .offset = 0x20000, | ||
37 | .size = 0xe0000, | ||
38 | }, | ||
39 | { | ||
40 | .name = "rootfs", | ||
41 | .offset = 0x100000, | ||
42 | .size = 0x300000, | ||
43 | }, | ||
44 | }; | ||
45 | |||
46 | static struct physmap_flash_data easy50712_flash_data = { | ||
47 | .nr_parts = ARRAY_SIZE(easy50712_partitions), | ||
48 | .parts = easy50712_partitions, | ||
49 | }; | ||
50 | |||
51 | static struct ltq_pci_data ltq_pci_data = { | ||
52 | .clock = PCI_CLOCK_INT, | ||
53 | .gpio = PCI_GNT1 | PCI_REQ1, | ||
54 | .irq = { | ||
55 | [14] = INT_NUM_IM0_IRL0 + 22, | ||
56 | }, | ||
57 | }; | ||
58 | |||
59 | static struct ltq_eth_data ltq_eth_data = { | ||
60 | .mii_mode = PHY_INTERFACE_MODE_MII, | ||
61 | }; | ||
62 | |||
63 | static void __init easy50712_init(void) | ||
64 | { | ||
65 | ltq_register_gpio_stp(); | ||
66 | ltq_register_nor(&easy50712_flash_data); | ||
67 | ltq_register_pci(<q_pci_data); | ||
68 | ltq_register_etop(<q_eth_data); | ||
69 | } | ||
70 | |||
71 | MIPS_MACHINE(LTQ_MACH_EASY50712, | ||
72 | "EASY50712", | ||
73 | "EASY50712 Eval Board", | ||
74 | easy50712_init); | ||
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c new file mode 100644 index 000000000000..9d69f01e352b --- /dev/null +++ b/arch/mips/lantiq/xway/pmu.c | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/version.h> | ||
12 | #include <linux/ioport.h> | ||
13 | |||
14 | #include <lantiq_soc.h> | ||
15 | |||
16 | /* PMU - the power management unit allows us to turn parts of the core | ||
17 | * on and off | ||
18 | */ | ||
19 | |||
20 | /* the enable / disable registers */ | ||
21 | #define LTQ_PMU_PWDCR 0x1C | ||
22 | #define LTQ_PMU_PWDSR 0x20 | ||
23 | |||
24 | #define ltq_pmu_w32(x, y) ltq_w32((x), ltq_pmu_membase + (y)) | ||
25 | #define ltq_pmu_r32(x) ltq_r32(ltq_pmu_membase + (x)) | ||
26 | |||
27 | static struct resource ltq_pmu_resource = { | ||
28 | .name = "pmu", | ||
29 | .start = LTQ_PMU_BASE_ADDR, | ||
30 | .end = LTQ_PMU_BASE_ADDR + LTQ_PMU_SIZE - 1, | ||
31 | .flags = IORESOURCE_MEM, | ||
32 | }; | ||
33 | |||
34 | static void __iomem *ltq_pmu_membase; | ||
35 | |||
36 | void ltq_pmu_enable(unsigned int module) | ||
37 | { | ||
38 | int err = 1000000; | ||
39 | |||
40 | ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) & ~module, LTQ_PMU_PWDCR); | ||
41 | do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module)); | ||
42 | |||
43 | if (!err) | ||
44 | panic("activating PMU module failed!\n"); | ||
45 | } | ||
46 | EXPORT_SYMBOL(ltq_pmu_enable); | ||
47 | |||
48 | void ltq_pmu_disable(unsigned int module) | ||
49 | { | ||
50 | ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) | module, LTQ_PMU_PWDCR); | ||
51 | } | ||
52 | EXPORT_SYMBOL(ltq_pmu_disable); | ||
53 | |||
54 | int __init ltq_pmu_init(void) | ||
55 | { | ||
56 | if (insert_resource(&iomem_resource, <q_pmu_resource) < 0) | ||
57 | panic("Failed to insert pmu memory\n"); | ||
58 | |||
59 | if (request_mem_region(ltq_pmu_resource.start, | ||
60 | resource_size(<q_pmu_resource), "pmu") < 0) | ||
61 | panic("Failed to request pmu memory\n"); | ||
62 | |||
63 | ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start, | ||
64 | resource_size(<q_pmu_resource)); | ||
65 | if (!ltq_pmu_membase) | ||
66 | panic("Failed to remap pmu memory\n"); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | core_initcall(ltq_pmu_init); | ||
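Drivers for PMU-gated blocks are expected to call ltq_pmu_enable() before their first register access and ltq_pmu_disable() once the block is no longer needed, as dma.c and gpio_stp.c above already do. A minimal hypothetical pattern (PMU_USB is assumed to be defined next to PMU_DMA and PMU_LED in lantiq_soc.h):

#include <linux/platform_device.h>
#include <lantiq_soc.h>

static int my_probe(struct platform_device *pdev)
{
	ltq_pmu_enable(PMU_USB);	/* power the block up before touching its registers */
	/* ... ioremap, request irq, ... */
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	ltq_pmu_disable(PMU_USB);	/* gate the block again to save power */
	return 0;
}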
diff --git a/arch/mips/lantiq/xway/prom-ase.c b/arch/mips/lantiq/xway/prom-ase.c new file mode 100644 index 000000000000..abe49f4db57f --- /dev/null +++ b/arch/mips/lantiq/xway/prom-ase.c | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/clk.h> | ||
11 | #include <asm/bootinfo.h> | ||
12 | #include <asm/time.h> | ||
13 | |||
14 | #include <lantiq_soc.h> | ||
15 | |||
16 | #include "../prom.h" | ||
17 | |||
18 | #define SOC_AMAZON_SE "Amazon_SE" | ||
19 | |||
20 | #define PART_SHIFT 12 | ||
21 | #define PART_MASK 0x0FFFFFFF | ||
22 | #define REV_SHIFT 28 | ||
23 | #define REV_MASK 0xF0000000 | ||
24 | |||
25 | void __init ltq_soc_detect(struct ltq_soc_info *i) | ||
26 | { | ||
27 | i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; | ||
28 | i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; | ||
29 | switch (i->partnum) { | ||
30 | case SOC_ID_AMAZON_SE: | ||
31 | i->name = SOC_AMAZON_SE; | ||
32 | i->type = SOC_TYPE_AMAZON_SE; | ||
33 | break; | ||
34 | |||
35 | default: | ||
36 | unreachable(); | ||
37 | break; | ||
38 | } | ||
39 | } | ||
diff --git a/arch/mips/lantiq/xway/prom-xway.c b/arch/mips/lantiq/xway/prom-xway.c new file mode 100644 index 000000000000..1686692ac24d --- /dev/null +++ b/arch/mips/lantiq/xway/prom-xway.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/clk.h> | ||
11 | #include <asm/bootinfo.h> | ||
12 | #include <asm/time.h> | ||
13 | |||
14 | #include <lantiq_soc.h> | ||
15 | |||
16 | #include "../prom.h" | ||
17 | |||
18 | #define SOC_DANUBE "Danube" | ||
19 | #define SOC_TWINPASS "Twinpass" | ||
20 | #define SOC_AR9 "AR9" | ||
21 | |||
22 | #define PART_SHIFT 12 | ||
23 | #define PART_MASK 0x0FFFFFFF | ||
24 | #define REV_SHIFT 28 | ||
25 | #define REV_MASK 0xF0000000 | ||
26 | |||
27 | void __init ltq_soc_detect(struct ltq_soc_info *i) | ||
28 | { | ||
29 | i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; | ||
30 | i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; | ||
31 | switch (i->partnum) { | ||
32 | case SOC_ID_DANUBE1: | ||
33 | case SOC_ID_DANUBE2: | ||
34 | i->name = SOC_DANUBE; | ||
35 | i->type = SOC_TYPE_DANUBE; | ||
36 | break; | ||
37 | |||
38 | case SOC_ID_TWINPASS: | ||
39 | i->name = SOC_TWINPASS; | ||
40 | i->type = SOC_TYPE_DANUBE; | ||
41 | break; | ||
42 | |||
43 | case SOC_ID_ARX188: | ||
44 | case SOC_ID_ARX168: | ||
45 | case SOC_ID_ARX182: | ||
46 | i->name = SOC_AR9; | ||
47 | i->type = SOC_TYPE_AR9; | ||
48 | break; | ||
49 | |||
50 | default: | ||
51 | unreachable(); | ||
52 | break; | ||
53 | } | ||
54 | } | ||
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c new file mode 100644 index 000000000000..a1be36d0e490 --- /dev/null +++ b/arch/mips/lantiq/xway/reset.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/io.h> | ||
11 | #include <linux/ioport.h> | ||
12 | #include <linux/pm.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <asm/reboot.h> | ||
15 | |||
16 | #include <lantiq_soc.h> | ||
17 | |||
18 | #define ltq_rcu_w32(x, y) ltq_w32((x), ltq_rcu_membase + (y)) | ||
19 | #define ltq_rcu_r32(x) ltq_r32(ltq_rcu_membase + (x)) | ||
20 | |||
21 | /* register definitions */ | ||
22 | #define LTQ_RCU_RST 0x0010 | ||
23 | #define LTQ_RCU_RST_ALL 0x40000000 | ||
24 | |||
25 | #define LTQ_RCU_RST_STAT 0x0014 | ||
26 | #define LTQ_RCU_STAT_SHIFT 26 | ||
27 | |||
28 | static struct resource ltq_rcu_resource = { | ||
29 | .name = "rcu", | ||
30 | .start = LTQ_RCU_BASE_ADDR, | ||
31 | .end = LTQ_RCU_BASE_ADDR + LTQ_RCU_SIZE - 1, | ||
32 | .flags = IORESOURCE_MEM, | ||
33 | }; | ||
34 | |||
35 | /* remapped base addr of the reset control unit */ | ||
36 | static void __iomem *ltq_rcu_membase; | ||
37 | |||
38 | /* This function is used by the watchdog driver */ | ||
39 | int ltq_reset_cause(void) | ||
40 | { | ||
41 | u32 val = ltq_rcu_r32(LTQ_RCU_RST_STAT); | ||
42 | return val >> LTQ_RCU_STAT_SHIFT; | ||
43 | } | ||
44 | EXPORT_SYMBOL_GPL(ltq_reset_cause); | ||
45 | |||
46 | static void ltq_machine_restart(char *command) | ||
47 | { | ||
48 | pr_notice("System restart\n"); | ||
49 | local_irq_disable(); | ||
50 | ltq_rcu_w32(ltq_rcu_r32(LTQ_RCU_RST) | LTQ_RCU_RST_ALL, LTQ_RCU_RST); | ||
51 | unreachable(); | ||
52 | } | ||
53 | |||
54 | static void ltq_machine_halt(void) | ||
55 | { | ||
56 | pr_notice("System halted.\n"); | ||
57 | local_irq_disable(); | ||
58 | unreachable(); | ||
59 | } | ||
60 | |||
61 | static void ltq_machine_power_off(void) | ||
62 | { | ||
63 | pr_notice("Please turn off the power now.\n"); | ||
64 | local_irq_disable(); | ||
65 | unreachable(); | ||
66 | } | ||
67 | |||
68 | static int __init mips_reboot_setup(void) | ||
69 | { | ||
70 | /* insert and request the memory region */ | ||
71 | if (insert_resource(&iomem_resource, &ltq_rcu_resource) < 0) | ||
72 | panic("Failed to insert rcu memory\n"); | ||
73 | |||
74 | if (request_mem_region(ltq_rcu_resource.start, | ||
75 | resource_size(&ltq_rcu_resource), "rcu") < 0) | ||
76 | panic("Failed to request rcu memory\n"); | ||
77 | |||
78 | /* remap rcu register range */ | ||
79 | ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start, | ||
80 | resource_size(&ltq_rcu_resource)); | ||
81 | if (!ltq_rcu_membase) | ||
82 | panic("Failed to remap rcu memory\n"); | ||
83 | |||
84 | _machine_restart = ltq_machine_restart; | ||
85 | _machine_halt = ltq_machine_halt; | ||
86 | pm_power_off = ltq_machine_power_off; | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | arch_initcall(mips_reboot_setup); | ||
diff --git a/arch/mips/lantiq/xway/setup-ase.c b/arch/mips/lantiq/xway/setup-ase.c new file mode 100644 index 000000000000..f6f326798a39 --- /dev/null +++ b/arch/mips/lantiq/xway/setup-ase.c | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <lantiq_soc.h> | ||
10 | |||
11 | #include "../prom.h" | ||
12 | #include "devices.h" | ||
13 | |||
14 | void __init ltq_soc_setup(void) | ||
15 | { | ||
16 | ltq_register_ase_asc(); | ||
17 | ltq_register_gpio(); | ||
18 | ltq_register_wdt(); | ||
19 | } | ||
diff --git a/arch/mips/lantiq/xway/setup-xway.c b/arch/mips/lantiq/xway/setup-xway.c new file mode 100644 index 000000000000..c292f643a858 --- /dev/null +++ b/arch/mips/lantiq/xway/setup-xway.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <lantiq_soc.h> | ||
10 | |||
11 | #include "../prom.h" | ||
12 | #include "devices.h" | ||
13 | |||
14 | void __init ltq_soc_setup(void) | ||
15 | { | ||
16 | ltq_register_asc(0); | ||
17 | ltq_register_asc(1); | ||
18 | ltq_register_gpio(); | ||
19 | ltq_register_wdt(); | ||
20 | } | ||
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index 2adead5a8a37..b2cad4fd5fc4 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile | |||
@@ -28,6 +28,7 @@ obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o | |||
28 | obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o | 28 | obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o |
29 | obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o | 29 | obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o |
30 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += dump_tlb.o | 30 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += dump_tlb.o |
31 | obj-$(CONFIG_CPU_XLR) += dump_tlb.o | ||
31 | 32 | ||
32 | # libgcc-style stuff needed in the kernel | 33 | # libgcc-style stuff needed in the kernel |
33 | obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o | 34 | obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o |
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c index 11b193f848f8..d93830ad6113 100644 --- a/arch/mips/loongson/common/env.c +++ b/arch/mips/loongson/common/env.c | |||
@@ -29,9 +29,10 @@ unsigned long memsize, highmemsize; | |||
29 | 29 | ||
30 | #define parse_even_earlier(res, option, p) \ | 30 | #define parse_even_earlier(res, option, p) \ |
31 | do { \ | 31 | do { \ |
32 | int ret; \ | 32 | unsigned int tmp __maybe_unused; \ |
33 | \ | ||
33 | if (strncmp(option, (char *)p, strlen(option)) == 0) \ | 34 | if (strncmp(option, (char *)p, strlen(option)) == 0) \ |
34 | ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ | 35 | tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \ |
35 | } while (0) | 36 | } while (0) |
36 | 37 | ||
37 | void __init prom_init_env(void) | 38 | void __init prom_init_env(void) |
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index d679c772d082..4d8c1623eee2 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile | |||
@@ -3,7 +3,8 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += cache.o dma-default.o extable.o fault.o \ | 5 | obj-y += cache.o dma-default.o extable.o fault.o \ |
6 | init.o tlbex.o tlbex-fault.o uasm.o page.o | 6 | init.o mmap.o tlbex.o tlbex-fault.o uasm.o \ |
7 | page.o | ||
7 | 8 | ||
8 | obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o | 9 | obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o |
9 | obj-$(CONFIG_64BIT) += pgtable-64.o | 10 | obj-$(CONFIG_64BIT) += pgtable-64.o |
@@ -29,6 +30,7 @@ obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o | |||
29 | obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o | 30 | obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o |
30 | obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o | 31 | obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o |
31 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o | 32 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o |
33 | obj-$(CONFIG_CPU_XLR) += c-r4k.o tlb-r4k.o cex-gen.o | ||
32 | 34 | ||
33 | obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o | 35 | obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o |
34 | obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o | 36 | obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index b4923a75cb4b..d9bc5d3593b6 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -1006,6 +1006,7 @@ static void __cpuinit probe_pcache(void) | |||
1006 | case CPU_25KF: | 1006 | case CPU_25KF: |
1007 | case CPU_SB1: | 1007 | case CPU_SB1: |
1008 | case CPU_SB1A: | 1008 | case CPU_SB1A: |
1009 | case CPU_XLR: | ||
1009 | c->dcache.flags |= MIPS_CACHE_PINDEX; | 1010 | c->dcache.flags |= MIPS_CACHE_PINDEX; |
1010 | break; | 1011 | break; |
1011 | 1012 | ||
@@ -1075,7 +1076,6 @@ static int __cpuinit probe_scache(void) | |||
1075 | unsigned long flags, addr, begin, end, pow2; | 1076 | unsigned long flags, addr, begin, end, pow2; |
1076 | unsigned int config = read_c0_config(); | 1077 | unsigned int config = read_c0_config(); |
1077 | struct cpuinfo_mips *c = &current_cpu_data; | 1078 | struct cpuinfo_mips *c = &current_cpu_data; |
1078 | int tmp; | ||
1079 | 1079 | ||
1080 | if (config & CONF_SC) | 1080 | if (config & CONF_SC) |
1081 | return 0; | 1081 | return 0; |
@@ -1108,7 +1108,6 @@ static int __cpuinit probe_scache(void) | |||
1108 | 1108 | ||
1109 | /* Now search for the wrap around point. */ | 1109 | /* Now search for the wrap around point. */ |
1110 | pow2 = (128 * 1024); | 1110 | pow2 = (128 * 1024); |
1111 | tmp = 0; | ||
1112 | for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { | 1111 | for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { |
1113 | cache_op(Index_Load_Tag_SD, addr); | 1112 | cache_op(Index_Load_Tag_SD, addr); |
1114 | __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ | 1113 | __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ |
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c new file mode 100644 index 000000000000..ae3c20a9556e --- /dev/null +++ b/arch/mips/mm/mmap.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 Wind River Systems, | ||
7 | * written by Ralf Baechle <ralf@linux-mips.org> | ||
8 | */ | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/mman.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/random.h> | ||
14 | #include <linux/sched.h> | ||
15 | |||
16 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ | ||
17 | |||
18 | EXPORT_SYMBOL(shm_align_mask); | ||
19 | |||
20 | #define COLOUR_ALIGN(addr,pgoff) \ | ||
21 | ((((addr) + shm_align_mask) & ~shm_align_mask) + \ | ||
22 | (((pgoff) << PAGE_SHIFT) & shm_align_mask)) | ||
23 | |||
24 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
25 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
26 | { | ||
27 | struct vm_area_struct * vmm; | ||
28 | int do_color_align; | ||
29 | |||
30 | if (len > TASK_SIZE) | ||
31 | return -ENOMEM; | ||
32 | |||
33 | if (flags & MAP_FIXED) { | ||
34 | /* Even MAP_FIXED mappings must reside within TASK_SIZE. */ | ||
35 | if (TASK_SIZE - len < addr) | ||
36 | return -EINVAL; | ||
37 | |||
38 | /* | ||
39 | * We do not accept a shared mapping if it would violate | ||
40 | * cache aliasing constraints. | ||
41 | */ | ||
42 | if ((flags & MAP_SHARED) && | ||
43 | ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) | ||
44 | return -EINVAL; | ||
45 | return addr; | ||
46 | } | ||
47 | |||
48 | do_color_align = 0; | ||
49 | if (filp || (flags & MAP_SHARED)) | ||
50 | do_color_align = 1; | ||
51 | if (addr) { | ||
52 | if (do_color_align) | ||
53 | addr = COLOUR_ALIGN(addr, pgoff); | ||
54 | else | ||
55 | addr = PAGE_ALIGN(addr); | ||
56 | vmm = find_vma(current->mm, addr); | ||
57 | if (TASK_SIZE - len >= addr && | ||
58 | (!vmm || addr + len <= vmm->vm_start)) | ||
59 | return addr; | ||
60 | } | ||
61 | addr = current->mm->mmap_base; | ||
62 | if (do_color_align) | ||
63 | addr = COLOUR_ALIGN(addr, pgoff); | ||
64 | else | ||
65 | addr = PAGE_ALIGN(addr); | ||
66 | |||
67 | for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { | ||
68 | /* At this point: (!vmm || addr < vmm->vm_end). */ | ||
69 | if (TASK_SIZE - len < addr) | ||
70 | return -ENOMEM; | ||
71 | if (!vmm || addr + len <= vmm->vm_start) | ||
72 | return addr; | ||
73 | addr = vmm->vm_end; | ||
74 | if (do_color_align) | ||
75 | addr = COLOUR_ALIGN(addr, pgoff); | ||
76 | } | ||
77 | } | ||
78 | |||
79 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
80 | { | ||
81 | unsigned long random_factor = 0UL; | ||
82 | |||
83 | if (current->flags & PF_RANDOMIZE) { | ||
84 | random_factor = get_random_int(); | ||
85 | random_factor = random_factor << PAGE_SHIFT; | ||
86 | if (TASK_IS_32BIT_ADDR) | ||
87 | random_factor &= 0xfffffful; | ||
88 | else | ||
89 | random_factor &= 0xffffffful; | ||
90 | } | ||
91 | |||
92 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | ||
93 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
94 | mm->unmap_area = arch_unmap_area; | ||
95 | } | ||
96 | |||
97 | static inline unsigned long brk_rnd(void) | ||
98 | { | ||
99 | unsigned long rnd = get_random_int(); | ||
100 | |||
101 | rnd = rnd << PAGE_SHIFT; | ||
102 | /* 8MB for 32bit, 256MB for 64bit */ | ||
103 | if (TASK_IS_32BIT_ADDR) | ||
104 | rnd = rnd & 0x7ffffful; | ||
105 | else | ||
106 | rnd = rnd & 0xffffffful; | ||
107 | |||
108 | return rnd; | ||
109 | } | ||
110 | |||
111 | unsigned long arch_randomize_brk(struct mm_struct *mm) | ||
112 | { | ||
113 | unsigned long base = mm->brk; | ||
114 | unsigned long ret; | ||
115 | |||
116 | ret = PAGE_ALIGN(base + brk_rnd()); | ||
117 | |||
118 | if (ret < mm->brk) | ||
119 | return mm->brk; | ||
120 | |||
121 | return ret; | ||
122 | } | ||
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 5ef294fbb6e7..424ed4b92e6d 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -404,6 +404,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, | |||
404 | case CPU_5KC: | 404 | case CPU_5KC: |
405 | case CPU_TX49XX: | 405 | case CPU_TX49XX: |
406 | case CPU_PR4450: | 406 | case CPU_PR4450: |
407 | case CPU_XLR: | ||
407 | uasm_i_nop(p); | 408 | uasm_i_nop(p); |
408 | tlbw(p); | 409 | tlbw(p); |
409 | break; | 410 | break; |
@@ -1151,8 +1152,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) | |||
1151 | struct uasm_reloc *r = relocs; | 1152 | struct uasm_reloc *r = relocs; |
1152 | u32 *f; | 1153 | u32 *f; |
1153 | unsigned int final_len; | 1154 | unsigned int final_len; |
1154 | struct mips_huge_tlb_info htlb_info; | 1155 | struct mips_huge_tlb_info htlb_info __maybe_unused; |
1155 | enum vmalloc64_mode vmalloc_mode; | 1156 | enum vmalloc64_mode vmalloc_mode __maybe_unused; |
1156 | 1157 | ||
1157 | memset(tlb_handler, 0, sizeof(tlb_handler)); | 1158 | memset(tlb_handler, 0, sizeof(tlb_handler)); |
1158 | memset(labels, 0, sizeof(labels)); | 1159 | memset(labels, 0, sizeof(labels)); |
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 414f0c99b196..31180c321a1a 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c | |||
@@ -193,8 +193,6 @@ extern struct plat_smp_ops msmtc_smp_ops; | |||
193 | 193 | ||
194 | void __init prom_init(void) | 194 | void __init prom_init(void) |
195 | { | 195 | { |
196 | int result; | ||
197 | |||
198 | prom_argc = fw_arg0; | 196 | prom_argc = fw_arg0; |
199 | _prom_argv = (int *) fw_arg1; | 197 | _prom_argv = (int *) fw_arg1; |
200 | _prom_envp = (int *) fw_arg2; | 198 | _prom_envp = (int *) fw_arg2; |
@@ -360,20 +358,14 @@ void __init prom_init(void) | |||
360 | #ifdef CONFIG_SERIAL_8250_CONSOLE | 358 | #ifdef CONFIG_SERIAL_8250_CONSOLE |
361 | console_config(); | 359 | console_config(); |
362 | #endif | 360 | #endif |
363 | /* Early detection of CMP support */ | ||
364 | result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ); | ||
365 | |||
366 | #ifdef CONFIG_MIPS_CMP | 361 | #ifdef CONFIG_MIPS_CMP |
367 | if (result) | 362 | /* Early detection of CMP support */ |
363 | if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ)) | ||
368 | register_smp_ops(&cmp_smp_ops); | 364 | register_smp_ops(&cmp_smp_ops); |
365 | else | ||
369 | #endif | 366 | #endif |
370 | #ifdef CONFIG_MIPS_MT_SMP | 367 | #ifdef CONFIG_MIPS_MT_SMP |
371 | #ifdef CONFIG_MIPS_CMP | ||
372 | if (!result) | ||
373 | register_smp_ops(&vsmp_smp_ops); | 368 | register_smp_ops(&vsmp_smp_ops); |
374 | #else | ||
375 | register_smp_ops(&vsmp_smp_ops); | ||
376 | #endif | ||
377 | #endif | 369 | #endif |
378 | #ifdef CONFIG_MIPS_MT_SMTC | 370 | #ifdef CONFIG_MIPS_MT_SMTC |
379 | register_smp_ops(&msmtc_smp_ops); | 371 | register_smp_ops(&msmtc_smp_ops); |
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 7d93e6fbfa5a..1d36c511a7a5 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c | |||
@@ -56,7 +56,6 @@ static DEFINE_RAW_SPINLOCK(mips_irq_lock); | |||
56 | static inline int mips_pcibios_iack(void) | 56 | static inline int mips_pcibios_iack(void) |
57 | { | 57 | { |
58 | int irq; | 58 | int irq; |
59 | u32 dummy; | ||
60 | 59 | ||
61 | /* | 60 | /* |
62 | * Determine highest priority pending interrupt by performing | 61 | * Determine highest priority pending interrupt by performing |
@@ -83,7 +82,7 @@ static inline int mips_pcibios_iack(void) | |||
83 | BONITO_PCIMAP_CFG = 0x20000; | 82 | BONITO_PCIMAP_CFG = 0x20000; |
84 | 83 | ||
85 | /* Flush Bonito register block */ | 84 | /* Flush Bonito register block */ |
86 | dummy = BONITO_PCIMAP_CFG; | 85 | (void) BONITO_PCIMAP_CFG; |
87 | iob(); /* sync */ | 86 | iob(); /* sync */ |
88 | 87 | ||
89 | irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg); | 88 | irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg); |
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig new file mode 100644 index 000000000000..a5ca743613f2 --- /dev/null +++ b/arch/mips/netlogic/Kconfig | |||
@@ -0,0 +1,5 @@ | |||
1 | config NLM_COMMON | ||
2 | bool | ||
3 | |||
4 | config NLM_XLR | ||
5 | bool | ||
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile new file mode 100644 index 000000000000..9bd3f731f62e --- /dev/null +++ b/arch/mips/netlogic/xlr/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | obj-y += setup.o platform.o irq.o time.o | ||
2 | obj-$(CONFIG_SMP) += smp.o smpboot.o | ||
3 | obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o | ||
4 | |||
5 | EXTRA_CFLAGS += -Werror | ||
diff --git a/arch/mips/netlogic/xlr/irq.c b/arch/mips/netlogic/xlr/irq.c new file mode 100644 index 000000000000..1446d58e364c --- /dev/null +++ b/arch/mips/netlogic/xlr/irq.c | |||
@@ -0,0 +1,300 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/init.h> | ||
37 | #include <linux/linkage.h> | ||
38 | #include <linux/interrupt.h> | ||
39 | #include <linux/spinlock.h> | ||
40 | #include <linux/mm.h> | ||
41 | |||
42 | #include <asm/mipsregs.h> | ||
43 | |||
44 | #include <asm/netlogic/xlr/iomap.h> | ||
45 | #include <asm/netlogic/xlr/pic.h> | ||
46 | #include <asm/netlogic/xlr/xlr.h> | ||
47 | |||
48 | #include <asm/netlogic/interrupt.h> | ||
49 | #include <asm/netlogic/mips-extns.h> | ||
50 | |||
51 | static u64 nlm_irq_mask; | ||
52 | static DEFINE_SPINLOCK(nlm_pic_lock); | ||
53 | |||
54 | static void xlr_pic_enable(struct irq_data *d) | ||
55 | { | ||
56 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
57 | unsigned long flags; | ||
58 | nlm_reg_t reg; | ||
59 | int irq = d->irq; | ||
60 | |||
61 | WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); | ||
62 | |||
63 | spin_lock_irqsave(&nlm_pic_lock, flags); | ||
64 | reg = netlogic_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE); | ||
65 | netlogic_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE, | ||
66 | reg | (1 << 6) | (1 << 30) | (1 << 31)); | ||
67 | spin_unlock_irqrestore(&nlm_pic_lock, flags); | ||
68 | } | ||
69 | |||
70 | static void xlr_pic_mask(struct irq_data *d) | ||
71 | { | ||
72 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
73 | unsigned long flags; | ||
74 | nlm_reg_t reg; | ||
75 | int irq = d->irq; | ||
76 | |||
77 | WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); | ||
78 | |||
79 | spin_lock_irqsave(&nlm_pic_lock, flags); | ||
80 | reg = netlogic_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE); | ||
81 | netlogic_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE, | ||
82 | reg | (1 << 6) | (1 << 30) | (0 << 31)); | ||
83 | spin_unlock_irqrestore(&nlm_pic_lock, flags); | ||
84 | } | ||
85 | |||
86 | #ifdef CONFIG_PCI | ||
87 | /* Extra ACK needed for the XLR on-chip PCI controller */ | ||
88 | static void xlr_pci_ack(struct irq_data *d) | ||
89 | { | ||
90 | nlm_reg_t *pci_mmio = netlogic_io_mmio(NETLOGIC_IO_PCIX_OFFSET); | ||
91 | |||
92 | netlogic_read_reg(pci_mmio, (0x140 >> 2)); | ||
93 | } | ||
94 | |||
95 | /* Extra ACK needed for the XLS on-chip PCIe controller */ | ||
96 | static void xls_pcie_ack(struct irq_data *d) | ||
97 | { | ||
98 | nlm_reg_t *pcie_mmio_le = netlogic_io_mmio(NETLOGIC_IO_PCIE_1_OFFSET); | ||
99 | |||
100 | switch (d->irq) { | ||
101 | case PIC_PCIE_LINK0_IRQ: | ||
102 | netlogic_write_reg(pcie_mmio_le, (0x90 >> 2), 0xffffffff); | ||
103 | break; | ||
104 | case PIC_PCIE_LINK1_IRQ: | ||
105 | netlogic_write_reg(pcie_mmio_le, (0x94 >> 2), 0xffffffff); | ||
106 | break; | ||
107 | case PIC_PCIE_LINK2_IRQ: | ||
108 | netlogic_write_reg(pcie_mmio_le, (0x190 >> 2), 0xffffffff); | ||
109 | break; | ||
110 | case PIC_PCIE_LINK3_IRQ: | ||
111 | netlogic_write_reg(pcie_mmio_le, (0x194 >> 2), 0xffffffff); | ||
112 | break; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | /* For XLS B silicon, PCI interrupts 3 and 4 are different */ | ||
117 | static void xls_pcie_ack_b(struct irq_data *d) | ||
118 | { | ||
119 | nlm_reg_t *pcie_mmio_le = netlogic_io_mmio(NETLOGIC_IO_PCIE_1_OFFSET); | ||
120 | |||
121 | switch (d->irq) { | ||
122 | case PIC_PCIE_LINK0_IRQ: | ||
123 | netlogic_write_reg(pcie_mmio_le, (0x90 >> 2), 0xffffffff); | ||
124 | break; | ||
125 | case PIC_PCIE_LINK1_IRQ: | ||
126 | netlogic_write_reg(pcie_mmio_le, (0x94 >> 2), 0xffffffff); | ||
127 | break; | ||
128 | case PIC_PCIE_XLSB0_LINK2_IRQ: | ||
129 | netlogic_write_reg(pcie_mmio_le, (0x190 >> 2), 0xffffffff); | ||
130 | break; | ||
131 | case PIC_PCIE_XLSB0_LINK3_IRQ: | ||
132 | netlogic_write_reg(pcie_mmio_le, (0x194 >> 2), 0xffffffff); | ||
133 | break; | ||
134 | } | ||
135 | } | ||
136 | #endif | ||
137 | |||
138 | static void xlr_pic_ack(struct irq_data *d) | ||
139 | { | ||
140 | unsigned long flags; | ||
141 | nlm_reg_t *mmio; | ||
142 | int irq = d->irq; | ||
143 | void *hd = irq_data_get_irq_handler_data(d); | ||
144 | |||
145 | WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); | ||
146 | |||
147 | if (hd) { | ||
148 | void (*extra_ack)(void *) = hd; | ||
149 | extra_ack(d); | ||
150 | } | ||
151 | mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
152 | spin_lock_irqsave(&nlm_pic_lock, flags); | ||
153 | netlogic_write_reg(mmio, PIC_INT_ACK, (1 << (irq - PIC_IRQ_BASE))); | ||
154 | spin_unlock_irqrestore(&nlm_pic_lock, flags); | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * This chip definition handles interrupts routed through the XLR | ||
159 | * hardware PIC; currently IRQs 8-39 are mapped to hardware interrupts | ||
160 | * 0-31 wired to the XLR PIC | ||
161 | */ | ||
162 | static struct irq_chip xlr_pic = { | ||
163 | .name = "XLR-PIC", | ||
164 | .irq_enable = xlr_pic_enable, | ||
165 | .irq_mask = xlr_pic_mask, | ||
166 | .irq_ack = xlr_pic_ack, | ||
167 | }; | ||
168 | |||
169 | static void rsvd_irq_handler(struct irq_data *d) | ||
170 | { | ||
171 | WARN(d->irq >= PIC_IRQ_BASE, "Bad irq %d", d->irq); | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * Chip definition for CPU-originated interrupts (timer, msg) and | ||
176 | * IPIs | ||
177 | */ | ||
178 | struct irq_chip nlm_cpu_intr = { | ||
179 | .name = "XLR-CPU-INTR", | ||
180 | .irq_enable = rsvd_irq_handler, | ||
181 | .irq_mask = rsvd_irq_handler, | ||
182 | .irq_ack = rsvd_irq_handler, | ||
183 | }; | ||
184 | |||
185 | void __init init_xlr_irqs(void) | ||
186 | { | ||
187 | nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); | ||
188 | uint32_t thread_mask = 1; | ||
189 | int level, i; | ||
190 | |||
191 | pr_info("Interrupt thread mask [%x]\n", thread_mask); | ||
192 | for (i = 0; i < PIC_NUM_IRTS; i++) { | ||
193 | level = PIC_IRQ_IS_EDGE_TRIGGERED(i); | ||
194 | |||
195 | /* Bind all PIC irqs to boot cpu */ | ||
196 | netlogic_write_reg(mmio, PIC_IRT_0_BASE + i, thread_mask); | ||
197 | |||
198 | /* | ||
199 | * Use local scheduling and high polarity for all IRTs | ||
200 | * Invalidate all IRTs, by default | ||
201 | */ | ||
202 | netlogic_write_reg(mmio, PIC_IRT_1_BASE + i, | ||
203 | (level << 30) | (1 << 6) | (PIC_IRQ_BASE + i)); | ||
204 | } | ||
205 | |||
206 | /* Make all IRQs level-triggered by default */ | ||
207 | for (i = 0; i < NR_IRQS; i++) { | ||
208 | if (PIC_IRQ_IS_IRT(i)) | ||
209 | irq_set_chip_and_handler(i, &xlr_pic, handle_level_irq); | ||
210 | else | ||
211 | irq_set_chip_and_handler(i, &nlm_cpu_intr, | ||
212 | handle_level_irq); | ||
213 | } | ||
214 | #ifdef CONFIG_SMP | ||
215 | irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr, | ||
216 | nlm_smp_function_ipi_handler); | ||
217 | irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr, | ||
218 | nlm_smp_resched_ipi_handler); | ||
219 | nlm_irq_mask |= | ||
220 | ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE)); | ||
221 | #endif | ||
222 | |||
223 | #ifdef CONFIG_PCI | ||
224 | /* | ||
225 | * For PCI interrupts, we need to ack the PIC controller too; overload | ||
226 | * the irq handler data to do this | ||
227 | */ | ||
228 | if (nlm_chip_is_xls()) { | ||
229 | if (nlm_chip_is_xls_b()) { | ||
230 | irq_set_handler_data(PIC_PCIE_LINK0_IRQ, | ||
231 | xls_pcie_ack_b); | ||
232 | irq_set_handler_data(PIC_PCIE_LINK1_IRQ, | ||
233 | xls_pcie_ack_b); | ||
234 | irq_set_handler_data(PIC_PCIE_XLSB0_LINK2_IRQ, | ||
235 | xls_pcie_ack_b); | ||
236 | irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, | ||
237 | xls_pcie_ack_b); | ||
238 | } else { | ||
239 | irq_set_handler_data(PIC_PCIE_LINK0_IRQ, xls_pcie_ack); | ||
240 | irq_set_handler_data(PIC_PCIE_LINK1_IRQ, xls_pcie_ack); | ||
241 | irq_set_handler_data(PIC_PCIE_LINK2_IRQ, xls_pcie_ack); | ||
242 | irq_set_handler_data(PIC_PCIE_LINK3_IRQ, xls_pcie_ack); | ||
243 | } | ||
244 | } else { | ||
245 | /* XLR PCI controller ACK */ | ||
246 | irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack); | ||
247 | } | ||
248 | #endif | ||
249 | /* Unmask all PIC-related interrupts. If no handler is installed by the | ||
250 | * drivers, it will just ack the interrupt and return | ||
251 | */ | ||
252 | for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) | ||
253 | nlm_irq_mask |= (1ULL << i); | ||
254 | |||
255 | nlm_irq_mask |= (1ULL << IRQ_TIMER); | ||
256 | } | ||
257 | |||
258 | void __init arch_init_irq(void) | ||
259 | { | ||
260 | /* Initialize the irq descriptors */ | ||
261 | init_xlr_irqs(); | ||
262 | write_c0_eimr(nlm_irq_mask); | ||
263 | } | ||
264 | |||
265 | void __cpuinit nlm_smp_irq_init(void) | ||
266 | { | ||
267 | /* set interrupt mask for non-zero cpus */ | ||
268 | write_c0_eimr(nlm_irq_mask); | ||
269 | } | ||
270 | |||
271 | asmlinkage void plat_irq_dispatch(void) | ||
272 | { | ||
273 | uint64_t eirr; | ||
274 | int i; | ||
275 | |||
276 | eirr = read_c0_eirr() & read_c0_eimr(); | ||
277 | if (!eirr) | ||
278 | return; | ||
279 | |||
280 | /* no need of EIRR here, writing compare clears interrupt */ | ||
281 | if (eirr & (1 << IRQ_TIMER)) { | ||
282 | do_IRQ(IRQ_TIMER); | ||
283 | return; | ||
284 | } | ||
285 | |||
286 | /* use dclz: optimize the code below */ | ||
287 | for (i = 63; i != -1; i--) { | ||
288 | if (eirr & (1ULL << i)) | ||
289 | break; | ||
290 | } | ||
291 | if (i == -1) { | ||
292 | pr_err("no interrupt !!\n"); | ||
293 | return; | ||
294 | } | ||
295 | |||
296 | /* Ack eirr */ | ||
297 | write_c0_eirr(1ULL << i); | ||
298 | |||
299 | do_IRQ(i); | ||
300 | } | ||
diff --git a/arch/mips/netlogic/xlr/platform.c b/arch/mips/netlogic/xlr/platform.c new file mode 100644 index 000000000000..609ec2534642 --- /dev/null +++ b/arch/mips/netlogic/xlr/platform.c | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * Copyright 2011, Netlogic Microsystems. | ||
3 | * Copyright 2004, Matt Porter <mporter@kernel.crashing.org> | ||
4 | * | ||
5 | * This file is licensed under the terms of the GNU General Public | ||
6 | * License version 2. This program is licensed "as is" without any | ||
7 | * warranty of any kind, whether express or implied. | ||
8 | */ | ||
9 | |||
10 | #include <linux/device.h> | ||
11 | #include <linux/platform_device.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/resource.h> | ||
15 | #include <linux/serial_8250.h> | ||
16 | #include <linux/serial_reg.h> | ||
17 | |||
18 | #include <asm/netlogic/xlr/iomap.h> | ||
19 | #include <asm/netlogic/xlr/pic.h> | ||
20 | #include <asm/netlogic/xlr/xlr.h> | ||
21 | |||
22 | unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset) | ||
23 | { | ||
24 | nlm_reg_t *mmio; | ||
25 | unsigned int value; | ||
26 | |||
27 | /* XLR uart does not need any mapping of regs */ | ||
28 | mmio = (nlm_reg_t *)(p->membase + (offset << p->regshift)); | ||
29 | value = netlogic_read_reg(mmio, 0); | ||
30 | |||
31 | /* See XLR/XLS errata */ | ||
32 | if (offset == UART_MSR) | ||
33 | value ^= 0xF0; | ||
34 | else if (offset == UART_MCR) | ||
35 | value ^= 0x3; | ||
36 | |||
37 | return value; | ||
38 | } | ||
39 | |||
40 | void nlm_xlr_uart_out(struct uart_port *p, int offset, int value) | ||
41 | { | ||
42 | nlm_reg_t *mmio; | ||
43 | |||
44 | /* XLR uart does not need any mapping of regs */ | ||
45 | mmio = (nlm_reg_t *)(p->membase + (offset << p->regshift)); | ||
46 | |||
47 | /* See XLR/XLS errata */ | ||
48 | if (offset == UART_MSR) | ||
49 | value ^= 0xF0; | ||
50 | else if (offset == UART_MCR) | ||
51 | value ^= 0x3; | ||
52 | |||
53 | netlogic_write_reg(mmio, 0, value); | ||
54 | } | ||
55 | |||
56 | #define PORT(_irq) \ | ||
57 | { \ | ||
58 | .irq = _irq, \ | ||
59 | .regshift = 2, \ | ||
60 | .iotype = UPIO_MEM32, \ | ||
61 | .flags = (UPF_SKIP_TEST | \ | ||
62 | UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\ | ||
63 | .uartclk = PIC_CLKS_PER_SEC, \ | ||
64 | .type = PORT_16550A, \ | ||
65 | .serial_in = nlm_xlr_uart_in, \ | ||
66 | .serial_out = nlm_xlr_uart_out, \ | ||
67 | } | ||
68 | |||
69 | static struct plat_serial8250_port xlr_uart_data[] = { | ||
70 | PORT(PIC_UART_0_IRQ), | ||
71 | PORT(PIC_UART_1_IRQ), | ||
72 | {}, | ||
73 | }; | ||
74 | |||
75 | static struct platform_device uart_device = { | ||
76 | .name = "serial8250", | ||
77 | .id = PLAT8250_DEV_PLATFORM, | ||
78 | .dev = { | ||
79 | .platform_data = xlr_uart_data, | ||
80 | }, | ||
81 | }; | ||
82 | |||
83 | static int __init nlm_uart_init(void) | ||
84 | { | ||
85 | nlm_reg_t *mmio; | ||
86 | |||
87 | mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); | ||
88 | xlr_uart_data[0].membase = (void __iomem *)mmio; | ||
89 | xlr_uart_data[0].mapbase = CPHYSADDR((unsigned long)mmio); | ||
90 | |||
91 | mmio = netlogic_io_mmio(NETLOGIC_IO_UART_1_OFFSET); | ||
92 | xlr_uart_data[1].membase = (void __iomem *)mmio; | ||
93 | xlr_uart_data[1].mapbase = CPHYSADDR((unsigned long)mmio); | ||
94 | |||
95 | return platform_device_register(&uart_device); | ||
96 | } | ||
97 | |||
98 | arch_initcall(nlm_uart_init); | ||
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c new file mode 100644 index 000000000000..482802569e74 --- /dev/null +++ b/arch/mips/netlogic/xlr/setup.c | |||
@@ -0,0 +1,188 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/serial_8250.h> | ||
37 | #include <linux/pm.h> | ||
38 | |||
39 | #include <asm/reboot.h> | ||
40 | #include <asm/time.h> | ||
41 | #include <asm/bootinfo.h> | ||
42 | #include <asm/smp-ops.h> | ||
43 | |||
44 | #include <asm/netlogic/interrupt.h> | ||
45 | #include <asm/netlogic/psb-bootinfo.h> | ||
46 | |||
47 | #include <asm/netlogic/xlr/xlr.h> | ||
48 | #include <asm/netlogic/xlr/iomap.h> | ||
49 | #include <asm/netlogic/xlr/pic.h> | ||
50 | #include <asm/netlogic/xlr/gpio.h> | ||
51 | |||
52 | unsigned long netlogic_io_base = (unsigned long)(DEFAULT_NETLOGIC_IO_BASE); | ||
53 | unsigned long nlm_common_ebase = 0x0; | ||
54 | struct psb_info nlm_prom_info; | ||
55 | |||
56 | static void nlm_early_serial_setup(void) | ||
57 | { | ||
58 | struct uart_port s; | ||
59 | nlm_reg_t *uart_base; | ||
60 | |||
61 | uart_base = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); | ||
62 | memset(&s, 0, sizeof(s)); | ||
63 | s.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST; | ||
64 | s.iotype = UPIO_MEM32; | ||
65 | s.regshift = 2; | ||
66 | s.irq = PIC_UART_0_IRQ; | ||
67 | s.uartclk = PIC_CLKS_PER_SEC; | ||
68 | s.serial_in = nlm_xlr_uart_in; | ||
69 | s.serial_out = nlm_xlr_uart_out; | ||
70 | s.mapbase = (unsigned long)uart_base; | ||
71 | s.membase = (unsigned char __iomem *)uart_base; | ||
72 | early_serial_setup(&s); | ||
73 | } | ||
74 | |||
75 | static void nlm_linux_exit(void) | ||
76 | { | ||
77 | nlm_reg_t *mmio; | ||
78 | |||
79 | mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET); | ||
80 | /* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */ | ||
81 | netlogic_write_reg(mmio, NETLOGIC_GPIO_SWRESET_REG, 1); | ||
82 | for ( ; ; ) | ||
83 | cpu_wait(); | ||
84 | } | ||
85 | |||
86 | void __init plat_mem_setup(void) | ||
87 | { | ||
88 | panic_timeout = 5; | ||
89 | _machine_restart = (void (*)(char *))nlm_linux_exit; | ||
90 | _machine_halt = nlm_linux_exit; | ||
91 | pm_power_off = nlm_linux_exit; | ||
92 | } | ||
93 | |||
94 | const char *get_system_type(void) | ||
95 | { | ||
96 | return "Netlogic XLR/XLS Series"; | ||
97 | } | ||
98 | |||
99 | void __init prom_free_prom_memory(void) | ||
100 | { | ||
101 | /* Nothing yet */ | ||
102 | } | ||
103 | |||
104 | static void build_arcs_cmdline(int *argv) | ||
105 | { | ||
106 | int i, remain, len; | ||
107 | char *arg; | ||
108 | |||
109 | remain = sizeof(arcs_cmdline) - 1; | ||
110 | arcs_cmdline[0] = '\0'; | ||
111 | for (i = 0; argv[i] != 0; i++) { | ||
112 | arg = (char *)(long)argv[i]; | ||
113 | len = strlen(arg); | ||
114 | if (len + 1 > remain) | ||
115 | break; | ||
116 | strcat(arcs_cmdline, arg); | ||
117 | strcat(arcs_cmdline, " "); | ||
118 | remain -= len + 1; | ||
119 | } | ||
120 | |||
121 | /* Add the default options here */ | ||
122 | if ((strstr(arcs_cmdline, "console=")) == NULL) { | ||
123 | arg = "console=ttyS0,38400 "; | ||
124 | len = strlen(arg); | ||
125 | if (len > remain) | ||
126 | goto fail; | ||
127 | strcat(arcs_cmdline, arg); | ||
128 | remain -= len; | ||
129 | } | ||
130 | #ifdef CONFIG_BLK_DEV_INITRD | ||
131 | if ((strstr(arcs_cmdline, "rdinit=")) == NULL) { | ||
132 | arg = "rdinit=/sbin/init "; | ||
133 | len = strlen(arg); | ||
134 | if (len > remain) | ||
135 | goto fail; | ||
136 | strcat(arcs_cmdline, arg); | ||
137 | remain -= len; | ||
138 | } | ||
139 | #endif | ||
140 | return; | ||
141 | fail: | ||
142 | panic("Cannot add %s, command line too big!", arg); | ||
143 | } | ||
144 | |||
145 | static void prom_add_memory(void) | ||
146 | { | ||
147 | struct nlm_boot_mem_map *bootm; | ||
148 | u64 start, size; | ||
149 | u64 pref_backup = 512; /* avoid pref walking beyond end */ | ||
150 | int i; | ||
151 | |||
152 | bootm = (void *)(long)nlm_prom_info.psb_mem_map; | ||
153 | for (i = 0; i < bootm->nr_map; i++) { | ||
154 | if (bootm->map[i].type != BOOT_MEM_RAM) | ||
155 | continue; | ||
156 | start = bootm->map[i].addr; | ||
157 | size = bootm->map[i].size; | ||
158 | |||
159 | /* Workaround for using bootloader memory */ | ||
160 | if (i == 0 && start == 0 && size == 0x0c000000) | ||
161 | size = 0x0ff00000; | ||
162 | |||
163 | add_memory_region(start, size - pref_backup, BOOT_MEM_RAM); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | void __init prom_init(void) | ||
168 | { | ||
169 | int *argv, *envp; /* passed as 32 bit ptrs */ | ||
170 | struct psb_info *prom_infop; | ||
171 | |||
172 | /* truncate to 32 bit and sign extend all args */ | ||
173 | argv = (int *)(long)(int)fw_arg1; | ||
174 | envp = (int *)(long)(int)fw_arg2; | ||
175 | prom_infop = (struct psb_info *)(long)(int)fw_arg3; | ||
176 | |||
177 | nlm_prom_info = *prom_infop; | ||
178 | |||
179 | nlm_early_serial_setup(); | ||
180 | build_arcs_cmdline(argv); | ||
181 | nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); | ||
182 | prom_add_memory(); | ||
183 | |||
184 | #ifdef CONFIG_SMP | ||
185 | nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map); | ||
186 | register_smp_ops(&nlm_smp_ops); | ||
187 | #endif | ||
188 | } | ||
diff --git a/arch/mips/netlogic/xlr/smp.c b/arch/mips/netlogic/xlr/smp.c new file mode 100644 index 000000000000..b495a7f1433b --- /dev/null +++ b/arch/mips/netlogic/xlr/smp.c | |||
@@ -0,0 +1,225 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/smp.h> | ||
39 | #include <linux/irq.h> | ||
40 | |||
41 | #include <asm/mmu_context.h> | ||
42 | |||
43 | #include <asm/netlogic/interrupt.h> | ||
44 | #include <asm/netlogic/mips-extns.h> | ||
45 | |||
46 | #include <asm/netlogic/xlr/iomap.h> | ||
47 | #include <asm/netlogic/xlr/pic.h> | ||
48 | #include <asm/netlogic/xlr/xlr.h> | ||
49 | |||
50 | void core_send_ipi(int logical_cpu, unsigned int action) | ||
51 | { | ||
52 | int cpu = cpu_logical_map(logical_cpu); | ||
53 | u32 tid = cpu & 0x3; | ||
54 | u32 pid = (cpu >> 2) & 0x07; | ||
55 | u32 ipi = (tid << 16) | (pid << 20); | ||
56 | |||
57 | if (action & SMP_CALL_FUNCTION) | ||
58 | ipi |= IRQ_IPI_SMP_FUNCTION; | ||
59 | else if (action & SMP_RESCHEDULE_YOURSELF) | ||
60 | ipi |= IRQ_IPI_SMP_RESCHEDULE; | ||
61 | else | ||
62 | return; | ||
63 | |||
64 | pic_send_ipi(ipi); | ||
65 | } | ||
66 | |||
67 | void nlm_send_ipi_single(int cpu, unsigned int action) | ||
68 | { | ||
69 | core_send_ipi(cpu, action); | ||
70 | } | ||
71 | |||
72 | void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action) | ||
73 | { | ||
74 | int cpu; | ||
75 | |||
76 | for_each_cpu(cpu, mask) { | ||
77 | core_send_ipi(cpu, action); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | /* IRQ_IPI_SMP_FUNCTION Handler */ | ||
82 | void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) | ||
83 | { | ||
84 | smp_call_function_interrupt(); | ||
85 | } | ||
86 | |||
87 | /* IRQ_IPI_SMP_RESCHEDULE handler */ | ||
88 | void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc) | ||
89 | { | ||
90 | set_need_resched(); | ||
91 | } | ||
92 | |||
93 | void nlm_common_ipi_handler(int irq, struct pt_regs *regs) | ||
94 | { | ||
95 | if (irq == IRQ_IPI_SMP_FUNCTION) { | ||
96 | smp_call_function_interrupt(); | ||
97 | } else { | ||
98 | /* Announce that we are up for rescheduling */ | ||
99 | set_need_resched(); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * Called before going into mips code, early cpu init | ||
105 | */ | ||
106 | void nlm_early_init_secondary(void) | ||
107 | { | ||
108 | write_c0_ebase((uint32_t)nlm_common_ebase); | ||
109 | /* TLB partition here later */ | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * Code to run on secondary just after probing the CPU | ||
114 | */ | ||
115 | static void __cpuinit nlm_init_secondary(void) | ||
116 | { | ||
117 | nlm_smp_irq_init(); | ||
118 | } | ||
119 | |||
120 | void nlm_smp_finish(void) | ||
121 | { | ||
122 | #ifdef notyet | ||
123 | nlm_common_msgring_cpu_init(); | ||
124 | #endif | ||
125 | } | ||
126 | |||
127 | void nlm_cpus_done(void) | ||
128 | { | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Boot all other cpus in the system, initialize them, and bring them into | ||
133 | * the boot function | ||
134 | */ | ||
135 | int nlm_cpu_unblock[NR_CPUS]; | ||
136 | int nlm_cpu_ready[NR_CPUS]; | ||
137 | unsigned long nlm_next_gp; | ||
138 | unsigned long nlm_next_sp; | ||
139 | cpumask_t phys_cpu_present_map; | ||
140 | |||
141 | void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) | ||
142 | { | ||
143 | unsigned long gp = (unsigned long)task_thread_info(idle); | ||
144 | unsigned long sp = (unsigned long)__KSTK_TOS(idle); | ||
145 | int cpu = cpu_logical_map(logical_cpu); | ||
146 | |||
147 | nlm_next_sp = sp; | ||
148 | nlm_next_gp = gp; | ||
149 | |||
150 | /* barrier */ | ||
151 | __sync(); | ||
152 | nlm_cpu_unblock[cpu] = 1; | ||
153 | } | ||
154 | |||
155 | void __init nlm_smp_setup(void) | ||
156 | { | ||
157 | unsigned int boot_cpu; | ||
158 | int num_cpus, i; | ||
159 | |||
160 | boot_cpu = hard_smp_processor_id(); | ||
161 | cpus_clear(phys_cpu_present_map); | ||
162 | |||
163 | cpu_set(boot_cpu, phys_cpu_present_map); | ||
164 | __cpu_number_map[boot_cpu] = 0; | ||
165 | __cpu_logical_map[0] = boot_cpu; | ||
166 | cpu_set(0, cpu_possible_map); | ||
167 | |||
168 | num_cpus = 1; | ||
169 | for (i = 0; i < NR_CPUS; i++) { | ||
170 | if (nlm_cpu_ready[i]) { | ||
171 | cpu_set(i, phys_cpu_present_map); | ||
172 | __cpu_number_map[i] = num_cpus; | ||
173 | __cpu_logical_map[num_cpus] = i; | ||
174 | cpu_set(num_cpus, cpu_possible_map); | ||
175 | ++num_cpus; | ||
176 | } | ||
177 | } | ||
178 | |||
179 | pr_info("Phys CPU present map: %lx, possible map %lx\n", | ||
180 | (unsigned long)phys_cpu_present_map.bits[0], | ||
181 | (unsigned long)cpu_possible_map.bits[0]); | ||
182 | |||
183 | pr_info("Detected %i Slave CPU(s)\n", num_cpus); | ||
184 | } | ||
185 | |||
186 | void nlm_prepare_cpus(unsigned int max_cpus) | ||
187 | { | ||
188 | } | ||
189 | |||
190 | struct plat_smp_ops nlm_smp_ops = { | ||
191 | .send_ipi_single = nlm_send_ipi_single, | ||
192 | .send_ipi_mask = nlm_send_ipi_mask, | ||
193 | .init_secondary = nlm_init_secondary, | ||
194 | .smp_finish = nlm_smp_finish, | ||
195 | .cpus_done = nlm_cpus_done, | ||
196 | .boot_secondary = nlm_boot_secondary, | ||
197 | .smp_setup = nlm_smp_setup, | ||
198 | .prepare_cpus = nlm_prepare_cpus, | ||
199 | }; | ||
200 | |||
201 | unsigned long secondary_entry_point; | ||
202 | |||
203 | int nlm_wakeup_secondary_cpus(u32 wakeup_mask) | ||
204 | { | ||
205 | unsigned int tid, pid, ipi, i, boot_cpu; | ||
206 | void *reset_vec; | ||
207 | |||
208 | secondary_entry_point = (unsigned long)prom_pre_boot_secondary_cpus; | ||
209 | reset_vec = (void *)CKSEG1ADDR(0x1fc00000); | ||
210 | memcpy(reset_vec, nlm_boot_smp_nmi, 0x80); | ||
211 | boot_cpu = hard_smp_processor_id(); | ||
212 | |||
213 | for (i = 0; i < NR_CPUS; i++) { | ||
214 | if (i == boot_cpu) | ||
215 | continue; | ||
216 | if (wakeup_mask & (1u << i)) { | ||
217 | tid = i & 0x3; | ||
218 | pid = (i >> 2) & 0x7; | ||
219 | ipi = (tid << 16) | (pid << 20) | (1 << 8); | ||
220 | pic_send_ipi(ipi); | ||
221 | } | ||
222 | } | ||
223 | |||
224 | return 0; | ||
225 | } | ||
diff --git a/arch/mips/netlogic/xlr/smpboot.S b/arch/mips/netlogic/xlr/smpboot.S new file mode 100644 index 000000000000..b8e074402c99 --- /dev/null +++ b/arch/mips/netlogic/xlr/smpboot.S | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <asm/asm.h> | ||
36 | #include <asm/asm-offsets.h> | ||
37 | #include <asm/regdef.h> | ||
38 | #include <asm/mipsregs.h> | ||
39 | |||
40 | |||
41 | /* Don't jump to the Linux entry code on the bootloader stack; switch | ||
42 | * stacks here. The kernel might allocate bootloader memory before all | ||
43 | * the CPUs are brought up (e.g. the inode cache region), and we must | ||
44 | * not overwrite that memory | ||
45 | */ | ||
46 | NESTED(prom_pre_boot_secondary_cpus, 16, sp) | ||
47 | .set mips64 | ||
48 | mfc0 t0, $15, 1 # read ebase | ||
49 | andi t0, 0x1f # t0 has the processor_id() | ||
50 | sll t0, 2 # offset in cpu array | ||
51 | |||
52 | PTR_LA t1, nlm_cpu_ready # mark CPU ready | ||
53 | PTR_ADDU t1, t0 | ||
54 | li t2, 1 | ||
55 | sw t2, 0(t1) | ||
56 | |||
57 | PTR_LA t1, nlm_cpu_unblock | ||
58 | PTR_ADDU t1, t0 | ||
59 | 1: lw t2, 0(t1) # wait till unblocked | ||
60 | beqz t2, 1b | ||
61 | nop | ||
62 | |||
63 | PTR_LA t1, nlm_next_sp | ||
64 | PTR_L sp, 0(t1) | ||
65 | PTR_LA t1, nlm_next_gp | ||
66 | PTR_L gp, 0(t1) | ||
67 | |||
68 | PTR_LA t0, nlm_early_init_secondary | ||
69 | jalr t0 | ||
70 | nop | ||
71 | |||
72 | PTR_LA t0, smp_bootstrap | ||
73 | jr t0 | ||
74 | nop | ||
75 | END(prom_pre_boot_secondary_cpus) | ||
76 | |||
77 | NESTED(nlm_boot_smp_nmi, 0, sp) | ||
78 | .set push | ||
79 | .set noat | ||
80 | .set mips64 | ||
81 | .set noreorder | ||
82 | |||
83 | /* Clear the NMI and BEV bits */ | ||
84 | MFC0 k0, CP0_STATUS | ||
85 | li k1, 0xffb7ffff | ||
86 | and k0, k0, k1 | ||
87 | MTC0 k0, CP0_STATUS | ||
88 | |||
89 | PTR_LA k1, secondary_entry_point | ||
90 | PTR_L k0, 0(k1) | ||
91 | jr k0 | ||
92 | nop | ||
93 | .set pop | ||
94 | END(nlm_boot_smp_nmi) | ||
diff --git a/arch/mips/netlogic/xlr/time.c b/arch/mips/netlogic/xlr/time.c new file mode 100644 index 000000000000..0d81b262593c --- /dev/null +++ b/arch/mips/netlogic/xlr/time.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/init.h> | ||
36 | |||
37 | #include <asm/time.h> | ||
38 | #include <asm/netlogic/interrupt.h> | ||
39 | #include <asm/netlogic/psb-bootinfo.h> | ||
40 | |||
41 | unsigned int __cpuinit get_c0_compare_int(void) | ||
42 | { | ||
43 | return IRQ_TIMER; | ||
44 | } | ||
45 | |||
46 | void __init plat_time_init(void) | ||
47 | { | ||
48 | mips_hpt_frequency = nlm_prom_info.cpu_frequency; | ||
49 | pr_info("MIPS counter frequency [%ld]\n", | ||
50 | (unsigned long)mips_hpt_frequency); | ||
51 | } | ||
diff --git a/arch/mips/netlogic/xlr/xlr_console.c b/arch/mips/netlogic/xlr/xlr_console.c new file mode 100644 index 000000000000..759df0692201 --- /dev/null +++ b/arch/mips/netlogic/xlr/xlr_console.c | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/types.h> | ||
36 | #include <asm/netlogic/xlr/iomap.h> | ||
37 | |||
38 | void prom_putchar(char c) | ||
39 | { | ||
40 | nlm_reg_t *mmio; | ||
41 | |||
42 | mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); | ||
43 | while (netlogic_read_reg(mmio, 0x5) == 0) | ||
44 | ; | ||
45 | netlogic_write_reg(mmio, 0x0, c); | ||
46 | } | ||
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index c9209ca6c8e7..4df879937446 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile | |||
@@ -41,6 +41,7 @@ obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o | |||
41 | obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o | 41 | obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o |
42 | obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o | 42 | obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o |
43 | obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o | 43 | obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o |
44 | obj-$(CONFIG_SOC_XWAY) += pci-lantiq.o ops-lantiq.o | ||
44 | obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o | 45 | obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o |
45 | obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o | 46 | obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o |
46 | obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o | 47 | obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o |
@@ -55,6 +56,7 @@ obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o | |||
55 | obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o | 56 | obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o |
56 | obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o | 57 | obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o |
57 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o | 58 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o |
59 | obj-$(CONFIG_NLM_XLR) += pci-xlr.o | ||
58 | 60 | ||
59 | ifdef CONFIG_PCI_MSI | 61 | ifdef CONFIG_PCI_MSI |
60 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o | 62 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o |
diff --git a/arch/mips/pci/ops-lantiq.c b/arch/mips/pci/ops-lantiq.c new file mode 100644 index 000000000000..1f2afb55cc71 --- /dev/null +++ b/arch/mips/pci/ops-lantiq.c | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <asm/addrspace.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | |||
18 | #include <lantiq_soc.h> | ||
19 | |||
20 | #include "pci-lantiq.h" | ||
21 | |||
22 | #define LTQ_PCI_CFG_BUSNUM_SHF 16 | ||
23 | #define LTQ_PCI_CFG_DEVNUM_SHF 11 | ||
24 | #define LTQ_PCI_CFG_FUNNUM_SHF 8 | ||
25 | |||
26 | #define PCI_ACCESS_READ 0 | ||
27 | #define PCI_ACCESS_WRITE 1 | ||
28 | |||
29 | static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus, | ||
30 | unsigned int devfn, unsigned int where, u32 *data) | ||
31 | { | ||
32 | unsigned long cfg_base; | ||
33 | unsigned long flags; | ||
34 | u32 temp; | ||
35 | |||
36 | /* we support slots 0 to 15; dev_fn 0x68 (AD29) is the | ||
37 | SoC itself */ | ||
38 | if ((bus->number != 0) || ((devfn & 0xf8) > 0x78) | ||
39 | || ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68)) | ||
40 | return 1; | ||
41 | |||
42 | spin_lock_irqsave(&ebu_lock, flags); | ||
43 | |||
44 | cfg_base = (unsigned long) ltq_pci_mapped_cfg; | ||
45 | cfg_base |= (bus->number << LTQ_PCI_CFG_BUSNUM_SHF) | (devfn << | ||
46 | LTQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3); | ||
47 | |||
48 | /* Perform access */ | ||
49 | if (access_type == PCI_ACCESS_WRITE) { | ||
50 | ltq_w32(swab32(*data), ((u32 *)cfg_base)); | ||
51 | } else { | ||
52 | *data = ltq_r32(((u32 *)(cfg_base))); | ||
53 | *data = swab32(*data); | ||
54 | } | ||
55 | wmb(); | ||
56 | |||
57 | /* clear a possible master abort */ | ||
58 | cfg_base = (unsigned long) ltq_pci_mapped_cfg; | ||
59 | cfg_base |= (0x0 << LTQ_PCI_CFG_FUNNUM_SHF) + 4; | ||
60 | temp = ltq_r32(((u32 *)(cfg_base))); | ||
61 | temp = swab32(temp); | ||
62 | cfg_base = (unsigned long) ltq_pci_mapped_cfg; | ||
63 | cfg_base |= (0x68 << LTQ_PCI_CFG_FUNNUM_SHF) + 4; | ||
64 | ltq_w32(temp, ((u32 *)cfg_base)); | ||
65 | |||
66 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
67 | |||
68 | if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ)) | ||
69 | return 1; | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | int ltq_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn, | ||
75 | int where, int size, u32 *val) | ||
76 | { | ||
77 | u32 data = 0; | ||
78 | |||
79 | if (ltq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) | ||
80 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
81 | |||
82 | if (size == 1) | ||
83 | *val = (data >> ((where & 3) << 3)) & 0xff; | ||
84 | else if (size == 2) | ||
85 | *val = (data >> ((where & 3) << 3)) & 0xffff; | ||
86 | else | ||
87 | *val = data; | ||
88 | |||
89 | return PCIBIOS_SUCCESSFUL; | ||
90 | } | ||
91 | |||
92 | int ltq_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn, | ||
93 | int where, int size, u32 val) | ||
94 | { | ||
95 | u32 data = 0; | ||
96 | |||
97 | if (size == 4) { | ||
98 | data = val; | ||
99 | } else { | ||
100 | if (ltq_pci_config_access(PCI_ACCESS_READ, bus, | ||
101 | devfn, where, &data)) | ||
102 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
103 | |||
104 | if (size == 1) | ||
105 | data = (data & ~(0xff << ((where & 3) << 3))) | | ||
106 | (val << ((where & 3) << 3)); | ||
107 | else if (size == 2) | ||
108 | data = (data & ~(0xffff << ((where & 3) << 3))) | | ||
109 | (val << ((where & 3) << 3)); | ||
110 | } | ||
111 | |||
112 | if (ltq_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) | ||
113 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
114 | |||
115 | return PCIBIOS_SUCCESSFUL; | ||
116 | } | ||
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c new file mode 100644 index 000000000000..603d7493e966 --- /dev/null +++ b/arch/mips/pci/pci-lantiq.c | |||
@@ -0,0 +1,297 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/pci.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/vmalloc.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | |||
18 | #include <asm/pci.h> | ||
19 | #include <asm/gpio.h> | ||
20 | #include <asm/addrspace.h> | ||
21 | |||
22 | #include <lantiq_soc.h> | ||
23 | #include <lantiq_irq.h> | ||
24 | #include <lantiq_platform.h> | ||
25 | |||
26 | #include "pci-lantiq.h" | ||
27 | |||
28 | #define LTQ_PCI_CFG_BASE 0x17000000 | ||
29 | #define LTQ_PCI_CFG_SIZE 0x00008000 | ||
30 | #define LTQ_PCI_MEM_BASE 0x18000000 | ||
31 | #define LTQ_PCI_MEM_SIZE 0x02000000 | ||
32 | #define LTQ_PCI_IO_BASE 0x1AE00000 | ||
33 | #define LTQ_PCI_IO_SIZE 0x00200000 | ||
34 | |||
35 | #define PCI_CR_FCI_ADDR_MAP0 0x00C0 | ||
36 | #define PCI_CR_FCI_ADDR_MAP1 0x00C4 | ||
37 | #define PCI_CR_FCI_ADDR_MAP2 0x00C8 | ||
38 | #define PCI_CR_FCI_ADDR_MAP3 0x00CC | ||
39 | #define PCI_CR_FCI_ADDR_MAP4 0x00D0 | ||
40 | #define PCI_CR_FCI_ADDR_MAP5 0x00D4 | ||
41 | #define PCI_CR_FCI_ADDR_MAP6 0x00D8 | ||
42 | #define PCI_CR_FCI_ADDR_MAP7 0x00DC | ||
43 | #define PCI_CR_CLK_CTRL 0x0000 | ||
44 | #define PCI_CR_PCI_MOD 0x0030 | ||
45 | #define PCI_CR_PC_ARB 0x0080 | ||
46 | #define PCI_CR_FCI_ADDR_MAP11hg 0x00E4 | ||
47 | #define PCI_CR_BAR11MASK 0x0044 | ||
48 | #define PCI_CR_BAR12MASK 0x0048 | ||
49 | #define PCI_CR_BAR13MASK 0x004C | ||
50 | #define PCI_CS_BASE_ADDR1 0x0010 | ||
51 | #define PCI_CR_PCI_ADDR_MAP11 0x0064 | ||
52 | #define PCI_CR_FCI_BURST_LENGTH 0x00E8 | ||
53 | #define PCI_CR_PCI_EOI 0x002C | ||
54 | #define PCI_CS_STS_CMD 0x0004 | ||
55 | |||
56 | #define PCI_MASTER0_REQ_MASK_2BITS 8 | ||
57 | #define PCI_MASTER1_REQ_MASK_2BITS 10 | ||
58 | #define PCI_MASTER2_REQ_MASK_2BITS 12 | ||
59 | #define INTERNAL_ARB_ENABLE_BIT 0 | ||
60 | |||
61 | #define LTQ_CGU_IFCCR 0x0018 | ||
62 | #define LTQ_CGU_PCICR 0x0034 | ||
63 | |||
64 | #define ltq_pci_w32(x, y) ltq_w32((x), ltq_pci_membase + (y)) | ||
65 | #define ltq_pci_r32(x) ltq_r32(ltq_pci_membase + (x)) | ||
66 | |||
67 | #define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y)) | ||
68 | #define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x)) | ||
69 | |||
70 | struct ltq_pci_gpio_map { | ||
71 | int pin; | ||
72 | int alt0; | ||
73 | int alt1; | ||
74 | int dir; | ||
75 | char *name; | ||
76 | }; | ||
77 | |||
78 | /* the pci core can make use of the following gpios */ | ||
79 | static struct ltq_pci_gpio_map ltq_pci_gpio_map[] = { | ||
80 | { 0, 1, 0, 0, "pci-exin0" }, | ||
81 | { 1, 1, 0, 0, "pci-exin1" }, | ||
82 | { 2, 1, 0, 0, "pci-exin2" }, | ||
83 | { 39, 1, 0, 0, "pci-exin3" }, | ||
84 | { 10, 1, 0, 0, "pci-exin4" }, | ||
85 | { 9, 1, 0, 0, "pci-exin5" }, | ||
86 | { 30, 1, 0, 1, "pci-gnt1" }, | ||
87 | { 23, 1, 0, 1, "pci-gnt2" }, | ||
88 | { 19, 1, 0, 1, "pci-gnt3" }, | ||
89 | { 38, 1, 0, 1, "pci-gnt4" }, | ||
90 | { 29, 1, 0, 0, "pci-req1" }, | ||
91 | { 31, 1, 0, 0, "pci-req2" }, | ||
92 | { 3, 1, 0, 0, "pci-req3" }, | ||
93 | { 37, 1, 0, 0, "pci-req4" }, | ||
94 | }; | ||
95 | |||
96 | __iomem void *ltq_pci_mapped_cfg; | ||
97 | static __iomem void *ltq_pci_membase; | ||
98 | |||
99 | int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL; | ||
100 | |||
101 | /* Since the PCI REQ pins can be reused for other functionality, make it | ||
102 | possible to exclude those from interpretation by the PCI controller */ | ||
103 | static int ltq_pci_req_mask = 0xf; | ||
104 | |||
105 | static int *ltq_pci_irq_map; | ||
106 | |||
107 | struct pci_ops ltq_pci_ops = { | ||
108 | .read = ltq_pci_read_config_dword, | ||
109 | .write = ltq_pci_write_config_dword | ||
110 | }; | ||
111 | |||
112 | static struct resource pci_io_resource = { | ||
113 | .name = "pci io space", | ||
114 | .start = LTQ_PCI_IO_BASE, | ||
115 | .end = LTQ_PCI_IO_BASE + LTQ_PCI_IO_SIZE - 1, | ||
116 | .flags = IORESOURCE_IO | ||
117 | }; | ||
118 | |||
119 | static struct resource pci_mem_resource = { | ||
120 | .name = "pci memory space", | ||
121 | .start = LTQ_PCI_MEM_BASE, | ||
122 | .end = LTQ_PCI_MEM_BASE + LTQ_PCI_MEM_SIZE - 1, | ||
123 | .flags = IORESOURCE_MEM | ||
124 | }; | ||
125 | |||
126 | static struct pci_controller ltq_pci_controller = { | ||
127 | .pci_ops = <q_pci_ops, | ||
128 | .mem_resource = &pci_mem_resource, | ||
129 | .mem_offset = 0x00000000UL, | ||
130 | .io_resource = &pci_io_resource, | ||
131 | .io_offset = 0x00000000UL, | ||
132 | }; | ||
133 | |||
134 | int pcibios_plat_dev_init(struct pci_dev *dev) | ||
135 | { | ||
136 | if (ltqpci_plat_dev_init) | ||
137 | return ltqpci_plat_dev_init(dev); | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static u32 ltq_calc_bar11mask(void) | ||
143 | { | ||
144 | u32 mem, bar11mask; | ||
145 | |||
146 | /* The BAR11MASK value depends on the amount of memory available in the system. */ | ||
147 | mem = num_physpages * PAGE_SIZE; | ||
148 | bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8; | ||
149 | |||
150 | return bar11mask; | ||
151 | } | ||
152 | |||
153 | static void ltq_pci_setup_gpio(int gpio) | ||
154 | { | ||
155 | int i; | ||
156 | for (i = 0; i < ARRAY_SIZE(ltq_pci_gpio_map); i++) { | ||
157 | if (gpio & (1 << i)) { | ||
158 | ltq_gpio_request(ltq_pci_gpio_map[i].pin, | ||
159 | ltq_pci_gpio_map[i].alt0, | ||
160 | ltq_pci_gpio_map[i].alt1, | ||
161 | ltq_pci_gpio_map[i].dir, | ||
162 | ltq_pci_gpio_map[i].name); | ||
163 | } | ||
164 | } | ||
165 | ltq_gpio_request(21, 0, 0, 1, "pci-reset"); | ||
166 | ltq_pci_req_mask = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK; | ||
167 | } | ||
168 | |||
169 | static int __devinit ltq_pci_startup(struct ltq_pci_data *conf) | ||
170 | { | ||
171 | u32 temp_buffer; | ||
172 | |||
173 | /* set clock to 33 MHz */ | ||
174 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); | ||
175 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); | ||
176 | |||
177 | /* external or internal clock? */ | ||
178 | if (conf->clock) { | ||
179 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~(1 << 16), | ||
180 | LTQ_CGU_IFCCR); | ||
181 | ltq_cgu_w32((1 << 30), LTQ_CGU_PCICR); | ||
182 | } else { | ||
183 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | (1 << 16), | ||
184 | LTQ_CGU_IFCCR); | ||
185 | ltq_cgu_w32((1 << 31) | (1 << 30), LTQ_CGU_PCICR); | ||
186 | } | ||
187 | |||
188 | /* set up the pci clock and the gpios used by pci */ | ||
189 | ltq_pci_setup_gpio(conf->gpio); | ||
190 | |||
191 | /* enable auto-switching between PCI and EBU */ | ||
192 | ltq_pci_w32(0xa, PCI_CR_CLK_CTRL); | ||
193 | |||
194 | /* busy, i.e. configuration is not done, PCI access has to be retried */ | ||
195 | ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD); | ||
196 | wmb(); | ||
197 | /* BUS Master/IO/MEM access */ | ||
198 | ltq_pci_cfg_w32(ltq_pci_cfg_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD); | ||
199 | |||
200 | /* enable the 2 external PCI masters */ | ||
201 | temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB); | ||
202 | temp_buffer &= (~(ltq_pci_req_mask << 16)); | ||
203 | /* enable internal arbiter */ | ||
204 | temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT); | ||
205 | /* enable internal PCI master request */ | ||
206 | temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS)); | ||
207 | |||
208 | /* enable EBU request */ | ||
209 | temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS)); | ||
210 | |||
211 | /* enable requests from all external masters */ | ||
212 | temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS)); | ||
213 | ltq_pci_w32(temp_buffer, PCI_CR_PC_ARB); | ||
214 | wmb(); | ||
215 | |||
216 | /* setup BAR memory regions */ | ||
217 | ltq_pci_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0); | ||
218 | ltq_pci_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1); | ||
219 | ltq_pci_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2); | ||
220 | ltq_pci_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3); | ||
221 | ltq_pci_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4); | ||
222 | ltq_pci_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5); | ||
223 | ltq_pci_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6); | ||
224 | ltq_pci_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7); | ||
225 | ltq_pci_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg); | ||
226 | ltq_pci_w32(ltq_calc_bar11mask(), PCI_CR_BAR11MASK); | ||
227 | ltq_pci_w32(0, PCI_CR_PCI_ADDR_MAP11); | ||
228 | ltq_pci_w32(0, PCI_CS_BASE_ADDR1); | ||
229 | /* both TX and RX endian swap are enabled */ | ||
230 | ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI); | ||
231 | wmb(); | ||
232 | ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR12MASK) | 0x80000000, | ||
233 | PCI_CR_BAR12MASK); | ||
234 | ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR13MASK) | 0x80000000, | ||
235 | PCI_CR_BAR13MASK); | ||
236 | /* use 8 dw burst length */ | ||
237 | ltq_pci_w32(0x303, PCI_CR_FCI_BURST_LENGTH); | ||
238 | ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD); | ||
239 | wmb(); | ||
240 | |||
241 | /* setup irq line */ | ||
242 | ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_CON) | 0xc, LTQ_EBU_PCC_CON); | ||
243 | ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN); | ||
244 | |||
245 | /* toggle reset pin */ | ||
246 | __gpio_set_value(21, 0); | ||
247 | wmb(); | ||
248 | mdelay(1); | ||
249 | __gpio_set_value(21, 1); | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
254 | { | ||
255 | if (ltq_pci_irq_map[slot]) | ||
256 | return ltq_pci_irq_map[slot]; | ||
257 | printk(KERN_ERR "lq_pci: trying to map irq for unknown slot %d\n", | ||
258 | slot); | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | static int __devinit ltq_pci_probe(struct platform_device *pdev) | ||
264 | { | ||
265 | struct ltq_pci_data *ltq_pci_data = | ||
266 | (struct ltq_pci_data *) pdev->dev.platform_data; | ||
267 | pci_probe_only = 0; | ||
268 | ltq_pci_irq_map = ltq_pci_data->irq; | ||
269 | ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE); | ||
270 | ltq_pci_mapped_cfg = | ||
271 | ioremap_nocache(LTQ_PCI_CFG_BASE, LTQ_PCI_CFG_SIZE); | ||
272 | ltq_pci_controller.io_map_base = | ||
273 | (unsigned long)ioremap(LTQ_PCI_IO_BASE, LTQ_PCI_IO_SIZE - 1); | ||
274 | ltq_pci_startup(ltq_pci_data); | ||
275 | register_pci_controller(<q_pci_controller); | ||
276 | |||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | static struct platform_driver | ||
281 | ltq_pci_driver = { | ||
282 | .probe = ltq_pci_probe, | ||
283 | .driver = { | ||
284 | .name = "ltq_pci", | ||
285 | .owner = THIS_MODULE, | ||
286 | }, | ||
287 | }; | ||
288 | |||
289 | int __init pcibios_init(void) | ||
290 | { | ||
291 | int ret = platform_driver_register(<q_pci_driver); | ||
292 | if (ret) | ||
293 | printk(KERN_INFO "ltq_pci: Error registering platfom driver!"); | ||
294 | return ret; | ||
295 | } | ||
296 | |||
297 | arch_initcall(pcibios_init); | ||
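ltq_calc_bar11mask() above derives the inbound BAR mask from the installed memory: fls(mem) finds the most significant set bit of the memory size, everything below that bit is cleared out of the 0x0ffffff0 window, and the trailing "| 8" is kept from the driver unchanged. A small stand-alone sketch of the same arithmetic, assuming 64 MB of RAM and with the kernel's fls() open-coded since it is not available in user space:

#include <stdint.h>
#include <stdio.h>

/* Open-coded stand-in for the kernel's fls(): 1-based index of the most
 * significant set bit, 0 for a zero argument. */
static int fls_word(uint32_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	uint32_t mem = 64u << 20;	/* assume 64 MB of system RAM */

	/* Same expression as ltq_calc_bar11mask(): clear every address bit
	 * below the memory size inside the 0x0ffffff0 window; the trailing
	 * "| 8" is carried over from the driver unchanged. */
	uint32_t bar11mask = (0x0ffffff0 & ~((1u << (fls_word(mem) - 1)) - 1)) | 8;

	printf("BAR11MASK for 64 MB: 0x%08x\n", (unsigned int)bar11mask);	/* 0x0c000008 */
	return 0;
}

For a 64 MB system the mask comes out as 0x0c000008, i.e. the low 26 address bits (the memory size) are excluded from decoding.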
diff --git a/arch/mips/pci/pci-lantiq.h b/arch/mips/pci/pci-lantiq.h new file mode 100644 index 000000000000..66bf6cd6be3c --- /dev/null +++ b/arch/mips/pci/pci-lantiq.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LTQ_PCI_H__ | ||
10 | #define _LTQ_PCI_H__ | ||
11 | |||
12 | extern __iomem void *ltq_pci_mapped_cfg; | ||
13 | extern int ltq_pci_read_config_dword(struct pci_bus *bus, | ||
14 | unsigned int devfn, int where, int size, u32 *val); | ||
15 | extern int ltq_pci_write_config_dword(struct pci_bus *bus, | ||
16 | unsigned int devfn, int where, int size, u32 val); | ||
17 | |||
18 | #endif | ||
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c new file mode 100644 index 000000000000..38fece16c435 --- /dev/null +++ b/arch/mips/pci/pci-xlr.c | |||
@@ -0,0 +1,214 @@ | |||
1 | /* | ||
2 | * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights | ||
3 | * reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the NetLogic | ||
9 | * license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * | ||
15 | * 1. Redistributions of source code must retain the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer. | ||
17 | * 2. Redistributions in binary form must reproduce the above copyright | ||
18 | * notice, this list of conditions and the following disclaimer in | ||
19 | * the documentation and/or other materials provided with the | ||
20 | * distribution. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE | ||
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | ||
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | ||
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | ||
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/types.h> | ||
36 | #include <linux/pci.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/init.h> | ||
39 | #include <linux/mm.h> | ||
40 | #include <linux/console.h> | ||
41 | |||
42 | #include <asm/io.h> | ||
43 | |||
44 | #include <asm/netlogic/interrupt.h> | ||
45 | #include <asm/netlogic/xlr/iomap.h> | ||
46 | #include <asm/netlogic/xlr/pic.h> | ||
47 | #include <asm/netlogic/xlr/xlr.h> | ||
48 | |||
49 | static void *pci_config_base; | ||
50 | |||
51 | #define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off)) | ||
52 | |||
53 | /* PCI ops */ | ||
54 | static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn, | ||
55 | int where) | ||
56 | { | ||
57 | u32 data; | ||
58 | u32 *cfgaddr; | ||
59 | |||
60 | cfgaddr = (u32 *)(pci_config_base + | ||
61 | pci_cfg_addr(bus->number, devfn, where & ~3)); | ||
62 | data = *cfgaddr; | ||
63 | return cpu_to_le32(data); | ||
64 | } | ||
65 | |||
66 | static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn, | ||
67 | int where, u32 data) | ||
68 | { | ||
69 | u32 *cfgaddr; | ||
70 | |||
71 | cfgaddr = (u32 *)(pci_config_base + | ||
72 | pci_cfg_addr(bus->number, devfn, where & ~3)); | ||
73 | *cfgaddr = cpu_to_le32(data); | ||
74 | } | ||
75 | |||
76 | static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn, | ||
77 | int where, int size, u32 *val) | ||
78 | { | ||
79 | u32 data; | ||
80 | |||
81 | if ((size == 2) && (where & 1)) | ||
82 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
83 | else if ((size == 4) && (where & 3)) | ||
84 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
85 | |||
86 | data = pci_cfg_read_32bit(bus, devfn, where); | ||
87 | |||
88 | if (size == 1) | ||
89 | *val = (data >> ((where & 3) << 3)) & 0xff; | ||
90 | else if (size == 2) | ||
91 | *val = (data >> ((where & 3) << 3)) & 0xffff; | ||
92 | else | ||
93 | *val = data; | ||
94 | |||
95 | return PCIBIOS_SUCCESSFUL; | ||
96 | } | ||
97 | |||
98 | |||
99 | static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn, | ||
100 | int where, int size, u32 val) | ||
101 | { | ||
102 | u32 data; | ||
103 | |||
104 | if ((size == 2) && (where & 1)) | ||
105 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
106 | else if ((size == 4) && (where & 3)) | ||
107 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
108 | |||
109 | data = pci_cfg_read_32bit(bus, devfn, where); | ||
110 | |||
111 | if (size == 1) | ||
112 | data = (data & ~(0xff << ((where & 3) << 3))) | | ||
113 | (val << ((where & 3) << 3)); | ||
114 | else if (size == 2) | ||
115 | data = (data & ~(0xffff << ((where & 3) << 3))) | | ||
116 | (val << ((where & 3) << 3)); | ||
117 | else | ||
118 | data = val; | ||
119 | |||
120 | pci_cfg_write_32bit(bus, devfn, where, data); | ||
121 | |||
122 | return PCIBIOS_SUCCESSFUL; | ||
123 | } | ||
124 | |||
125 | struct pci_ops nlm_pci_ops = { | ||
126 | .read = nlm_pcibios_read, | ||
127 | .write = nlm_pcibios_write | ||
128 | }; | ||
129 | |||
130 | static struct resource nlm_pci_mem_resource = { | ||
131 | .name = "XLR PCI MEM", | ||
132 | .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */ | ||
133 | .end = 0xdfffffffUL, | ||
134 | .flags = IORESOURCE_MEM, | ||
135 | }; | ||
136 | |||
137 | static struct resource nlm_pci_io_resource = { | ||
138 | .name = "XLR IO MEM", | ||
139 | .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */ | ||
140 | .end = 0x100fffffUL, | ||
141 | .flags = IORESOURCE_IO, | ||
142 | }; | ||
143 | |||
144 | struct pci_controller nlm_pci_controller = { | ||
145 | .index = 0, | ||
146 | .pci_ops = &nlm_pci_ops, | ||
147 | .mem_resource = &nlm_pci_mem_resource, | ||
148 | .mem_offset = 0x00000000UL, | ||
149 | .io_resource = &nlm_pci_io_resource, | ||
150 | .io_offset = 0x00000000UL, | ||
151 | }; | ||
152 | |||
153 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
154 | { | ||
155 | if (!nlm_chip_is_xls()) | ||
156 | return PIC_PCIX_IRQ; /* for XLR just one IRQ */ | ||
157 | |||
158 | /* | ||
159 | * For XLS PCIe, there is an IRQ per Link, find out which | ||
160 | * link the device is on to assign interrupts | ||
161 | */ | ||
162 | if (dev->bus->self == NULL) | ||
163 | return 0; | ||
164 | |||
165 | switch (dev->bus->self->devfn) { | ||
166 | case 0x0: | ||
167 | return PIC_PCIE_LINK0_IRQ; | ||
168 | case 0x8: | ||
169 | return PIC_PCIE_LINK1_IRQ; | ||
170 | case 0x10: | ||
171 | if (nlm_chip_is_xls_b()) | ||
172 | return PIC_PCIE_XLSB0_LINK2_IRQ; | ||
173 | else | ||
174 | return PIC_PCIE_LINK2_IRQ; | ||
175 | case 0x18: | ||
176 | if (nlm_chip_is_xls_b()) | ||
177 | return PIC_PCIE_XLSB0_LINK3_IRQ; | ||
178 | else | ||
179 | return PIC_PCIE_LINK3_IRQ; | ||
180 | } | ||
181 | WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn); | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | /* Do platform specific device initialization at pci_enable_device() time */ | ||
186 | int pcibios_plat_dev_init(struct pci_dev *dev) | ||
187 | { | ||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static int __init pcibios_init(void) | ||
192 | { | ||
193 | /* PSB assigns PCI resources */ | ||
194 | pci_probe_only = 1; | ||
195 | pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20); | ||
196 | |||
197 | /* Extend IO port for memory mapped io */ | ||
198 | ioport_resource.start = 0; | ||
199 | ioport_resource.end = ~0; | ||
200 | |||
201 | set_io_port_base(CKSEG1); | ||
202 | nlm_pci_controller.io_map_base = CKSEG1; | ||
203 | |||
204 | pr_info("Registering XLR/XLS PCIX/PCIE Controller.\n"); | ||
205 | register_pci_controller(&nlm_pci_controller); | ||
206 | |||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | arch_initcall(pcibios_init); | ||
211 | |||
212 | struct pci_fixup pcibios_fixups[] = { | ||
213 | {0} | ||
214 | }; | ||
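pci-xlr.c above reaches configuration space through a flat memory mapping, with pci_cfg_addr() packing bus, devfn and register offset into a single offset: 64 KB per bus, 256 bytes per devfn, the low byte selecting the register. A minimal sketch of that encoding, with CFG_ADDR() and DEVFN() as stand-ins for the driver macro and the kernel's PCI_DEVFN():

#include <stdio.h>

/* Same layout as the pci_cfg_addr() macro in pci-xlr.c: 64 KB of config
 * space per bus, 256 bytes per devfn, low byte selects the register. */
#define CFG_ADDR(bus, devfn, off)	(((bus) << 16) | ((devfn) << 8) | (off))
/* DEVFN() mirrors the kernel's PCI_DEVFN(): device in bits 7..3, function in bits 2..0. */
#define DEVFN(dev, fn)			(((dev) << 3) | (fn))

int main(void)
{
	/* Offset of BAR0 (config register 0x10) of bus 1, device 3,
	 * function 0, relative to the mapped config window. */
	unsigned int off = CFG_ADDR(1, DEVFN(3, 0), 0x10);

	printf("config offset = 0x%06x\n", off);	/* prints 0x011810 */
	return 0;
}

The resulting offset (0x011810 here) is simply added to pci_config_base, which is why a single 16 MB ioremap() in pcibios_init() covers all 256 devfns of every bus.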
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c index f9b9dcdfa9dd..98fd0099d964 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c | |||
@@ -97,7 +97,7 @@ static int msp_per_irq_set_affinity(struct irq_data *d, | |||
97 | 97 | ||
98 | static struct irq_chip msp_per_irq_controller = { | 98 | static struct irq_chip msp_per_irq_controller = { |
99 | .name = "MSP_PER", | 99 | .name = "MSP_PER", |
100 | .irq_enable = unmask_per_irq. | 100 | .irq_enable = unmask_per_irq, |
101 | .irq_disable = mask_per_irq, | 101 | .irq_disable = mask_per_irq, |
102 | .irq_ack = msp_per_irq_ack, | 102 | .irq_ack = msp_per_irq_ack, |
103 | #ifdef CONFIG_SMP | 103 | #ifdef CONFIG_SMP |
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S index dbb5c7b4b70f..f8a751c03282 100644 --- a/arch/mips/power/hibernate.S +++ b/arch/mips/power/hibernate.S | |||
@@ -35,7 +35,7 @@ LEAF(swsusp_arch_resume) | |||
35 | 0: | 35 | 0: |
36 | PTR_L t1, PBE_ADDRESS(t0) /* source */ | 36 | PTR_L t1, PBE_ADDRESS(t0) /* source */ |
37 | PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ | 37 | PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ |
38 | PTR_ADDIU t3, t1, PAGE_SIZE | 38 | PTR_ADDU t3, t1, PAGE_SIZE |
39 | 1: | 39 | 1: |
40 | REG_L t8, (t1) | 40 | REG_L t8, (t1) |
41 | REG_S t8, (t2) | 41 | REG_S t8, (t2) |
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c index 37de05d595e7..6c47dfeb7be3 100644 --- a/arch/mips/rb532/gpio.c +++ b/arch/mips/rb532/gpio.c | |||
@@ -185,7 +185,7 @@ int __init rb532_gpio_init(void) | |||
185 | struct resource *r; | 185 | struct resource *r; |
186 | 186 | ||
187 | r = rb532_gpio_reg0_res; | 187 | r = rb532_gpio_reg0_res; |
188 | rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start); | 188 | rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r)); |
189 | 189 | ||
190 | if (!rb532_gpio_chip->regbase) { | 190 | if (!rb532_gpio_chip->regbase) { |
191 | printk(KERN_ERR "rb532: cannot remap GPIO register 0\n"); | 191 | printk(KERN_ERR "rb532: cannot remap GPIO register 0\n"); |
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c index deddbf0ebe5c..698904daf901 100644 --- a/arch/mips/sgi-ip22/ip22-platform.c +++ b/arch/mips/sgi-ip22/ip22-platform.c | |||
@@ -132,7 +132,7 @@ static struct platform_device eth1_device = { | |||
132 | */ | 132 | */ |
133 | static int __init sgiseeq_devinit(void) | 133 | static int __init sgiseeq_devinit(void) |
134 | { | 134 | { |
135 | unsigned int tmp; | 135 | unsigned int pbdma __maybe_unused; |
136 | int res, i; | 136 | int res, i; |
137 | 137 | ||
138 | eth0_pd.hpc = hpc3c0; | 138 | eth0_pd.hpc = hpc3c0; |
@@ -151,7 +151,7 @@ static int __init sgiseeq_devinit(void) | |||
151 | 151 | ||
152 | /* Second HPC is missing? */ | 152 | /* Second HPC is missing? */ |
153 | if (ip22_is_fullhouse() || | 153 | if (ip22_is_fullhouse() || |
154 | get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) | 154 | get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1])) |
155 | return 0; | 155 | return 0; |
156 | 156 | ||
157 | sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 | | 157 | sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 | |
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c index 603fc91c1030..1a94c9894188 100644 --- a/arch/mips/sgi-ip22/ip22-time.c +++ b/arch/mips/sgi-ip22/ip22-time.c | |||
@@ -32,7 +32,7 @@ | |||
32 | static unsigned long dosample(void) | 32 | static unsigned long dosample(void) |
33 | { | 33 | { |
34 | u32 ct0, ct1; | 34 | u32 ct0, ct1; |
35 | u8 msb, lsb; | 35 | u8 msb; |
36 | 36 | ||
37 | /* Start the counter. */ | 37 | /* Start the counter. */ |
38 | sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | | 38 | sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | |
@@ -46,7 +46,7 @@ static unsigned long dosample(void) | |||
46 | /* Latch and spin until top byte of counter2 is zero */ | 46 | /* Latch and spin until top byte of counter2 is zero */ |
47 | do { | 47 | do { |
48 | writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); | 48 | writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); |
49 | lsb = readb(&sgint->tcnt2); | 49 | (void) readb(&sgint->tcnt2); |
50 | msb = readb(&sgint->tcnt2); | 50 | msb = readb(&sgint->tcnt2); |
51 | ct1 = read_c0_count(); | 51 | ct1 = read_c0_count(); |
52 | } while (msb); | 52 | } while (msb); |
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c index a1fa4abb3f6a..cd0d5b06cd83 100644 --- a/arch/mips/sgi-ip27/ip27-hubio.c +++ b/arch/mips/sgi-ip27/ip27-hubio.c | |||
@@ -29,7 +29,6 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, | |||
29 | unsigned long xtalk_addr, size_t size) | 29 | unsigned long xtalk_addr, size_t size) |
30 | { | 30 | { |
31 | nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); | 31 | nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); |
32 | volatile hubreg_t junk; | ||
33 | unsigned i; | 32 | unsigned i; |
34 | 33 | ||
35 | /* use small-window mapping if possible */ | 34 | /* use small-window mapping if possible */ |
@@ -64,7 +63,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, | |||
64 | * after we write it. | 63 | * after we write it. |
65 | */ | 64 | */ |
66 | IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); | 65 | IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); |
67 | junk = HUB_L(IIO_ITTE_GET(nasid, i)); | 66 | (void) HUB_L(IIO_ITTE_GET(nasid, i)); |
68 | 67 | ||
69 | return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); | 68 | return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); |
70 | } | 69 | } |
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c index c3d30a88daf3..1d1919a44e88 100644 --- a/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/arch/mips/sgi-ip27/ip27-klnuma.c | |||
@@ -54,11 +54,8 @@ void __init setup_replication_mask(void) | |||
54 | 54 | ||
55 | static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) | 55 | static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) |
56 | { | 56 | { |
57 | cnodeid_t client_cnode; | ||
58 | kern_vars_t *kvp; | 57 | kern_vars_t *kvp; |
59 | 58 | ||
60 | client_cnode = NASID_TO_COMPACT_NODEID(client_nasid); | ||
61 | |||
62 | kvp = &hub_data(client_nasid)->kern_vars; | 59 | kvp = &hub_data(client_nasid)->kern_vars; |
63 | 60 | ||
64 | KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; | 61 | KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; |
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index a152538d3c97..3f810c9cbf83 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c | |||
@@ -66,18 +66,7 @@ static int rt_next_event(unsigned long delta, struct clock_event_device *evt) | |||
66 | static void rt_set_mode(enum clock_event_mode mode, | 66 | static void rt_set_mode(enum clock_event_mode mode, |
67 | struct clock_event_device *evt) | 67 | struct clock_event_device *evt) |
68 | { | 68 | { |
69 | switch (mode) { | 69 | /* Nothing to do ... */ |
70 | case CLOCK_EVT_MODE_ONESHOT: | ||
71 | /* The only mode supported */ | ||
72 | break; | ||
73 | |||
74 | case CLOCK_EVT_MODE_PERIODIC: | ||
75 | case CLOCK_EVT_MODE_UNUSED: | ||
76 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
77 | case CLOCK_EVT_MODE_RESUME: | ||
78 | /* Nothing to do */ | ||
79 | break; | ||
80 | } | ||
81 | } | 70 | } |
82 | 71 | ||
83 | int rt_timer_irq; | 72 | int rt_timer_irq; |
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index c76151b56568..0904d4d30cb3 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c | |||
@@ -95,7 +95,7 @@ static void __init sni_a20r_timer_setup(void) | |||
95 | static __init unsigned long dosample(void) | 95 | static __init unsigned long dosample(void) |
96 | { | 96 | { |
97 | u32 ct0, ct1; | 97 | u32 ct0, ct1; |
98 | volatile u8 msb, lsb; | 98 | volatile u8 msb; |
99 | 99 | ||
100 | /* Start the counter. */ | 100 | /* Start the counter. */ |
101 | outb_p(0x34, 0x43); | 101 | outb_p(0x34, 0x43); |
@@ -108,7 +108,7 @@ static __init unsigned long dosample(void) | |||
108 | /* Latch and spin until top byte of counter0 is zero */ | 108 | /* Latch and spin until top byte of counter0 is zero */ |
109 | do { | 109 | do { |
110 | outb(0x00, 0x43); | 110 | outb(0x00, 0x43); |
111 | lsb = inb(0x40); | 111 | (void) inb(0x40); |
112 | msb = inb(0x40); | 112 | msb = inb(0x40); |
113 | ct1 = read_c0_count(); | 113 | ct1 = read_c0_count(); |
114 | } while (msb); | 114 | } while (msb); |
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index 7005ee0b074d..49baddcdd14e 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h | |||
@@ -3,7 +3,6 @@ | |||
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | #include <linux/irq.h> | 5 | #include <linux/irq.h> |
6 | #include <linux/sysdev.h> | ||
7 | #include <asm/dcr.h> | 6 | #include <asm/dcr.h> |
8 | #include <asm/msi_bitmap.h> | 7 | #include <asm/msi_bitmap.h> |
9 | 8 | ||
@@ -320,8 +319,6 @@ struct mpic | |||
320 | /* link */ | 319 | /* link */ |
321 | struct mpic *next; | 320 | struct mpic *next; |
322 | 321 | ||
323 | struct sys_device sysdev; | ||
324 | |||
325 | #ifdef CONFIG_PM | 322 | #ifdef CONFIG_PM |
326 | struct mpic_irq_save *save_data; | 323 | struct mpic_irq_save *save_data; |
327 | #endif | 324 | #endif |
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 188272934cfb..104faa8aa23c 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c | |||
@@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = { | |||
318 | .end = mpc83xx_suspend_end, | 318 | .end = mpc83xx_suspend_end, |
319 | }; | 319 | }; |
320 | 320 | ||
321 | static struct of_device_id pmc_match[]; | ||
321 | static int pmc_probe(struct platform_device *ofdev) | 322 | static int pmc_probe(struct platform_device *ofdev) |
322 | { | 323 | { |
324 | const struct of_device_id *match; | ||
323 | struct device_node *np = ofdev->dev.of_node; | 325 | struct device_node *np = ofdev->dev.of_node; |
324 | struct resource res; | 326 | struct resource res; |
325 | struct pmc_type *type; | 327 | struct pmc_type *type; |
326 | int ret = 0; | 328 | int ret = 0; |
327 | 329 | ||
328 | if (!ofdev->dev.of_match) | 330 | match = of_match_device(pmc_match, &ofdev->dev); |
331 | if (!match) | ||
329 | return -EINVAL; | 332 | return -EINVAL; |
330 | 333 | ||
331 | type = ofdev->dev.of_match->data; | 334 | type = match->data; |
332 | 335 | ||
333 | if (!of_device_is_available(np)) | 336 | if (!of_device_is_available(np)) |
334 | return -ENODEV; | 337 | return -ENODEV; |
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index acfaccea5f4f..3675da73623f 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
33 | #include <linux/mutex.h> | 33 | #include <linux/mutex.h> |
34 | #include <linux/linux_logo.h> | 34 | #include <linux/linux_logo.h> |
35 | #include <linux/syscore_ops.h> | ||
35 | #include <asm/spu.h> | 36 | #include <asm/spu.h> |
36 | #include <asm/spu_priv1.h> | 37 | #include <asm/spu_priv1.h> |
37 | #include <asm/spu_csa.h> | 38 | #include <asm/spu_csa.h> |
@@ -521,18 +522,8 @@ void spu_init_channels(struct spu *spu) | |||
521 | } | 522 | } |
522 | EXPORT_SYMBOL_GPL(spu_init_channels); | 523 | EXPORT_SYMBOL_GPL(spu_init_channels); |
523 | 524 | ||
524 | static int spu_shutdown(struct sys_device *sysdev) | ||
525 | { | ||
526 | struct spu *spu = container_of(sysdev, struct spu, sysdev); | ||
527 | |||
528 | spu_free_irqs(spu); | ||
529 | spu_destroy_spu(spu); | ||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static struct sysdev_class spu_sysdev_class = { | 525 | static struct sysdev_class spu_sysdev_class = { |
534 | .name = "spu", | 526 | .name = "spu", |
535 | .shutdown = spu_shutdown, | ||
536 | }; | 527 | }; |
537 | 528 | ||
538 | int spu_add_sysdev_attr(struct sysdev_attribute *attr) | 529 | int spu_add_sysdev_attr(struct sysdev_attribute *attr) |
@@ -797,6 +788,22 @@ static inline void crash_register_spus(struct list_head *list) | |||
797 | } | 788 | } |
798 | #endif | 789 | #endif |
799 | 790 | ||
791 | static void spu_shutdown(void) | ||
792 | { | ||
793 | struct spu *spu; | ||
794 | |||
795 | mutex_lock(&spu_full_list_mutex); | ||
796 | list_for_each_entry(spu, &spu_full_list, full_list) { | ||
797 | spu_free_irqs(spu); | ||
798 | spu_destroy_spu(spu); | ||
799 | } | ||
800 | mutex_unlock(&spu_full_list_mutex); | ||
801 | } | ||
802 | |||
803 | static struct syscore_ops spu_syscore_ops = { | ||
804 | .shutdown = spu_shutdown, | ||
805 | }; | ||
806 | |||
800 | static int __init init_spu_base(void) | 807 | static int __init init_spu_base(void) |
801 | { | 808 | { |
802 | int i, ret = 0; | 809 | int i, ret = 0; |
@@ -830,6 +837,7 @@ static int __init init_spu_base(void) | |||
830 | crash_register_spus(&spu_full_list); | 837 | crash_register_spus(&spu_full_list); |
831 | mutex_unlock(&spu_full_list_mutex); | 838 | mutex_unlock(&spu_full_list_mutex); |
832 | spu_add_sysdev_attr(&attr_stat); | 839 | spu_add_sysdev_attr(&attr_stat); |
840 | register_syscore_ops(&spu_syscore_ops); | ||
833 | 841 | ||
834 | spu_init_affinity(); | 842 | spu_init_affinity(); |
835 | 843 | ||
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 023f24086a0a..7c18a1607d1c 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/signal.h> | 21 | #include <linux/signal.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/sysdev.h> | 24 | #include <linux/syscore_ops.h> |
25 | #include <linux/adb.h> | 25 | #include <linux/adb.h> |
26 | #include <linux/pmu.h> | 26 | #include <linux/pmu.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
@@ -677,7 +677,7 @@ not_found: | |||
677 | return viaint; | 677 | return viaint; |
678 | } | 678 | } |
679 | 679 | ||
680 | static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) | 680 | static int pmacpic_suspend(void) |
681 | { | 681 | { |
682 | int viaint = pmacpic_find_viaint(); | 682 | int viaint = pmacpic_find_viaint(); |
683 | 683 | ||
@@ -698,7 +698,7 @@ static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) | |||
698 | return 0; | 698 | return 0; |
699 | } | 699 | } |
700 | 700 | ||
701 | static int pmacpic_resume(struct sys_device *sysdev) | 701 | static void pmacpic_resume(void) |
702 | { | 702 | { |
703 | int i; | 703 | int i; |
704 | 704 | ||
@@ -709,39 +709,19 @@ static int pmacpic_resume(struct sys_device *sysdev) | |||
709 | for (i = 0; i < max_real_irqs; ++i) | 709 | for (i = 0; i < max_real_irqs; ++i) |
710 | if (test_bit(i, sleep_save_mask)) | 710 | if (test_bit(i, sleep_save_mask)) |
711 | pmac_unmask_irq(irq_get_irq_data(i)); | 711 | pmac_unmask_irq(irq_get_irq_data(i)); |
712 | |||
713 | return 0; | ||
714 | } | 712 | } |
715 | 713 | ||
716 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | 714 | static struct syscore_ops pmacpic_syscore_ops = { |
717 | 715 | .suspend = pmacpic_suspend, | |
718 | static struct sysdev_class pmacpic_sysclass = { | 716 | .resume = pmacpic_resume, |
719 | .name = "pmac_pic", | ||
720 | }; | 717 | }; |
721 | 718 | ||
722 | static struct sys_device device_pmacpic = { | 719 | static int __init init_pmacpic_syscore(void) |
723 | .id = 0, | ||
724 | .cls = &pmacpic_sysclass, | ||
725 | }; | ||
726 | |||
727 | static struct sysdev_driver driver_pmacpic = { | ||
728 | #if defined(CONFIG_PM) && defined(CONFIG_PPC32) | ||
729 | .suspend = &pmacpic_suspend, | ||
730 | .resume = &pmacpic_resume, | ||
731 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | ||
732 | }; | ||
733 | |||
734 | static int __init init_pmacpic_sysfs(void) | ||
735 | { | 720 | { |
736 | #ifdef CONFIG_PPC32 | 721 | register_syscore_ops(&pmacpic_syscore_ops); |
737 | if (max_irqs == 0) | ||
738 | return -ENODEV; | ||
739 | #endif | ||
740 | printk(KERN_DEBUG "Registering pmac pic with sysfs...\n"); | ||
741 | sysdev_class_register(&pmacpic_sysclass); | ||
742 | sysdev_register(&device_pmacpic); | ||
743 | sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic); | ||
744 | return 0; | 722 | return 0; |
745 | } | 723 | } |
746 | machine_subsys_initcall(powermac, init_pmacpic_sysfs); | ||
747 | 724 | ||
725 | machine_subsys_initcall(powermac, init_pmacpic_syscore); | ||
726 | |||
727 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | ||
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index d5679dc1e20f..01cd2f089512 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -304,8 +304,10 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi, | |||
304 | return 0; | 304 | return 0; |
305 | } | 305 | } |
306 | 306 | ||
307 | static const struct of_device_id fsl_of_msi_ids[]; | ||
307 | static int __devinit fsl_of_msi_probe(struct platform_device *dev) | 308 | static int __devinit fsl_of_msi_probe(struct platform_device *dev) |
308 | { | 309 | { |
310 | const struct of_device_id *match; | ||
309 | struct fsl_msi *msi; | 311 | struct fsl_msi *msi; |
310 | struct resource res; | 312 | struct resource res; |
311 | int err, i, j, irq_index, count; | 313 | int err, i, j, irq_index, count; |
@@ -316,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev) | |||
316 | u32 offset; | 318 | u32 offset; |
317 | static const u32 all_avail[] = { 0, NR_MSI_IRQS }; | 319 | static const u32 all_avail[] = { 0, NR_MSI_IRQS }; |
318 | 320 | ||
319 | if (!dev->dev.of_match) | 321 | match = of_match_device(fsl_of_msi_ids, &dev->dev); |
322 | if (!match) | ||
320 | return -EINVAL; | 323 | return -EINVAL; |
321 | features = dev->dev.of_match->data; | 324 | features = match->data; |
322 | 325 | ||
323 | printk(KERN_DEBUG "Setting up Freescale MSI support\n"); | 326 | printk(KERN_DEBUG "Setting up Freescale MSI support\n"); |
324 | 327 | ||
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index fa438be962b7..596554a8725e 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/stddef.h> | 18 | #include <linux/stddef.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/signal.h> | 20 | #include <linux/signal.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/syscore_ops.h> |
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
@@ -902,7 +902,7 @@ static struct { | |||
902 | u32 sercr; | 902 | u32 sercr; |
903 | } ipic_saved_state; | 903 | } ipic_saved_state; |
904 | 904 | ||
905 | static int ipic_suspend(struct sys_device *sdev, pm_message_t state) | 905 | static int ipic_suspend(void) |
906 | { | 906 | { |
907 | struct ipic *ipic = primary_ipic; | 907 | struct ipic *ipic = primary_ipic; |
908 | 908 | ||
@@ -933,7 +933,7 @@ static int ipic_suspend(struct sys_device *sdev, pm_message_t state) | |||
933 | return 0; | 933 | return 0; |
934 | } | 934 | } |
935 | 935 | ||
936 | static int ipic_resume(struct sys_device *sdev) | 936 | static void ipic_resume(void) |
937 | { | 937 | { |
938 | struct ipic *ipic = primary_ipic; | 938 | struct ipic *ipic = primary_ipic; |
939 | 939 | ||
@@ -949,44 +949,26 @@ static int ipic_resume(struct sys_device *sdev) | |||
949 | ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); | 949 | ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); |
950 | ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); | 950 | ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); |
951 | ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); | 951 | ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); |
952 | |||
953 | return 0; | ||
954 | } | 952 | } |
955 | #else | 953 | #else |
956 | #define ipic_suspend NULL | 954 | #define ipic_suspend NULL |
957 | #define ipic_resume NULL | 955 | #define ipic_resume NULL |
958 | #endif | 956 | #endif |
959 | 957 | ||
960 | static struct sysdev_class ipic_sysclass = { | 958 | static struct syscore_ops ipic_syscore_ops = { |
961 | .name = "ipic", | ||
962 | .suspend = ipic_suspend, | 959 | .suspend = ipic_suspend, |
963 | .resume = ipic_resume, | 960 | .resume = ipic_resume, |
964 | }; | 961 | }; |
965 | 962 | ||
966 | static struct sys_device device_ipic = { | 963 | static int __init init_ipic_syscore(void) |
967 | .id = 0, | ||
968 | .cls = &ipic_sysclass, | ||
969 | }; | ||
970 | |||
971 | static int __init init_ipic_sysfs(void) | ||
972 | { | 964 | { |
973 | int rc; | ||
974 | |||
975 | if (!primary_ipic || !primary_ipic->regs) | 965 | if (!primary_ipic || !primary_ipic->regs) |
976 | return -ENODEV; | 966 | return -ENODEV; |
977 | printk(KERN_DEBUG "Registering ipic with sysfs...\n"); | ||
978 | 967 | ||
979 | rc = sysdev_class_register(&ipic_sysclass); | 968 | printk(KERN_DEBUG "Registering ipic system core operations\n"); |
980 | if (rc) { | 969 | register_syscore_ops(&ipic_syscore_ops); |
981 | printk(KERN_ERR "Failed registering ipic sys class\n"); | 970 | |
982 | return -ENODEV; | ||
983 | } | ||
984 | rc = sysdev_register(&device_ipic); | ||
985 | if (rc) { | ||
986 | printk(KERN_ERR "Failed registering ipic sys device\n"); | ||
987 | return -ENODEV; | ||
988 | } | ||
989 | return 0; | 971 | return 0; |
990 | } | 972 | } |
991 | 973 | ||
992 | subsys_initcall(init_ipic_sysfs); | 974 | subsys_initcall(init_ipic_syscore); |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index f91c065bed5a..7e5dc8f4984a 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/syscore_ops.h> | ||
30 | 31 | ||
31 | #include <asm/ptrace.h> | 32 | #include <asm/ptrace.h> |
32 | #include <asm/signal.h> | 33 | #include <asm/signal.h> |
@@ -1702,9 +1703,8 @@ void mpic_reset_core(int cpu) | |||
1702 | #endif /* CONFIG_SMP */ | 1703 | #endif /* CONFIG_SMP */ |
1703 | 1704 | ||
1704 | #ifdef CONFIG_PM | 1705 | #ifdef CONFIG_PM |
1705 | static int mpic_suspend(struct sys_device *dev, pm_message_t state) | 1706 | static void mpic_suspend_one(struct mpic *mpic) |
1706 | { | 1707 | { |
1707 | struct mpic *mpic = container_of(dev, struct mpic, sysdev); | ||
1708 | int i; | 1708 | int i; |
1709 | 1709 | ||
1710 | for (i = 0; i < mpic->num_sources; i++) { | 1710 | for (i = 0; i < mpic->num_sources; i++) { |
@@ -1713,13 +1713,22 @@ static int mpic_suspend(struct sys_device *dev, pm_message_t state) | |||
1713 | mpic->save_data[i].dest = | 1713 | mpic->save_data[i].dest = |
1714 | mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); | 1714 | mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); |
1715 | } | 1715 | } |
1716 | } | ||
1717 | |||
1718 | static int mpic_suspend(void) | ||
1719 | { | ||
1720 | struct mpic *mpic = mpics; | ||
1721 | |||
1722 | while (mpic) { | ||
1723 | mpic_suspend_one(mpic); | ||
1724 | mpic = mpic->next; | ||
1725 | } | ||
1716 | 1726 | ||
1717 | return 0; | 1727 | return 0; |
1718 | } | 1728 | } |
1719 | 1729 | ||
1720 | static int mpic_resume(struct sys_device *dev) | 1730 | static void mpic_resume_one(struct mpic *mpic) |
1721 | { | 1731 | { |
1722 | struct mpic *mpic = container_of(dev, struct mpic, sysdev); | ||
1723 | int i; | 1732 | int i; |
1724 | 1733 | ||
1725 | for (i = 0; i < mpic->num_sources; i++) { | 1734 | for (i = 0; i < mpic->num_sources; i++) { |
@@ -1746,33 +1755,28 @@ static int mpic_resume(struct sys_device *dev) | |||
1746 | } | 1755 | } |
1747 | #endif | 1756 | #endif |
1748 | } /* end for loop */ | 1757 | } /* end for loop */ |
1758 | } | ||
1749 | 1759 | ||
1750 | return 0; | 1760 | static void mpic_resume(void) |
1761 | { | ||
1762 | struct mpic *mpic = mpics; | ||
1763 | |||
1764 | while (mpic) { | ||
1765 | mpic_resume_one(mpic); | ||
1766 | mpic = mpic->next; | ||
1767 | } | ||
1751 | } | 1768 | } |
1752 | #endif | ||
1753 | 1769 | ||
1754 | static struct sysdev_class mpic_sysclass = { | 1770 | static struct syscore_ops mpic_syscore_ops = { |
1755 | #ifdef CONFIG_PM | ||
1756 | .resume = mpic_resume, | 1771 | .resume = mpic_resume, |
1757 | .suspend = mpic_suspend, | 1772 | .suspend = mpic_suspend, |
1758 | #endif | ||
1759 | .name = "mpic", | ||
1760 | }; | 1773 | }; |
1761 | 1774 | ||
1762 | static int mpic_init_sys(void) | 1775 | static int mpic_init_sys(void) |
1763 | { | 1776 | { |
1764 | struct mpic *mpic = mpics; | 1777 | register_syscore_ops(&mpic_syscore_ops); |
1765 | int error, id = 0; | 1778 | return 0; |
1766 | |||
1767 | error = sysdev_class_register(&mpic_sysclass); | ||
1768 | |||
1769 | while (mpic && !error) { | ||
1770 | mpic->sysdev.cls = &mpic_sysclass; | ||
1771 | mpic->sysdev.id = id++; | ||
1772 | error = sysdev_register(&mpic->sysdev); | ||
1773 | mpic = mpic->next; | ||
1774 | } | ||
1775 | return error; | ||
1776 | } | 1779 | } |
1777 | 1780 | ||
1778 | device_initcall(mpic_init_sys); | 1781 | device_initcall(mpic_init_sys); |
1782 | #endif | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2508a6f31588..4a7f14079e03 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -88,6 +88,7 @@ config S390 | |||
88 | select HAVE_KERNEL_XZ | 88 | select HAVE_KERNEL_XZ |
89 | select HAVE_GET_USER_PAGES_FAST | 89 | select HAVE_GET_USER_PAGES_FAST |
90 | select HAVE_ARCH_MUTEX_CPU_RELAX | 90 | select HAVE_ARCH_MUTEX_CPU_RELAX |
91 | select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 | ||
91 | select ARCH_INLINE_SPIN_TRYLOCK | 92 | select ARCH_INLINE_SPIN_TRYLOCK |
92 | select ARCH_INLINE_SPIN_TRYLOCK_BH | 93 | select ARCH_INLINE_SPIN_TRYLOCK_BH |
93 | select ARCH_INLINE_SPIN_LOCK | 94 | select ARCH_INLINE_SPIN_LOCK |
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h index 43a5c78046db..3e20383d0921 100644 --- a/arch/s390/include/asm/cacheflush.h +++ b/arch/s390/include/asm/cacheflush.h | |||
@@ -11,5 +11,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable); | |||
11 | int set_memory_ro(unsigned long addr, int numpages); | 11 | int set_memory_ro(unsigned long addr, int numpages); |
12 | int set_memory_rw(unsigned long addr, int numpages); | 12 | int set_memory_rw(unsigned long addr, int numpages); |
13 | int set_memory_nx(unsigned long addr, int numpages); | 13 | int set_memory_nx(unsigned long addr, int numpages); |
14 | int set_memory_x(unsigned long addr, int numpages); | ||
14 | 15 | ||
15 | #endif /* _S390_CACHEFLUSH_H */ | 16 | #endif /* _S390_CACHEFLUSH_H */ |
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index 72b2e2f2d32d..7e91c58072e2 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h | |||
@@ -9,9 +9,22 @@ | |||
9 | #define _ASM_S390_DIAG_H | 9 | #define _ASM_S390_DIAG_H |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * Diagnose 10: Release pages | 12 | * Diagnose 10: Release page range |
13 | */ | 13 | */ |
14 | extern void diag10(unsigned long addr); | 14 | static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) |
15 | { | ||
16 | unsigned long start_addr, end_addr; | ||
17 | |||
18 | start_addr = start_pfn << PAGE_SHIFT; | ||
19 | end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; | ||
20 | |||
21 | asm volatile( | ||
22 | "0: diag %0,%1,0x10\n" | ||
23 | "1:\n" | ||
24 | EX_TABLE(0b, 1b) | ||
25 | EX_TABLE(1b, 1b) | ||
26 | : : "a" (start_addr), "a" (end_addr)); | ||
27 | } | ||
15 | 28 | ||
16 | /* | 29 | /* |
17 | * Diagnose 14: Input spool file manipulation | 30 | * Diagnose 14: Input spool file manipulation |
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 3c29be4836ed..b7931faaef6d 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h | |||
@@ -11,15 +11,13 @@ struct dyn_arch_ftrace { }; | |||
11 | 11 | ||
12 | #ifdef CONFIG_64BIT | 12 | #ifdef CONFIG_64BIT |
13 | #define MCOUNT_INSN_SIZE 12 | 13 | #define MCOUNT_INSN_SIZE 12 |
14 | #define MCOUNT_OFFSET 8 | ||
15 | #else | 14 | #else |
16 | #define MCOUNT_INSN_SIZE 20 | 15 | #define MCOUNT_INSN_SIZE 20 |
17 | #define MCOUNT_OFFSET 4 | ||
18 | #endif | 16 | #endif |
19 | 17 | ||
20 | static inline unsigned long ftrace_call_adjust(unsigned long addr) | 18 | static inline unsigned long ftrace_call_adjust(unsigned long addr) |
21 | { | 19 | { |
22 | return addr - MCOUNT_OFFSET; | 20 | return addr; |
23 | } | 21 | } |
24 | 22 | ||
25 | #endif /* __ASSEMBLY__ */ | 23 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h new file mode 100644 index 000000000000..95a6cf2b5b67 --- /dev/null +++ b/arch/s390/include/asm/jump_label.h | |||
@@ -0,0 +1,37 @@ | |||
1 | #ifndef _ASM_S390_JUMP_LABEL_H | ||
2 | #define _ASM_S390_JUMP_LABEL_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | #define JUMP_LABEL_NOP_SIZE 6 | ||
7 | |||
8 | #ifdef CONFIG_64BIT | ||
9 | #define ASM_PTR ".quad" | ||
10 | #define ASM_ALIGN ".balign 8" | ||
11 | #else | ||
12 | #define ASM_PTR ".long" | ||
13 | #define ASM_ALIGN ".balign 4" | ||
14 | #endif | ||
15 | |||
16 | static __always_inline bool arch_static_branch(struct jump_label_key *key) | ||
17 | { | ||
18 | asm goto("0: brcl 0,0\n" | ||
19 | ".pushsection __jump_table, \"aw\"\n" | ||
20 | ASM_ALIGN "\n" | ||
21 | ASM_PTR " 0b, %l[label], %0\n" | ||
22 | ".popsection\n" | ||
23 | : : "X" (key) : : label); | ||
24 | return false; | ||
25 | label: | ||
26 | return true; | ||
27 | } | ||
28 | |||
29 | typedef unsigned long jump_label_t; | ||
30 | |||
31 | struct jump_entry { | ||
32 | jump_label_t code; | ||
33 | jump_label_t target; | ||
34 | jump_label_t key; | ||
35 | }; | ||
36 | |||
37 | #endif | ||
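arch_static_branch() above is what the generic static_branch() wrapper expands to on s390. A hedged sketch of typical use, assuming the jump_label_key API introduced by this series (the key and the helper functions below are illustrative, not from the patch):

	#include <linux/jump_label.h>

	static struct jump_label_key trace_key;		/* illustrative key */

	static void slow_path(void) { }			/* illustrative */

	static void hot_path(void)
	{
		/* compiles to the 6-byte "brcl 0,0" nop until the key is enabled */
		if (static_branch(&trace_key))
			slow_path();
	}
	/* jump_label_inc(&trace_key) later patches the nop into "brcl 15,<offset>" */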
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index a6f0e7cc9cde..8c277caa8d3a 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk, | |||
23 | #ifdef CONFIG_64BIT | 23 | #ifdef CONFIG_64BIT |
24 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; | 24 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; |
25 | #endif | 25 | #endif |
26 | if (current->mm->context.alloc_pgste) { | 26 | if (current->mm && current->mm->context.alloc_pgste) { |
27 | /* | 27 | /* |
28 | * alloc_pgste indicates, that any NEW context will be created | 28 | * alloc_pgste indicates, that any NEW context will be created |
29 | * with extended page tables. The old context is unchanged. The | 29 | * with extended page tables. The old context is unchanged. The |
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 64230bc392fa..5ff15dacb571 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w | |||
23 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ | 23 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ |
24 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 24 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
25 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ | 25 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ |
26 | vdso.o vtime.o sysinfo.o nmi.o sclp.o | 26 | vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o |
27 | 27 | ||
28 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 28 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index c032d11da8a1..8237fc07ac79 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c | |||
@@ -9,27 +9,6 @@ | |||
9 | #include <asm/diag.h> | 9 | #include <asm/diag.h> |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * Diagnose 10: Release pages | ||
13 | */ | ||
14 | void diag10(unsigned long addr) | ||
15 | { | ||
16 | if (addr >= 0x7ff00000) | ||
17 | return; | ||
18 | asm volatile( | ||
19 | #ifdef CONFIG_64BIT | ||
20 | " sam31\n" | ||
21 | " diag %0,%0,0x10\n" | ||
22 | "0: sam64\n" | ||
23 | #else | ||
24 | " diag %0,%0,0x10\n" | ||
25 | "0:\n" | ||
26 | #endif | ||
27 | EX_TABLE(0b, 0b) | ||
28 | : : "a" (addr)); | ||
29 | } | ||
30 | EXPORT_SYMBOL(diag10); | ||
31 | |||
32 | /* | ||
33 | * Diagnose 14: Input spool file manipulation | 12 | * Diagnose 14: Input spool file manipulation |
34 | */ | 13 | */ |
35 | int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) | 14 | int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) |
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index c83726c9fe03..3d4a78fc1adc 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c | |||
@@ -672,6 +672,7 @@ static struct insn opcode_b2[] = { | |||
672 | { "rp", 0x77, INSTR_S_RD }, | 672 | { "rp", 0x77, INSTR_S_RD }, |
673 | { "stcke", 0x78, INSTR_S_RD }, | 673 | { "stcke", 0x78, INSTR_S_RD }, |
674 | { "sacf", 0x79, INSTR_S_RD }, | 674 | { "sacf", 0x79, INSTR_S_RD }, |
675 | { "spp", 0x80, INSTR_S_RD }, | ||
675 | { "stsi", 0x7d, INSTR_S_RD }, | 676 | { "stsi", 0x7d, INSTR_S_RD }, |
676 | { "srnm", 0x99, INSTR_S_RD }, | 677 | { "srnm", 0x99, INSTR_S_RD }, |
677 | { "stfpc", 0x9c, INSTR_S_RD }, | 678 | { "stfpc", 0x9c, INSTR_S_RD }, |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 648f64239a9d..1b67fc6ebdc2 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -836,7 +836,7 @@ restart_base: | |||
836 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on | 836 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on |
837 | basr %r14,0 | 837 | basr %r14,0 |
838 | l %r14,restart_addr-.(%r14) | 838 | l %r14,restart_addr-.(%r14) |
839 | br %r14 # branch to start_secondary | 839 | basr %r14,%r14 # branch to start_secondary |
840 | restart_addr: | 840 | restart_addr: |
841 | .long start_secondary | 841 | .long start_secondary |
842 | .align 8 | 842 | .align 8 |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 9d3603d6c511..9fd864563499 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -841,7 +841,7 @@ restart_base: | |||
841 | mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) | 841 | mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) |
842 | xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER | 842 | xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER |
843 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on | 843 | stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on |
844 | jg start_secondary | 844 | brasl %r14,start_secondary |
845 | .align 8 | 845 | .align 8 |
846 | restart_vtime: | 846 | restart_vtime: |
847 | .long 0x7fffffff,0xffffffff | 847 | .long 0x7fffffff,0xffffffff |
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c new file mode 100644 index 000000000000..44cc06bedf77 --- /dev/null +++ b/arch/s390/kernel/jump_label.c | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Jump label s390 support | ||
3 | * | ||
4 | * Copyright IBM Corp. 2011 | ||
5 | * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/uaccess.h> | ||
9 | #include <linux/stop_machine.h> | ||
10 | #include <linux/jump_label.h> | ||
11 | #include <asm/ipl.h> | ||
12 | |||
13 | #ifdef HAVE_JUMP_LABEL | ||
14 | |||
15 | struct insn { | ||
16 | u16 opcode; | ||
17 | s32 offset; | ||
18 | } __packed; | ||
19 | |||
20 | struct insn_args { | ||
21 | unsigned long *target; | ||
22 | struct insn *insn; | ||
23 | ssize_t size; | ||
24 | }; | ||
25 | |||
26 | static int __arch_jump_label_transform(void *data) | ||
27 | { | ||
28 | struct insn_args *args = data; | ||
29 | int rc; | ||
30 | |||
31 | rc = probe_kernel_write(args->target, args->insn, args->size); | ||
32 | WARN_ON_ONCE(rc < 0); | ||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | void arch_jump_label_transform(struct jump_entry *entry, | ||
37 | enum jump_label_type type) | ||
38 | { | ||
39 | struct insn_args args; | ||
40 | struct insn insn; | ||
41 | |||
42 | if (type == JUMP_LABEL_ENABLE) { | ||
43 | /* brcl 15,offset */ | ||
44 | insn.opcode = 0xc0f4; | ||
45 | insn.offset = (entry->target - entry->code) >> 1; | ||
46 | } else { | ||
47 | /* brcl 0,0 */ | ||
48 | insn.opcode = 0xc004; | ||
49 | insn.offset = 0; | ||
50 | } | ||
51 | |||
52 | args.target = (void *) entry->code; | ||
53 | args.insn = &insn; | ||
54 | args.size = JUMP_LABEL_NOP_SIZE; | ||
55 | |||
56 | stop_machine(__arch_jump_label_transform, &args, NULL); | ||
57 | } | ||
58 | |||
59 | #endif | ||
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index c66ffd8dbbb7..1f1dba9dcf58 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c | |||
@@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter, | |||
91 | } else | 91 | } else |
92 | free_page((unsigned long) npa); | 92 | free_page((unsigned long) npa); |
93 | } | 93 | } |
94 | diag10(addr); | 94 | diag10_range(addr >> PAGE_SHIFT, 1); |
95 | pa->pages[pa->index++] = addr; | 95 | pa->pages[pa->index++] = addr; |
96 | (*counter)++; | 96 | (*counter)++; |
97 | spin_unlock(&cmm_lock); | 97 | spin_unlock(&cmm_lock); |
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 0607e4b14b27..f05edcc3beff 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c | |||
@@ -54,3 +54,8 @@ int set_memory_nx(unsigned long addr, int numpages) | |||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | EXPORT_SYMBOL_GPL(set_memory_nx); | 56 | EXPORT_SYMBOL_GPL(set_memory_nx); |
57 | |||
58 | int set_memory_x(unsigned long addr, int numpages) | ||
59 | { | ||
60 | return 0; | ||
61 | } | ||
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 4952872d6f0a..33cbd373cce4 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c | |||
@@ -1021,20 +1021,14 @@ deallocate_exit: | |||
1021 | return rc; | 1021 | return rc; |
1022 | } | 1022 | } |
1023 | 1023 | ||
1024 | long hwsampler_query_min_interval(void) | 1024 | unsigned long hwsampler_query_min_interval(void) |
1025 | { | 1025 | { |
1026 | if (min_sampler_rate) | 1026 | return min_sampler_rate; |
1027 | return min_sampler_rate; | ||
1028 | else | ||
1029 | return -EINVAL; | ||
1030 | } | 1027 | } |
1031 | 1028 | ||
1032 | long hwsampler_query_max_interval(void) | 1029 | unsigned long hwsampler_query_max_interval(void) |
1033 | { | 1030 | { |
1034 | if (max_sampler_rate) | 1031 | return max_sampler_rate; |
1035 | return max_sampler_rate; | ||
1036 | else | ||
1037 | return -EINVAL; | ||
1038 | } | 1032 | } |
1039 | 1033 | ||
1040 | unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) | 1034 | unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) |
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h index 8c72b59316b5..1912f3bb190c 100644 --- a/arch/s390/oprofile/hwsampler.h +++ b/arch/s390/oprofile/hwsampler.h | |||
@@ -102,8 +102,8 @@ int hwsampler_setup(void); | |||
102 | int hwsampler_shutdown(void); | 102 | int hwsampler_shutdown(void); |
103 | int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); | 103 | int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); |
104 | int hwsampler_deallocate(void); | 104 | int hwsampler_deallocate(void); |
105 | long hwsampler_query_min_interval(void); | 105 | unsigned long hwsampler_query_min_interval(void); |
106 | long hwsampler_query_max_interval(void); | 106 | unsigned long hwsampler_query_max_interval(void); |
107 | int hwsampler_start_all(unsigned long interval); | 107 | int hwsampler_start_all(unsigned long interval); |
108 | int hwsampler_stop_all(void); | 108 | int hwsampler_stop_all(void); |
109 | int hwsampler_deactivate(unsigned int cpu); | 109 | int hwsampler_deactivate(unsigned int cpu); |
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index c63d7e58352b..5995e9bc72d9 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c | |||
@@ -145,15 +145,11 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) | |||
145 | * create hwsampler files only if hwsampler_setup() succeeds. | 145 | * create hwsampler files only if hwsampler_setup() succeeds. |
146 | */ | 146 | */ |
147 | oprofile_min_interval = hwsampler_query_min_interval(); | 147 | oprofile_min_interval = hwsampler_query_min_interval(); |
148 | if (oprofile_min_interval < 0) { | 148 | if (oprofile_min_interval == 0) |
149 | oprofile_min_interval = 0; | ||
150 | return -ENODEV; | 149 | return -ENODEV; |
151 | } | ||
152 | oprofile_max_interval = hwsampler_query_max_interval(); | 150 | oprofile_max_interval = hwsampler_query_max_interval(); |
153 | if (oprofile_max_interval < 0) { | 151 | if (oprofile_max_interval == 0) |
154 | oprofile_max_interval = 0; | ||
155 | return -ENODEV; | 152 | return -ENODEV; |
156 | } | ||
157 | 153 | ||
158 | if (oprofile_timer_init(ops)) | 154 | if (oprofile_timer_init(ops)) |
159 | return -ENODEV; | 155 | return -ENODEV; |
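With the query helpers switched to unsigned long, "sampling unavailable" is now signalled by a zero rate rather than -EINVAL. A condensed sketch of the resulting caller pattern (the wrapper function is illustrative; the real check lives in oprofile_hwsampler_init() above):

	static int check_sampler_intervals(void)
	{
		unsigned long min = hwsampler_query_min_interval();
		unsigned long max = hwsampler_query_max_interval();

		if (min == 0 || max == 0)	/* zero now means "not available" */
			return -ENODEV;		/* no more negative-errno checks */
		return 0;
	}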
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 4b89da248d17..bc439de48cd1 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -24,7 +24,6 @@ config SUPERH | |||
24 | select RTC_LIB | 24 | select RTC_LIB |
25 | select GENERIC_ATOMIC64 | 25 | select GENERIC_ATOMIC64 |
26 | select GENERIC_IRQ_SHOW | 26 | select GENERIC_IRQ_SHOW |
27 | select ARCH_NO_SYSDEV_OPS | ||
28 | help | 27 | help |
29 | The SuperH is a RISC processor targeted for use in embedded systems | 28 | The SuperH is a RISC processor targeted for use in embedded systems |
30 | and consumer electronics; it was also used in the Sega Dreamcast | 29 | and consumer electronics; it was also used in the Sega Dreamcast |
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig index e71a531f1e31..77ec0e7b8ddf 100644 --- a/arch/sh/configs/apsh4ad0a_defconfig +++ b/arch/sh/configs/apsh4ad0a_defconfig | |||
@@ -48,7 +48,6 @@ CONFIG_PREEMPT=y | |||
48 | CONFIG_BINFMT_MISC=y | 48 | CONFIG_BINFMT_MISC=y |
49 | CONFIG_PM=y | 49 | CONFIG_PM=y |
50 | CONFIG_PM_DEBUG=y | 50 | CONFIG_PM_DEBUG=y |
51 | CONFIG_PM_VERBOSE=y | ||
52 | CONFIG_PM_RUNTIME=y | 51 | CONFIG_PM_RUNTIME=y |
53 | CONFIG_CPU_IDLE=y | 52 | CONFIG_CPU_IDLE=y |
54 | CONFIG_NET=y | 53 | CONFIG_NET=y |
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig index dc4a2eb6a616..c41650572d79 100644 --- a/arch/sh/configs/sdk7786_defconfig +++ b/arch/sh/configs/sdk7786_defconfig | |||
@@ -83,7 +83,6 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y | |||
83 | CONFIG_BINFMT_MISC=y | 83 | CONFIG_BINFMT_MISC=y |
84 | CONFIG_PM=y | 84 | CONFIG_PM=y |
85 | CONFIG_PM_DEBUG=y | 85 | CONFIG_PM_DEBUG=y |
86 | CONFIG_PM_VERBOSE=y | ||
87 | CONFIG_PM_RUNTIME=y | 86 | CONFIG_PM_RUNTIME=y |
88 | CONFIG_CPU_IDLE=y | 87 | CONFIG_CPU_IDLE=y |
89 | CONFIG_NET=y | 88 | CONFIG_NET=y |
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c index 6dcb8166a64d..22db127afa7b 100644 --- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c +++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c | |||
@@ -139,7 +139,7 @@ void platform_pm_runtime_suspend_idle(void) | |||
139 | queue_work(pm_wq, &hwblk_work); | 139 | queue_work(pm_wq, &hwblk_work); |
140 | } | 140 | } |
141 | 141 | ||
142 | int platform_pm_runtime_suspend(struct device *dev) | 142 | static int default_platform_runtime_suspend(struct device *dev) |
143 | { | 143 | { |
144 | struct platform_device *pdev = to_platform_device(dev); | 144 | struct platform_device *pdev = to_platform_device(dev); |
145 | struct pdev_archdata *ad = &pdev->archdata; | 145 | struct pdev_archdata *ad = &pdev->archdata; |
@@ -147,7 +147,7 @@ int platform_pm_runtime_suspend(struct device *dev) | |||
147 | int hwblk = ad->hwblk_id; | 147 | int hwblk = ad->hwblk_id; |
148 | int ret = 0; | 148 | int ret = 0; |
149 | 149 | ||
150 | dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk); | 150 | dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); |
151 | 151 | ||
152 | /* ignore off-chip platform devices */ | 152 | /* ignore off-chip platform devices */ |
153 | if (!hwblk) | 153 | if (!hwblk) |
@@ -183,20 +183,20 @@ int platform_pm_runtime_suspend(struct device *dev) | |||
183 | mutex_unlock(&ad->mutex); | 183 | mutex_unlock(&ad->mutex); |
184 | 184 | ||
185 | out: | 185 | out: |
186 | dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n", | 186 | dev_dbg(dev, "%s() [%d] returns %d\n", |
187 | hwblk, ret); | 187 | __func__, hwblk, ret); |
188 | 188 | ||
189 | return ret; | 189 | return ret; |
190 | } | 190 | } |
191 | 191 | ||
192 | int platform_pm_runtime_resume(struct device *dev) | 192 | static int default_platform_runtime_resume(struct device *dev) |
193 | { | 193 | { |
194 | struct platform_device *pdev = to_platform_device(dev); | 194 | struct platform_device *pdev = to_platform_device(dev); |
195 | struct pdev_archdata *ad = &pdev->archdata; | 195 | struct pdev_archdata *ad = &pdev->archdata; |
196 | int hwblk = ad->hwblk_id; | 196 | int hwblk = ad->hwblk_id; |
197 | int ret = 0; | 197 | int ret = 0; |
198 | 198 | ||
199 | dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk); | 199 | dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); |
200 | 200 | ||
201 | /* ignore off-chip platform devices */ | 201 | /* ignore off-chip platform devices */ |
202 | if (!hwblk) | 202 | if (!hwblk) |
@@ -228,19 +228,19 @@ int platform_pm_runtime_resume(struct device *dev) | |||
228 | */ | 228 | */ |
229 | mutex_unlock(&ad->mutex); | 229 | mutex_unlock(&ad->mutex); |
230 | out: | 230 | out: |
231 | dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n", | 231 | dev_dbg(dev, "%s() [%d] returns %d\n", |
232 | hwblk, ret); | 232 | __func__, hwblk, ret); |
233 | 233 | ||
234 | return ret; | 234 | return ret; |
235 | } | 235 | } |
236 | 236 | ||
237 | int platform_pm_runtime_idle(struct device *dev) | 237 | static int default_platform_runtime_idle(struct device *dev) |
238 | { | 238 | { |
239 | struct platform_device *pdev = to_platform_device(dev); | 239 | struct platform_device *pdev = to_platform_device(dev); |
240 | int hwblk = pdev->archdata.hwblk_id; | 240 | int hwblk = pdev->archdata.hwblk_id; |
241 | int ret = 0; | 241 | int ret = 0; |
242 | 242 | ||
243 | dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk); | 243 | dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); |
244 | 244 | ||
245 | /* ignore off-chip platform devices */ | 245 | /* ignore off-chip platform devices */ |
246 | if (!hwblk) | 246 | if (!hwblk) |
@@ -252,10 +252,19 @@ int platform_pm_runtime_idle(struct device *dev) | |||
252 | /* suspend synchronously to disable clocks immediately */ | 252 | /* suspend synchronously to disable clocks immediately */ |
253 | ret = pm_runtime_suspend(dev); | 253 | ret = pm_runtime_suspend(dev); |
254 | out: | 254 | out: |
255 | dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk); | 255 | dev_dbg(dev, "%s() [%d] done!\n", __func__, hwblk); |
256 | return ret; | 256 | return ret; |
257 | } | 257 | } |
258 | 258 | ||
259 | static struct dev_power_domain default_power_domain = { | ||
260 | .ops = { | ||
261 | .runtime_suspend = default_platform_runtime_suspend, | ||
262 | .runtime_resume = default_platform_runtime_resume, | ||
263 | .runtime_idle = default_platform_runtime_idle, | ||
264 | USE_PLATFORM_PM_SLEEP_OPS | ||
265 | }, | ||
266 | }; | ||
267 | |||
259 | static int platform_bus_notify(struct notifier_block *nb, | 268 | static int platform_bus_notify(struct notifier_block *nb, |
260 | unsigned long action, void *data) | 269 | unsigned long action, void *data) |
261 | { | 270 | { |
@@ -276,6 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb, | |||
276 | hwblk_disable(hwblk_info, hwblk); | 285 | hwblk_disable(hwblk_info, hwblk); |
277 | /* make sure driver re-inits itself once */ | 286 | /* make sure driver re-inits itself once */ |
278 | __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); | 287 | __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); |
288 | dev->pwr_domain = &default_power_domain; | ||
279 | break; | 289 | break; |
280 | /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ | 290 | /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ |
281 | case BUS_NOTIFY_BOUND_DRIVER: | 291 | case BUS_NOTIFY_BOUND_DRIVER: |
@@ -289,6 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb, | |||
289 | __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); | 299 | __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); |
290 | break; | 300 | break; |
291 | case BUS_NOTIFY_DEL_DEVICE: | 301 | case BUS_NOTIFY_DEL_DEVICE: |
302 | dev->pwr_domain = NULL; | ||
292 | break; | 303 | break; |
293 | } | 304 | } |
294 | return 0; | 305 | return 0; |
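The former global platform_pm_runtime_* callbacks become static and are reached through a per-device dev_power_domain that the bus notifier installs in dev->pwr_domain. A simplified sketch of how the runtime PM core would dispatch through it (an assumption for illustration, not code from this patch):

	#include <linux/pm.h>

	static int runtime_suspend_dispatch(struct device *dev)	/* illustrative */
	{
		/* per-device power domain ops take precedence when present */
		if (dev->pwr_domain && dev->pwr_domain->ops.runtime_suspend)
			return dev->pwr_domain->ops.runtime_suspend(dev);
		return 0;	/* in reality: fall back to bus/driver callbacks */
	}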
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h index 427d4684e0d2..fc73a82366f8 100644 --- a/arch/sparc/include/asm/jump_label.h +++ b/arch/sparc/include/asm/jump_label.h | |||
@@ -7,17 +7,20 @@ | |||
7 | 7 | ||
8 | #define JUMP_LABEL_NOP_SIZE 4 | 8 | #define JUMP_LABEL_NOP_SIZE 4 |
9 | 9 | ||
10 | #define JUMP_LABEL(key, label) \ | 10 | static __always_inline bool arch_static_branch(struct jump_label_key *key) |
11 | do { \ | 11 | { |
12 | asm goto("1:\n\t" \ | 12 | asm goto("1:\n\t" |
13 | "nop\n\t" \ | 13 | "nop\n\t" |
14 | "nop\n\t" \ | 14 | "nop\n\t" |
15 | ".pushsection __jump_table, \"a\"\n\t"\ | 15 | ".pushsection __jump_table, \"aw\"\n\t" |
16 | ".align 4\n\t" \ | 16 | ".align 4\n\t" |
17 | ".word 1b, %l[" #label "], %c0\n\t" \ | 17 | ".word 1b, %l[l_yes], %c0\n\t" |
18 | ".popsection \n\t" \ | 18 | ".popsection \n\t" |
19 | : : "i" (key) : : label);\ | 19 | : : "i" (key) : : l_yes); |
20 | } while (0) | 20 | return false; |
21 | l_yes: | ||
22 | return true; | ||
23 | } | ||
21 | 24 | ||
22 | #endif /* __KERNEL__ */ | 25 | #endif /* __KERNEL__ */ |
23 | 26 | ||
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index f679c57644d5..1e34f29e58bb 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c | |||
@@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op) | |||
165 | return 0; | 165 | return 0; |
166 | } | 166 | } |
167 | 167 | ||
168 | static struct of_device_id __initdata apc_match[] = { | 168 | static struct of_device_id apc_match[] = { |
169 | { | 169 | { |
170 | .name = APC_OBPNAME, | 170 | .name = APC_OBPNAME, |
171 | }, | 171 | }, |
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c index 948068a083fc..d1840dbdaa2f 100644 --- a/arch/sparc/kernel/pci_sabre.c +++ b/arch/sparc/kernel/pci_sabre.c | |||
@@ -452,8 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm, | |||
452 | sabre_scan_bus(pbm, &op->dev); | 452 | sabre_scan_bus(pbm, &op->dev); |
453 | } | 453 | } |
454 | 454 | ||
455 | static const struct of_device_id sabre_match[]; | ||
455 | static int __devinit sabre_probe(struct platform_device *op) | 456 | static int __devinit sabre_probe(struct platform_device *op) |
456 | { | 457 | { |
458 | const struct of_device_id *match; | ||
457 | const struct linux_prom64_registers *pr_regs; | 459 | const struct linux_prom64_registers *pr_regs; |
458 | struct device_node *dp = op->dev.of_node; | 460 | struct device_node *dp = op->dev.of_node; |
459 | struct pci_pbm_info *pbm; | 461 | struct pci_pbm_info *pbm; |
@@ -463,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op) | |||
463 | const u32 *vdma; | 465 | const u32 *vdma; |
464 | u64 clear_irq; | 466 | u64 clear_irq; |
465 | 467 | ||
466 | hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); | 468 | match = of_match_device(sabre_match, &op->dev); |
469 | hummingbird_p = match && (match->data != NULL); | ||
467 | if (!hummingbird_p) { | 470 | if (!hummingbird_p) { |
468 | struct device_node *cpu_dp; | 471 | struct device_node *cpu_dp; |
469 | 472 | ||
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index fecfcb2063c8..283fbc329a43 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c | |||
@@ -1458,11 +1458,15 @@ out_err: | |||
1458 | return err; | 1458 | return err; |
1459 | } | 1459 | } |
1460 | 1460 | ||
1461 | static const struct of_device_id schizo_match[]; | ||
1461 | static int __devinit schizo_probe(struct platform_device *op) | 1462 | static int __devinit schizo_probe(struct platform_device *op) |
1462 | { | 1463 | { |
1463 | if (!op->dev.of_match) | 1464 | const struct of_device_id *match; |
1465 | |||
1466 | match = of_match_device(schizo_match, &op->dev); | ||
1467 | if (!match) | ||
1464 | return -EINVAL; | 1468 | return -EINVAL; |
1465 | return __schizo_init(op, (unsigned long) op->dev.of_match->data); | 1469 | return __schizo_init(op, (unsigned long)match->data); |
1466 | } | 1470 | } |
1467 | 1471 | ||
1468 | /* The ordering of this table is very important. Some Tomatillo | 1472 | /* The ordering of this table is very important. Some Tomatillo |
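Both sabre_probe() and schizo_probe() stop dereferencing op->dev.of_match and instead look the match table up explicitly. The same conversion for a hypothetical driver (all names below are illustrative only):

	static const struct of_device_id foo_match[];	/* table defined after probe */

	static int __devinit foo_probe(struct platform_device *op)
	{
		const struct of_device_id *match;

		match = of_match_device(foo_match, &op->dev);
		if (!match)
			return -EINVAL;
		/* match->data replaces the removed op->dev.of_match->data */
		return foo_init(op, (unsigned long)match->data);	/* hypothetical */
	}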
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c index 93d7b4465f8d..6a585d393580 100644 --- a/arch/sparc/kernel/pmc.c +++ b/arch/sparc/kernel/pmc.c | |||
@@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op) | |||
69 | return 0; | 69 | return 0; |
70 | } | 70 | } |
71 | 71 | ||
72 | static struct of_device_id __initdata pmc_match[] = { | 72 | static struct of_device_id pmc_match[] = { |
73 | { | 73 | { |
74 | .name = PMC_OBPNAME, | 74 | .name = PMC_OBPNAME, |
75 | }, | 75 | }, |
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index f95690c167b6..442286d83435 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c | |||
@@ -53,6 +53,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE; | |||
53 | void __cpuinit smp_store_cpu_info(int id) | 53 | void __cpuinit smp_store_cpu_info(int id) |
54 | { | 54 | { |
55 | int cpu_node; | 55 | int cpu_node; |
56 | int mid; | ||
56 | 57 | ||
57 | cpu_data(id).udelay_val = loops_per_jiffy; | 58 | cpu_data(id).udelay_val = loops_per_jiffy; |
58 | 59 | ||
@@ -60,10 +61,13 @@ void __cpuinit smp_store_cpu_info(int id) | |||
60 | cpu_data(id).clock_tick = prom_getintdefault(cpu_node, | 61 | cpu_data(id).clock_tick = prom_getintdefault(cpu_node, |
61 | "clock-frequency", 0); | 62 | "clock-frequency", 0); |
62 | cpu_data(id).prom_node = cpu_node; | 63 | cpu_data(id).prom_node = cpu_node; |
63 | cpu_data(id).mid = cpu_get_hwmid(cpu_node); | 64 | mid = cpu_get_hwmid(cpu_node); |
64 | 65 | ||
65 | if (cpu_data(id).mid < 0) | 66 | if (mid < 0) { |
66 | panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); | 67 | printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node); |
68 | mid = 0; | ||
69 | } | ||
70 | cpu_data(id).mid = mid; | ||
67 | } | 71 | } |
68 | 72 | ||
69 | void __init smp_cpus_done(unsigned int max_cpus) | 73 | void __init smp_cpus_done(unsigned int max_cpus) |
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 4e236391b635..96046a4024c2 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c | |||
@@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op) | |||
168 | return 0; | 168 | return 0; |
169 | } | 169 | } |
170 | 170 | ||
171 | static struct of_device_id __initdata clock_match[] = { | 171 | static struct of_device_id clock_match[] = { |
172 | { | 172 | { |
173 | .name = "eeprom", | 173 | .name = "eeprom", |
174 | }, | 174 | }, |
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S index 3632cb34e914..0084c3361e15 100644 --- a/arch/sparc/lib/checksum_32.S +++ b/arch/sparc/lib/checksum_32.S | |||
@@ -289,10 +289,16 @@ cc_end_cruft: | |||
289 | 289 | ||
290 | /* Also, handle the alignment code out of band. */ | 290 | /* Also, handle the alignment code out of band. */ |
291 | cc_dword_align: | 291 | cc_dword_align: |
292 | cmp %g1, 6 | 292 | cmp %g1, 16 |
293 | bl,a ccte | 293 | bge 1f |
294 | srl %g1, 1, %o3 | ||
295 | 2: cmp %o3, 0 | ||
296 | be,a ccte | ||
294 | andcc %g1, 0xf, %o3 | 297 | andcc %g1, 0xf, %o3 |
295 | andcc %o0, 0x1, %g0 | 298 | andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits) |
299 | be,a 2b | ||
300 | srl %o3, 1, %o3 | ||
301 | 1: andcc %o0, 0x1, %g0 | ||
296 | bne ccslow | 302 | bne ccslow |
297 | andcc %o0, 0x2, %g0 | 303 | andcc %o0, 0x2, %g0 |
298 | be 1f | 304 | be 1f |
diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c index 6ea77979531c..42827cafa6af 100644 --- a/arch/um/os-Linux/util.c +++ b/arch/um/os-Linux/util.c | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #include <stdio.h> | 6 | #include <stdio.h> |
7 | #include <stdlib.h> | 7 | #include <stdlib.h> |
8 | #include <unistd.h> | ||
8 | #include <errno.h> | 9 | #include <errno.h> |
9 | #include <signal.h> | 10 | #include <signal.h> |
10 | #include <string.h> | 11 | #include <string.h> |
@@ -75,6 +76,26 @@ void setup_hostinfo(char *buf, int len) | |||
75 | host.release, host.version, host.machine); | 76 | host.release, host.version, host.machine); |
76 | } | 77 | } |
77 | 78 | ||
79 | /* | ||
80 | * We cannot use glibc's abort(). It makes use of tgkill() which | ||
81 | * has no effect within UML's kernel threads. | ||
82 | * After that glibc would execute an invalid instruction to kill | ||
83 | * the calling process and UML crashes with SIGSEGV. | ||
84 | */ | ||
85 | static inline void __attribute__ ((noreturn)) uml_abort(void) | ||
86 | { | ||
87 | sigset_t sig; | ||
88 | |||
89 | fflush(NULL); | ||
90 | |||
91 | if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT)) | ||
92 | sigprocmask(SIG_UNBLOCK, &sig, 0); | ||
93 | |||
94 | for (;;) | ||
95 | if (kill(getpid(), SIGABRT) < 0) | ||
96 | exit(127); | ||
97 | } | ||
98 | |||
78 | void os_dump_core(void) | 99 | void os_dump_core(void) |
79 | { | 100 | { |
80 | int pid; | 101 | int pid; |
@@ -116,5 +137,5 @@ void os_dump_core(void) | |||
116 | while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) | 137 | while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) |
117 | os_kill_ptraced_process(pid, 0); | 138 | os_kill_ptraced_process(pid, 0); |
118 | 139 | ||
119 | abort(); | 140 | uml_abort(); |
120 | } | 141 | } |
diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c index 2aa30a364bbe..d4efa7d679ff 100644 --- a/arch/unicore32/kernel/irq.c +++ b/arch/unicore32/kernel/irq.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/list.h> | 23 | #include <linux/list.h> |
24 | #include <linux/kallsyms.h> | 24 | #include <linux/kallsyms.h> |
25 | #include <linux/proc_fs.h> | 25 | #include <linux/proc_fs.h> |
26 | #include <linux/sysdev.h> | 26 | #include <linux/syscore_ops.h> |
27 | #include <linux/gpio.h> | 27 | #include <linux/gpio.h> |
28 | 28 | ||
29 | #include <asm/system.h> | 29 | #include <asm/system.h> |
@@ -237,7 +237,7 @@ static struct puv3_irq_state { | |||
237 | unsigned int iccr; | 237 | unsigned int iccr; |
238 | } puv3_irq_state; | 238 | } puv3_irq_state; |
239 | 239 | ||
240 | static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state) | 240 | static int puv3_irq_suspend(void) |
241 | { | 241 | { |
242 | struct puv3_irq_state *st = &puv3_irq_state; | 242 | struct puv3_irq_state *st = &puv3_irq_state; |
243 | 243 | ||
@@ -265,7 +265,7 @@ static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state) | |||
265 | return 0; | 265 | return 0; |
266 | } | 266 | } |
267 | 267 | ||
268 | static int puv3_irq_resume(struct sys_device *dev) | 268 | static void puv3_irq_resume(void) |
269 | { | 269 | { |
270 | struct puv3_irq_state *st = &puv3_irq_state; | 270 | struct puv3_irq_state *st = &puv3_irq_state; |
271 | 271 | ||
@@ -278,27 +278,20 @@ static int puv3_irq_resume(struct sys_device *dev) | |||
278 | 278 | ||
279 | writel(st->icmr, INTC_ICMR); | 279 | writel(st->icmr, INTC_ICMR); |
280 | } | 280 | } |
281 | return 0; | ||
282 | } | 281 | } |
283 | 282 | ||
284 | static struct sysdev_class puv3_irq_sysclass = { | 283 | static struct syscore_ops puv3_irq_syscore_ops = { |
285 | .name = "pkunity-irq", | ||
286 | .suspend = puv3_irq_suspend, | 284 | .suspend = puv3_irq_suspend, |
287 | .resume = puv3_irq_resume, | 285 | .resume = puv3_irq_resume, |
288 | }; | 286 | }; |
289 | 287 | ||
290 | static struct sys_device puv3_irq_device = { | 288 | static int __init puv3_irq_init_syscore(void) |
291 | .id = 0, | ||
292 | .cls = &puv3_irq_sysclass, | ||
293 | }; | ||
294 | |||
295 | static int __init puv3_irq_init_devicefs(void) | ||
296 | { | 289 | { |
297 | sysdev_class_register(&puv3_irq_sysclass); | 290 | register_syscore_ops(&puv3_irq_syscore_ops); |
298 | return sysdev_register(&puv3_irq_device); | 291 | return 0; |
299 | } | 292 | } |
300 | 293 | ||
301 | device_initcall(puv3_irq_init_devicefs); | 294 | device_initcall(puv3_irq_init_syscore); |
302 | 295 | ||
303 | void __init init_IRQ(void) | 296 | void __init init_IRQ(void) |
304 | { | 297 | { |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cc6c53a95bfd..650bb8c47eca 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -71,7 +71,6 @@ config X86 | |||
71 | select GENERIC_IRQ_SHOW | 71 | select GENERIC_IRQ_SHOW |
72 | select IRQ_FORCED_THREADING | 72 | select IRQ_FORCED_THREADING |
73 | select USE_GENERIC_SMP_HELPERS if SMP | 73 | select USE_GENERIC_SMP_HELPERS if SMP |
74 | select ARCH_NO_SYSDEV_OPS | ||
75 | 74 | ||
76 | config INSTRUCTION_DECODER | 75 | config INSTRUCTION_DECODER |
77 | def_bool (KPROBES || PERF_EVENTS) | 76 | def_bool (KPROBES || PERF_EVENTS) |
@@ -690,6 +689,7 @@ config AMD_IOMMU | |||
690 | bool "AMD IOMMU support" | 689 | bool "AMD IOMMU support" |
691 | select SWIOTLB | 690 | select SWIOTLB |
692 | select PCI_MSI | 691 | select PCI_MSI |
692 | select PCI_IOV | ||
693 | depends on X86_64 && PCI && ACPI | 693 | depends on X86_64 && PCI && ACPI |
694 | ---help--- | 694 | ---help--- |
695 | With this option you can enable support for AMD IOMMU hardware in | 695 | With this option you can enable support for AMD IOMMU hardware in |
@@ -1848,7 +1848,7 @@ config APM_ALLOW_INTS | |||
1848 | 1848 | ||
1849 | endif # APM | 1849 | endif # APM |
1850 | 1850 | ||
1851 | source "arch/x86/kernel/cpu/cpufreq/Kconfig" | 1851 | source "drivers/cpufreq/Kconfig" |
1852 | 1852 | ||
1853 | source "drivers/cpuidle/Kconfig" | 1853 | source "drivers/cpuidle/Kconfig" |
1854 | 1854 | ||
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index a63a68be1cce..94d420b360d1 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h | |||
@@ -15,4 +15,13 @@ | |||
15 | .endm | 15 | .endm |
16 | #endif | 16 | #endif |
17 | 17 | ||
18 | .macro altinstruction_entry orig alt feature orig_len alt_len | ||
19 | .align 8 | ||
20 | .quad \orig | ||
21 | .quad \alt | ||
22 | .word \feature | ||
23 | .byte \orig_len | ||
24 | .byte \alt_len | ||
25 | .endm | ||
26 | |||
18 | #endif /* __ASSEMBLY__ */ | 27 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 13009d1af99a..8cdd1e247975 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -4,7 +4,6 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/stringify.h> | 6 | #include <linux/stringify.h> |
7 | #include <linux/jump_label.h> | ||
8 | #include <asm/asm.h> | 7 | #include <asm/asm.h> |
9 | 8 | ||
10 | /* | 9 | /* |
@@ -191,7 +190,7 @@ extern void *text_poke(void *addr, const void *opcode, size_t len); | |||
191 | extern void *text_poke_smp(void *addr, const void *opcode, size_t len); | 190 | extern void *text_poke_smp(void *addr, const void *opcode, size_t len); |
192 | extern void text_poke_smp_batch(struct text_poke_param *params, int n); | 191 | extern void text_poke_smp_batch(struct text_poke_param *params, int n); |
193 | 192 | ||
194 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) | 193 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) |
195 | #define IDEAL_NOP_SIZE_5 5 | 194 | #define IDEAL_NOP_SIZE_5 5 |
196 | extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; | 195 | extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; |
197 | extern void arch_init_ideal_nop5(void); | 196 | extern void arch_init_ideal_nop5(void); |
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h index 916bc8111a01..55d95eb789b3 100644 --- a/arch/x86/include/asm/amd_iommu_proto.h +++ b/arch/x86/include/asm/amd_iommu_proto.h | |||
@@ -19,13 +19,12 @@ | |||
19 | #ifndef _ASM_X86_AMD_IOMMU_PROTO_H | 19 | #ifndef _ASM_X86_AMD_IOMMU_PROTO_H |
20 | #define _ASM_X86_AMD_IOMMU_PROTO_H | 20 | #define _ASM_X86_AMD_IOMMU_PROTO_H |
21 | 21 | ||
22 | struct amd_iommu; | 22 | #include <asm/amd_iommu_types.h> |
23 | 23 | ||
24 | extern int amd_iommu_init_dma_ops(void); | 24 | extern int amd_iommu_init_dma_ops(void); |
25 | extern int amd_iommu_init_passthrough(void); | 25 | extern int amd_iommu_init_passthrough(void); |
26 | extern irqreturn_t amd_iommu_int_thread(int irq, void *data); | ||
26 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); | 27 | extern irqreturn_t amd_iommu_int_handler(int irq, void *data); |
27 | extern void amd_iommu_flush_all_domains(void); | ||
28 | extern void amd_iommu_flush_all_devices(void); | ||
29 | extern void amd_iommu_apply_erratum_63(u16 devid); | 28 | extern void amd_iommu_apply_erratum_63(u16 devid); |
30 | extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); | 29 | extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); |
31 | extern int amd_iommu_init_devices(void); | 30 | extern int amd_iommu_init_devices(void); |
@@ -44,4 +43,12 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev) | |||
44 | (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); | 43 | (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); |
45 | } | 44 | } |
46 | 45 | ||
46 | static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) | ||
47 | { | ||
48 | if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) | ||
49 | return false; | ||
50 | |||
51 | return !!(iommu->features & f); | ||
52 | } | ||
53 | |||
47 | #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ | 54 | #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ |
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index e3509fc303bf..4c9982995414 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -68,12 +68,25 @@ | |||
68 | #define MMIO_CONTROL_OFFSET 0x0018 | 68 | #define MMIO_CONTROL_OFFSET 0x0018 |
69 | #define MMIO_EXCL_BASE_OFFSET 0x0020 | 69 | #define MMIO_EXCL_BASE_OFFSET 0x0020 |
70 | #define MMIO_EXCL_LIMIT_OFFSET 0x0028 | 70 | #define MMIO_EXCL_LIMIT_OFFSET 0x0028 |
71 | #define MMIO_EXT_FEATURES 0x0030 | ||
71 | #define MMIO_CMD_HEAD_OFFSET 0x2000 | 72 | #define MMIO_CMD_HEAD_OFFSET 0x2000 |
72 | #define MMIO_CMD_TAIL_OFFSET 0x2008 | 73 | #define MMIO_CMD_TAIL_OFFSET 0x2008 |
73 | #define MMIO_EVT_HEAD_OFFSET 0x2010 | 74 | #define MMIO_EVT_HEAD_OFFSET 0x2010 |
74 | #define MMIO_EVT_TAIL_OFFSET 0x2018 | 75 | #define MMIO_EVT_TAIL_OFFSET 0x2018 |
75 | #define MMIO_STATUS_OFFSET 0x2020 | 76 | #define MMIO_STATUS_OFFSET 0x2020 |
76 | 77 | ||
78 | |||
79 | /* Extended Feature Bits */ | ||
80 | #define FEATURE_PREFETCH (1ULL<<0) | ||
81 | #define FEATURE_PPR (1ULL<<1) | ||
82 | #define FEATURE_X2APIC (1ULL<<2) | ||
83 | #define FEATURE_NX (1ULL<<3) | ||
84 | #define FEATURE_GT (1ULL<<4) | ||
85 | #define FEATURE_IA (1ULL<<6) | ||
86 | #define FEATURE_GA (1ULL<<7) | ||
87 | #define FEATURE_HE (1ULL<<8) | ||
88 | #define FEATURE_PC (1ULL<<9) | ||
89 | |||
77 | /* MMIO status bits */ | 90 | /* MMIO status bits */ |
78 | #define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 | 91 | #define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 |
79 | 92 | ||
@@ -113,7 +126,9 @@ | |||
113 | /* command specific defines */ | 126 | /* command specific defines */ |
114 | #define CMD_COMPL_WAIT 0x01 | 127 | #define CMD_COMPL_WAIT 0x01 |
115 | #define CMD_INV_DEV_ENTRY 0x02 | 128 | #define CMD_INV_DEV_ENTRY 0x02 |
116 | #define CMD_INV_IOMMU_PAGES 0x03 | 129 | #define CMD_INV_IOMMU_PAGES 0x03 |
130 | #define CMD_INV_IOTLB_PAGES 0x04 | ||
131 | #define CMD_INV_ALL 0x08 | ||
117 | 132 | ||
118 | #define CMD_COMPL_WAIT_STORE_MASK 0x01 | 133 | #define CMD_COMPL_WAIT_STORE_MASK 0x01 |
119 | #define CMD_COMPL_WAIT_INT_MASK 0x02 | 134 | #define CMD_COMPL_WAIT_INT_MASK 0x02 |
@@ -215,6 +230,8 @@ | |||
215 | #define IOMMU_PTE_IR (1ULL << 61) | 230 | #define IOMMU_PTE_IR (1ULL << 61) |
216 | #define IOMMU_PTE_IW (1ULL << 62) | 231 | #define IOMMU_PTE_IW (1ULL << 62) |
217 | 232 | ||
233 | #define DTE_FLAG_IOTLB 0x01 | ||
234 | |||
218 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) | 235 | #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) |
219 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) | 236 | #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) |
220 | #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) | 237 | #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) |
@@ -227,6 +244,7 @@ | |||
227 | /* IOMMU capabilities */ | 244 | /* IOMMU capabilities */ |
228 | #define IOMMU_CAP_IOTLB 24 | 245 | #define IOMMU_CAP_IOTLB 24 |
229 | #define IOMMU_CAP_NPCACHE 26 | 246 | #define IOMMU_CAP_NPCACHE 26 |
247 | #define IOMMU_CAP_EFR 27 | ||
230 | 248 | ||
231 | #define MAX_DOMAIN_ID 65536 | 249 | #define MAX_DOMAIN_ID 65536 |
232 | 250 | ||
@@ -249,6 +267,8 @@ extern bool amd_iommu_dump; | |||
249 | 267 | ||
250 | /* global flag if IOMMUs cache non-present entries */ | 268 | /* global flag if IOMMUs cache non-present entries */ |
251 | extern bool amd_iommu_np_cache; | 269 | extern bool amd_iommu_np_cache; |
270 | /* Only true if all IOMMUs support device IOTLBs */ | ||
271 | extern bool amd_iommu_iotlb_sup; | ||
252 | 272 | ||
253 | /* | 273 | /* |
254 | * Make iterating over all IOMMUs easier | 274 | * Make iterating over all IOMMUs easier |
@@ -371,6 +391,9 @@ struct amd_iommu { | |||
371 | /* flags read from acpi table */ | 391 | /* flags read from acpi table */ |
372 | u8 acpi_flags; | 392 | u8 acpi_flags; |
373 | 393 | ||
394 | /* Extended features */ | ||
395 | u64 features; | ||
396 | |||
374 | /* | 397 | /* |
375 | * Capability pointer. There could be more than one IOMMU per PCI | 398 | * Capability pointer. There could be more than one IOMMU per PCI |
376 | * device function if there are more than one AMD IOMMU capability | 399 | * device function if there are more than one AMD IOMMU capability |
@@ -409,9 +432,6 @@ struct amd_iommu { | |||
409 | /* if one, we need to send a completion wait command */ | 432 | /* if one, we need to send a completion wait command */ |
410 | bool need_sync; | 433 | bool need_sync; |
411 | 434 | ||
412 | /* becomes true if a command buffer reset is running */ | ||
413 | bool reset_in_progress; | ||
414 | |||
415 | /* default dma_ops domain for that IOMMU */ | 435 | /* default dma_ops domain for that IOMMU */ |
416 | struct dma_ops_domain *default_dom; | 436 | struct dma_ops_domain *default_dom; |
417 | 437 | ||
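The new iommu_feature() helper pairs the IOMMU_CAP_EFR capability bit with the FEATURE_* flags read from the MMIO_EXT_FEATURES register added above. A hedged usage sketch (the function and its flow are illustrative, not from the patch):

	static void foo_setup_prefetch(struct amd_iommu *iommu)	/* hypothetical */
	{
		/* false when the EFR capability is absent or the bit is clear */
		if (!iommu_feature(iommu, FEATURE_PREFETCH))
			return;
		/* ... program the optional prefetch support here ... */
	}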
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index d87988bacf3e..34595d5e1038 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h | |||
@@ -78,6 +78,7 @@ | |||
78 | #define APIC_DEST_LOGICAL 0x00800 | 78 | #define APIC_DEST_LOGICAL 0x00800 |
79 | #define APIC_DEST_PHYSICAL 0x00000 | 79 | #define APIC_DEST_PHYSICAL 0x00000 |
80 | #define APIC_DM_FIXED 0x00000 | 80 | #define APIC_DM_FIXED 0x00000 |
81 | #define APIC_DM_FIXED_MASK 0x00700 | ||
81 | #define APIC_DM_LOWEST 0x00100 | 82 | #define APIC_DM_LOWEST 0x00100 |
82 | #define APIC_DM_SMI 0x00200 | 83 | #define APIC_DM_SMI 0x00200 |
83 | #define APIC_DM_REMRD 0x00300 | 84 | #define APIC_DM_REMRD 0x00300 |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 91f3e087cf21..7f2f7b123293 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -195,6 +195,7 @@ | |||
195 | 195 | ||
196 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ | 196 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ |
197 | #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ | 197 | #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ |
198 | #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ | ||
198 | 199 | ||
199 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 200 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
200 | 201 | ||
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index db24c2278be0..268c783ab1c0 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -38,11 +38,10 @@ extern void mcount(void); | |||
38 | static inline unsigned long ftrace_call_adjust(unsigned long addr) | 38 | static inline unsigned long ftrace_call_adjust(unsigned long addr) |
39 | { | 39 | { |
40 | /* | 40 | /* |
41 | * call mcount is "e8 <4 byte offset>" | 41 | * addr is the address of the mcount call instruction. |
42 | * The addr points to the 4 byte offset and the caller of this | 42 | * recordmcount does the necessary offset calculation. |
43 | * function wants the pointer to e8. Simply subtract one. | ||
44 | */ | 43 | */ |
45 | return addr - 1; | 44 | return addr; |
46 | } | 45 | } |
47 | 46 | ||
48 | #ifdef CONFIG_DYNAMIC_FTRACE | 47 | #ifdef CONFIG_DYNAMIC_FTRACE |
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index 574dbc22893a..a32b18ce6ead 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h | |||
@@ -5,20 +5,25 @@ | |||
5 | 5 | ||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <asm/nops.h> | 7 | #include <asm/nops.h> |
8 | #include <asm/asm.h> | ||
8 | 9 | ||
9 | #define JUMP_LABEL_NOP_SIZE 5 | 10 | #define JUMP_LABEL_NOP_SIZE 5 |
10 | 11 | ||
11 | # define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" | 12 | #define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" |
12 | 13 | ||
13 | # define JUMP_LABEL(key, label) \ | 14 | static __always_inline bool arch_static_branch(struct jump_label_key *key) |
14 | do { \ | 15 | { |
15 | asm goto("1:" \ | 16 | asm goto("1:" |
16 | JUMP_LABEL_INITIAL_NOP \ | 17 | JUMP_LABEL_INITIAL_NOP |
17 | ".pushsection __jump_table, \"aw\" \n\t"\ | 18 | ".pushsection __jump_table, \"aw\" \n\t" |
18 | _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ | 19 | _ASM_ALIGN "\n\t" |
19 | ".popsection \n\t" \ | 20 | _ASM_PTR "1b, %l[l_yes], %c0 \n\t" |
20 | : : "i" (key) : : label); \ | 21 | ".popsection \n\t" |
21 | } while (0) | 22 | : : "i" (key) : : l_yes); |
23 | return false; | ||
24 | l_yes: | ||
25 | return true; | ||
26 | } | ||
22 | 27 | ||
23 | #endif /* __KERNEL__ */ | 28 | #endif /* __KERNEL__ */ |
24 | 29 | ||
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 7db7723d1f32..d56187c6b838 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -299,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | |||
299 | /* Install a pte for a particular vaddr in kernel space. */ | 299 | /* Install a pte for a particular vaddr in kernel space. */ |
300 | void set_pte_vaddr(unsigned long vaddr, pte_t pte); | 300 | void set_pte_vaddr(unsigned long vaddr, pte_t pte); |
301 | 301 | ||
302 | extern void native_pagetable_reserve(u64 start, u64 end); | ||
302 | #ifdef CONFIG_X86_32 | 303 | #ifdef CONFIG_X86_32 |
303 | extern void native_pagetable_setup_start(pgd_t *base); | 304 | extern void native_pagetable_setup_start(pgd_t *base); |
304 | extern void native_pagetable_setup_done(pgd_t *base); | 305 | extern void native_pagetable_setup_done(pgd_t *base); |
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index db8aa19a08a2..647d8a06ce4f 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
@@ -88,7 +88,7 @@ void *extend_brk(size_t size, size_t align); | |||
88 | * executable.) | 88 | * executable.) |
89 | */ | 89 | */ |
90 | #define RESERVE_BRK(name,sz) \ | 90 | #define RESERVE_BRK(name,sz) \ |
91 | static void __section(.discard.text) __used \ | 91 | static void __section(.discard.text) __used notrace \ |
92 | __brk_reservation_fn_##name##__(void) { \ | 92 | __brk_reservation_fn_##name##__(void) { \ |
93 | asm volatile ( \ | 93 | asm volatile ( \ |
94 | ".pushsection .brk_reservation,\"aw\",@nobits;" \ | 94 | ".pushsection .brk_reservation,\"aw\",@nobits;" \ |
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index d7e89c83645d..70bbe39043a9 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h | |||
@@ -37,9 +37,6 @@ print_context_stack_bp(struct thread_info *tinfo, | |||
37 | /* Generic stack tracer with callbacks */ | 37 | /* Generic stack tracer with callbacks */ |
38 | 38 | ||
39 | struct stacktrace_ops { | 39 | struct stacktrace_ops { |
40 | void (*warning)(void *data, char *msg); | ||
41 | /* msg must contain %s for the symbol */ | ||
42 | void (*warning_symbol)(void *data, char *msg, unsigned long symbol); | ||
43 | void (*address)(void *data, unsigned long address, int reliable); | 40 | void (*address)(void *data, unsigned long address, int reliable); |
44 | /* On negative return stop dumping */ | 41 | /* On negative return stop dumping */ |
45 | int (*stack)(void *data, char *name); | 42 | int (*stack)(void *data, char *name); |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index abd3e0ea762a..99f0ad753f32 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -42,7 +42,7 @@ | |||
42 | * Returns 0 if the range is valid, nonzero otherwise. | 42 | * Returns 0 if the range is valid, nonzero otherwise. |
43 | * | 43 | * |
44 | * This is equivalent to the following test: | 44 | * This is equivalent to the following test: |
45 | * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64) | 45 | * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64) |
46 | * | 46 | * |
47 | * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... | 47 | * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... |
48 | */ | 48 | */ |
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 3e094af443c3..130f1eeee5fe 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
@@ -94,6 +94,8 @@ | |||
94 | /* after this # consecutive successes, bump up the throttle if it was lowered */ | 94 | /* after this # consecutive successes, bump up the throttle if it was lowered */ |
95 | #define COMPLETE_THRESHOLD 5 | 95 | #define COMPLETE_THRESHOLD 5 |
96 | 96 | ||
97 | #define UV_LB_SUBNODEID 0x10 | ||
98 | |||
97 | /* | 99 | /* |
98 | * number of entries in the destination side payload queue | 100 | * number of entries in the destination side payload queue |
99 | */ | 101 | */ |
@@ -124,7 +126,7 @@ | |||
124 | * The distribution specification (32 bytes) is interpreted as a 256-bit | 126 | * The distribution specification (32 bytes) is interpreted as a 256-bit |
125 | * distribution vector. Adjacent bits correspond to consecutive even numbered | 127 | * distribution vector. Adjacent bits correspond to consecutive even numbered |
126 | * nodeIDs. The result of adding the index of a given bit to the 15-bit | 128 | * nodeIDs. The result of adding the index of a given bit to the 15-bit |
127 | * 'base_dest_nodeid' field of the header corresponds to the | 129 | * 'base_dest_nasid' field of the header corresponds to the |
128 | * destination nodeID associated with that specified bit. | 130 | * destination nodeID associated with that specified bit. |
129 | */ | 131 | */ |
130 | struct bau_target_uvhubmask { | 132 | struct bau_target_uvhubmask { |
@@ -176,7 +178,7 @@ struct bau_msg_payload { | |||
176 | struct bau_msg_header { | 178 | struct bau_msg_header { |
177 | unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ | 179 | unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ |
178 | /* bits 5:0 */ | 180 | /* bits 5:0 */ |
179 | unsigned int base_dest_nodeid:15; /* nasid of the */ | 181 | unsigned int base_dest_nasid:15; /* nasid of the */ |
180 | /* bits 20:6 */ /* first bit in uvhub map */ | 182 | /* bits 20:6 */ /* first bit in uvhub map */ |
181 | unsigned int command:8; /* message type */ | 183 | unsigned int command:8; /* message type */ |
182 | /* bits 28:21 */ | 184 | /* bits 28:21 */ |
@@ -378,6 +380,10 @@ struct ptc_stats { | |||
378 | unsigned long d_rcanceled; /* number of messages canceled by resets */ | 380 | unsigned long d_rcanceled; /* number of messages canceled by resets */ |
379 | }; | 381 | }; |
380 | 382 | ||
383 | struct hub_and_pnode { | ||
384 | short uvhub; | ||
385 | short pnode; | ||
386 | }; | ||
381 | /* | 387 | /* |
382 | * one per-cpu; to locate the software tables | 388 | * one per-cpu; to locate the software tables |
383 | */ | 389 | */ |
@@ -399,10 +405,12 @@ struct bau_control { | |||
399 | int baudisabled; | 405 | int baudisabled; |
400 | int set_bau_off; | 406 | int set_bau_off; |
401 | short cpu; | 407 | short cpu; |
408 | short osnode; | ||
402 | short uvhub_cpu; | 409 | short uvhub_cpu; |
403 | short uvhub; | 410 | short uvhub; |
404 | short cpus_in_socket; | 411 | short cpus_in_socket; |
405 | short cpus_in_uvhub; | 412 | short cpus_in_uvhub; |
413 | short partition_base_pnode; | ||
406 | unsigned short message_number; | 414 | unsigned short message_number; |
407 | unsigned short uvhub_quiesce; | 415 | unsigned short uvhub_quiesce; |
408 | short socket_acknowledge_count[DEST_Q_SIZE]; | 416 | short socket_acknowledge_count[DEST_Q_SIZE]; |
@@ -422,15 +430,16 @@ struct bau_control { | |||
422 | int congested_period; | 430 | int congested_period; |
423 | cycles_t period_time; | 431 | cycles_t period_time; |
424 | long period_requests; | 432 | long period_requests; |
433 | struct hub_and_pnode *target_hub_and_pnode; | ||
425 | }; | 434 | }; |
426 | 435 | ||
427 | static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) | 436 | static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) |
428 | { | 437 | { |
429 | return constant_test_bit(uvhub, &dstp->bits[0]); | 438 | return constant_test_bit(uvhub, &dstp->bits[0]); |
430 | } | 439 | } |
431 | static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) | 440 | static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp) |
432 | { | 441 | { |
433 | __set_bit(uvhub, &dstp->bits[0]); | 442 | __set_bit(pnode, &dstp->bits[0]); |
434 | } | 443 | } |
435 | static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, | 444 | static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, |
436 | int nbits) | 445 | int nbits) |
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index a501741c2335..4298002d0c83 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -398,6 +398,8 @@ struct uv_blade_info { | |||
398 | unsigned short nr_online_cpus; | 398 | unsigned short nr_online_cpus; |
399 | unsigned short pnode; | 399 | unsigned short pnode; |
400 | short memory_nid; | 400 | short memory_nid; |
401 | spinlock_t nmi_lock; | ||
402 | unsigned long nmi_count; | ||
401 | }; | 403 | }; |
402 | extern struct uv_blade_info *uv_blade_info; | 404 | extern struct uv_blade_info *uv_blade_info; |
403 | extern short *uv_node_to_blade; | 405 | extern short *uv_node_to_blade; |
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index 20cafeac7455..f5bb64a823d7 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * SGI UV MMR definitions | 6 | * SGI UV MMR definitions |
7 | * | 7 | * |
8 | * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _ASM_X86_UV_UV_MMRS_H | 11 | #ifndef _ASM_X86_UV_UV_MMRS_H |
@@ -1099,5 +1099,19 @@ union uvh_rtc1_int_config_u { | |||
1099 | } s; | 1099 | } s; |
1100 | }; | 1100 | }; |
1101 | 1101 | ||
1102 | /* ========================================================================= */ | ||
1103 | /* UVH_SCRATCH5 */ | ||
1104 | /* ========================================================================= */ | ||
1105 | #define UVH_SCRATCH5 0x2d0200UL | ||
1106 | #define UVH_SCRATCH5_32 0x00778 | ||
1107 | |||
1108 | #define UVH_SCRATCH5_SCRATCH5_SHFT 0 | ||
1109 | #define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL | ||
1110 | union uvh_scratch5_u { | ||
1111 | unsigned long v; | ||
1112 | struct uvh_scratch5_s { | ||
1113 | unsigned long scratch5 : 64; /* RW, W1CS */ | ||
1114 | } s; | ||
1115 | }; | ||
1102 | 1116 | ||
1103 | #endif /* __ASM_UV_MMRS_X86_H__ */ | 1117 | #endif /* __ASM_UV_MMRS_X86_H__ */ |
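A short illustration of how the new UVH_SCRATCH5 union is meant to be read; uv_read_local_mmr() is the accessor used by the NMI handler changes later in this series, the wrapper function itself is just a sketch:

	static unsigned long example_read_scratch5(void)
	{
		union uvh_scratch5_u scratch5;

		scratch5.v = uv_read_local_mmr(UVH_SCRATCH5);	/* raw 64-bit MMR value */
		return scratch5.s.scratch5;			/* bitfield view of the same data */
	}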
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 643ebf2e2ad8..d3d859035af9 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -68,6 +68,17 @@ struct x86_init_oem { | |||
68 | }; | 68 | }; |
69 | 69 | ||
70 | /** | 70 | /** |
71 | * struct x86_init_mapping - platform specific initial kernel pagetable setup | ||
72 | * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage | ||
73 | * | ||
74 | * For more details on the purpose of this hook, look in | ||
75 | * init_memory_mapping and the commit that added it. | ||
76 | */ | ||
77 | struct x86_init_mapping { | ||
78 | void (*pagetable_reserve)(u64 start, u64 end); | ||
79 | }; | ||
80 | |||
81 | /** | ||
71 | * struct x86_init_paging - platform specific paging functions | 82 | * struct x86_init_paging - platform specific paging functions |
72 | * @pagetable_setup_start: platform specific pre paging_init() call | 83 | * @pagetable_setup_start: platform specific pre paging_init() call |
73 | * @pagetable_setup_done: platform specific post paging_init() call | 84 | * @pagetable_setup_done: platform specific post paging_init() call |
@@ -123,6 +134,7 @@ struct x86_init_ops { | |||
123 | struct x86_init_mpparse mpparse; | 134 | struct x86_init_mpparse mpparse; |
124 | struct x86_init_irqs irqs; | 135 | struct x86_init_irqs irqs; |
125 | struct x86_init_oem oem; | 136 | struct x86_init_oem oem; |
137 | struct x86_init_mapping mapping; | ||
126 | struct x86_init_paging paging; | 138 | struct x86_init_paging paging; |
127 | struct x86_init_timers timers; | 139 | struct x86_init_timers timers; |
128 | struct x86_init_iommu iommu; | 140 | struct x86_init_iommu iommu; |
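To show how the new mapping hook is wired up, a hedged sketch follows; the override function name and its body are hypothetical, only the x86_init.mapping.pagetable_reserve field comes from the hunk above:

	static void example_pagetable_reserve(u64 start, u64 end)
	{
		/* a platform (e.g. a paravirt guest) decides how much of
		 * [start, end) must stay reserved for early pagetables */
	}

	static void __init example_platform_setup(void)
	{
		x86_init.mapping.pagetable_reserve = example_pagetable_reserve;
	}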
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index c61934fbf22a..64a619d47d34 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -47,8 +47,9 @@ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); | |||
47 | extern unsigned long set_phys_range_identity(unsigned long pfn_s, | 47 | extern unsigned long set_phys_range_identity(unsigned long pfn_s, |
48 | unsigned long pfn_e); | 48 | unsigned long pfn_e); |
49 | 49 | ||
50 | extern int m2p_add_override(unsigned long mfn, struct page *page); | 50 | extern int m2p_add_override(unsigned long mfn, struct page *page, |
51 | extern int m2p_remove_override(struct page *page); | 51 | bool clear_pte); |
52 | extern int m2p_remove_override(struct page *page, bool clear_pte); | ||
52 | extern struct page *m2p_find_override(unsigned long mfn); | 53 | extern struct page *m2p_find_override(unsigned long mfn); |
53 | extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); | 54 | extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); |
54 | 55 | ||
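The m2p override prototypes gain a clear_pte flag. A sketch of a call site under the new signatures; the surrounding grant-mapping context is assumed, only the extra boolean argument comes from the hunk above:

	static int example_map_foreign_frame(unsigned long mfn, struct page *page)
	{
		int rc;

		rc = m2p_add_override(mfn, page, true /* clear_pte */);
		if (rc)
			return rc;

		/* ... page now acts as a local view of the foreign frame ... */

		return m2p_remove_override(page, true /* clear_pte */);
	}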
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h index aa8620989162..4fbda9a3f339 100644 --- a/arch/x86/include/asm/xen/pci.h +++ b/arch/x86/include/asm/xen/pci.h | |||
@@ -15,10 +15,26 @@ static inline int pci_xen_hvm_init(void) | |||
15 | #endif | 15 | #endif |
16 | #if defined(CONFIG_XEN_DOM0) | 16 | #if defined(CONFIG_XEN_DOM0) |
17 | void __init xen_setup_pirqs(void); | 17 | void __init xen_setup_pirqs(void); |
18 | int xen_find_device_domain_owner(struct pci_dev *dev); | ||
19 | int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain); | ||
20 | int xen_unregister_device_domain_owner(struct pci_dev *dev); | ||
18 | #else | 21 | #else |
19 | static inline void __init xen_setup_pirqs(void) | 22 | static inline void __init xen_setup_pirqs(void) |
20 | { | 23 | { |
21 | } | 24 | } |
25 | static inline int xen_find_device_domain_owner(struct pci_dev *dev) | ||
26 | { | ||
27 | return -1; | ||
28 | } | ||
29 | static inline int xen_register_device_domain_owner(struct pci_dev *dev, | ||
30 | uint16_t domain) | ||
31 | { | ||
32 | return -1; | ||
33 | } | ||
34 | static inline int xen_unregister_device_domain_owner(struct pci_dev *dev) | ||
35 | { | ||
36 | return -1; | ||
37 | } | ||
22 | #endif | 38 | #endif |
23 | 39 | ||
24 | #if defined(CONFIG_PCI_MSI) | 40 | #if defined(CONFIG_PCI_MSI) |
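A hedged sketch of how a dom0 backend might use the new ownership helpers; note that with CONFIG_XEN_DOM0 unset the inline stubs above simply return -1, so callers can treat that as "no owner recorded":

	static int example_assign_to_guest(struct pci_dev *dev, uint16_t domid)
	{
		int rc = xen_register_device_domain_owner(dev, domid);

		if (rc)
			return rc;	/* -1 when dom0 support is compiled out */

		WARN_ON(xen_find_device_domain_owner(dev) != domid);
		return 0;
	}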
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 7338ef2218bc..97ebf82e0b7f 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -117,7 +117,7 @@ obj-$(CONFIG_OF) += devicetree.o | |||
117 | ifeq ($(CONFIG_X86_64),y) | 117 | ifeq ($(CONFIG_X86_64),y) |
118 | obj-$(CONFIG_AUDIT) += audit_64.o | 118 | obj-$(CONFIG_AUDIT) += audit_64.o |
119 | 119 | ||
120 | obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o | 120 | obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o |
121 | obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o | 121 | obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o |
122 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o | 122 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o |
123 | 123 | ||
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index ff93bc1b09c3..18a857ba7a25 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -112,11 +112,6 @@ static int __init acpi_sleep_setup(char *str) | |||
112 | #ifdef CONFIG_HIBERNATION | 112 | #ifdef CONFIG_HIBERNATION |
113 | if (strncmp(str, "s4_nohwsig", 10) == 0) | 113 | if (strncmp(str, "s4_nohwsig", 10) == 0) |
114 | acpi_no_s4_hw_signature(); | 114 | acpi_no_s4_hw_signature(); |
115 | if (strncmp(str, "s4_nonvs", 8) == 0) { | ||
116 | pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, " | ||
117 | "please use acpi_sleep=nonvs instead"); | ||
118 | acpi_nvs_nosave(); | ||
119 | } | ||
120 | #endif | 115 | #endif |
121 | if (strncmp(str, "nonvs", 5) == 0) | 116 | if (strncmp(str, "nonvs", 5) == 0) |
122 | acpi_nvs_nosave(); | 117 | acpi_nvs_nosave(); |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 4a234677e213..1eeeafcb4410 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -210,6 +210,15 @@ void __init_or_module apply_alternatives(struct alt_instr *start, | |||
210 | u8 insnbuf[MAX_PATCH_LEN]; | 210 | u8 insnbuf[MAX_PATCH_LEN]; |
211 | 211 | ||
212 | DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); | 212 | DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); |
213 | /* | ||
214 | * The scan order should be from start to end. A later scanned | ||
215 | * alternative code can overwrite a previous scanned alternative code. | ||
216 | * Some kernel functions (e.g. memcpy, memset, etc) use this order to | ||
217 | * patch code. | ||
218 | * | ||
219 | * So be careful if you want to change the scan order to any other | ||
220 | * order. | ||
221 | */ | ||
213 | for (a = start; a < end; a++) { | 222 | for (a = start; a < end; a++) { |
214 | u8 *instr = a->instr; | 223 | u8 *instr = a->instr; |
215 | BUG_ON(a->replacementlen > a->instrlen); | 224 | BUG_ON(a->replacementlen > a->instrlen); |
@@ -679,7 +688,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) | |||
679 | __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); | 688 | __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); |
680 | } | 689 | } |
681 | 690 | ||
682 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) | 691 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) |
683 | 692 | ||
684 | #ifdef CONFIG_X86_64 | 693 | #ifdef CONFIG_X86_64 |
685 | unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; | 694 | unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; |
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/amd_gart_64.c index b117efd24f71..b117efd24f71 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c | |||
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 57ca77787220..873e7e1ead7b 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | #include <linux/pci-ats.h> | ||
21 | #include <linux/bitmap.h> | 22 | #include <linux/bitmap.h> |
22 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
23 | #include <linux/debugfs.h> | 24 | #include <linux/debugfs.h> |
@@ -25,6 +26,7 @@ | |||
25 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
26 | #include <linux/iommu-helper.h> | 27 | #include <linux/iommu-helper.h> |
27 | #include <linux/iommu.h> | 28 | #include <linux/iommu.h> |
29 | #include <linux/delay.h> | ||
28 | #include <asm/proto.h> | 30 | #include <asm/proto.h> |
29 | #include <asm/iommu.h> | 31 | #include <asm/iommu.h> |
30 | #include <asm/gart.h> | 32 | #include <asm/gart.h> |
@@ -34,7 +36,7 @@ | |||
34 | 36 | ||
35 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) | 37 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) |
36 | 38 | ||
37 | #define EXIT_LOOP_COUNT 10000000 | 39 | #define LOOP_TIMEOUT 100000 |
38 | 40 | ||
39 | static DEFINE_RWLOCK(amd_iommu_devtable_lock); | 41 | static DEFINE_RWLOCK(amd_iommu_devtable_lock); |
40 | 42 | ||
@@ -57,7 +59,6 @@ struct iommu_cmd { | |||
57 | u32 data[4]; | 59 | u32 data[4]; |
58 | }; | 60 | }; |
59 | 61 | ||
60 | static void reset_iommu_command_buffer(struct amd_iommu *iommu); | ||
61 | static void update_domain(struct protection_domain *domain); | 62 | static void update_domain(struct protection_domain *domain); |
62 | 63 | ||
63 | /**************************************************************************** | 64 | /**************************************************************************** |
@@ -322,8 +323,6 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) | |||
322 | break; | 323 | break; |
323 | case EVENT_TYPE_ILL_CMD: | 324 | case EVENT_TYPE_ILL_CMD: |
324 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); | 325 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); |
325 | iommu->reset_in_progress = true; | ||
326 | reset_iommu_command_buffer(iommu); | ||
327 | dump_command(address); | 326 | dump_command(address); |
328 | break; | 327 | break; |
329 | case EVENT_TYPE_CMD_HARD_ERR: | 328 | case EVENT_TYPE_CMD_HARD_ERR: |
@@ -367,7 +366,7 @@ static void iommu_poll_events(struct amd_iommu *iommu) | |||
367 | spin_unlock_irqrestore(&iommu->lock, flags); | 366 | spin_unlock_irqrestore(&iommu->lock, flags); |
368 | } | 367 | } |
369 | 368 | ||
370 | irqreturn_t amd_iommu_int_handler(int irq, void *data) | 369 | irqreturn_t amd_iommu_int_thread(int irq, void *data) |
371 | { | 370 | { |
372 | struct amd_iommu *iommu; | 371 | struct amd_iommu *iommu; |
373 | 372 | ||
@@ -377,192 +376,300 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data) | |||
377 | return IRQ_HANDLED; | 376 | return IRQ_HANDLED; |
378 | } | 377 | } |
379 | 378 | ||
379 | irqreturn_t amd_iommu_int_handler(int irq, void *data) | ||
380 | { | ||
381 | return IRQ_WAKE_THREAD; | ||
382 | } | ||
383 | |||
380 | /**************************************************************************** | 384 | /**************************************************************************** |
381 | * | 385 | * |
382 | * IOMMU command queuing functions | 386 | * IOMMU command queuing functions |
383 | * | 387 | * |
384 | ****************************************************************************/ | 388 | ****************************************************************************/ |
385 | 389 | ||
386 | /* | 390 | static int wait_on_sem(volatile u64 *sem) |
387 | * Writes the command to the IOMMUs command buffer and informs the | 391 | { |
388 | * hardware about the new command. Must be called with iommu->lock held. | 392 | int i = 0; |
389 | */ | 393 | |
390 | static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | 394 | while (*sem == 0 && i < LOOP_TIMEOUT) { |
395 | udelay(1); | ||
396 | i += 1; | ||
397 | } | ||
398 | |||
399 | if (i == LOOP_TIMEOUT) { | ||
400 | pr_alert("AMD-Vi: Completion-Wait loop timed out\n"); | ||
401 | return -EIO; | ||
402 | } | ||
403 | |||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | static void copy_cmd_to_buffer(struct amd_iommu *iommu, | ||
408 | struct iommu_cmd *cmd, | ||
409 | u32 tail) | ||
391 | { | 410 | { |
392 | u32 tail, head; | ||
393 | u8 *target; | 411 | u8 *target; |
394 | 412 | ||
395 | WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); | ||
396 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
397 | target = iommu->cmd_buf + tail; | 413 | target = iommu->cmd_buf + tail; |
398 | memcpy_toio(target, cmd, sizeof(*cmd)); | 414 | tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; |
399 | tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; | 415 | |
400 | head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | 416 | /* Copy command to buffer */ |
401 | if (tail == head) | 417 | memcpy(target, cmd, sizeof(*cmd)); |
402 | return -ENOMEM; | 418 | |
419 | /* Tell the IOMMU about it */ | ||
403 | writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | 420 | writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); |
421 | } | ||
404 | 422 | ||
405 | return 0; | 423 | static void build_completion_wait(struct iommu_cmd *cmd, u64 address) |
424 | { | ||
425 | WARN_ON(address & 0x7ULL); | ||
426 | |||
427 | memset(cmd, 0, sizeof(*cmd)); | ||
428 | cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK; | ||
429 | cmd->data[1] = upper_32_bits(__pa(address)); | ||
430 | cmd->data[2] = 1; | ||
431 | CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); | ||
432 | } | ||
433 | |||
434 | static void build_inv_dte(struct iommu_cmd *cmd, u16 devid) | ||
435 | { | ||
436 | memset(cmd, 0, sizeof(*cmd)); | ||
437 | cmd->data[0] = devid; | ||
438 | CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY); | ||
439 | } | ||
440 | |||
441 | static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, | ||
442 | size_t size, u16 domid, int pde) | ||
443 | { | ||
444 | u64 pages; | ||
445 | int s; | ||
446 | |||
447 | pages = iommu_num_pages(address, size, PAGE_SIZE); | ||
448 | s = 0; | ||
449 | |||
450 | if (pages > 1) { | ||
451 | /* | ||
452 | * If we have to flush more than one page, flush all | ||
453 | * TLB entries for this domain | ||
454 | */ | ||
455 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | ||
456 | s = 1; | ||
457 | } | ||
458 | |||
459 | address &= PAGE_MASK; | ||
460 | |||
461 | memset(cmd, 0, sizeof(*cmd)); | ||
462 | cmd->data[1] |= domid; | ||
463 | cmd->data[2] = lower_32_bits(address); | ||
464 | cmd->data[3] = upper_32_bits(address); | ||
465 | CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); | ||
466 | if (s) /* size bit - we flush more than one 4kb page */ | ||
467 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; | ||
468 | if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ | ||
469 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; | ||
470 | } | ||
471 | |||
472 | static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, | ||
473 | u64 address, size_t size) | ||
474 | { | ||
475 | u64 pages; | ||
476 | int s; | ||
477 | |||
478 | pages = iommu_num_pages(address, size, PAGE_SIZE); | ||
479 | s = 0; | ||
480 | |||
481 | if (pages > 1) { | ||
482 | /* | ||
483 | * If we have to flush more than one page, flush all | ||
484 | * TLB entries for this domain | ||
485 | */ | ||
486 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | ||
487 | s = 1; | ||
488 | } | ||
489 | |||
490 | address &= PAGE_MASK; | ||
491 | |||
492 | memset(cmd, 0, sizeof(*cmd)); | ||
493 | cmd->data[0] = devid; | ||
494 | cmd->data[0] |= (qdep & 0xff) << 24; | ||
495 | cmd->data[1] = devid; | ||
496 | cmd->data[2] = lower_32_bits(address); | ||
497 | cmd->data[3] = upper_32_bits(address); | ||
498 | CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); | ||
499 | if (s) | ||
500 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; | ||
501 | } | ||
502 | |||
503 | static void build_inv_all(struct iommu_cmd *cmd) | ||
504 | { | ||
505 | memset(cmd, 0, sizeof(*cmd)); | ||
506 | CMD_SET_TYPE(cmd, CMD_INV_ALL); | ||
406 | } | 507 | } |
407 | 508 | ||
408 | /* | 509 | /* |
409 | * General queuing function for commands. Takes iommu->lock and calls | 510 | * Writes the command to the IOMMUs command buffer and informs the |
410 | * __iommu_queue_command(). | 511 | * hardware about the new command. |
411 | */ | 512 | */ |
412 | static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | 513 | static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) |
413 | { | 514 | { |
515 | u32 left, tail, head, next_tail; | ||
414 | unsigned long flags; | 516 | unsigned long flags; |
415 | int ret; | ||
416 | 517 | ||
518 | WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); | ||
519 | |||
520 | again: | ||
417 | spin_lock_irqsave(&iommu->lock, flags); | 521 | spin_lock_irqsave(&iommu->lock, flags); |
418 | ret = __iommu_queue_command(iommu, cmd); | ||
419 | if (!ret) | ||
420 | iommu->need_sync = true; | ||
421 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
422 | 522 | ||
423 | return ret; | 523 | head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); |
424 | } | 524 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); |
525 | next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; | ||
526 | left = (head - next_tail) % iommu->cmd_buf_size; | ||
425 | 527 | ||
426 | /* | 528 | if (left <= 2) { |
427 | * This function waits until an IOMMU has completed a completion | 529 | struct iommu_cmd sync_cmd; |
428 | * wait command | 530 | volatile u64 sem = 0; |
429 | */ | 531 | int ret; |
430 | static void __iommu_wait_for_completion(struct amd_iommu *iommu) | ||
431 | { | ||
432 | int ready = 0; | ||
433 | unsigned status = 0; | ||
434 | unsigned long i = 0; | ||
435 | 532 | ||
436 | INC_STATS_COUNTER(compl_wait); | 533 | build_completion_wait(&sync_cmd, (u64)&sem); |
534 | copy_cmd_to_buffer(iommu, &sync_cmd, tail); | ||
437 | 535 | ||
438 | while (!ready && (i < EXIT_LOOP_COUNT)) { | 536 | spin_unlock_irqrestore(&iommu->lock, flags); |
439 | ++i; | 537 | |
440 | /* wait for the bit to become one */ | 538 | if ((ret = wait_on_sem(&sem)) != 0) |
441 | status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); | 539 | return ret; |
442 | ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; | 540 | |
541 | goto again; | ||
443 | } | 542 | } |
444 | 543 | ||
445 | /* set bit back to zero */ | 544 | copy_cmd_to_buffer(iommu, cmd, tail); |
446 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; | 545 | |
447 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); | 546 | /* We need to sync now to make sure all commands are processed */ |
547 | iommu->need_sync = true; | ||
548 | |||
549 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
448 | 550 | ||
449 | if (unlikely(i == EXIT_LOOP_COUNT)) | 551 | return 0; |
450 | iommu->reset_in_progress = true; | ||
451 | } | 552 | } |
452 | 553 | ||
453 | /* | 554 | /* |
454 | * This function queues a completion wait command into the command | 555 | * This function queues a completion wait command into the command |
455 | * buffer of an IOMMU | 556 | * buffer of an IOMMU |
456 | */ | 557 | */ |
457 | static int __iommu_completion_wait(struct amd_iommu *iommu) | 558 | static int iommu_completion_wait(struct amd_iommu *iommu) |
458 | { | 559 | { |
459 | struct iommu_cmd cmd; | 560 | struct iommu_cmd cmd; |
561 | volatile u64 sem = 0; | ||
562 | int ret; | ||
460 | 563 | ||
461 | memset(&cmd, 0, sizeof(cmd)); | 564 | if (!iommu->need_sync) |
462 | cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; | 565 | return 0; |
463 | CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); | ||
464 | 566 | ||
465 | return __iommu_queue_command(iommu, &cmd); | 567 | build_completion_wait(&cmd, (u64)&sem); |
568 | |||
569 | ret = iommu_queue_command(iommu, &cmd); | ||
570 | if (ret) | ||
571 | return ret; | ||
572 | |||
573 | return wait_on_sem(&sem); | ||
466 | } | 574 | } |
467 | 575 | ||
468 | /* | 576 | static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) |
469 | * This function is called whenever we need to ensure that the IOMMU has | ||
470 | * completed execution of all commands we sent. It sends a | ||
471 | * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs | ||
472 | * us about that by writing a value to a physical address we pass with | ||
473 | * the command. | ||
474 | */ | ||
475 | static int iommu_completion_wait(struct amd_iommu *iommu) | ||
476 | { | 577 | { |
477 | int ret = 0; | 578 | struct iommu_cmd cmd; |
478 | unsigned long flags; | ||
479 | 579 | ||
480 | spin_lock_irqsave(&iommu->lock, flags); | 580 | build_inv_dte(&cmd, devid); |
481 | 581 | ||
482 | if (!iommu->need_sync) | 582 | return iommu_queue_command(iommu, &cmd); |
483 | goto out; | 583 | } |
484 | 584 | ||
485 | ret = __iommu_completion_wait(iommu); | 585 | static void iommu_flush_dte_all(struct amd_iommu *iommu) |
586 | { | ||
587 | u32 devid; | ||
486 | 588 | ||
487 | iommu->need_sync = false; | 589 | for (devid = 0; devid <= 0xffff; ++devid) |
590 | iommu_flush_dte(iommu, devid); | ||
488 | 591 | ||
489 | if (ret) | 592 | iommu_completion_wait(iommu); |
490 | goto out; | 593 | } |
491 | |||
492 | __iommu_wait_for_completion(iommu); | ||
493 | 594 | ||
494 | out: | 595 | /* |
495 | spin_unlock_irqrestore(&iommu->lock, flags); | 596 | * This function uses heavy locking and may disable irqs for some time. But |
597 | * this is no issue because it is only called during resume. | ||
598 | */ | ||
599 | static void iommu_flush_tlb_all(struct amd_iommu *iommu) | ||
600 | { | ||
601 | u32 dom_id; | ||
496 | 602 | ||
497 | if (iommu->reset_in_progress) | 603 | for (dom_id = 0; dom_id <= 0xffff; ++dom_id) { |
498 | reset_iommu_command_buffer(iommu); | 604 | struct iommu_cmd cmd; |
605 | build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | ||
606 | dom_id, 1); | ||
607 | iommu_queue_command(iommu, &cmd); | ||
608 | } | ||
499 | 609 | ||
500 | return 0; | 610 | iommu_completion_wait(iommu); |
501 | } | 611 | } |
502 | 612 | ||
503 | static void iommu_flush_complete(struct protection_domain *domain) | 613 | static void iommu_flush_all(struct amd_iommu *iommu) |
504 | { | 614 | { |
505 | int i; | 615 | struct iommu_cmd cmd; |
506 | 616 | ||
507 | for (i = 0; i < amd_iommus_present; ++i) { | 617 | build_inv_all(&cmd); |
508 | if (!domain->dev_iommu[i]) | ||
509 | continue; | ||
510 | 618 | ||
511 | /* | 619 | iommu_queue_command(iommu, &cmd); |
512 | * Devices of this domain are behind this IOMMU | 620 | iommu_completion_wait(iommu); |
513 | * We need to wait for completion of all commands. | 621 | } |
514 | */ | 622 | |
515 | iommu_completion_wait(amd_iommus[i]); | 623 | void iommu_flush_all_caches(struct amd_iommu *iommu) |
624 | { | ||
625 | if (iommu_feature(iommu, FEATURE_IA)) { | ||
626 | iommu_flush_all(iommu); | ||
627 | } else { | ||
628 | iommu_flush_dte_all(iommu); | ||
629 | iommu_flush_tlb_all(iommu); | ||
516 | } | 630 | } |
517 | } | 631 | } |
518 | 632 | ||
519 | /* | 633 | /* |
520 | * Command send function for invalidating a device table entry | 634 | * Command send function for flushing on-device TLB |
521 | */ | 635 | */ |
522 | static int iommu_flush_device(struct device *dev) | 636 | static int device_flush_iotlb(struct device *dev, u64 address, size_t size) |
523 | { | 637 | { |
638 | struct pci_dev *pdev = to_pci_dev(dev); | ||
524 | struct amd_iommu *iommu; | 639 | struct amd_iommu *iommu; |
525 | struct iommu_cmd cmd; | 640 | struct iommu_cmd cmd; |
526 | u16 devid; | 641 | u16 devid; |
642 | int qdep; | ||
527 | 643 | ||
644 | qdep = pci_ats_queue_depth(pdev); | ||
528 | devid = get_device_id(dev); | 645 | devid = get_device_id(dev); |
529 | iommu = amd_iommu_rlookup_table[devid]; | 646 | iommu = amd_iommu_rlookup_table[devid]; |
530 | 647 | ||
531 | /* Build command */ | 648 | build_inv_iotlb_pages(&cmd, devid, qdep, address, size); |
532 | memset(&cmd, 0, sizeof(cmd)); | ||
533 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); | ||
534 | cmd.data[0] = devid; | ||
535 | 649 | ||
536 | return iommu_queue_command(iommu, &cmd); | 650 | return iommu_queue_command(iommu, &cmd); |
537 | } | 651 | } |
538 | 652 | ||
539 | static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, | ||
540 | u16 domid, int pde, int s) | ||
541 | { | ||
542 | memset(cmd, 0, sizeof(*cmd)); | ||
543 | address &= PAGE_MASK; | ||
544 | CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); | ||
545 | cmd->data[1] |= domid; | ||
546 | cmd->data[2] = lower_32_bits(address); | ||
547 | cmd->data[3] = upper_32_bits(address); | ||
548 | if (s) /* size bit - we flush more than one 4kb page */ | ||
549 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; | ||
550 | if (pde) /* PDE bit - we want to flush everything, not only the PTEs */ | ||
551 | cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; | ||
552 | } | ||
553 | |||
554 | /* | 653 | /* |
555 | * Generic command send function for invalidaing TLB entries | 654 | * Command send function for invalidating a device table entry |
556 | */ | 655 | */ |
557 | static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, | 656 | static int device_flush_dte(struct device *dev) |
558 | u64 address, u16 domid, int pde, int s) | ||
559 | { | 657 | { |
560 | struct iommu_cmd cmd; | 658 | struct amd_iommu *iommu; |
659 | struct pci_dev *pdev; | ||
660 | u16 devid; | ||
561 | int ret; | 661 | int ret; |
562 | 662 | ||
563 | __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s); | 663 | pdev = to_pci_dev(dev); |
664 | devid = get_device_id(dev); | ||
665 | iommu = amd_iommu_rlookup_table[devid]; | ||
564 | 666 | ||
565 | ret = iommu_queue_command(iommu, &cmd); | 667 | ret = iommu_flush_dte(iommu, devid); |
668 | if (ret) | ||
669 | return ret; | ||
670 | |||
671 | if (pci_ats_enabled(pdev)) | ||
672 | ret = device_flush_iotlb(dev, 0, ~0UL); | ||
566 | 673 | ||
567 | return ret; | 674 | return ret; |
568 | } | 675 | } |
@@ -572,23 +679,14 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, | |||
572 | * It invalidates a single PTE if the range to flush is within a single | 679 | * It invalidates a single PTE if the range to flush is within a single |
573 | * page. Otherwise it flushes the whole TLB of the IOMMU. | 680 | * page. Otherwise it flushes the whole TLB of the IOMMU. |
574 | */ | 681 | */ |
575 | static void __iommu_flush_pages(struct protection_domain *domain, | 682 | static void __domain_flush_pages(struct protection_domain *domain, |
576 | u64 address, size_t size, int pde) | 683 | u64 address, size_t size, int pde) |
577 | { | 684 | { |
578 | int s = 0, i; | 685 | struct iommu_dev_data *dev_data; |
579 | unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE); | 686 | struct iommu_cmd cmd; |
580 | 687 | int ret = 0, i; | |
581 | address &= PAGE_MASK; | ||
582 | |||
583 | if (pages > 1) { | ||
584 | /* | ||
585 | * If we have to flush more than one page, flush all | ||
586 | * TLB entries for this domain | ||
587 | */ | ||
588 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | ||
589 | s = 1; | ||
590 | } | ||
591 | 688 | ||
689 | build_inv_iommu_pages(&cmd, address, size, domain->id, pde); | ||
592 | 690 | ||
593 | for (i = 0; i < amd_iommus_present; ++i) { | 691 | for (i = 0; i < amd_iommus_present; ++i) { |
594 | if (!domain->dev_iommu[i]) | 692 | if (!domain->dev_iommu[i]) |
@@ -598,101 +696,70 @@ static void __iommu_flush_pages(struct protection_domain *domain, | |||
598 | * Devices of this domain are behind this IOMMU | 696 | * Devices of this domain are behind this IOMMU |
599 | * We need a TLB flush | 697 | * We need a TLB flush |
600 | */ | 698 | */ |
601 | iommu_queue_inv_iommu_pages(amd_iommus[i], address, | 699 | ret |= iommu_queue_command(amd_iommus[i], &cmd); |
602 | domain->id, pde, s); | 700 | } |
701 | |||
702 | list_for_each_entry(dev_data, &domain->dev_list, list) { | ||
703 | struct pci_dev *pdev = to_pci_dev(dev_data->dev); | ||
704 | |||
705 | if (!pci_ats_enabled(pdev)) | ||
706 | continue; | ||
707 | |||
708 | ret |= device_flush_iotlb(dev_data->dev, address, size); | ||
603 | } | 709 | } |
604 | 710 | ||
605 | return; | 711 | WARN_ON(ret); |
606 | } | 712 | } |
607 | 713 | ||
608 | static void iommu_flush_pages(struct protection_domain *domain, | 714 | static void domain_flush_pages(struct protection_domain *domain, |
609 | u64 address, size_t size) | 715 | u64 address, size_t size) |
610 | { | 716 | { |
611 | __iommu_flush_pages(domain, address, size, 0); | 717 | __domain_flush_pages(domain, address, size, 0); |
612 | } | 718 | } |
613 | 719 | ||
614 | /* Flush the whole IO/TLB for a given protection domain */ | 720 | /* Flush the whole IO/TLB for a given protection domain */ |
615 | static void iommu_flush_tlb(struct protection_domain *domain) | 721 | static void domain_flush_tlb(struct protection_domain *domain) |
616 | { | 722 | { |
617 | __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); | 723 | __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); |
618 | } | 724 | } |
619 | 725 | ||
620 | /* Flush the whole IO/TLB for a given protection domain - including PDE */ | 726 | /* Flush the whole IO/TLB for a given protection domain - including PDE */ |
621 | static void iommu_flush_tlb_pde(struct protection_domain *domain) | 727 | static void domain_flush_tlb_pde(struct protection_domain *domain) |
622 | { | 728 | { |
623 | __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); | 729 | __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); |
624 | } | ||
625 | |||
626 | |||
627 | /* | ||
628 | * This function flushes the DTEs for all devices in domain | ||
629 | */ | ||
630 | static void iommu_flush_domain_devices(struct protection_domain *domain) | ||
631 | { | ||
632 | struct iommu_dev_data *dev_data; | ||
633 | unsigned long flags; | ||
634 | |||
635 | spin_lock_irqsave(&domain->lock, flags); | ||
636 | |||
637 | list_for_each_entry(dev_data, &domain->dev_list, list) | ||
638 | iommu_flush_device(dev_data->dev); | ||
639 | |||
640 | spin_unlock_irqrestore(&domain->lock, flags); | ||
641 | } | 730 | } |
642 | 731 | ||
643 | static void iommu_flush_all_domain_devices(void) | 732 | static void domain_flush_complete(struct protection_domain *domain) |
644 | { | 733 | { |
645 | struct protection_domain *domain; | 734 | int i; |
646 | unsigned long flags; | ||
647 | 735 | ||
648 | spin_lock_irqsave(&amd_iommu_pd_lock, flags); | 736 | for (i = 0; i < amd_iommus_present; ++i) { |
737 | if (!domain->dev_iommu[i]) | ||
738 | continue; | ||
649 | 739 | ||
650 | list_for_each_entry(domain, &amd_iommu_pd_list, list) { | 740 | /* |
651 | iommu_flush_domain_devices(domain); | 741 | * Devices of this domain are behind this IOMMU |
652 | iommu_flush_complete(domain); | 742 | * We need to wait for completion of all commands. |
743 | */ | ||
744 | iommu_completion_wait(amd_iommus[i]); | ||
653 | } | 745 | } |
654 | |||
655 | spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); | ||
656 | } | 746 | } |
657 | 747 | ||
658 | void amd_iommu_flush_all_devices(void) | ||
659 | { | ||
660 | iommu_flush_all_domain_devices(); | ||
661 | } | ||
662 | 748 | ||
663 | /* | 749 | /* |
664 | * This function uses heavy locking and may disable irqs for some time. But | 750 | * This function flushes the DTEs for all devices in domain |
665 | * this is no issue because it is only called during resume. | ||
666 | */ | 751 | */ |
667 | void amd_iommu_flush_all_domains(void) | 752 | static void domain_flush_devices(struct protection_domain *domain) |
668 | { | 753 | { |
669 | struct protection_domain *domain; | 754 | struct iommu_dev_data *dev_data; |
670 | unsigned long flags; | 755 | unsigned long flags; |
671 | 756 | ||
672 | spin_lock_irqsave(&amd_iommu_pd_lock, flags); | 757 | spin_lock_irqsave(&domain->lock, flags); |
673 | |||
674 | list_for_each_entry(domain, &amd_iommu_pd_list, list) { | ||
675 | spin_lock(&domain->lock); | ||
676 | iommu_flush_tlb_pde(domain); | ||
677 | iommu_flush_complete(domain); | ||
678 | spin_unlock(&domain->lock); | ||
679 | } | ||
680 | |||
681 | spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); | ||
682 | } | ||
683 | |||
684 | static void reset_iommu_command_buffer(struct amd_iommu *iommu) | ||
685 | { | ||
686 | pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); | ||
687 | |||
688 | if (iommu->reset_in_progress) | ||
689 | panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); | ||
690 | 758 | ||
691 | amd_iommu_reset_cmd_buffer(iommu); | 759 | list_for_each_entry(dev_data, &domain->dev_list, list) |
692 | amd_iommu_flush_all_devices(); | 760 | device_flush_dte(dev_data->dev); |
693 | amd_iommu_flush_all_domains(); | ||
694 | 761 | ||
695 | iommu->reset_in_progress = false; | 762 | spin_unlock_irqrestore(&domain->lock, flags); |
696 | } | 763 | } |
697 | 764 | ||
698 | /**************************************************************************** | 765 | /**************************************************************************** |
@@ -1410,17 +1477,22 @@ static bool dma_ops_domain(struct protection_domain *domain) | |||
1410 | return domain->flags & PD_DMA_OPS_MASK; | 1477 | return domain->flags & PD_DMA_OPS_MASK; |
1411 | } | 1478 | } |
1412 | 1479 | ||
1413 | static void set_dte_entry(u16 devid, struct protection_domain *domain) | 1480 | static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) |
1414 | { | 1481 | { |
1415 | u64 pte_root = virt_to_phys(domain->pt_root); | 1482 | u64 pte_root = virt_to_phys(domain->pt_root); |
1483 | u32 flags = 0; | ||
1416 | 1484 | ||
1417 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) | 1485 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) |
1418 | << DEV_ENTRY_MODE_SHIFT; | 1486 | << DEV_ENTRY_MODE_SHIFT; |
1419 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; | 1487 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; |
1420 | 1488 | ||
1421 | amd_iommu_dev_table[devid].data[2] = domain->id; | 1489 | if (ats) |
1422 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | 1490 | flags |= DTE_FLAG_IOTLB; |
1423 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | 1491 | |
1492 | amd_iommu_dev_table[devid].data[3] |= flags; | ||
1493 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
1494 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | ||
1495 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | ||
1424 | } | 1496 | } |
1425 | 1497 | ||
1426 | static void clear_dte_entry(u16 devid) | 1498 | static void clear_dte_entry(u16 devid) |
@@ -1437,34 +1509,42 @@ static void do_attach(struct device *dev, struct protection_domain *domain) | |||
1437 | { | 1509 | { |
1438 | struct iommu_dev_data *dev_data; | 1510 | struct iommu_dev_data *dev_data; |
1439 | struct amd_iommu *iommu; | 1511 | struct amd_iommu *iommu; |
1512 | struct pci_dev *pdev; | ||
1513 | bool ats = false; | ||
1440 | u16 devid; | 1514 | u16 devid; |
1441 | 1515 | ||
1442 | devid = get_device_id(dev); | 1516 | devid = get_device_id(dev); |
1443 | iommu = amd_iommu_rlookup_table[devid]; | 1517 | iommu = amd_iommu_rlookup_table[devid]; |
1444 | dev_data = get_dev_data(dev); | 1518 | dev_data = get_dev_data(dev); |
1519 | pdev = to_pci_dev(dev); | ||
1520 | |||
1521 | if (amd_iommu_iotlb_sup) | ||
1522 | ats = pci_ats_enabled(pdev); | ||
1445 | 1523 | ||
1446 | /* Update data structures */ | 1524 | /* Update data structures */ |
1447 | dev_data->domain = domain; | 1525 | dev_data->domain = domain; |
1448 | list_add(&dev_data->list, &domain->dev_list); | 1526 | list_add(&dev_data->list, &domain->dev_list); |
1449 | set_dte_entry(devid, domain); | 1527 | set_dte_entry(devid, domain, ats); |
1450 | 1528 | ||
1451 | /* Do reference counting */ | 1529 | /* Do reference counting */ |
1452 | domain->dev_iommu[iommu->index] += 1; | 1530 | domain->dev_iommu[iommu->index] += 1; |
1453 | domain->dev_cnt += 1; | 1531 | domain->dev_cnt += 1; |
1454 | 1532 | ||
1455 | /* Flush the DTE entry */ | 1533 | /* Flush the DTE entry */ |
1456 | iommu_flush_device(dev); | 1534 | device_flush_dte(dev); |
1457 | } | 1535 | } |
1458 | 1536 | ||
1459 | static void do_detach(struct device *dev) | 1537 | static void do_detach(struct device *dev) |
1460 | { | 1538 | { |
1461 | struct iommu_dev_data *dev_data; | 1539 | struct iommu_dev_data *dev_data; |
1462 | struct amd_iommu *iommu; | 1540 | struct amd_iommu *iommu; |
1541 | struct pci_dev *pdev; | ||
1463 | u16 devid; | 1542 | u16 devid; |
1464 | 1543 | ||
1465 | devid = get_device_id(dev); | 1544 | devid = get_device_id(dev); |
1466 | iommu = amd_iommu_rlookup_table[devid]; | 1545 | iommu = amd_iommu_rlookup_table[devid]; |
1467 | dev_data = get_dev_data(dev); | 1546 | dev_data = get_dev_data(dev); |
1547 | pdev = to_pci_dev(dev); | ||
1468 | 1548 | ||
1469 | /* decrease reference counters */ | 1549 | /* decrease reference counters */ |
1470 | dev_data->domain->dev_iommu[iommu->index] -= 1; | 1550 | dev_data->domain->dev_iommu[iommu->index] -= 1; |
@@ -1476,7 +1556,7 @@ static void do_detach(struct device *dev) | |||
1476 | clear_dte_entry(devid); | 1556 | clear_dte_entry(devid); |
1477 | 1557 | ||
1478 | /* Flush the DTE entry */ | 1558 | /* Flush the DTE entry */ |
1479 | iommu_flush_device(dev); | 1559 | device_flush_dte(dev); |
1480 | } | 1560 | } |
1481 | 1561 | ||
1482 | /* | 1562 | /* |
@@ -1539,9 +1619,13 @@ out_unlock: | |||
1539 | static int attach_device(struct device *dev, | 1619 | static int attach_device(struct device *dev, |
1540 | struct protection_domain *domain) | 1620 | struct protection_domain *domain) |
1541 | { | 1621 | { |
1622 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1542 | unsigned long flags; | 1623 | unsigned long flags; |
1543 | int ret; | 1624 | int ret; |
1544 | 1625 | ||
1626 | if (amd_iommu_iotlb_sup) | ||
1627 | pci_enable_ats(pdev, PAGE_SHIFT); | ||
1628 | |||
1545 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 1629 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
1546 | ret = __attach_device(dev, domain); | 1630 | ret = __attach_device(dev, domain); |
1547 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1631 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
@@ -1551,7 +1635,7 @@ static int attach_device(struct device *dev, | |||
1551 | * left the caches in the IOMMU dirty. So we have to flush | 1635 | * left the caches in the IOMMU dirty. So we have to flush |
1552 | * here to evict all dirty stuff. | 1636 | * here to evict all dirty stuff. |
1553 | */ | 1637 | */ |
1554 | iommu_flush_tlb_pde(domain); | 1638 | domain_flush_tlb_pde(domain); |
1555 | 1639 | ||
1556 | return ret; | 1640 | return ret; |
1557 | } | 1641 | } |
@@ -1598,12 +1682,16 @@ static void __detach_device(struct device *dev) | |||
1598 | */ | 1682 | */ |
1599 | static void detach_device(struct device *dev) | 1683 | static void detach_device(struct device *dev) |
1600 | { | 1684 | { |
1685 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1601 | unsigned long flags; | 1686 | unsigned long flags; |
1602 | 1687 | ||
1603 | /* lock device table */ | 1688 | /* lock device table */ |
1604 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 1689 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
1605 | __detach_device(dev); | 1690 | __detach_device(dev); |
1606 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1691 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1692 | |||
1693 | if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev)) | ||
1694 | pci_disable_ats(pdev); | ||
1607 | } | 1695 | } |
1608 | 1696 | ||
1609 | /* | 1697 | /* |
@@ -1692,7 +1780,7 @@ static int device_change_notifier(struct notifier_block *nb, | |||
1692 | goto out; | 1780 | goto out; |
1693 | } | 1781 | } |
1694 | 1782 | ||
1695 | iommu_flush_device(dev); | 1783 | device_flush_dte(dev); |
1696 | iommu_completion_wait(iommu); | 1784 | iommu_completion_wait(iommu); |
1697 | 1785 | ||
1698 | out: | 1786 | out: |
@@ -1753,8 +1841,9 @@ static void update_device_table(struct protection_domain *domain) | |||
1753 | struct iommu_dev_data *dev_data; | 1841 | struct iommu_dev_data *dev_data; |
1754 | 1842 | ||
1755 | list_for_each_entry(dev_data, &domain->dev_list, list) { | 1843 | list_for_each_entry(dev_data, &domain->dev_list, list) { |
1844 | struct pci_dev *pdev = to_pci_dev(dev_data->dev); | ||
1756 | u16 devid = get_device_id(dev_data->dev); | 1845 | u16 devid = get_device_id(dev_data->dev); |
1757 | set_dte_entry(devid, domain); | 1846 | set_dte_entry(devid, domain, pci_ats_enabled(pdev)); |
1758 | } | 1847 | } |
1759 | } | 1848 | } |
1760 | 1849 | ||
@@ -1764,8 +1853,9 @@ static void update_domain(struct protection_domain *domain) | |||
1764 | return; | 1853 | return; |
1765 | 1854 | ||
1766 | update_device_table(domain); | 1855 | update_device_table(domain); |
1767 | iommu_flush_domain_devices(domain); | 1856 | |
1768 | iommu_flush_tlb_pde(domain); | 1857 | domain_flush_devices(domain); |
1858 | domain_flush_tlb_pde(domain); | ||
1769 | 1859 | ||
1770 | domain->updated = false; | 1860 | domain->updated = false; |
1771 | } | 1861 | } |
@@ -1924,10 +2014,10 @@ retry: | |||
1924 | ADD_STATS_COUNTER(alloced_io_mem, size); | 2014 | ADD_STATS_COUNTER(alloced_io_mem, size); |
1925 | 2015 | ||
1926 | if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { | 2016 | if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { |
1927 | iommu_flush_tlb(&dma_dom->domain); | 2017 | domain_flush_tlb(&dma_dom->domain); |
1928 | dma_dom->need_flush = false; | 2018 | dma_dom->need_flush = false; |
1929 | } else if (unlikely(amd_iommu_np_cache)) | 2019 | } else if (unlikely(amd_iommu_np_cache)) |
1930 | iommu_flush_pages(&dma_dom->domain, address, size); | 2020 | domain_flush_pages(&dma_dom->domain, address, size); |
1931 | 2021 | ||
1932 | out: | 2022 | out: |
1933 | return address; | 2023 | return address; |
@@ -1976,7 +2066,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, | |||
1976 | dma_ops_free_addresses(dma_dom, dma_addr, pages); | 2066 | dma_ops_free_addresses(dma_dom, dma_addr, pages); |
1977 | 2067 | ||
1978 | if (amd_iommu_unmap_flush || dma_dom->need_flush) { | 2068 | if (amd_iommu_unmap_flush || dma_dom->need_flush) { |
1979 | iommu_flush_pages(&dma_dom->domain, flush_addr, size); | 2069 | domain_flush_pages(&dma_dom->domain, flush_addr, size); |
1980 | dma_dom->need_flush = false; | 2070 | dma_dom->need_flush = false; |
1981 | } | 2071 | } |
1982 | } | 2072 | } |
@@ -2012,7 +2102,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, | |||
2012 | if (addr == DMA_ERROR_CODE) | 2102 | if (addr == DMA_ERROR_CODE) |
2013 | goto out; | 2103 | goto out; |
2014 | 2104 | ||
2015 | iommu_flush_complete(domain); | 2105 | domain_flush_complete(domain); |
2016 | 2106 | ||
2017 | out: | 2107 | out: |
2018 | spin_unlock_irqrestore(&domain->lock, flags); | 2108 | spin_unlock_irqrestore(&domain->lock, flags); |
@@ -2039,7 +2129,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
2039 | 2129 | ||
2040 | __unmap_single(domain->priv, dma_addr, size, dir); | 2130 | __unmap_single(domain->priv, dma_addr, size, dir); |
2041 | 2131 | ||
2042 | iommu_flush_complete(domain); | 2132 | domain_flush_complete(domain); |
2043 | 2133 | ||
2044 | spin_unlock_irqrestore(&domain->lock, flags); | 2134 | spin_unlock_irqrestore(&domain->lock, flags); |
2045 | } | 2135 | } |
@@ -2104,7 +2194,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, | |||
2104 | goto unmap; | 2194 | goto unmap; |
2105 | } | 2195 | } |
2106 | 2196 | ||
2107 | iommu_flush_complete(domain); | 2197 | domain_flush_complete(domain); |
2108 | 2198 | ||
2109 | out: | 2199 | out: |
2110 | spin_unlock_irqrestore(&domain->lock, flags); | 2200 | spin_unlock_irqrestore(&domain->lock, flags); |
@@ -2150,7 +2240,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
2150 | s->dma_address = s->dma_length = 0; | 2240 | s->dma_address = s->dma_length = 0; |
2151 | } | 2241 | } |
2152 | 2242 | ||
2153 | iommu_flush_complete(domain); | 2243 | domain_flush_complete(domain); |
2154 | 2244 | ||
2155 | spin_unlock_irqrestore(&domain->lock, flags); | 2245 | spin_unlock_irqrestore(&domain->lock, flags); |
2156 | } | 2246 | } |
@@ -2200,7 +2290,7 @@ static void *alloc_coherent(struct device *dev, size_t size, | |||
2200 | goto out_free; | 2290 | goto out_free; |
2201 | } | 2291 | } |
2202 | 2292 | ||
2203 | iommu_flush_complete(domain); | 2293 | domain_flush_complete(domain); |
2204 | 2294 | ||
2205 | spin_unlock_irqrestore(&domain->lock, flags); | 2295 | spin_unlock_irqrestore(&domain->lock, flags); |
2206 | 2296 | ||
@@ -2232,7 +2322,7 @@ static void free_coherent(struct device *dev, size_t size, | |||
2232 | 2322 | ||
2233 | __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); | 2323 | __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); |
2234 | 2324 | ||
2235 | iommu_flush_complete(domain); | 2325 | domain_flush_complete(domain); |
2236 | 2326 | ||
2237 | spin_unlock_irqrestore(&domain->lock, flags); | 2327 | spin_unlock_irqrestore(&domain->lock, flags); |
2238 | 2328 | ||
@@ -2476,7 +2566,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom, | |||
2476 | if (!iommu) | 2566 | if (!iommu) |
2477 | return; | 2567 | return; |
2478 | 2568 | ||
2479 | iommu_flush_device(dev); | 2569 | device_flush_dte(dev); |
2480 | iommu_completion_wait(iommu); | 2570 | iommu_completion_wait(iommu); |
2481 | } | 2571 | } |
2482 | 2572 | ||
@@ -2542,7 +2632,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, | |||
2542 | unmap_size = iommu_unmap_page(domain, iova, page_size); | 2632 | unmap_size = iommu_unmap_page(domain, iova, page_size); |
2543 | mutex_unlock(&domain->api_lock); | 2633 | mutex_unlock(&domain->api_lock); |
2544 | 2634 | ||
2545 | iommu_flush_tlb_pde(domain); | 2635 | domain_flush_tlb_pde(domain); |
2546 | 2636 | ||
2547 | return get_order(unmap_size); | 2637 | return get_order(unmap_size); |
2548 | } | 2638 | } |
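The refactoring above replaces the per-command-type send paths with a build/queue/wait split. A condensed sketch of that flow, using only helpers added in this hunk (locking and error handling trimmed):

	static void example_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
	{
		struct iommu_cmd cmd;

		build_inv_dte(&cmd, devid);		/* fill in INVALIDATE_DEVTAB_ENTRY */
		iommu_queue_command(iommu, &cmd);	/* copy into the ring buffer, bump tail */
		iommu_completion_wait(iommu);		/* COMPLETION_WAIT + wait_on_sem() */
	}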
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 246d727b65b7..9179c21120a8 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -137,6 +137,7 @@ int amd_iommus_present; | |||
137 | 137 | ||
138 | /* IOMMUs have a non-present cache? */ | 138 | /* IOMMUs have a non-present cache? */ |
139 | bool amd_iommu_np_cache __read_mostly; | 139 | bool amd_iommu_np_cache __read_mostly; |
140 | bool amd_iommu_iotlb_sup __read_mostly = true; | ||
140 | 141 | ||
141 | /* | 142 | /* |
142 | * The ACPI table parsing functions set this variable on an error | 143 | * The ACPI table parsing functions set this variable on an error |
@@ -180,6 +181,12 @@ static u32 dev_table_size; /* size of the device table */ | |||
180 | static u32 alias_table_size; /* size of the alias table */ | 181 | static u32 alias_table_size; /* size of the alias table */ |
181 | static u32 rlookup_table_size; /* size of the rlookup table */ | 182 | static u32 rlookup_table_size; /* size of the rlookup table */ |
182 | 183 | ||
184 | /* | ||
185 | * This function flushes all internal caches of | ||
186 | * the IOMMU used by this driver. | ||
187 | */ | ||
188 | extern void iommu_flush_all_caches(struct amd_iommu *iommu); | ||
189 | |||
183 | static inline void update_last_devid(u16 devid) | 190 | static inline void update_last_devid(u16 devid) |
184 | { | 191 | { |
185 | if (devid > amd_iommu_last_bdf) | 192 | if (devid > amd_iommu_last_bdf) |
@@ -293,9 +300,23 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) | |||
293 | /* Function to enable the hardware */ | 300 | /* Function to enable the hardware */ |
294 | static void iommu_enable(struct amd_iommu *iommu) | 301 | static void iommu_enable(struct amd_iommu *iommu) |
295 | { | 302 | { |
296 | printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", | 303 | static const char * const feat_str[] = { |
304 | "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", | ||
305 | "IA", "GA", "HE", "PC", NULL | ||
306 | }; | ||
307 | int i; | ||
308 | |||
309 | printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx", | ||
297 | dev_name(&iommu->dev->dev), iommu->cap_ptr); | 310 | dev_name(&iommu->dev->dev), iommu->cap_ptr); |
298 | 311 | ||
312 | if (iommu->cap & (1 << IOMMU_CAP_EFR)) { | ||
313 | printk(KERN_CONT " extended features: "); | ||
314 | for (i = 0; feat_str[i]; ++i) | ||
315 | if (iommu_feature(iommu, (1ULL << i))) | ||
316 | printk(KERN_CONT " %s", feat_str[i]); | ||
317 | } | ||
318 | printk(KERN_CONT "\n"); | ||
319 | |||
299 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); | 320 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); |
300 | } | 321 | } |
301 | 322 | ||
@@ -651,7 +672,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) | |||
651 | static void __init init_iommu_from_pci(struct amd_iommu *iommu) | 672 | static void __init init_iommu_from_pci(struct amd_iommu *iommu) |
652 | { | 673 | { |
653 | int cap_ptr = iommu->cap_ptr; | 674 | int cap_ptr = iommu->cap_ptr; |
654 | u32 range, misc; | 675 | u32 range, misc, low, high; |
655 | int i, j; | 676 | int i, j; |
656 | 677 | ||
657 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, | 678 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, |
@@ -667,6 +688,15 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) | |||
667 | MMIO_GET_LD(range)); | 688 | MMIO_GET_LD(range)); |
668 | iommu->evt_msi_num = MMIO_MSI_NUM(misc); | 689 | iommu->evt_msi_num = MMIO_MSI_NUM(misc); |
669 | 690 | ||
691 | if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) | ||
692 | amd_iommu_iotlb_sup = false; | ||
693 | |||
694 | /* read extended feature bits */ | ||
695 | low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); | ||
696 | high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); | ||
697 | |||
698 | iommu->features = ((u64)high << 32) | low; | ||
699 | |||
670 | if (!is_rd890_iommu(iommu->dev)) | 700 | if (!is_rd890_iommu(iommu->dev)) |
671 | return; | 701 | return; |
672 | 702 | ||
@@ -1004,10 +1034,11 @@ static int iommu_setup_msi(struct amd_iommu *iommu) | |||
1004 | if (pci_enable_msi(iommu->dev)) | 1034 | if (pci_enable_msi(iommu->dev)) |
1005 | return 1; | 1035 | return 1; |
1006 | 1036 | ||
1007 | r = request_irq(iommu->dev->irq, amd_iommu_int_handler, | 1037 | r = request_threaded_irq(iommu->dev->irq, |
1008 | IRQF_SAMPLE_RANDOM, | 1038 | amd_iommu_int_handler, |
1009 | "AMD-Vi", | 1039 | amd_iommu_int_thread, |
1010 | NULL); | 1040 | 0, "AMD-Vi", |
1041 | iommu->dev); | ||
1011 | 1042 | ||
1012 | if (r) { | 1043 | if (r) { |
1013 | pci_disable_msi(iommu->dev); | 1044 | pci_disable_msi(iommu->dev); |
@@ -1244,6 +1275,7 @@ static void enable_iommus(void) | |||
1244 | iommu_set_exclusion_range(iommu); | 1275 | iommu_set_exclusion_range(iommu); |
1245 | iommu_init_msi(iommu); | 1276 | iommu_init_msi(iommu); |
1246 | iommu_enable(iommu); | 1277 | iommu_enable(iommu); |
1278 | iommu_flush_all_caches(iommu); | ||
1247 | } | 1279 | } |
1248 | } | 1280 | } |
1249 | 1281 | ||
@@ -1274,8 +1306,8 @@ static void amd_iommu_resume(void) | |||
1274 | * we have to flush after the IOMMUs are enabled because a | 1306 | * we have to flush after the IOMMUs are enabled because a |
1275 | * disabled IOMMU will never execute the commands we send | 1307 | * disabled IOMMU will never execute the commands we send |
1276 | */ | 1308 | */ |
1277 | amd_iommu_flush_all_devices(); | 1309 | for_each_iommu(iommu) |
1278 | amd_iommu_flush_all_domains(); | 1310 | iommu_flush_all_caches(iommu); |
1279 | } | 1311 | } |
1280 | 1312 | ||
1281 | static int amd_iommu_suspend(void) | 1313 | static int amd_iommu_suspend(void) |
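The MSI setup now uses request_threaded_irq(): the hard handler only wakes the thread, and the threaded handler does the (possibly sleeping) event-log work. A generic sketch of that split, with illustrative names rather than the driver's exact code:

	#include <linux/interrupt.h>

	static irqreturn_t example_hardirq(int irq, void *data)
	{
		return IRQ_WAKE_THREAD;		/* defer the real work to thread context */
	}

	static irqreturn_t example_thread(int irq, void *data)
	{
		/* heavier, possibly sleeping work goes here */
		return IRQ_HANDLED;
	}

	static int example_request(int irq, void *dev)
	{
		return request_threaded_irq(irq, example_hardirq, example_thread,
					    0, "example", dev);
	}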
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 33b10a0fc095..7acd2d2ac965 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -37,6 +37,13 @@ | |||
37 | #include <asm/smp.h> | 37 | #include <asm/smp.h> |
38 | #include <asm/x86_init.h> | 38 | #include <asm/x86_init.h> |
39 | #include <asm/emergency-restart.h> | 39 | #include <asm/emergency-restart.h> |
40 | #include <asm/nmi.h> | ||
41 | |||
42 | /* BMC sets a bit in this MMR before sending an NMI */ | ||
43 | #define UVH_NMI_MMR UVH_SCRATCH5 | ||
44 | #define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8) | ||
45 | #define UV_NMI_PENDING_MASK (1UL << 63) | ||
46 | DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count); | ||
40 | 47 | ||
41 | DEFINE_PER_CPU(int, x2apic_extra_bits); | 48 | DEFINE_PER_CPU(int, x2apic_extra_bits); |
42 | 49 | ||
@@ -642,18 +649,46 @@ void __cpuinit uv_cpu_init(void) | |||
642 | */ | 649 | */ |
643 | int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) | 650 | int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) |
644 | { | 651 | { |
652 | unsigned long real_uv_nmi; | ||
653 | int bid; | ||
654 | |||
645 | if (reason != DIE_NMIUNKNOWN) | 655 | if (reason != DIE_NMIUNKNOWN) |
646 | return NOTIFY_OK; | 656 | return NOTIFY_OK; |
647 | 657 | ||
648 | if (in_crash_kexec) | 658 | if (in_crash_kexec) |
649 | /* do nothing if entering the crash kernel */ | 659 | /* do nothing if entering the crash kernel */ |
650 | return NOTIFY_OK; | 660 | return NOTIFY_OK; |
661 | |||
651 | /* | 662 | /* |
652 | * Use a lock so only one cpu prints at a time | 663 | * Each blade has an MMR that indicates when an NMI has been sent |
653 | * to prevent intermixed output. | 664 | * to cpus on the blade. If an NMI is detected, atomically |
665 | * clear the MMR and update a per-blade NMI count used to | ||
666 | * cause each cpu on the blade to notice a new NMI. | ||
667 | */ | ||
668 | bid = uv_numa_blade_id(); | ||
669 | real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); | ||
670 | |||
671 | if (unlikely(real_uv_nmi)) { | ||
672 | spin_lock(&uv_blade_info[bid].nmi_lock); | ||
673 | real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); | ||
674 | if (real_uv_nmi) { | ||
675 | uv_blade_info[bid].nmi_count++; | ||
676 | uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK); | ||
677 | } | ||
678 | spin_unlock(&uv_blade_info[bid].nmi_lock); | ||
679 | } | ||
680 | |||
681 | if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) | ||
682 | return NOTIFY_DONE; | ||
683 | |||
684 | __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; | ||
685 | |||
686 | /* | ||
687 | * Use a lock so only one cpu prints at a time. | ||
688 | * This prevents intermixed output. | ||
654 | */ | 689 | */ |
655 | spin_lock(&uv_nmi_lock); | 690 | spin_lock(&uv_nmi_lock); |
656 | pr_info("NMI stack dump cpu %u:\n", smp_processor_id()); | 691 | pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id()); |
657 | dump_stack(); | 692 | dump_stack(); |
658 | spin_unlock(&uv_nmi_lock); | 693 | spin_unlock(&uv_nmi_lock); |
659 | 694 | ||
@@ -661,7 +696,8 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) | |||
661 | } | 696 | } |
662 | 697 | ||
663 | static struct notifier_block uv_dump_stack_nmi_nb = { | 698 | static struct notifier_block uv_dump_stack_nmi_nb = { |
664 | .notifier_call = uv_handle_nmi | 699 | .notifier_call = uv_handle_nmi, |
700 | .priority = NMI_LOCAL_LOW_PRIOR - 1, | ||
665 | }; | 701 | }; |
666 | 702 | ||
667 | void uv_register_nmi_notifier(void) | 703 | void uv_register_nmi_notifier(void) |
@@ -720,8 +756,9 @@ void __init uv_system_init(void) | |||
720 | printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); | 756 | printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); |
721 | 757 | ||
722 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); | 758 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); |
723 | uv_blade_info = kmalloc(bytes, GFP_KERNEL); | 759 | uv_blade_info = kzalloc(bytes, GFP_KERNEL); |
724 | BUG_ON(!uv_blade_info); | 760 | BUG_ON(!uv_blade_info); |
761 | |||
725 | for (blade = 0; blade < uv_num_possible_blades(); blade++) | 762 | for (blade = 0; blade < uv_num_possible_blades(); blade++) |
726 | uv_blade_info[blade].memory_nid = -1; | 763 | uv_blade_info[blade].memory_nid = -1; |
727 | 764 | ||
@@ -747,6 +784,7 @@ void __init uv_system_init(void) | |||
747 | uv_blade_info[blade].pnode = pnode; | 784 | uv_blade_info[blade].pnode = pnode; |
748 | uv_blade_info[blade].nr_possible_cpus = 0; | 785 | uv_blade_info[blade].nr_possible_cpus = 0; |
749 | uv_blade_info[blade].nr_online_cpus = 0; | 786 | uv_blade_info[blade].nr_online_cpus = 0; |
787 | spin_lock_init(&uv_blade_info[blade].nmi_lock); | ||
750 | max_pnode = max(pnode, max_pnode); | 788 | max_pnode = max(pnode, max_pnode); |
751 | blade++; | 789 | blade++; |
752 | } | 790 | } |
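The NMI hunk above first does a cheap unlocked read of the per-blade MMR, then re-reads it under the blade spinlock before clearing it and bumping nmi_count, so exactly one cpu per blade consumes each NMI while every cpu still notices the change against its saved cpu_last_nmi_count. A minimal sketch of that check-lock-recheck idiom; read_nmi_mmr(), clear_nmi_mmr(), NMI_PENDING_MASK and struct blade_nmi_state are illustrative stand-ins, not the real UV definitions.

    #include <linux/spinlock.h>
    #include <linux/types.h>

    extern unsigned long read_nmi_mmr(void);    /* stand-ins for the UV MMR accessors */
    extern void clear_nmi_mmr(void);
    #define NMI_PENDING_MASK 1UL

    struct blade_nmi_state {
            spinlock_t lock;
            unsigned long count;                /* bumped once per blade-wide NMI */
    };

    static bool blade_consume_nmi(struct blade_nmi_state *b)
    {
            if (!(read_nmi_mmr() & NMI_PENDING_MASK))       /* cheap unlocked test */
                    return false;

            spin_lock(&b->lock);
            if (read_nmi_mmr() & NMI_PENDING_MASK) {        /* recheck under the lock */
                    b->count++;             /* each cpu diffs this against its saved copy */
                    clear_nmi_mmr();        /* only one cpu clears the pending bit */
            }
            spin_unlock(&b->lock);
            return true;
    }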
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index adee12e0da1f..3bfa02235965 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -1238,7 +1238,6 @@ static int suspend(int vetoable) | |||
1238 | dpm_suspend_noirq(PMSG_SUSPEND); | 1238 | dpm_suspend_noirq(PMSG_SUSPEND); |
1239 | 1239 | ||
1240 | local_irq_disable(); | 1240 | local_irq_disable(); |
1241 | sysdev_suspend(PMSG_SUSPEND); | ||
1242 | syscore_suspend(); | 1241 | syscore_suspend(); |
1243 | 1242 | ||
1244 | local_irq_enable(); | 1243 | local_irq_enable(); |
@@ -1258,7 +1257,6 @@ static int suspend(int vetoable) | |||
1258 | err = (err == APM_SUCCESS) ? 0 : -EIO; | 1257 | err = (err == APM_SUCCESS) ? 0 : -EIO; |
1259 | 1258 | ||
1260 | syscore_resume(); | 1259 | syscore_resume(); |
1261 | sysdev_resume(); | ||
1262 | local_irq_enable(); | 1260 | local_irq_enable(); |
1263 | 1261 | ||
1264 | dpm_resume_noirq(PMSG_RESUME); | 1262 | dpm_resume_noirq(PMSG_RESUME); |
@@ -1282,7 +1280,6 @@ static void standby(void) | |||
1282 | dpm_suspend_noirq(PMSG_SUSPEND); | 1280 | dpm_suspend_noirq(PMSG_SUSPEND); |
1283 | 1281 | ||
1284 | local_irq_disable(); | 1282 | local_irq_disable(); |
1285 | sysdev_suspend(PMSG_SUSPEND); | ||
1286 | syscore_suspend(); | 1283 | syscore_suspend(); |
1287 | local_irq_enable(); | 1284 | local_irq_enable(); |
1288 | 1285 | ||
@@ -1292,7 +1289,6 @@ static void standby(void) | |||
1292 | 1289 | ||
1293 | local_irq_disable(); | 1290 | local_irq_disable(); |
1294 | syscore_resume(); | 1291 | syscore_resume(); |
1295 | sysdev_resume(); | ||
1296 | local_irq_enable(); | 1292 | local_irq_enable(); |
1297 | 1293 | ||
1298 | dpm_resume_noirq(PMSG_RESUME); | 1294 | dpm_resume_noirq(PMSG_RESUME); |
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 3f0ebe429a01..6042981d0309 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -30,7 +30,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o | |||
30 | 30 | ||
31 | obj-$(CONFIG_X86_MCE) += mcheck/ | 31 | obj-$(CONFIG_X86_MCE) += mcheck/ |
32 | obj-$(CONFIG_MTRR) += mtrr/ | 32 | obj-$(CONFIG_MTRR) += mtrr/ |
33 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ | ||
34 | 33 | ||
35 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o | 34 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o |
36 | 35 | ||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index bb9eb29a52dd..6f9d1f6063e9 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -613,7 +613,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
613 | #endif | 613 | #endif |
614 | 614 | ||
615 | /* As a rule processors have APIC timer running in deep C states */ | 615 | /* As a rule processors have APIC timer running in deep C states */ |
616 | if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) | 616 | if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400)) |
617 | set_cpu_cap(c, X86_FEATURE_ARAT); | 617 | set_cpu_cap(c, X86_FEATURE_ARAT); |
618 | 618 | ||
619 | /* | 619 | /* |
@@ -698,7 +698,7 @@ cpu_dev_register(amd_cpu_dev); | |||
698 | */ | 698 | */ |
699 | 699 | ||
700 | const int amd_erratum_400[] = | 700 | const int amd_erratum_400[] = |
701 | AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf), | 701 | AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), |
702 | AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); | 702 | AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); |
703 | EXPORT_SYMBOL_GPL(amd_erratum_400); | 703 | EXPORT_SYMBOL_GPL(amd_erratum_400); |
704 | 704 | ||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index e2ced0074a45..173f3a3fa1a6 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -565,8 +565,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | |||
565 | 565 | ||
566 | cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); | 566 | cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); |
567 | 567 | ||
568 | if (eax > 0) | 568 | c->x86_capability[9] = ebx; |
569 | c->x86_capability[9] = ebx; | ||
570 | } | 569 | } |
571 | 570 | ||
572 | /* AMD-defined flags: level 0x80000001 */ | 571 | /* AMD-defined flags: level 0x80000001 */ |
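Unconditionally copying CPUID leaf 7 (subleaf 0) EBX into word 9 of x86_capability is what lets later code test flags such as X86_FEATURE_ERMS. The same query from user space, as a sketch that assumes GCC's <cpuid.h> helper and takes the ERMS bit position (EBX bit 9) from Intel's documentation:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
                    puts("CPUID leaf 7 not reported");
                    return 1;
            }
            /* structured extended feature flags live in EBX; ERMS is bit 9 */
            printf("leaf 7 EBX = 0x%08x, ERMS %s\n",
                   ebx, (ebx & (1u << 9)) ? "present" : "absent");
            return 0;
    }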
diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile deleted file mode 100644 index bd54bf67e6fb..000000000000 --- a/arch/x86/kernel/cpu/cpufreq/Makefile +++ /dev/null | |||
@@ -1,21 +0,0 @@ | |||
1 | # Link order matters. K8 is preferred to ACPI because of firmware bugs in early | ||
2 | # K8 systems. ACPI is preferred to all other hardware-specific drivers. | ||
3 | # speedstep-* is preferred over p4-clockmod. | ||
4 | |||
5 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o | ||
6 | obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o | ||
7 | obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o | ||
8 | obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o | ||
9 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o | ||
10 | obj-$(CONFIG_X86_LONGHAUL) += longhaul.o | ||
11 | obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o | ||
12 | obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o | ||
13 | obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o | ||
14 | obj-$(CONFIG_X86_LONGRUN) += longrun.o | ||
15 | obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o | ||
16 | obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o | ||
17 | obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o | ||
18 | obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o | ||
19 | obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o | ||
20 | obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o | ||
21 | obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o | ||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index df86bc8c859d..fc73a34ba8c9 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -29,10 +29,10 @@ | |||
29 | 29 | ||
30 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 30 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
31 | { | 31 | { |
32 | u64 misc_enable; | ||
33 | |||
32 | /* Unmask CPUID levels if masked: */ | 34 | /* Unmask CPUID levels if masked: */ |
33 | if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { | 35 | if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { |
34 | u64 misc_enable; | ||
35 | |||
36 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | 36 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); |
37 | 37 | ||
38 | if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { | 38 | if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { |
@@ -118,8 +118,6 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
118 | * (model 2) with the same problem. | 118 | * (model 2) with the same problem. |
119 | */ | 119 | */ |
120 | if (c->x86 == 15) { | 120 | if (c->x86 == 15) { |
121 | u64 misc_enable; | ||
122 | |||
123 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | 121 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); |
124 | 122 | ||
125 | if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { | 123 | if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { |
@@ -130,6 +128,19 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
130 | } | 128 | } |
131 | } | 129 | } |
132 | #endif | 130 | #endif |
131 | |||
132 | /* | ||
133 | * If fast string is not enabled in IA32_MISC_ENABLE for any reason, | ||
134 | * clear the fast string and enhanced fast string CPU capabilities. | ||
135 | */ | ||
136 | if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { | ||
137 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | ||
138 | if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { | ||
139 | printk(KERN_INFO "Disabled fast string operations\n"); | ||
140 | setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); | ||
141 | setup_clear_cpu_cap(X86_FEATURE_ERMS); | ||
142 | } | ||
143 | } | ||
133 | } | 144 | } |
134 | 145 | ||
135 | #ifdef CONFIG_X86_32 | 146 | #ifdef CONFIG_X86_32 |
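The added block clears X86_FEATURE_REP_GOOD and X86_FEATURE_ERMS whenever the fast-string enable bit of IA32_MISC_ENABLE is off, so the string-instruction alternatives are never patched in on such systems. One hedged way to inspect that bit from user space is the msr driver; the MSR index (0x1a0) and the bit 0 position are taken from Intel's documentation, and the sketch assumes /dev/cpu/0/msr exists (modprobe msr, run as root):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define IA32_MISC_ENABLE 0x1a0

    int main(void)
    {
            uint64_t val;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0 || pread(fd, &val, sizeof(val), IA32_MISC_ENABLE) != sizeof(val)) {
                    perror("rdmsr");
                    return 1;
            }
            printf("fast string operations %s\n", (val & 1) ? "enabled" : "disabled");
            close(fd);
            return 0;
    }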
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 167f97b5596e..bb0adad35143 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -509,6 +509,7 @@ recurse: | |||
509 | out_free: | 509 | out_free: |
510 | if (b) { | 510 | if (b) { |
511 | kobject_put(&b->kobj); | 511 | kobject_put(&b->kobj); |
512 | list_del(&b->miscj); | ||
512 | kfree(b); | 513 | kfree(b); |
513 | } | 514 | } |
514 | return err; | 515 | return err; |
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 6f8c5e9da97f..0f034460260d 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -446,18 +446,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c) | |||
446 | */ | 446 | */ |
447 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 447 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
448 | 448 | ||
449 | h = lvtthmr_init; | ||
449 | /* | 450 | /* |
450 | * The initial value of thermal LVT entries on all APs always reads | 451 | * The initial value of thermal LVT entries on all APs always reads |
451 | * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI | 452 | * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI |
452 | * sequence to them and LVT registers are reset to 0s except for | 453 | * sequence to them and LVT registers are reset to 0s except for |
453 | * the mask bits which are set to 1s when APs receive INIT IPI. | 454 | * the mask bits which are set to 1s when APs receive INIT IPI. |
454 | * Always restore the value that BIOS has programmed on AP based on | 455 | * If BIOS takes over the thermal interrupt and sets its interrupt |
455 | * BSP's info we saved since BIOS is always setting the same value | 456 | * delivery mode to SMI (not fixed), it restores the value that the |
456 | * for all threads/cores | 457 | * BIOS has programmed on AP based on BSP's info we saved since BIOS |
458 | * is always setting the same value for all threads/cores. | ||
457 | */ | 459 | */ |
458 | apic_write(APIC_LVTTHMR, lvtthmr_init); | 460 | if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED) |
461 | apic_write(APIC_LVTTHMR, lvtthmr_init); | ||
459 | 462 | ||
460 | h = lvtthmr_init; | ||
461 | 463 | ||
462 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { | 464 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { |
463 | printk(KERN_DEBUG | 465 | printk(KERN_DEBUG |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index e638689279d3..3a0338b4b179 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/nmi.h> | 31 | #include <asm/nmi.h> |
32 | #include <asm/compat.h> | 32 | #include <asm/compat.h> |
33 | #include <asm/smp.h> | 33 | #include <asm/smp.h> |
34 | #include <asm/alternative.h> | ||
34 | 35 | ||
35 | #if 0 | 36 | #if 0 |
36 | #undef wrmsrl | 37 | #undef wrmsrl |
@@ -363,12 +364,18 @@ again: | |||
363 | return new_raw_count; | 364 | return new_raw_count; |
364 | } | 365 | } |
365 | 366 | ||
366 | /* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */ | ||
367 | static inline int x86_pmu_addr_offset(int index) | 367 | static inline int x86_pmu_addr_offset(int index) |
368 | { | 368 | { |
369 | if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) | 369 | int offset; |
370 | return index << 1; | 370 | |
371 | return index; | 371 | /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */ |
372 | alternative_io(ASM_NOP2, | ||
373 | "shll $1, %%eax", | ||
374 | X86_FEATURE_PERFCTR_CORE, | ||
375 | "=a" (offset), | ||
376 | "a" (index)); | ||
377 | |||
378 | return offset; | ||
372 | } | 379 | } |
373 | 380 | ||
374 | static inline unsigned int x86_pmu_config_addr(int index) | 381 | static inline unsigned int x86_pmu_config_addr(int index) |
@@ -1766,17 +1773,6 @@ static struct pmu pmu = { | |||
1766 | * callchain support | 1773 | * callchain support |
1767 | */ | 1774 | */ |
1768 | 1775 | ||
1769 | static void | ||
1770 | backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) | ||
1771 | { | ||
1772 | /* Ignore warnings */ | ||
1773 | } | ||
1774 | |||
1775 | static void backtrace_warning(void *data, char *msg) | ||
1776 | { | ||
1777 | /* Ignore warnings */ | ||
1778 | } | ||
1779 | |||
1780 | static int backtrace_stack(void *data, char *name) | 1776 | static int backtrace_stack(void *data, char *name) |
1781 | { | 1777 | { |
1782 | return 0; | 1778 | return 0; |
@@ -1790,8 +1786,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) | |||
1790 | } | 1786 | } |
1791 | 1787 | ||
1792 | static const struct stacktrace_ops backtrace_ops = { | 1788 | static const struct stacktrace_ops backtrace_ops = { |
1793 | .warning = backtrace_warning, | ||
1794 | .warning_symbol = backtrace_warning_symbol, | ||
1795 | .stack = backtrace_stack, | 1789 | .stack = backtrace_stack, |
1796 | .address = backtrace_address, | 1790 | .address = backtrace_address, |
1797 | .walk_stack = print_context_stack_bp, | 1791 | .walk_stack = print_context_stack_bp, |
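The alternative_io() in the perf hunk above only changes how the result is produced (a two-byte NOP is patched into "shll $1, %eax" at boot on PERFCTR_CORE parts); the value computed is still the one the removed branch returned. For reference, the C-level meaning:

    /* counter/eventsel MSRs are interleaved on PERFCTR_CORE CPUs, so the
     * per-index stride doubles; otherwise the offset is the index itself */
    static inline int pmu_addr_offset(int index)
    {
            return boot_cpu_has(X86_FEATURE_PERFCTR_CORE) ? index << 1 : index;
    }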
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index cf4e369cea67..fe29c1d2219e 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -96,12 +96,14 @@ static __initconst const u64 amd_hw_cache_event_ids | |||
96 | */ | 96 | */ |
97 | static const u64 amd_perfmon_event_map[] = | 97 | static const u64 amd_perfmon_event_map[] = |
98 | { | 98 | { |
99 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, | 99 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, |
100 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | 100 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, |
101 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, | 101 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, |
102 | [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, | 102 | [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, |
103 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, | 103 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, |
104 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, | 104 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, |
105 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ | ||
106 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ | ||
105 | }; | 107 | }; |
106 | 108 | ||
107 | static u64 amd_pmu_event_map(int hw_event) | 109 | static u64 amd_pmu_event_map(int hw_event) |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 447a28de6f09..41178c826c48 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -36,7 +36,7 @@ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = | |||
36 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, | 36 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static struct event_constraint intel_core_event_constraints[] = | 39 | static struct event_constraint intel_core_event_constraints[] __read_mostly = |
40 | { | 40 | { |
41 | INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ | 41 | INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ |
42 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | 42 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ |
@@ -47,7 +47,7 @@ static struct event_constraint intel_core_event_constraints[] = | |||
47 | EVENT_CONSTRAINT_END | 47 | EVENT_CONSTRAINT_END |
48 | }; | 48 | }; |
49 | 49 | ||
50 | static struct event_constraint intel_core2_event_constraints[] = | 50 | static struct event_constraint intel_core2_event_constraints[] __read_mostly = |
51 | { | 51 | { |
52 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 52 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
53 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 53 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
@@ -70,7 +70,7 @@ static struct event_constraint intel_core2_event_constraints[] = | |||
70 | EVENT_CONSTRAINT_END | 70 | EVENT_CONSTRAINT_END |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static struct event_constraint intel_nehalem_event_constraints[] = | 73 | static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = |
74 | { | 74 | { |
75 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 75 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
76 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 76 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
@@ -86,19 +86,19 @@ static struct event_constraint intel_nehalem_event_constraints[] = | |||
86 | EVENT_CONSTRAINT_END | 86 | EVENT_CONSTRAINT_END |
87 | }; | 87 | }; |
88 | 88 | ||
89 | static struct extra_reg intel_nehalem_extra_regs[] = | 89 | static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = |
90 | { | 90 | { |
91 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), | 91 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), |
92 | EVENT_EXTRA_END | 92 | EVENT_EXTRA_END |
93 | }; | 93 | }; |
94 | 94 | ||
95 | static struct event_constraint intel_nehalem_percore_constraints[] = | 95 | static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly = |
96 | { | 96 | { |
97 | INTEL_EVENT_CONSTRAINT(0xb7, 0), | 97 | INTEL_EVENT_CONSTRAINT(0xb7, 0), |
98 | EVENT_CONSTRAINT_END | 98 | EVENT_CONSTRAINT_END |
99 | }; | 99 | }; |
100 | 100 | ||
101 | static struct event_constraint intel_westmere_event_constraints[] = | 101 | static struct event_constraint intel_westmere_event_constraints[] __read_mostly = |
102 | { | 102 | { |
103 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 103 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
104 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 104 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
@@ -110,7 +110,7 @@ static struct event_constraint intel_westmere_event_constraints[] = | |||
110 | EVENT_CONSTRAINT_END | 110 | EVENT_CONSTRAINT_END |
111 | }; | 111 | }; |
112 | 112 | ||
113 | static struct event_constraint intel_snb_event_constraints[] = | 113 | static struct event_constraint intel_snb_event_constraints[] __read_mostly = |
114 | { | 114 | { |
115 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 115 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
116 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 116 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
@@ -123,21 +123,21 @@ static struct event_constraint intel_snb_event_constraints[] = | |||
123 | EVENT_CONSTRAINT_END | 123 | EVENT_CONSTRAINT_END |
124 | }; | 124 | }; |
125 | 125 | ||
126 | static struct extra_reg intel_westmere_extra_regs[] = | 126 | static struct extra_reg intel_westmere_extra_regs[] __read_mostly = |
127 | { | 127 | { |
128 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), | 128 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), |
129 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), | 129 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), |
130 | EVENT_EXTRA_END | 130 | EVENT_EXTRA_END |
131 | }; | 131 | }; |
132 | 132 | ||
133 | static struct event_constraint intel_westmere_percore_constraints[] = | 133 | static struct event_constraint intel_westmere_percore_constraints[] __read_mostly = |
134 | { | 134 | { |
135 | INTEL_EVENT_CONSTRAINT(0xb7, 0), | 135 | INTEL_EVENT_CONSTRAINT(0xb7, 0), |
136 | INTEL_EVENT_CONSTRAINT(0xbb, 0), | 136 | INTEL_EVENT_CONSTRAINT(0xbb, 0), |
137 | EVENT_CONSTRAINT_END | 137 | EVENT_CONSTRAINT_END |
138 | }; | 138 | }; |
139 | 139 | ||
140 | static struct event_constraint intel_gen_event_constraints[] = | 140 | static struct event_constraint intel_gen_event_constraints[] __read_mostly = |
141 | { | 141 | { |
142 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 142 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
143 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 143 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
@@ -1440,6 +1440,11 @@ static __init int intel_pmu_init(void) | |||
1440 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; | 1440 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; |
1441 | x86_pmu.extra_regs = intel_nehalem_extra_regs; | 1441 | x86_pmu.extra_regs = intel_nehalem_extra_regs; |
1442 | 1442 | ||
1443 | /* UOPS_ISSUED.STALLED_CYCLES */ | ||
1444 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | ||
1445 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ | ||
1446 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; | ||
1447 | |||
1443 | if (ebx & 0x40) { | 1448 | if (ebx & 0x40) { |
1444 | /* | 1449 | /* |
1445 | * Erratum AAJ80 detected, we work it around by using | 1450 | * Erratum AAJ80 detected, we work it around by using |
@@ -1480,6 +1485,12 @@ static __init int intel_pmu_init(void) | |||
1480 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; | 1485 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; |
1481 | x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; | 1486 | x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; |
1482 | x86_pmu.extra_regs = intel_westmere_extra_regs; | 1487 | x86_pmu.extra_regs = intel_westmere_extra_regs; |
1488 | |||
1489 | /* UOPS_ISSUED.STALLED_CYCLES */ | ||
1490 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | ||
1491 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ | ||
1492 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; | ||
1493 | |||
1483 | pr_cont("Westmere events, "); | 1494 | pr_cont("Westmere events, "); |
1484 | break; | 1495 | break; |
1485 | 1496 | ||
@@ -1491,6 +1502,12 @@ static __init int intel_pmu_init(void) | |||
1491 | 1502 | ||
1492 | x86_pmu.event_constraints = intel_snb_event_constraints; | 1503 | x86_pmu.event_constraints = intel_snb_event_constraints; |
1493 | x86_pmu.pebs_constraints = intel_snb_pebs_events; | 1504 | x86_pmu.pebs_constraints = intel_snb_pebs_events; |
1505 | |||
1506 | /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ | ||
1507 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | ||
1508 | /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ | ||
1509 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1; | ||
1510 | |||
1494 | pr_cont("SandyBridge events, "); | 1511 | pr_cont("SandyBridge events, "); |
1495 | break; | 1512 | break; |
1496 | 1513 | ||
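The new PERF_COUNT_HW_STALLED_CYCLES_FRONTEND/BACKEND generic events are mapped to model-specific codes per CPU above; from user space they are requested like any other PERF_TYPE_HARDWARE event. A minimal sketch via the raw syscall (error handling trimmed); the perf tool of the same vintage should expose them as stalled-cycles-frontend / stalled-cycles-backend under perf stat -e:

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr;
            long long count = 0;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND;
            attr.disabled = 1;
            attr.exclude_kernel = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... run the workload being measured ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
            read(fd, &count, sizeof(count));
            printf("frontend stall cycles: %lld\n", count);
            close(fd);
            return 0;
    }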
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index e93fcd55fae1..ead584fb6a7d 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -468,7 +468,7 @@ static struct p4_event_bind p4_event_bind_map[] = { | |||
468 | .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), | 468 | .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), |
469 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, | 469 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, |
470 | .escr_emask = | 470 | .escr_emask = |
471 | P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), | 471 | P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), |
472 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 472 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
473 | }, | 473 | }, |
474 | [P4_EVENT_X87_ASSIST] = { | 474 | [P4_EVENT_X87_ASSIST] = { |
@@ -912,8 +912,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) | |||
912 | int idx, handled = 0; | 912 | int idx, handled = 0; |
913 | u64 val; | 913 | u64 val; |
914 | 914 | ||
915 | data.addr = 0; | 915 | perf_sample_data_init(&data, 0); |
916 | data.raw = NULL; | ||
917 | 916 | ||
918 | cpuc = &__get_cpu_var(cpu_hw_events); | 917 | cpuc = &__get_cpu_var(cpu_hw_events); |
919 | 918 | ||
@@ -1197,7 +1196,7 @@ static __init int p4_pmu_init(void) | |||
1197 | { | 1196 | { |
1198 | unsigned int low, high; | 1197 | unsigned int low, high; |
1199 | 1198 | ||
1200 | /* If we get stripped -- indexig fails */ | 1199 | /* If we get stripped -- indexing fails */ |
1201 | BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC); | 1200 | BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC); |
1202 | 1201 | ||
1203 | rdmsr(MSR_IA32_MISC_ENABLE, low, high); | 1202 | rdmsr(MSR_IA32_MISC_ENABLE, low, high); |
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index e2a3f0606da4..f478ff6877ef 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -135,20 +135,6 @@ print_context_stack_bp(struct thread_info *tinfo, | |||
135 | } | 135 | } |
136 | EXPORT_SYMBOL_GPL(print_context_stack_bp); | 136 | EXPORT_SYMBOL_GPL(print_context_stack_bp); |
137 | 137 | ||
138 | |||
139 | static void | ||
140 | print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) | ||
141 | { | ||
142 | printk(data); | ||
143 | print_symbol(msg, symbol); | ||
144 | printk("\n"); | ||
145 | } | ||
146 | |||
147 | static void print_trace_warning(void *data, char *msg) | ||
148 | { | ||
149 | printk("%s%s\n", (char *)data, msg); | ||
150 | } | ||
151 | |||
152 | static int print_trace_stack(void *data, char *name) | 138 | static int print_trace_stack(void *data, char *name) |
153 | { | 139 | { |
154 | printk("%s <%s> ", (char *)data, name); | 140 | printk("%s <%s> ", (char *)data, name); |
@@ -166,8 +152,6 @@ static void print_trace_address(void *data, unsigned long addr, int reliable) | |||
166 | } | 152 | } |
167 | 153 | ||
168 | static const struct stacktrace_ops print_trace_ops = { | 154 | static const struct stacktrace_ops print_trace_ops = { |
169 | .warning = print_trace_warning, | ||
170 | .warning_symbol = print_trace_warning_symbol, | ||
171 | .stack = print_trace_stack, | 155 | .stack = print_trace_stack, |
172 | .address = print_trace_address, | 156 | .address = print_trace_address, |
173 | .walk_stack = print_context_stack, | 157 | .walk_stack = print_context_stack, |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index c969fd9d1566..f1a6244d7d93 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, | |||
1183 | struct pt_regs *regs) | 1183 | struct pt_regs *regs) |
1184 | { | 1184 | { |
1185 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 1185 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
1186 | unsigned long flags; | ||
1186 | 1187 | ||
1187 | /* This is possible if op is under delayed unoptimizing */ | 1188 | /* This is possible if op is under delayed unoptimizing */ |
1188 | if (kprobe_disabled(&op->kp)) | 1189 | if (kprobe_disabled(&op->kp)) |
1189 | return; | 1190 | return; |
1190 | 1191 | ||
1191 | preempt_disable(); | 1192 | local_irq_save(flags); |
1192 | if (kprobe_running()) { | 1193 | if (kprobe_running()) { |
1193 | kprobes_inc_nmissed_count(&op->kp); | 1194 | kprobes_inc_nmissed_count(&op->kp); |
1194 | } else { | 1195 | } else { |
@@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, | |||
1207 | opt_pre_handler(&op->kp, regs); | 1208 | opt_pre_handler(&op->kp, regs); |
1208 | __this_cpu_write(current_kprobe, NULL); | 1209 | __this_cpu_write(current_kprobe, NULL); |
1209 | } | 1210 | } |
1210 | preempt_enable_no_resched(); | 1211 | local_irq_restore(flags); |
1211 | } | 1212 | } |
1212 | 1213 | ||
1213 | static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) | 1214 | static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) |
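Switching from preempt_disable()/preempt_enable_no_resched() to local_irq_save()/local_irq_restore() makes the optimized-probe callback safe against re-entry from interrupt context as well, not just against being scheduled away. The general shape of the stronger guard, with the payload left hypothetical:

    #include <linux/irqflags.h>

    extern void do_probe_work(void);    /* hypothetical payload that must not be re-entered */

    static void guarded_callback(void)
    {
            unsigned long flags;

            local_irq_save(flags);      /* masks local IRQs, which also implies no preemption */
            do_probe_work();
            local_irq_restore(flags);
    }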
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index ab23f1ad4bf1..52f256f2cc81 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/bug.h> | 24 | #include <linux/bug.h> |
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/gfp.h> | 26 | #include <linux/gfp.h> |
27 | #include <linux/jump_label.h> | ||
27 | 28 | ||
28 | #include <asm/system.h> | 29 | #include <asm/system.h> |
29 | #include <asm/page.h> | 30 | #include <asm/page.h> |
diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c index 55d745ec1181..35ccf75696eb 100644 --- a/arch/x86/kernel/pci-iommu_table.c +++ b/arch/x86/kernel/pci-iommu_table.c | |||
@@ -50,20 +50,14 @@ void __init check_iommu_entries(struct iommu_table_entry *start, | |||
50 | struct iommu_table_entry *finish) | 50 | struct iommu_table_entry *finish) |
51 | { | 51 | { |
52 | struct iommu_table_entry *p, *q, *x; | 52 | struct iommu_table_entry *p, *q, *x; |
53 | char sym_p[KSYM_SYMBOL_LEN]; | ||
54 | char sym_q[KSYM_SYMBOL_LEN]; | ||
55 | 53 | ||
56 | /* Simple cyclic dependency checker. */ | 54 | /* Simple cyclic dependency checker. */ |
57 | for (p = start; p < finish; p++) { | 55 | for (p = start; p < finish; p++) { |
58 | q = find_dependents_of(start, finish, p); | 56 | q = find_dependents_of(start, finish, p); |
59 | x = find_dependents_of(start, finish, q); | 57 | x = find_dependents_of(start, finish, q); |
60 | if (p == x) { | 58 | if (p == x) { |
61 | sprint_symbol(sym_p, (unsigned long)p->detect); | 59 | printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n", |
62 | sprint_symbol(sym_q, (unsigned long)q->detect); | 60 | p->detect, q->detect); |
63 | |||
64 | printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %s depends" \ | ||
65 | " on %s and vice-versa. BREAKING IT.\n", | ||
66 | sym_p, sym_q); | ||
67 | /* Heavy handed way..*/ | 61 | /* Heavy handed way..*/ |
68 | x->depend = 0; | 62 | x->depend = 0; |
69 | } | 63 | } |
@@ -72,12 +66,8 @@ void __init check_iommu_entries(struct iommu_table_entry *start, | |||
72 | for (p = start; p < finish; p++) { | 66 | for (p = start; p < finish; p++) { |
73 | q = find_dependents_of(p, finish, p); | 67 | q = find_dependents_of(p, finish, p); |
74 | if (q && q > p) { | 68 | if (q && q > p) { |
75 | sprint_symbol(sym_p, (unsigned long)p->detect); | 69 | printk(KERN_ERR "EXECUTION ORDER INVALID! %pS should be called before %pS!\n", |
76 | sprint_symbol(sym_q, (unsigned long)q->detect); | 70 | p->detect, q->detect); |
77 | |||
78 | printk(KERN_ERR "EXECUTION ORDER INVALID! %s "\ | ||
79 | "should be called before %s!\n", | ||
80 | sym_p, sym_q); | ||
81 | } | 71 | } |
82 | } | 72 | } |
83 | } | 73 | } |
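The conversion above relies on the %pS printk extension, which symbolizes a code pointer in place and removes the need for the sprint_symbol() scratch buffers. A tiny illustration (the callback type is arbitrary):

    #include <linux/printk.h>

    static void report_detect_routine(int (*detect)(void))
    {
            /* %pS resolves the pointer to "symbol+offset/size [module]" form */
            pr_info("IOMMU detect routine: %pS\n", detect);
    }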
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 6515733a289d..55d9bc03f696 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c | |||
@@ -9,15 +9,6 @@ | |||
9 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
10 | #include <asm/stacktrace.h> | 10 | #include <asm/stacktrace.h> |
11 | 11 | ||
12 | static void save_stack_warning(void *data, char *msg) | ||
13 | { | ||
14 | } | ||
15 | |||
16 | static void | ||
17 | save_stack_warning_symbol(void *data, char *msg, unsigned long symbol) | ||
18 | { | ||
19 | } | ||
20 | |||
21 | static int save_stack_stack(void *data, char *name) | 12 | static int save_stack_stack(void *data, char *name) |
22 | { | 13 | { |
23 | return 0; | 14 | return 0; |
@@ -53,16 +44,12 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable) | |||
53 | } | 44 | } |
54 | 45 | ||
55 | static const struct stacktrace_ops save_stack_ops = { | 46 | static const struct stacktrace_ops save_stack_ops = { |
56 | .warning = save_stack_warning, | ||
57 | .warning_symbol = save_stack_warning_symbol, | ||
58 | .stack = save_stack_stack, | 47 | .stack = save_stack_stack, |
59 | .address = save_stack_address, | 48 | .address = save_stack_address, |
60 | .walk_stack = print_context_stack, | 49 | .walk_stack = print_context_stack, |
61 | }; | 50 | }; |
62 | 51 | ||
63 | static const struct stacktrace_ops save_stack_ops_nosched = { | 52 | static const struct stacktrace_ops save_stack_ops_nosched = { |
64 | .warning = save_stack_warning, | ||
65 | .warning_symbol = save_stack_warning_symbol, | ||
66 | .stack = save_stack_stack, | 53 | .stack = save_stack_stack, |
67 | .address = save_stack_address_nosched, | 54 | .address = save_stack_address_nosched, |
68 | .walk_stack = print_context_stack, | 55 | .walk_stack = print_context_stack, |
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index c11514e9128b..75ef4b18e9b7 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -61,6 +61,10 @@ struct x86_init_ops x86_init __initdata = { | |||
61 | .banner = default_banner, | 61 | .banner = default_banner, |
62 | }, | 62 | }, |
63 | 63 | ||
64 | .mapping = { | ||
65 | .pagetable_reserve = native_pagetable_reserve, | ||
66 | }, | ||
67 | |||
64 | .paging = { | 68 | .paging = { |
65 | .pagetable_setup_start = native_pagetable_setup_start, | 69 | .pagetable_setup_start = native_pagetable_setup_start, |
66 | .pagetable_setup_done = native_pagetable_setup_done, | 70 | .pagetable_setup_done = native_pagetable_setup_done, |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 1cd608973ce5..395bf0114aad 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * kernel and insert a module (lg.ko) which allows us to run other Linux | 7 | * kernel and insert a module (lg.ko) which allows us to run other Linux |
8 | * kernels the same way we'd run processes. We call the first kernel the Host, | 8 | * kernels the same way we'd run processes. We call the first kernel the Host, |
9 | * and the others the Guests. The program which sets up and configures Guests | 9 | * and the others the Guests. The program which sets up and configures Guests |
10 | * (such as the example in Documentation/lguest/lguest.c) is called the | 10 | * (such as the example in Documentation/virtual/lguest/lguest.c) is called the |
11 | * Launcher. | 11 | * Launcher. |
12 | * | 12 | * |
13 | * Secondly, we only run specially modified Guests, not normal kernels: setting | 13 | * Secondly, we only run specially modified Guests, not normal kernels: setting |
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S index aa4326bfb24a..f2145cfa12a6 100644 --- a/arch/x86/lib/clear_page_64.S +++ b/arch/x86/lib/clear_page_64.S | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <linux/linkage.h> | 1 | #include <linux/linkage.h> |
2 | #include <asm/dwarf2.h> | 2 | #include <asm/dwarf2.h> |
3 | #include <asm/alternative-asm.h> | ||
3 | 4 | ||
4 | /* | 5 | /* |
5 | * Zero a page. | 6 | * Zero a page. |
@@ -14,6 +15,15 @@ ENTRY(clear_page_c) | |||
14 | CFI_ENDPROC | 15 | CFI_ENDPROC |
15 | ENDPROC(clear_page_c) | 16 | ENDPROC(clear_page_c) |
16 | 17 | ||
18 | ENTRY(clear_page_c_e) | ||
19 | CFI_STARTPROC | ||
20 | movl $4096,%ecx | ||
21 | xorl %eax,%eax | ||
22 | rep stosb | ||
23 | ret | ||
24 | CFI_ENDPROC | ||
25 | ENDPROC(clear_page_c_e) | ||
26 | |||
17 | ENTRY(clear_page) | 27 | ENTRY(clear_page) |
18 | CFI_STARTPROC | 28 | CFI_STARTPROC |
19 | xorl %eax,%eax | 29 | xorl %eax,%eax |
@@ -38,21 +48,26 @@ ENTRY(clear_page) | |||
38 | .Lclear_page_end: | 48 | .Lclear_page_end: |
39 | ENDPROC(clear_page) | 49 | ENDPROC(clear_page) |
40 | 50 | ||
41 | /* Some CPUs run faster using the string instructions. | 51 | /* |
42 | It is also a lot simpler. Use this when possible */ | 52 | * Some CPUs support enhanced REP MOVSB/STOSB instructions. |
53 | * It is recommended to use this when possible. | ||
54 | * If enhanced REP MOVSB/STOSB is not available, try to use fast string. | ||
55 | * Otherwise, use original function. | ||
56 | * | ||
57 | */ | ||
43 | 58 | ||
44 | #include <asm/cpufeature.h> | 59 | #include <asm/cpufeature.h> |
45 | 60 | ||
46 | .section .altinstr_replacement,"ax" | 61 | .section .altinstr_replacement,"ax" |
47 | 1: .byte 0xeb /* jmp <disp8> */ | 62 | 1: .byte 0xeb /* jmp <disp8> */ |
48 | .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ | 63 | .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ |
49 | 2: | 64 | 2: .byte 0xeb /* jmp <disp8> */ |
65 | .byte (clear_page_c_e - clear_page) - (3f - 2b) /* offset */ | ||
66 | 3: | ||
50 | .previous | 67 | .previous |
51 | .section .altinstructions,"a" | 68 | .section .altinstructions,"a" |
52 | .align 8 | 69 | altinstruction_entry clear_page,1b,X86_FEATURE_REP_GOOD,\ |
53 | .quad clear_page | 70 | .Lclear_page_end-clear_page, 2b-1b |
54 | .quad 1b | 71 | altinstruction_entry clear_page,2b,X86_FEATURE_ERMS, \ |
55 | .word X86_FEATURE_REP_GOOD | 72 | .Lclear_page_end-clear_page,3b-2b |
56 | .byte .Lclear_page_end - clear_page | ||
57 | .byte 2b - 1b | ||
58 | .previous | 73 | .previous |
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 99e482615195..024840266ba0 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S | |||
@@ -15,23 +15,30 @@ | |||
15 | #include <asm/asm-offsets.h> | 15 | #include <asm/asm-offsets.h> |
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | #include <asm/cpufeature.h> | 17 | #include <asm/cpufeature.h> |
18 | #include <asm/alternative-asm.h> | ||
18 | 19 | ||
19 | .macro ALTERNATIVE_JUMP feature,orig,alt | 20 | /* |
21 | * By placing feature2 after feature1 in altinstructions section, we logically | ||
22 | * implement: | ||
23 | * If CPU has feature2, jmp to alt2 is used | ||
24 | * else if CPU has feature1, jmp to alt1 is used | ||
25 | * else jmp to orig is used. | ||
26 | */ | ||
27 | .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2 | ||
20 | 0: | 28 | 0: |
21 | .byte 0xe9 /* 32bit jump */ | 29 | .byte 0xe9 /* 32bit jump */ |
22 | .long \orig-1f /* by default jump to orig */ | 30 | .long \orig-1f /* by default jump to orig */ |
23 | 1: | 31 | 1: |
24 | .section .altinstr_replacement,"ax" | 32 | .section .altinstr_replacement,"ax" |
25 | 2: .byte 0xe9 /* near jump with 32bit immediate */ | 33 | 2: .byte 0xe9 /* near jump with 32bit immediate */ |
26 | .long \alt-1b /* offset */ /* or alternatively to alt */ | 34 | .long \alt1-1b /* offset */ /* or alternatively to alt1 */ |
35 | 3: .byte 0xe9 /* near jump with 32bit immediate */ | ||
36 | .long \alt2-1b /* offset */ /* or alternatively to alt2 */ | ||
27 | .previous | 37 | .previous |
38 | |||
28 | .section .altinstructions,"a" | 39 | .section .altinstructions,"a" |
29 | .align 8 | 40 | altinstruction_entry 0b,2b,\feature1,5,5 |
30 | .quad 0b | 41 | altinstruction_entry 0b,3b,\feature2,5,5 |
31 | .quad 2b | ||
32 | .word \feature /* when feature is set */ | ||
33 | .byte 5 | ||
34 | .byte 5 | ||
35 | .previous | 42 | .previous |
36 | .endm | 43 | .endm |
37 | 44 | ||
@@ -72,8 +79,10 @@ ENTRY(_copy_to_user) | |||
72 | addq %rdx,%rcx | 79 | addq %rdx,%rcx |
73 | jc bad_to_user | 80 | jc bad_to_user |
74 | cmpq TI_addr_limit(%rax),%rcx | 81 | cmpq TI_addr_limit(%rax),%rcx |
75 | jae bad_to_user | 82 | ja bad_to_user |
76 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string | 83 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ |
84 | copy_user_generic_unrolled,copy_user_generic_string, \ | ||
85 | copy_user_enhanced_fast_string | ||
77 | CFI_ENDPROC | 86 | CFI_ENDPROC |
78 | ENDPROC(_copy_to_user) | 87 | ENDPROC(_copy_to_user) |
79 | 88 | ||
@@ -85,8 +94,10 @@ ENTRY(_copy_from_user) | |||
85 | addq %rdx,%rcx | 94 | addq %rdx,%rcx |
86 | jc bad_from_user | 95 | jc bad_from_user |
87 | cmpq TI_addr_limit(%rax),%rcx | 96 | cmpq TI_addr_limit(%rax),%rcx |
88 | jae bad_from_user | 97 | ja bad_from_user |
89 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string | 98 | ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ |
99 | copy_user_generic_unrolled,copy_user_generic_string, \ | ||
100 | copy_user_enhanced_fast_string | ||
90 | CFI_ENDPROC | 101 | CFI_ENDPROC |
91 | ENDPROC(_copy_from_user) | 102 | ENDPROC(_copy_from_user) |
92 | 103 | ||
@@ -255,3 +266,37 @@ ENTRY(copy_user_generic_string) | |||
255 | .previous | 266 | .previous |
256 | CFI_ENDPROC | 267 | CFI_ENDPROC |
257 | ENDPROC(copy_user_generic_string) | 268 | ENDPROC(copy_user_generic_string) |
269 | |||
270 | /* | ||
271 | * Some CPUs are adding enhanced REP MOVSB/STOSB instructions. | ||
272 | * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled. | ||
273 | * | ||
274 | * Input: | ||
275 | * rdi destination | ||
276 | * rsi source | ||
277 | * rdx count | ||
278 | * | ||
279 | * Output: | ||
280 | * eax uncopied bytes or 0 if successful. | ||
281 | */ | ||
282 | ENTRY(copy_user_enhanced_fast_string) | ||
283 | CFI_STARTPROC | ||
284 | andl %edx,%edx | ||
285 | jz 2f | ||
286 | movl %edx,%ecx | ||
287 | 1: rep | ||
288 | movsb | ||
289 | 2: xorl %eax,%eax | ||
290 | ret | ||
291 | |||
292 | .section .fixup,"ax" | ||
293 | 12: movl %ecx,%edx /* ecx is zerorest also */ | ||
294 | jmp copy_user_handle_tail | ||
295 | .previous | ||
296 | |||
297 | .section __ex_table,"a" | ||
298 | .align 8 | ||
299 | .quad 1b,12b | ||
300 | .previous | ||
301 | CFI_ENDPROC | ||
302 | ENDPROC(copy_user_enhanced_fast_string) | ||
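With two feature/alternative pairs, the entry listed second (ERMS) is patched last, so it wins on CPUs that also have REP_GOOD, and the unrolled copy remains the fallback. Restated as runtime C for clarity; the cpu_has_*() predicates and the exact prototypes are illustrative, not the kernel's:

    extern int cpu_has_erms(void), cpu_has_rep_good(void);     /* stand-in predicates */
    extern unsigned long copy_user_generic_unrolled(void *to, const void *from, unsigned int len);
    extern unsigned long copy_user_generic_string(void *to, const void *from, unsigned int len);
    extern unsigned long copy_user_enhanced_fast_string(void *to, const void *from, unsigned int len);

    static unsigned long copy_user_dispatch(void *to, const void *from, unsigned int len)
    {
            if (cpu_has_erms())             /* enhanced REP MOVSB/STOSB */
                    return copy_user_enhanced_fast_string(to, from, len);
            if (cpu_has_rep_good())         /* fast string copy */
                    return copy_user_generic_string(to, from, len);
            return copy_user_generic_unrolled(to, from, len);
    }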
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 75ef61e35e38..daab21dae2d1 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | #include <asm/cpufeature.h> | 5 | #include <asm/cpufeature.h> |
6 | #include <asm/dwarf2.h> | 6 | #include <asm/dwarf2.h> |
7 | #include <asm/alternative-asm.h> | ||
7 | 8 | ||
8 | /* | 9 | /* |
9 | * memcpy - Copy a memory block. | 10 | * memcpy - Copy a memory block. |
@@ -37,6 +38,23 @@ | |||
37 | .Lmemcpy_e: | 38 | .Lmemcpy_e: |
38 | .previous | 39 | .previous |
39 | 40 | ||
41 | /* | ||
42 | * memcpy_c_e() - enhanced fast string memcpy. This is faster and simpler than | ||
43 | * memcpy_c. Use memcpy_c_e when possible. | ||
44 | * | ||
45 | * This gets patched over the unrolled variant (below) via the | ||
46 | * alternative instructions framework: | ||
47 | */ | ||
48 | .section .altinstr_replacement, "ax", @progbits | ||
49 | .Lmemcpy_c_e: | ||
50 | movq %rdi, %rax | ||
51 | |||
52 | movl %edx, %ecx | ||
53 | rep movsb | ||
54 | ret | ||
55 | .Lmemcpy_e_e: | ||
56 | .previous | ||
57 | |||
40 | ENTRY(__memcpy) | 58 | ENTRY(__memcpy) |
41 | ENTRY(memcpy) | 59 | ENTRY(memcpy) |
42 | CFI_STARTPROC | 60 | CFI_STARTPROC |
@@ -171,21 +189,22 @@ ENDPROC(memcpy) | |||
171 | ENDPROC(__memcpy) | 189 | ENDPROC(__memcpy) |
172 | 190 | ||
173 | /* | 191 | /* |
174 | * Some CPUs run faster using the string copy instructions. | 192 | * Some CPUs are adding enhanced REP MOVSB/STOSB feature |
175 | * It is also a lot simpler. Use this when possible: | 193 | * If the feature is supported, memcpy_c_e() is the first choice. |
176 | */ | 194 | * If enhanced rep movsb copy is not available, use fast string copy |
177 | 195 | * memcpy_c() when possible. This is faster and code is simpler than | |
178 | .section .altinstructions, "a" | 196 | * original memcpy(). |
179 | .align 8 | 197 | * Otherwise, original memcpy() is used. |
180 | .quad memcpy | 198 | * In .altinstructions section, ERMS feature is placed after REP_GOOD |

181 | .quad .Lmemcpy_c | 199 | * feature to implement the right patch order. |
182 | .word X86_FEATURE_REP_GOOD | 200 | * |
183 | |||
184 | /* | ||
185 | * Replace only beginning, memcpy is used to apply alternatives, | 201 | * Replace only beginning, memcpy is used to apply alternatives, |
186 | * so it is silly to overwrite itself with nops - reboot is the | 202 | * so it is silly to overwrite itself with nops - reboot is the |
187 | * only outcome... | 203 | * only outcome... |
188 | */ | 204 | */ |
189 | .byte .Lmemcpy_e - .Lmemcpy_c | 205 | .section .altinstructions, "a" |
190 | .byte .Lmemcpy_e - .Lmemcpy_c | 206 | altinstruction_entry memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\ |
207 | .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c | ||
208 | altinstruction_entry memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \ | ||
209 | .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e | ||
191 | .previous | 210 | .previous |
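memcpy follows the same scheme, and the ordering comment matters because alternatives entries are applied in the order they appear: placing the ERMS entry after the REP_GOOD one means a CPU with both features ends up running the rep movsb body. A boot-time selection model (the function names are hypothetical labels for the three bodies):

    #include <stddef.h>

    typedef void *(*memcpy_fn)(void *dst, const void *src, size_t len);

    extern void *memcpy_unrolled(void *, const void *, size_t);    /* original loop */
    extern void *memcpy_rep_movsq(void *, const void *, size_t);   /* .Lmemcpy_c    */
    extern void *memcpy_rep_movsb(void *, const void *, size_t);   /* .Lmemcpy_c_e  */

    static memcpy_fn pick_memcpy(int has_rep_good, int has_erms)
    {
            memcpy_fn fn = memcpy_unrolled;

            if (has_rep_good)
                    fn = memcpy_rep_movsq;
            if (has_erms)
                    fn = memcpy_rep_movsb;  /* applied last, so it wins when both are set */
            return fn;
    }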
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index 0ecb8433e5a8..d0ec9c2936d7 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S | |||
@@ -8,6 +8,7 @@ | |||
8 | #define _STRING_C | 8 | #define _STRING_C |
9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
10 | #include <asm/dwarf2.h> | 10 | #include <asm/dwarf2.h> |
11 | #include <asm/cpufeature.h> | ||
11 | 12 | ||
12 | #undef memmove | 13 | #undef memmove |
13 | 14 | ||
@@ -24,6 +25,7 @@ | |||
24 | */ | 25 | */ |
25 | ENTRY(memmove) | 26 | ENTRY(memmove) |
26 | CFI_STARTPROC | 27 | CFI_STARTPROC |
28 | |||
27 | /* Handle more 32bytes in loop */ | 29 | /* Handle more 32bytes in loop */ |
28 | mov %rdi, %rax | 30 | mov %rdi, %rax |
29 | cmp $0x20, %rdx | 31 | cmp $0x20, %rdx |
@@ -31,8 +33,13 @@ ENTRY(memmove) | |||
31 | 33 | ||
32 | /* Decide forward/backward copy mode */ | 34 | /* Decide forward/backward copy mode */ |
33 | cmp %rdi, %rsi | 35 | cmp %rdi, %rsi |
34 | jb 2f | 36 | jge .Lmemmove_begin_forward |
37 | mov %rsi, %r8 | ||
38 | add %rdx, %r8 | ||
39 | cmp %rdi, %r8 | ||
40 | jg 2f | ||
35 | 41 | ||
42 | .Lmemmove_begin_forward: | ||
36 | /* | 43 | /* |
37 | * movsq instruction have many startup latency | 44 | * movsq instruction have many startup latency |
38 | * so we handle small size by general register. | 45 | * so we handle small size by general register. |
@@ -78,6 +85,8 @@ ENTRY(memmove) | |||
78 | rep movsq | 85 | rep movsq |
79 | movq %r11, (%r10) | 86 | movq %r11, (%r10) |
80 | jmp 13f | 87 | jmp 13f |
88 | .Lmemmove_end_forward: | ||
89 | |||
81 | /* | 90 | /* |
82 | * Handle data backward by movsq. | 91 | * Handle data backward by movsq. |
83 | */ | 92 | */ |
@@ -194,4 +203,22 @@ ENTRY(memmove) | |||
194 | 13: | 203 | 13: |
195 | retq | 204 | retq |
196 | CFI_ENDPROC | 205 | CFI_ENDPROC |
206 | |||
207 | .section .altinstr_replacement,"ax" | ||
208 | .Lmemmove_begin_forward_efs: | ||
209 | /* Forward moving data. */ | ||
210 | movq %rdx, %rcx | ||
211 | rep movsb | ||
212 | retq | ||
213 | .Lmemmove_end_forward_efs: | ||
214 | .previous | ||
215 | |||
216 | .section .altinstructions,"a" | ||
217 | .align 8 | ||
218 | .quad .Lmemmove_begin_forward | ||
219 | .quad .Lmemmove_begin_forward_efs | ||
220 | .word X86_FEATURE_ERMS | ||
221 | .byte .Lmemmove_end_forward-.Lmemmove_begin_forward | ||
222 | .byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs | ||
223 | .previous | ||
197 | ENDPROC(memmove) | 224 | ENDPROC(memmove) |
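The memmove change is subtler: the backward copier is now used only when the source really overlaps the destination from below, and only the forward path (.Lmemmove_begin_forward .. .Lmemmove_end_forward) is replaced with rep movsb on ERMS hardware. The direction decision in C, as a sketch with stand-in copy helpers:

    #include <stddef.h>

    void *forward_copy(char *d, const char *s, size_t len);        /* stand-in prototypes */
    void *backward_copy(char *d, const char *s, size_t len);

    void *memmove_sketch(void *dst, const void *src, size_t len)
    {
            const char *s = src;
            char *d = dst;

            if (s >= d || s + len <= d)             /* forward copy cannot clobber unread source */
                    return forward_copy(d, s, len); /* rep movsb here when ERMS is present */
            return backward_copy(d, s, len);        /* overlapping from below: copy tail first */
    }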
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 09d344269652..79bd454b78a3 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S | |||
@@ -2,9 +2,13 @@ | |||
2 | 2 | ||
3 | #include <linux/linkage.h> | 3 | #include <linux/linkage.h> |
4 | #include <asm/dwarf2.h> | 4 | #include <asm/dwarf2.h> |
5 | #include <asm/cpufeature.h> | ||
6 | #include <asm/alternative-asm.h> | ||
5 | 7 | ||
6 | /* | 8 | /* |
7 | * ISO C memset - set a memory block to a byte value. | 9 | * ISO C memset - set a memory block to a byte value. This function uses fast |
10 | * string to get better performance than the original function. The code is | ||
11 | * simpler and shorter than the original function as well. | ||
8 | * | 12 | * |
9 | * rdi destination | 13 | * rdi destination |
10 | * rsi value (char) | 14 | * rsi value (char) |
@@ -31,6 +35,28 @@ | |||
31 | .Lmemset_e: | 35 | .Lmemset_e: |
32 | .previous | 36 | .previous |
33 | 37 | ||
38 | /* | ||
39 | * ISO C memset - set a memory block to a byte value. This function uses | ||
40 | * enhanced rep stosb to override the fast string function. | ||
41 | * The code is simpler and shorter than the fast string function as well. | ||
42 | * | ||
43 | * rdi destination | ||
44 | * rsi value (char) | ||
45 | * rdx count (bytes) | ||
46 | * | ||
47 | * rax original destination | ||
48 | */ | ||
49 | .section .altinstr_replacement, "ax", @progbits | ||
50 | .Lmemset_c_e: | ||
51 | movq %rdi,%r9 | ||
52 | movb %sil,%al | ||
53 | movl %edx,%ecx | ||
54 | rep stosb | ||
55 | movq %r9,%rax | ||
56 | ret | ||
57 | .Lmemset_e_e: | ||
58 | .previous | ||
59 | |||
34 | ENTRY(memset) | 60 | ENTRY(memset) |
35 | ENTRY(__memset) | 61 | ENTRY(__memset) |
36 | CFI_STARTPROC | 62 | CFI_STARTPROC |
@@ -112,16 +138,20 @@ ENTRY(__memset) | |||
112 | ENDPROC(memset) | 138 | ENDPROC(memset) |
113 | ENDPROC(__memset) | 139 | ENDPROC(__memset) |
114 | 140 | ||
115 | /* Some CPUs run faster using the string instructions. | 141 | /* Some CPUs support enhanced REP MOVSB/STOSB feature. |
116 | It is also a lot simpler. Use this when possible */ | 142 | * It is recommended to use this when possible. |
117 | 143 | * | |
118 | #include <asm/cpufeature.h> | 144 | * If enhanced REP MOVSB/STOSB feature is not available, use fast string |
119 | 145 | * instructions. | |
146 | * | ||
147 | * Otherwise, use original memset function. | ||
148 | * | ||
149 | * In .altinstructions section, ERMS feature is placed after REP_GOOD | ||
150 | * feature to implement the right patch order. | ||
151 | */ | ||
120 | .section .altinstructions,"a" | 152 | .section .altinstructions,"a" |
121 | .align 8 | 153 | altinstruction_entry memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\ |
122 | .quad memset | 154 | .Lfinal-memset,.Lmemset_e-.Lmemset_c |
123 | .quad .Lmemset_c | 155 | altinstruction_entry memset,.Lmemset_c_e,X86_FEATURE_ERMS, \ |
124 | .word X86_FEATURE_REP_GOOD | 156 | .Lfinal-memset,.Lmemset_e_e-.Lmemset_c_e |
125 | .byte .Lfinal - memset | ||
126 | .byte .Lmemset_e - .Lmemset_c | ||
127 | .previous | 157 | .previous |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 286d289b039b..37b8b0fe8320 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse, | |||
81 | end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); | 81 | end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); |
82 | } | 82 | } |
83 | 83 | ||
84 | void __init native_pagetable_reserve(u64 start, u64 end) | ||
85 | { | ||
86 | memblock_x86_reserve_range(start, end, "PGTABLE"); | ||
87 | } | ||
88 | |||
84 | struct map_range { | 89 | struct map_range { |
85 | unsigned long start; | 90 | unsigned long start; |
86 | unsigned long end; | 91 | unsigned long end; |
@@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
272 | 277 | ||
273 | __flush_tlb_all(); | 278 | __flush_tlb_all(); |
274 | 279 | ||
280 | /* | ||
281 | * Reserve the kernel pagetable pages we used (pgt_buf_start - | ||
282 | * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) | ||
283 | * so that they can be reused for other purposes. | ||
284 | * | ||
285 | * On native it just means calling memblock_x86_reserve_range, on Xen it | ||
286 | * also means marking RW the pagetable pages that we allocated before | ||
287 | * but that haven't been used. | ||
288 | * | ||
289 | * In fact on xen we mark RO the whole range pgt_buf_start - | ||
290 | * pgt_buf_top, because we have to make sure that when | ||
291 | * init_memory_mapping reaches the pagetable pages area, it maps | ||
292 | * RO all the pagetable pages, including the ones that are beyond | ||
293 | * pgt_buf_end at that time. | ||
294 | */ | ||
275 | if (!after_bootmem && pgt_buf_end > pgt_buf_start) | 295 | if (!after_bootmem && pgt_buf_end > pgt_buf_start) |
276 | memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, | 296 | x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start), |
277 | pgt_buf_end << PAGE_SHIFT, "PGTABLE"); | 297 | PFN_PHYS(pgt_buf_end)); |
278 | 298 | ||
279 | if (!after_bootmem) | 299 | if (!after_bootmem) |
280 | early_memtest(start, end); | 300 | early_memtest(start, end); |
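The point of routing the reservation through x86_init.mapping.pagetable_reserve is that a paravirtualized guest can hook it to do extra work, such as flipping the unused pgt_buf_end..pgt_buf_top pages back to read-write, before reserving the used range the way the native hook does. Rough shape of such an override; the RW-remarking helper is hypothetical:

    static void __init my_pagetable_reserve(u64 start, u64 end)
    {
            /* hypothetical: make the pagetable pages we allocated but never used RW again */
            remark_unused_pt_pages_rw(end);

            /* then mirror native_pagetable_reserve() */
            memblock_x86_reserve_range(start, end, "PGTABLE");
    }

    void __init my_platform_setup(void)
    {
            x86_init.mapping.pagetable_reserve = my_pagetable_reserve;
    }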
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c index 2d49d4e19a36..a5b64ab4cd6e 100644 --- a/arch/x86/oprofile/backtrace.c +++ b/arch/x86/oprofile/backtrace.c | |||
@@ -16,17 +16,6 @@ | |||
16 | #include <asm/stacktrace.h> | 16 | #include <asm/stacktrace.h> |
17 | #include <linux/compat.h> | 17 | #include <linux/compat.h> |
18 | 18 | ||
19 | static void backtrace_warning_symbol(void *data, char *msg, | ||
20 | unsigned long symbol) | ||
21 | { | ||
22 | /* Ignore warnings */ | ||
23 | } | ||
24 | |||
25 | static void backtrace_warning(void *data, char *msg) | ||
26 | { | ||
27 | /* Ignore warnings */ | ||
28 | } | ||
29 | |||
30 | static int backtrace_stack(void *data, char *name) | 19 | static int backtrace_stack(void *data, char *name) |
31 | { | 20 | { |
32 | /* Yes, we want all stacks */ | 21 | /* Yes, we want all stacks */ |
@@ -42,8 +31,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) | |||
42 | } | 31 | } |
43 | 32 | ||
44 | static struct stacktrace_ops backtrace_ops = { | 33 | static struct stacktrace_ops backtrace_ops = { |
45 | .warning = backtrace_warning, | ||
46 | .warning_symbol = backtrace_warning_symbol, | ||
47 | .stack = backtrace_stack, | 34 | .stack = backtrace_stack, |
48 | .address = backtrace_address, | 35 | .address = backtrace_address, |
49 | .walk_stack = print_context_stack, | 36 | .walk_stack = print_context_stack, |
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index e37b407a0ee8..8214724ce54d 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
@@ -108,7 +108,8 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
108 | } | 108 | } |
109 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, | 109 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, |
110 | (type == PCI_CAP_ID_MSIX) ? | 110 | (type == PCI_CAP_ID_MSIX) ? |
111 | "msi-x" : "msi"); | 111 | "msi-x" : "msi", |
112 | DOMID_SELF); | ||
112 | if (irq < 0) | 113 | if (irq < 0) |
113 | goto error; | 114 | goto error; |
114 | dev_dbg(&dev->dev, | 115 | dev_dbg(&dev->dev, |
@@ -148,7 +149,8 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
148 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, | 149 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, |
149 | (type == PCI_CAP_ID_MSIX) ? | 150 | (type == PCI_CAP_ID_MSIX) ? |
150 | "pcifront-msi-x" : | 151 | "pcifront-msi-x" : |
151 | "pcifront-msi"); | 152 | "pcifront-msi", |
153 | DOMID_SELF); | ||
152 | if (irq < 0) | 154 | if (irq < 0) |
153 | goto free; | 155 | goto free; |
154 | i++; | 156 | i++; |
@@ -190,9 +192,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
190 | 192 | ||
191 | list_for_each_entry(msidesc, &dev->msi_list, list) { | 193 | list_for_each_entry(msidesc, &dev->msi_list, list) { |
192 | struct physdev_map_pirq map_irq; | 194 | struct physdev_map_pirq map_irq; |
195 | domid_t domid; | ||
196 | |||
197 | domid = ret = xen_find_device_domain_owner(dev); | ||
198 | /* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED, | ||
199 | * hence check ret value for < 0. */ | ||
200 | if (ret < 0) | ||
201 | domid = DOMID_SELF; | ||
193 | 202 | ||
194 | memset(&map_irq, 0, sizeof(map_irq)); | 203 | memset(&map_irq, 0, sizeof(map_irq)); |
195 | map_irq.domid = DOMID_SELF; | 204 | map_irq.domid = domid; |
196 | map_irq.type = MAP_PIRQ_TYPE_MSI; | 205 | map_irq.type = MAP_PIRQ_TYPE_MSI; |
197 | map_irq.index = -1; | 206 | map_irq.index = -1; |
198 | map_irq.pirq = -1; | 207 | map_irq.pirq = -1; |
@@ -215,14 +224,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
215 | 224 | ||
216 | ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); | 225 | ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); |
217 | if (ret) { | 226 | if (ret) { |
218 | dev_warn(&dev->dev, "xen map irq failed %d\n", ret); | 227 | dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n", |
228 | ret, domid); | ||
219 | goto out; | 229 | goto out; |
220 | } | 230 | } |
221 | 231 | ||
222 | ret = xen_bind_pirq_msi_to_irq(dev, msidesc, | 232 | ret = xen_bind_pirq_msi_to_irq(dev, msidesc, |
223 | map_irq.pirq, map_irq.index, | 233 | map_irq.pirq, map_irq.index, |
224 | (type == PCI_CAP_ID_MSIX) ? | 234 | (type == PCI_CAP_ID_MSIX) ? |
225 | "msi-x" : "msi"); | 235 | "msi-x" : "msi", |
236 | domid); | ||
226 | if (ret < 0) | 237 | if (ret < 0) |
227 | goto out; | 238 | goto out; |
228 | } | 239 | } |
@@ -461,3 +472,78 @@ void __init xen_setup_pirqs(void) | |||
461 | } | 472 | } |
462 | } | 473 | } |
463 | #endif | 474 | #endif |
475 | |||
476 | #ifdef CONFIG_XEN_DOM0 | ||
477 | struct xen_device_domain_owner { | ||
478 | domid_t domain; | ||
479 | struct pci_dev *dev; | ||
480 | struct list_head list; | ||
481 | }; | ||
482 | |||
483 | static DEFINE_SPINLOCK(dev_domain_list_spinlock); | ||
484 | static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list); | ||
485 | |||
486 | static struct xen_device_domain_owner *find_device(struct pci_dev *dev) | ||
487 | { | ||
488 | struct xen_device_domain_owner *owner; | ||
489 | |||
490 | list_for_each_entry(owner, &dev_domain_list, list) { | ||
491 | if (owner->dev == dev) | ||
492 | return owner; | ||
493 | } | ||
494 | return NULL; | ||
495 | } | ||
496 | |||
497 | int xen_find_device_domain_owner(struct pci_dev *dev) | ||
498 | { | ||
499 | struct xen_device_domain_owner *owner; | ||
500 | int domain = -ENODEV; | ||
501 | |||
502 | spin_lock(&dev_domain_list_spinlock); | ||
503 | owner = find_device(dev); | ||
504 | if (owner) | ||
505 | domain = owner->domain; | ||
506 | spin_unlock(&dev_domain_list_spinlock); | ||
507 | return domain; | ||
508 | } | ||
509 | EXPORT_SYMBOL_GPL(xen_find_device_domain_owner); | ||
510 | |||
511 | int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain) | ||
512 | { | ||
513 | struct xen_device_domain_owner *owner; | ||
514 | |||
515 | owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL); | ||
516 | if (!owner) | ||
517 | return -ENODEV; | ||
518 | |||
519 | spin_lock(&dev_domain_list_spinlock); | ||
520 | if (find_device(dev)) { | ||
521 | spin_unlock(&dev_domain_list_spinlock); | ||
522 | kfree(owner); | ||
523 | return -EEXIST; | ||
524 | } | ||
525 | owner->domain = domain; | ||
526 | owner->dev = dev; | ||
527 | list_add_tail(&owner->list, &dev_domain_list); | ||
528 | spin_unlock(&dev_domain_list_spinlock); | ||
529 | return 0; | ||
530 | } | ||
531 | EXPORT_SYMBOL_GPL(xen_register_device_domain_owner); | ||
532 | |||
533 | int xen_unregister_device_domain_owner(struct pci_dev *dev) | ||
534 | { | ||
535 | struct xen_device_domain_owner *owner; | ||
536 | |||
537 | spin_lock(&dev_domain_list_spinlock); | ||
538 | owner = find_device(dev); | ||
539 | if (!owner) { | ||
540 | spin_unlock(&dev_domain_list_spinlock); | ||
541 | return -ENODEV; | ||
542 | } | ||
543 | list_del(&owner->list); | ||
544 | spin_unlock(&dev_domain_list_spinlock); | ||
545 | kfree(owner); | ||
546 | return 0; | ||
547 | } | ||
548 | EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner); | ||
549 | #endif | ||
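The block above adds a small registry that lets dom0 remember which guest domain owns a PCI device, so xen_initdom_setup_msi_irqs() (earlier hunk) can map MSIs on that domain's behalf instead of always using DOMID_SELF. A hedged sketch of how a backend might drive it; the caller names and header location are hypothetical, only the three xen_*_device_domain_owner() functions come from the hunk above:

    #include <linux/pci.h>
    #include <linux/errno.h>
    #include <linux/types.h>
    /* The three helpers are declared in the x86 Xen PCI header (assumed location). */

    /* Hypothetical backend hook: called when `dev` is assigned to guest `domid`. */
    static int example_assign_to_guest(struct pci_dev *dev, uint16_t domid)
    {
            int err = xen_register_device_domain_owner(dev, domid);

            /* 0 on success, -EEXIST if already tracked, -ENODEV if the
               bookkeeping allocation failed (per the hunk above). */
            return err;
    }

    /* Hypothetical teardown: forget the owner when the device is handed back. */
    static void example_unassign_from_guest(struct pci_dev *dev)
    {
            /* Returns -ENODEV if nothing was registered; harmless to ignore here. */
            xen_unregister_device_domain_owner(dev);
    }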
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 7cb6424317f6..c58e0ea39ef5 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -699,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
699 | struct mm_struct *mm, | 699 | struct mm_struct *mm, |
700 | unsigned long va, unsigned int cpu) | 700 | unsigned long va, unsigned int cpu) |
701 | { | 701 | { |
702 | int tcpu; | ||
703 | int uvhub; | ||
704 | int locals = 0; | 702 | int locals = 0; |
705 | int remotes = 0; | 703 | int remotes = 0; |
706 | int hubs = 0; | 704 | int hubs = 0; |
705 | int tcpu; | ||
706 | int tpnode; | ||
707 | struct bau_desc *bau_desc; | 707 | struct bau_desc *bau_desc; |
708 | struct cpumask *flush_mask; | 708 | struct cpumask *flush_mask; |
709 | struct ptc_stats *stat; | 709 | struct ptc_stats *stat; |
710 | struct bau_control *bcp; | 710 | struct bau_control *bcp; |
711 | struct bau_control *tbcp; | 711 | struct bau_control *tbcp; |
712 | struct hub_and_pnode *hpp; | ||
712 | 713 | ||
713 | /* kernel was booted 'nobau' */ | 714 | /* kernel was booted 'nobau' */ |
714 | if (nobau) | 715 | if (nobau) |
@@ -750,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
750 | bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; | 751 | bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; |
751 | bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); | 752 | bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); |
752 | 753 | ||
753 | /* cpu statistics */ | ||
754 | for_each_cpu(tcpu, flush_mask) { | 754 | for_each_cpu(tcpu, flush_mask) { |
755 | uvhub = uv_cpu_to_blade_id(tcpu); | 755 | /* |
756 | bau_uvhub_set(uvhub, &bau_desc->distribution); | 756 | * The distribution vector is a bit map of pnodes, relative |
757 | if (uvhub == bcp->uvhub) | 757 | * to the partition base pnode (and the partition base nasid |
758 | * in the header). | ||
759 | * Translate cpu to pnode and hub using an array stored | ||
760 | * in local memory. | ||
761 | */ | ||
762 | hpp = &bcp->socket_master->target_hub_and_pnode[tcpu]; | ||
763 | tpnode = hpp->pnode - bcp->partition_base_pnode; | ||
764 | bau_uvhub_set(tpnode, &bau_desc->distribution); | ||
765 | if (hpp->uvhub == bcp->uvhub) | ||
758 | locals++; | 766 | locals++; |
759 | else | 767 | else |
760 | remotes++; | 768 | remotes++; |
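The rewritten loop above switches the BAU distribution map from blade ids to pnodes expressed relative to the partition base pnode, read from the per-socket lookup table that is filled in later in this patch. A standalone sketch of the indexing (the pnode values, the base, and the 256-bit map size are illustrative assumptions; the size matches the "broadcast to 256 uv hubs" comment further down):

    #include <stdio.h>
    #include <string.h>

    #define DIST_BITS 256                        /* assumed distribution-map size */

    static unsigned char dist[DIST_BITS / 8];

    static void dist_set(int relative_pnode)
    {
            dist[relative_pnode / 8] |= 1u << (relative_pnode % 8);
    }

    int main(void)
    {
            const int partition_base_pnode = 8;          /* smallest pnode in the partition */
            const int target_pnode[] = { 8, 10, 13 };    /* pnodes of the cpus being flushed */

            memset(dist, 0, sizeof(dist));
            for (unsigned i = 0; i < sizeof(target_pnode) / sizeof(target_pnode[0]); i++) {
                    int rel = target_pnode[i] - partition_base_pnode;
                    dist_set(rel);                       /* bits 0, 2 and 5, not 8, 10 and 13 */
                    printf("pnode %d -> bit %d\n", target_pnode[i], rel);
            }
            return 0;
    }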
@@ -855,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) | |||
855 | * an interrupt, but causes an error message to be returned to | 863 | * an interrupt, but causes an error message to be returned to |
856 | * the sender. | 864 | * the sender. |
857 | */ | 865 | */ |
858 | static void uv_enable_timeouts(void) | 866 | static void __init uv_enable_timeouts(void) |
859 | { | 867 | { |
860 | int uvhub; | 868 | int uvhub; |
861 | int nuvhubs; | 869 | int nuvhubs; |
@@ -1326,10 +1334,10 @@ static int __init uv_ptc_init(void) | |||
1326 | } | 1334 | } |
1327 | 1335 | ||
1328 | /* | 1336 | /* |
1329 | * initialize the sending side's sending buffers | 1337 | * Initialize the sending side's sending buffers. |
1330 | */ | 1338 | */ |
1331 | static void | 1339 | static void |
1332 | uv_activation_descriptor_init(int node, int pnode) | 1340 | uv_activation_descriptor_init(int node, int pnode, int base_pnode) |
1333 | { | 1341 | { |
1334 | int i; | 1342 | int i; |
1335 | int cpu; | 1343 | int cpu; |
@@ -1352,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode) | |||
1352 | n = pa >> uv_nshift; | 1360 | n = pa >> uv_nshift; |
1353 | m = pa & uv_mmask; | 1361 | m = pa & uv_mmask; |
1354 | 1362 | ||
1363 | /* the 14-bit pnode */ | ||
1355 | uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, | 1364 | uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, |
1356 | (n << UV_DESC_BASE_PNODE_SHIFT | m)); | 1365 | (n << UV_DESC_BASE_PNODE_SHIFT | m)); |
1357 | |||
1358 | /* | 1366 | /* |
1359 | * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each | 1367 | * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each |
1360 | * cpu even though we only use the first one; one descriptor can | 1368 | * cpu even though we only use the first one; one descriptor can |
1361 | * describe a broadcast to 256 uv hubs. | 1369 | * describe a broadcast to 256 uv hubs. |
1362 | */ | 1370 | */ |
@@ -1365,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode) | |||
1365 | memset(bd2, 0, sizeof(struct bau_desc)); | 1373 | memset(bd2, 0, sizeof(struct bau_desc)); |
1366 | bd2->header.sw_ack_flag = 1; | 1374 | bd2->header.sw_ack_flag = 1; |
1367 | /* | 1375 | /* |
1368 | * base_dest_nodeid is the nasid of the first uvhub | 1376 | * The base_dest_nasid set in the message header is the nasid |
1369 | * in the partition. The bit map will indicate uvhub numbers, | 1377 | * of the first uvhub in the partition. The bit map will |
1370 | * which are 0-N in a partition. Pnodes are unique system-wide. | 1378 | * indicate destination pnode numbers relative to that base. |
1379 | * They may not be consecutive if nasid striding is being used. | ||
1371 | */ | 1380 | */ |
1372 | bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); | 1381 | bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode); |
1373 | bd2->header.dest_subnodeid = 0x10; /* the LB */ | 1382 | bd2->header.dest_subnodeid = UV_LB_SUBNODEID; |
1374 | bd2->header.command = UV_NET_ENDPOINT_INTD; | 1383 | bd2->header.command = UV_NET_ENDPOINT_INTD; |
1375 | bd2->header.int_both = 1; | 1384 | bd2->header.int_both = 1; |
1376 | /* | 1385 | /* |
@@ -1442,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode) | |||
1442 | /* | 1451 | /* |
1443 | * Initialization of each UV hub's structures | 1452 | * Initialization of each UV hub's structures |
1444 | */ | 1453 | */ |
1445 | static void __init uv_init_uvhub(int uvhub, int vector) | 1454 | static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode) |
1446 | { | 1455 | { |
1447 | int node; | 1456 | int node; |
1448 | int pnode; | 1457 | int pnode; |
@@ -1450,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector) | |||
1450 | 1459 | ||
1451 | node = uvhub_to_first_node(uvhub); | 1460 | node = uvhub_to_first_node(uvhub); |
1452 | pnode = uv_blade_to_pnode(uvhub); | 1461 | pnode = uv_blade_to_pnode(uvhub); |
1453 | uv_activation_descriptor_init(node, pnode); | 1462 | uv_activation_descriptor_init(node, pnode, base_pnode); |
1454 | uv_payload_queue_init(node, pnode); | 1463 | uv_payload_queue_init(node, pnode); |
1455 | /* | 1464 | /* |
1456 | * the below initialization can't be in firmware because the | 1465 | * The below initialization can't be in firmware because the |
1457 | * messaging IRQ will be determined by the OS | 1466 | * messaging IRQ will be determined by the OS. |
1458 | */ | 1467 | */ |
1459 | apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; | 1468 | apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; |
1460 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, | 1469 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, |
@@ -1491,10 +1500,11 @@ calculate_destination_timeout(void) | |||
1491 | /* | 1500 | /* |
1492 | * initialize the bau_control structure for each cpu | 1501 | * initialize the bau_control structure for each cpu |
1493 | */ | 1502 | */ |
1494 | static int __init uv_init_per_cpu(int nuvhubs) | 1503 | static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) |
1495 | { | 1504 | { |
1496 | int i; | 1505 | int i; |
1497 | int cpu; | 1506 | int cpu; |
1507 | int tcpu; | ||
1498 | int pnode; | 1508 | int pnode; |
1499 | int uvhub; | 1509 | int uvhub; |
1500 | int have_hmaster; | 1510 | int have_hmaster; |
@@ -1528,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs) | |||
1528 | bcp = &per_cpu(bau_control, cpu); | 1538 | bcp = &per_cpu(bau_control, cpu); |
1529 | memset(bcp, 0, sizeof(struct bau_control)); | 1539 | memset(bcp, 0, sizeof(struct bau_control)); |
1530 | pnode = uv_cpu_hub_info(cpu)->pnode; | 1540 | pnode = uv_cpu_hub_info(cpu)->pnode; |
1541 | if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) { | ||
1542 | printk(KERN_EMERG | ||
1543 | "cpu %d pnode %d-%d beyond %d; BAU disabled\n", | ||
1544 | cpu, pnode, base_part_pnode, | ||
1545 | UV_DISTRIBUTION_SIZE); | ||
1546 | return 1; | ||
1547 | } | ||
1548 | bcp->osnode = cpu_to_node(cpu); | ||
1549 | bcp->partition_base_pnode = uv_partition_base_pnode; | ||
1531 | uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; | 1550 | uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; |
1532 | *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); | 1551 | *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); |
1533 | bdp = &uvhub_descs[uvhub]; | 1552 | bdp = &uvhub_descs[uvhub]; |
@@ -1536,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs) | |||
1536 | bdp->pnode = pnode; | 1555 | bdp->pnode = pnode; |
1537 | /* kludge: 'assuming' one node per socket, and assuming that | 1556 | /* kludge: 'assuming' one node per socket, and assuming that |
1538 | disabling a socket just leaves a gap in node numbers */ | 1557 | disabling a socket just leaves a gap in node numbers */ |
1539 | socket = (cpu_to_node(cpu) & 1); | 1558 | socket = bcp->osnode & 1; |
1540 | bdp->socket_mask |= (1 << socket); | 1559 | bdp->socket_mask |= (1 << socket); |
1541 | sdp = &bdp->socket[socket]; | 1560 | sdp = &bdp->socket[socket]; |
1542 | sdp->cpu_number[sdp->num_cpus] = cpu; | 1561 | sdp->cpu_number[sdp->num_cpus] = cpu; |
@@ -1585,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs) | |||
1585 | nextsocket: | 1604 | nextsocket: |
1586 | socket++; | 1605 | socket++; |
1587 | socket_mask = (socket_mask >> 1); | 1606 | socket_mask = (socket_mask >> 1); |
1607 | /* each socket gets a local array of pnodes/hubs */ | ||
1608 | bcp = smaster; | ||
1609 | bcp->target_hub_and_pnode = kmalloc_node( | ||
1610 | sizeof(struct hub_and_pnode) * | ||
1611 | num_possible_cpus(), GFP_KERNEL, bcp->osnode); | ||
1612 | memset(bcp->target_hub_and_pnode, 0, | ||
1613 | sizeof(struct hub_and_pnode) * | ||
1614 | num_possible_cpus()); | ||
1615 | for_each_present_cpu(tcpu) { | ||
1616 | bcp->target_hub_and_pnode[tcpu].pnode = | ||
1617 | uv_cpu_hub_info(tcpu)->pnode; | ||
1618 | bcp->target_hub_and_pnode[tcpu].uvhub = | ||
1619 | uv_cpu_hub_info(tcpu)->numa_blade_id; | ||
1620 | } | ||
1588 | } | 1621 | } |
1589 | } | 1622 | } |
1590 | kfree(uvhub_descs); | 1623 | kfree(uvhub_descs); |
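The target_hub_and_pnode allocation above gives each socket master a node-local cpu-to-(pnode, hub) table, so the flush path in the first hunk of this file can index an array instead of calling the per-cpu topology helpers for every target cpu. A standalone sketch of the idea; the topology functions and values are invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct hub_and_pnode { short uvhub, pnode; };   /* mirrors the kernel struct's role */

    /* Stand-ins for the per-cpu topology lookups done once at init time. */
    static int example_cpu_to_pnode(int cpu) { return 8 + cpu / 16; }
    static int example_cpu_to_hub(int cpu)   { return cpu / 16; }

    int main(void)
    {
            const int ncpus = 64;
            struct hub_and_pnode *tbl = calloc(ncpus, sizeof(*tbl));

            if (!tbl)
                    return 1;
            /* Fill the cache once; the hot path then indexes it with no function calls. */
            for (int cpu = 0; cpu < ncpus; cpu++) {
                    tbl[cpu].pnode = example_cpu_to_pnode(cpu);
                    tbl[cpu].uvhub = example_cpu_to_hub(cpu);
            }
            printf("cpu 40 -> pnode %d, hub %d\n", tbl[40].pnode, tbl[40].uvhub);
            free(tbl);
            return 0;
    }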
@@ -1637,21 +1670,22 @@ static int __init uv_bau_init(void) | |||
1637 | spin_lock_init(&disable_lock); | 1670 | spin_lock_init(&disable_lock); |
1638 | congested_cycles = microsec_2_cycles(congested_response_us); | 1671 | congested_cycles = microsec_2_cycles(congested_response_us); |
1639 | 1672 | ||
1640 | if (uv_init_per_cpu(nuvhubs)) { | ||
1641 | nobau = 1; | ||
1642 | return 0; | ||
1643 | } | ||
1644 | |||
1645 | uv_partition_base_pnode = 0x7fffffff; | 1673 | uv_partition_base_pnode = 0x7fffffff; |
1646 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) | 1674 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) { |
1647 | if (uv_blade_nr_possible_cpus(uvhub) && | 1675 | if (uv_blade_nr_possible_cpus(uvhub) && |
1648 | (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) | 1676 | (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) |
1649 | uv_partition_base_pnode = uv_blade_to_pnode(uvhub); | 1677 | uv_partition_base_pnode = uv_blade_to_pnode(uvhub); |
1678 | } | ||
1679 | |||
1680 | if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) { | ||
1681 | nobau = 1; | ||
1682 | return 0; | ||
1683 | } | ||
1650 | 1684 | ||
1651 | vector = UV_BAU_MESSAGE; | 1685 | vector = UV_BAU_MESSAGE; |
1652 | for_each_possible_blade(uvhub) | 1686 | for_each_possible_blade(uvhub) |
1653 | if (uv_blade_nr_possible_cpus(uvhub)) | 1687 | if (uv_blade_nr_possible_cpus(uvhub)) |
1654 | uv_init_uvhub(uvhub, vector); | 1688 | uv_init_uvhub(uvhub, vector, uv_partition_base_pnode); |
1655 | 1689 | ||
1656 | uv_enable_timeouts(); | 1690 | uv_enable_timeouts(); |
1657 | alloc_intr_gate(vector, uv_bau_message_intr1); | 1691 | alloc_intr_gate(vector, uv_bau_message_intr1); |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index e3c6a06cf725..dd7b88f2ec7a 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -235,7 +235,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
235 | *dx &= maskedx; | 235 | *dx &= maskedx; |
236 | } | 236 | } |
237 | 237 | ||
238 | static __init void xen_init_cpuid_mask(void) | 238 | static void __init xen_init_cpuid_mask(void) |
239 | { | 239 | { |
240 | unsigned int ax, bx, cx, dx; | 240 | unsigned int ax, bx, cx, dx; |
241 | unsigned int xsave_mask; | 241 | unsigned int xsave_mask; |
@@ -400,7 +400,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr) | |||
400 | /* | 400 | /* |
401 | * load_gdt for early boot, when the gdt is only mapped once | 401 | * load_gdt for early boot, when the gdt is only mapped once |
402 | */ | 402 | */ |
403 | static __init void xen_load_gdt_boot(const struct desc_ptr *dtr) | 403 | static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) |
404 | { | 404 | { |
405 | unsigned long va = dtr->address; | 405 | unsigned long va = dtr->address; |
406 | unsigned int size = dtr->size + 1; | 406 | unsigned int size = dtr->size + 1; |
@@ -662,7 +662,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry, | |||
662 | * Version of write_gdt_entry for use at early boot-time needed to | 662 | * Version of write_gdt_entry for use at early boot-time needed to |
663 | * update an entry as simply as possible. | 663 | * update an entry as simply as possible. |
664 | */ | 664 | */ |
665 | static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, | 665 | static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, |
666 | const void *desc, int type) | 666 | const void *desc, int type) |
667 | { | 667 | { |
668 | switch (type) { | 668 | switch (type) { |
@@ -933,18 +933,18 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf, | |||
933 | return ret; | 933 | return ret; |
934 | } | 934 | } |
935 | 935 | ||
936 | static const struct pv_info xen_info __initdata = { | 936 | static const struct pv_info xen_info __initconst = { |
937 | .paravirt_enabled = 1, | 937 | .paravirt_enabled = 1, |
938 | .shared_kernel_pmd = 0, | 938 | .shared_kernel_pmd = 0, |
939 | 939 | ||
940 | .name = "Xen", | 940 | .name = "Xen", |
941 | }; | 941 | }; |
942 | 942 | ||
943 | static const struct pv_init_ops xen_init_ops __initdata = { | 943 | static const struct pv_init_ops xen_init_ops __initconst = { |
944 | .patch = xen_patch, | 944 | .patch = xen_patch, |
945 | }; | 945 | }; |
946 | 946 | ||
947 | static const struct pv_cpu_ops xen_cpu_ops __initdata = { | 947 | static const struct pv_cpu_ops xen_cpu_ops __initconst = { |
948 | .cpuid = xen_cpuid, | 948 | .cpuid = xen_cpuid, |
949 | 949 | ||
950 | .set_debugreg = xen_set_debugreg, | 950 | .set_debugreg = xen_set_debugreg, |
@@ -1004,7 +1004,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = { | |||
1004 | .end_context_switch = xen_end_context_switch, | 1004 | .end_context_switch = xen_end_context_switch, |
1005 | }; | 1005 | }; |
1006 | 1006 | ||
1007 | static const struct pv_apic_ops xen_apic_ops __initdata = { | 1007 | static const struct pv_apic_ops xen_apic_ops __initconst = { |
1008 | #ifdef CONFIG_X86_LOCAL_APIC | 1008 | #ifdef CONFIG_X86_LOCAL_APIC |
1009 | .startup_ipi_hook = paravirt_nop, | 1009 | .startup_ipi_hook = paravirt_nop, |
1010 | #endif | 1010 | #endif |
@@ -1055,7 +1055,7 @@ int xen_panic_handler_init(void) | |||
1055 | return 0; | 1055 | return 0; |
1056 | } | 1056 | } |
1057 | 1057 | ||
1058 | static const struct machine_ops __initdata xen_machine_ops = { | 1058 | static const struct machine_ops xen_machine_ops __initconst = { |
1059 | .restart = xen_restart, | 1059 | .restart = xen_restart, |
1060 | .halt = xen_machine_halt, | 1060 | .halt = xen_machine_halt, |
1061 | .power_off = xen_machine_halt, | 1061 | .power_off = xen_machine_halt, |
@@ -1332,7 +1332,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, | |||
1332 | return NOTIFY_OK; | 1332 | return NOTIFY_OK; |
1333 | } | 1333 | } |
1334 | 1334 | ||
1335 | static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = { | 1335 | static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = { |
1336 | .notifier_call = xen_hvm_cpu_notify, | 1336 | .notifier_call = xen_hvm_cpu_notify, |
1337 | }; | 1337 | }; |
1338 | 1338 | ||
@@ -1381,7 +1381,7 @@ bool xen_hvm_need_lapic(void) | |||
1381 | } | 1381 | } |
1382 | EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); | 1382 | EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); |
1383 | 1383 | ||
1384 | const __refconst struct hypervisor_x86 x86_hyper_xen_hvm = { | 1384 | const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = { |
1385 | .name = "Xen HVM", | 1385 | .name = "Xen HVM", |
1386 | .detect = xen_hvm_platform, | 1386 | .detect = xen_hvm_platform, |
1387 | .init_platform = xen_hvm_guest_init, | 1387 | .init_platform = xen_hvm_guest_init, |
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 6a6fe8939645..8bbb465b6f0a 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c | |||
@@ -113,7 +113,7 @@ static void xen_halt(void) | |||
113 | xen_safe_halt(); | 113 | xen_safe_halt(); |
114 | } | 114 | } |
115 | 115 | ||
116 | static const struct pv_irq_ops xen_irq_ops __initdata = { | 116 | static const struct pv_irq_ops xen_irq_ops __initconst = { |
117 | .save_fl = PV_CALLEE_SAVE(xen_save_fl), | 117 | .save_fl = PV_CALLEE_SAVE(xen_save_fl), |
118 | .restore_fl = PV_CALLEE_SAVE(xen_restore_fl), | 118 | .restore_fl = PV_CALLEE_SAVE(xen_restore_fl), |
119 | .irq_disable = PV_CALLEE_SAVE(xen_irq_disable), | 119 | .irq_disable = PV_CALLEE_SAVE(xen_irq_disable), |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 55c965b38c27..02d752460371 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1054,7 +1054,7 @@ void xen_mm_pin_all(void) | |||
1054 | * that's before we have page structures to store the bits. So do all | 1054 | * that's before we have page structures to store the bits. So do all |
1055 | * the book-keeping now. | 1055 | * the book-keeping now. |
1056 | */ | 1056 | */ |
1057 | static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page, | 1057 | static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page, |
1058 | enum pt_level level) | 1058 | enum pt_level level) |
1059 | { | 1059 | { |
1060 | SetPagePinned(page); | 1060 | SetPagePinned(page); |
@@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info) | |||
1187 | 1187 | ||
1188 | active_mm = percpu_read(cpu_tlbstate.active_mm); | 1188 | active_mm = percpu_read(cpu_tlbstate.active_mm); |
1189 | 1189 | ||
1190 | if (active_mm == mm) | 1190 | if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK) |
1191 | leave_mm(smp_processor_id()); | 1191 | leave_mm(smp_processor_id()); |
1192 | 1192 | ||
1193 | /* If this cpu still has a stale cr3 reference, then make sure | 1193 | /* If this cpu still has a stale cr3 reference, then make sure |
@@ -1271,13 +1271,27 @@ void xen_exit_mmap(struct mm_struct *mm) | |||
1271 | spin_unlock(&mm->page_table_lock); | 1271 | spin_unlock(&mm->page_table_lock); |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | static __init void xen_pagetable_setup_start(pgd_t *base) | 1274 | static void __init xen_pagetable_setup_start(pgd_t *base) |
1275 | { | 1275 | { |
1276 | } | 1276 | } |
1277 | 1277 | ||
1278 | static __init void xen_mapping_pagetable_reserve(u64 start, u64 end) | ||
1279 | { | ||
1280 | /* reserve the range used */ | ||
1281 | native_pagetable_reserve(start, end); | ||
1282 | |||
1283 | /* set as RW the rest */ | ||
1284 | printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end, | ||
1285 | PFN_PHYS(pgt_buf_top)); | ||
1286 | while (end < PFN_PHYS(pgt_buf_top)) { | ||
1287 | make_lowmem_page_readwrite(__va(end)); | ||
1288 | end += PAGE_SIZE; | ||
1289 | } | ||
1290 | } | ||
1291 | |||
1278 | static void xen_post_allocator_init(void); | 1292 | static void xen_post_allocator_init(void); |
1279 | 1293 | ||
1280 | static __init void xen_pagetable_setup_done(pgd_t *base) | 1294 | static void __init xen_pagetable_setup_done(pgd_t *base) |
1281 | { | 1295 | { |
1282 | xen_setup_shared_info(); | 1296 | xen_setup_shared_info(); |
1283 | xen_post_allocator_init(); | 1297 | xen_post_allocator_init(); |
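The new xen_mapping_pagetable_reserve() above replaces the removed mark_rw_past_pgt() machinery (next hunk): instead of guessing after the fact, Xen hooks the point where init_memory_mapping() reserves its page-table allocation and converts the unused tail [end, pgt_buf_top) back to RW right there. A sketch of how such a hook is installed; everything except the x86_init field and native_pagetable_reserve() (both taken from this diff) is hypothetical, and the header locations are assumptions. The real installation appears in the xen_init_mmu_ops hunk further down.

    #include <linux/init.h>
    #include <asm/x86_init.h>
    /* native_pagetable_reserve() is declared with the x86 pgtable definitions (assumed). */

    /* Hypothetical override: do the default reservation, then apply
       platform-specific fixups to the unused tail of the page-table buffer. */
    static void __init example_pagetable_reserve(u64 start, u64 end)
    {
            native_pagetable_reserve(start, end);
            /* ... fix up [end, PFN_PHYS(pgt_buf_top)) here, as Xen does above ... */
    }

    static void __init example_install_hook(void)
    {
            x86_init.mapping.pagetable_reserve = example_pagetable_reserve;
    }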
@@ -1463,119 +1477,6 @@ static int xen_pgd_alloc(struct mm_struct *mm) | |||
1463 | return ret; | 1477 | return ret; |
1464 | } | 1478 | } |
1465 | 1479 | ||
1466 | #ifdef CONFIG_X86_64 | ||
1467 | static __initdata u64 __last_pgt_set_rw = 0; | ||
1468 | static __initdata u64 __pgt_buf_start = 0; | ||
1469 | static __initdata u64 __pgt_buf_end = 0; | ||
1470 | static __initdata u64 __pgt_buf_top = 0; | ||
1471 | /* | ||
1472 | * As a consequence of the commit: | ||
1473 | * | ||
1474 | * commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e | ||
1475 | * Author: Yinghai Lu <yinghai@kernel.org> | ||
1476 | * Date: Fri Dec 17 16:58:28 2010 -0800 | ||
1477 | * | ||
1478 | * x86-64, mm: Put early page table high | ||
1479 | * | ||
1480 | * at some point init_memory_mapping is going to reach the pagetable pages | ||
1481 | * area and map those pages too (mapping them as normal memory that falls | ||
1482 | * in the range of addresses passed to init_memory_mapping as argument). | ||
1483 | * Some of those pages are already pagetable pages (they are in the range | ||
1484 | * pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and | ||
1485 | * everything is fine. | ||
1486 | * Some of these pages are not pagetable pages yet (they fall in the range | ||
1487 | * pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they | ||
1488 | * are going to be mapped RW. When these pages become pagetable pages and | ||
1489 | * are hooked into the pagetable, xen will find that the guest has already | ||
1490 | * a RW mapping of them somewhere and fail the operation. | ||
1491 | * The reason Xen requires pagetables to be RO is that the hypervisor needs | ||
1492 | * to verify that the pagetables are valid before using them. The validation | ||
1493 | * operations are called "pinning". | ||
1494 | * | ||
1495 | * In order to fix the issue we mark all the pages in the entire range | ||
1496 | * pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation | ||
1497 | * is completed only the range pgt_buf_start-pgt_buf_end is reserved by | ||
1498 | * init_memory_mapping. Hence the kernel is going to crash as soon as one | ||
1499 | * of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those | ||
1500 | * ranges are RO). | ||
1501 | * | ||
1502 | * For this reason, 'mark_rw_past_pgt' is introduced which is called _after_ | ||
1503 | * the init_memory_mapping has completed (in a perfect world we would | ||
1504 | * call this function from init_memory_mapping, but lets ignore that). | ||
1505 | * | ||
1506 | * Because we are called _after_ init_memory_mapping the pgt_buf_[start, | ||
1507 | * end,top] have all changed to new values (b/c init_memory_mapping | ||
1508 | * is called and setting up another new page-table). Hence, the first time | ||
1509 | * we enter this function, we save away the pgt_buf_start value and update | ||
1510 | * the pgt_buf_[end,top]. | ||
1511 | * | ||
1512 | * When we detect that the "old" pgt_buf_start through pgt_buf_end | ||
1513 | * PFNs have been reserved (so memblock_x86_reserve_range has been called), | ||
1514 | * we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top. | ||
1515 | * | ||
1516 | * And then we update those "old" pgt_buf_[end|top] with the new ones | ||
1517 | * so that we can redo this on the next pagetable. | ||
1518 | */ | ||
1519 | static __init void mark_rw_past_pgt(void) { | ||
1520 | |||
1521 | if (pgt_buf_end > pgt_buf_start) { | ||
1522 | u64 addr, size; | ||
1523 | |||
1524 | /* Save it away. */ | ||
1525 | if (!__pgt_buf_start) { | ||
1526 | __pgt_buf_start = pgt_buf_start; | ||
1527 | __pgt_buf_end = pgt_buf_end; | ||
1528 | __pgt_buf_top = pgt_buf_top; | ||
1529 | return; | ||
1530 | } | ||
1531 | /* If we get the range that starts at __pgt_buf_end that means | ||
1532 | * the range is reserved, and that in 'init_memory_mapping' | ||
1533 | * the 'memblock_x86_reserve_range' has been called with the | ||
1534 | * outdated __pgt_buf_start, __pgt_buf_end (the "new" | ||
1535 | * pgt_buf_[start|end|top] refer now to a new pagetable. | ||
1536 | * Note: we are called _after_ the pgt_buf_[..] have been | ||
1537 | * updated.*/ | ||
1538 | |||
1539 | addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start), | ||
1540 | &size, PAGE_SIZE); | ||
1541 | |||
1542 | /* Still not reserved, meaning 'memblock_x86_reserve_range' | ||
1543 | * hasn't been called yet. Update the _end and _top.*/ | ||
1544 | if (addr == PFN_PHYS(__pgt_buf_start)) { | ||
1545 | __pgt_buf_end = pgt_buf_end; | ||
1546 | __pgt_buf_top = pgt_buf_top; | ||
1547 | return; | ||
1548 | } | ||
1549 | |||
1550 | /* OK, the area is reserved, meaning it is time for us to | ||
1551 | * set RW for the old end->top PFNs. */ | ||
1552 | |||
1553 | /* ..unless we had already done this. */ | ||
1554 | if (__pgt_buf_end == __last_pgt_set_rw) | ||
1555 | return; | ||
1556 | |||
1557 | addr = PFN_PHYS(__pgt_buf_end); | ||
1558 | |||
1559 | /* set as RW the rest */ | ||
1560 | printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", | ||
1561 | PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top)); | ||
1562 | |||
1563 | while (addr < PFN_PHYS(__pgt_buf_top)) { | ||
1564 | make_lowmem_page_readwrite(__va(addr)); | ||
1565 | addr += PAGE_SIZE; | ||
1566 | } | ||
1567 | /* And update everything so that we are ready for the next | ||
1568 | * pagetable (the one created for regions past 4GB) */ | ||
1569 | __last_pgt_set_rw = __pgt_buf_end; | ||
1570 | __pgt_buf_start = pgt_buf_start; | ||
1571 | __pgt_buf_end = pgt_buf_end; | ||
1572 | __pgt_buf_top = pgt_buf_top; | ||
1573 | } | ||
1574 | return; | ||
1575 | } | ||
1576 | #else | ||
1577 | static __init void mark_rw_past_pgt(void) { } | ||
1578 | #endif | ||
1579 | static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | 1480 | static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) |
1580 | { | 1481 | { |
1581 | #ifdef CONFIG_X86_64 | 1482 | #ifdef CONFIG_X86_64 |
@@ -1587,7 +1488,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
1587 | } | 1488 | } |
1588 | 1489 | ||
1589 | #ifdef CONFIG_X86_32 | 1490 | #ifdef CONFIG_X86_32 |
1590 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | 1491 | static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) |
1591 | { | 1492 | { |
1592 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ | 1493 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ |
1593 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) | 1494 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) |
@@ -1597,19 +1498,11 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | |||
1597 | return pte; | 1498 | return pte; |
1598 | } | 1499 | } |
1599 | #else /* CONFIG_X86_64 */ | 1500 | #else /* CONFIG_X86_64 */ |
1600 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | 1501 | static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) |
1601 | { | 1502 | { |
1602 | unsigned long pfn = pte_pfn(pte); | 1503 | unsigned long pfn = pte_pfn(pte); |
1603 | 1504 | ||
1604 | /* | 1505 | /* |
1605 | * A bit of optimization. We do not need to call the workaround | ||
1606 | * when xen_set_pte_init is called with a PTE with 0 as PFN. | ||
1607 | * That is b/c the pagetable at that point are just being populated | ||
1608 | * with empty values and we can save some cycles by not calling | ||
1609 | * the 'memblock' code.*/ | ||
1610 | if (pfn) | ||
1611 | mark_rw_past_pgt(); | ||
1612 | /* | ||
1613 | * If the new pfn is within the range of the newly allocated | 1506 | * If the new pfn is within the range of the newly allocated |
1614 | * kernel pagetable, and it isn't being mapped into an | 1507 | * kernel pagetable, and it isn't being mapped into an |
1615 | * early_ioremap fixmap slot as a freshly allocated page, make sure | 1508 | * early_ioremap fixmap slot as a freshly allocated page, make sure |
@@ -1626,7 +1519,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | |||
1626 | 1519 | ||
1627 | /* Init-time set_pte while constructing initial pagetables, which | 1520 | /* Init-time set_pte while constructing initial pagetables, which |
1628 | doesn't allow RO pagetable pages to be remapped RW */ | 1521 | doesn't allow RO pagetable pages to be remapped RW */ |
1629 | static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) | 1522 | static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) |
1630 | { | 1523 | { |
1631 | pte = mask_rw_pte(ptep, pte); | 1524 | pte = mask_rw_pte(ptep, pte); |
1632 | 1525 | ||
@@ -1644,7 +1537,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) | |||
1644 | 1537 | ||
1645 | /* Early in boot, while setting up the initial pagetable, assume | 1538 | /* Early in boot, while setting up the initial pagetable, assume |
1646 | everything is pinned. */ | 1539 | everything is pinned. */ |
1647 | static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) | 1540 | static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) |
1648 | { | 1541 | { |
1649 | #ifdef CONFIG_FLATMEM | 1542 | #ifdef CONFIG_FLATMEM |
1650 | BUG_ON(mem_map); /* should only be used early */ | 1543 | BUG_ON(mem_map); /* should only be used early */ |
@@ -1654,7 +1547,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) | |||
1654 | } | 1547 | } |
1655 | 1548 | ||
1656 | /* Used for pmd and pud */ | 1549 | /* Used for pmd and pud */ |
1657 | static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) | 1550 | static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) |
1658 | { | 1551 | { |
1659 | #ifdef CONFIG_FLATMEM | 1552 | #ifdef CONFIG_FLATMEM |
1660 | BUG_ON(mem_map); /* should only be used early */ | 1553 | BUG_ON(mem_map); /* should only be used early */ |
@@ -1664,13 +1557,13 @@ static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) | |||
1664 | 1557 | ||
1665 | /* Early release_pte assumes that all pts are pinned, since there's | 1558 | /* Early release_pte assumes that all pts are pinned, since there's |
1666 | only init_mm and anything attached to that is pinned. */ | 1559 | only init_mm and anything attached to that is pinned. */ |
1667 | static __init void xen_release_pte_init(unsigned long pfn) | 1560 | static void __init xen_release_pte_init(unsigned long pfn) |
1668 | { | 1561 | { |
1669 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); | 1562 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); |
1670 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); | 1563 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
1671 | } | 1564 | } |
1672 | 1565 | ||
1673 | static __init void xen_release_pmd_init(unsigned long pfn) | 1566 | static void __init xen_release_pmd_init(unsigned long pfn) |
1674 | { | 1567 | { |
1675 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); | 1568 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
1676 | } | 1569 | } |
@@ -1796,7 +1689,7 @@ static void set_page_prot(void *addr, pgprot_t prot) | |||
1796 | BUG(); | 1689 | BUG(); |
1797 | } | 1690 | } |
1798 | 1691 | ||
1799 | static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) | 1692 | static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) |
1800 | { | 1693 | { |
1801 | unsigned pmdidx, pteidx; | 1694 | unsigned pmdidx, pteidx; |
1802 | unsigned ident_pte; | 1695 | unsigned ident_pte; |
@@ -1879,7 +1772,7 @@ static void convert_pfn_mfn(void *v) | |||
1879 | * of the physical mapping once some sort of allocator has been set | 1772 | * of the physical mapping once some sort of allocator has been set |
1880 | * up. | 1773 | * up. |
1881 | */ | 1774 | */ |
1882 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | 1775 | pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, |
1883 | unsigned long max_pfn) | 1776 | unsigned long max_pfn) |
1884 | { | 1777 | { |
1885 | pud_t *l3; | 1778 | pud_t *l3; |
@@ -1950,7 +1843,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | |||
1950 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); | 1843 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); |
1951 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); | 1844 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); |
1952 | 1845 | ||
1953 | static __init void xen_write_cr3_init(unsigned long cr3) | 1846 | static void __init xen_write_cr3_init(unsigned long cr3) |
1954 | { | 1847 | { |
1955 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); | 1848 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); |
1956 | 1849 | ||
@@ -1987,7 +1880,7 @@ static __init void xen_write_cr3_init(unsigned long cr3) | |||
1987 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | 1880 | pv_mmu_ops.write_cr3 = &xen_write_cr3; |
1988 | } | 1881 | } |
1989 | 1882 | ||
1990 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | 1883 | pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, |
1991 | unsigned long max_pfn) | 1884 | unsigned long max_pfn) |
1992 | { | 1885 | { |
1993 | pmd_t *kernel_pmd; | 1886 | pmd_t *kernel_pmd; |
@@ -2093,7 +1986,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |||
2093 | #endif | 1986 | #endif |
2094 | } | 1987 | } |
2095 | 1988 | ||
2096 | __init void xen_ident_map_ISA(void) | 1989 | void __init xen_ident_map_ISA(void) |
2097 | { | 1990 | { |
2098 | unsigned long pa; | 1991 | unsigned long pa; |
2099 | 1992 | ||
@@ -2116,10 +2009,8 @@ __init void xen_ident_map_ISA(void) | |||
2116 | xen_flush_tlb(); | 2009 | xen_flush_tlb(); |
2117 | } | 2010 | } |
2118 | 2011 | ||
2119 | static __init void xen_post_allocator_init(void) | 2012 | static void __init xen_post_allocator_init(void) |
2120 | { | 2013 | { |
2121 | mark_rw_past_pgt(); | ||
2122 | |||
2123 | #ifdef CONFIG_XEN_DEBUG | 2014 | #ifdef CONFIG_XEN_DEBUG |
2124 | pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); | 2015 | pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); |
2125 | #endif | 2016 | #endif |
@@ -2155,7 +2046,7 @@ static void xen_leave_lazy_mmu(void) | |||
2155 | preempt_enable(); | 2046 | preempt_enable(); |
2156 | } | 2047 | } |
2157 | 2048 | ||
2158 | static const struct pv_mmu_ops xen_mmu_ops __initdata = { | 2049 | static const struct pv_mmu_ops xen_mmu_ops __initconst = { |
2159 | .read_cr2 = xen_read_cr2, | 2050 | .read_cr2 = xen_read_cr2, |
2160 | .write_cr2 = xen_write_cr2, | 2051 | .write_cr2 = xen_write_cr2, |
2161 | 2052 | ||
@@ -2228,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
2228 | 2119 | ||
2229 | void __init xen_init_mmu_ops(void) | 2120 | void __init xen_init_mmu_ops(void) |
2230 | { | 2121 | { |
2122 | x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve; | ||
2231 | x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; | 2123 | x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; |
2232 | x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; | 2124 | x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; |
2233 | pv_mmu_ops = xen_mmu_ops; | 2125 | pv_mmu_ops = xen_mmu_ops; |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 141eb0de8b06..58efeb9d5440 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -522,11 +522,20 @@ static bool __init __early_alloc_p2m(unsigned long pfn) | |||
522 | /* Boundary cross-over for the edges: */ | 522 | /* Boundary cross-over for the edges: */ |
523 | if (idx) { | 523 | if (idx) { |
524 | unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); | 524 | unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); |
525 | unsigned long *mid_mfn_p; | ||
525 | 526 | ||
526 | p2m_init(p2m); | 527 | p2m_init(p2m); |
527 | 528 | ||
528 | p2m_top[topidx][mididx] = p2m; | 529 | p2m_top[topidx][mididx] = p2m; |
529 | 530 | ||
531 | /* For save/restore we need the MFN of the P2M saved */ | ||
532 | |||
533 | mid_mfn_p = p2m_top_mfn_p[topidx]; | ||
534 | WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing), | ||
535 | "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n", | ||
536 | topidx, mididx); | ||
537 | mid_mfn_p[mididx] = virt_to_mfn(p2m); | ||
538 | |||
530 | } | 539 | } |
531 | return idx != 0; | 540 | return idx != 0; |
532 | } | 541 | } |
@@ -549,12 +558,29 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s, | |||
549 | pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) | 558 | pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) |
550 | { | 559 | { |
551 | unsigned topidx = p2m_top_index(pfn); | 560 | unsigned topidx = p2m_top_index(pfn); |
552 | if (p2m_top[topidx] == p2m_mid_missing) { | 561 | unsigned long *mid_mfn_p; |
553 | unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); | 562 | unsigned long **mid; |
563 | |||
564 | mid = p2m_top[topidx]; | ||
565 | mid_mfn_p = p2m_top_mfn_p[topidx]; | ||
566 | if (mid == p2m_mid_missing) { | ||
567 | mid = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
554 | 568 | ||
555 | p2m_mid_init(mid); | 569 | p2m_mid_init(mid); |
556 | 570 | ||
557 | p2m_top[topidx] = mid; | 571 | p2m_top[topidx] = mid; |
572 | |||
573 | BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); | ||
574 | } | ||
575 | /* And the save/restore P2M tables.. */ | ||
576 | if (mid_mfn_p == p2m_mid_missing_mfn) { | ||
577 | mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
578 | p2m_mid_mfn_init(mid_mfn_p); | ||
579 | |||
580 | p2m_top_mfn_p[topidx] = mid_mfn_p; | ||
581 | p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); | ||
582 | /* Note: we don't set mid_mfn_p[mididx] here, | ||
583 | * look in __early_alloc_p2m */ | ||
558 | } | 584 | } |
559 | } | 585 | } |
560 | 586 | ||
@@ -650,7 +676,7 @@ static unsigned long mfn_hash(unsigned long mfn) | |||
650 | } | 676 | } |
651 | 677 | ||
652 | /* Add an MFN override for a particular page */ | 678 | /* Add an MFN override for a particular page */ |
653 | int m2p_add_override(unsigned long mfn, struct page *page) | 679 | int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) |
654 | { | 680 | { |
655 | unsigned long flags; | 681 | unsigned long flags; |
656 | unsigned long pfn; | 682 | unsigned long pfn; |
@@ -662,7 +688,6 @@ int m2p_add_override(unsigned long mfn, struct page *page) | |||
662 | if (!PageHighMem(page)) { | 688 | if (!PageHighMem(page)) { |
663 | address = (unsigned long)__va(pfn << PAGE_SHIFT); | 689 | address = (unsigned long)__va(pfn << PAGE_SHIFT); |
664 | ptep = lookup_address(address, &level); | 690 | ptep = lookup_address(address, &level); |
665 | |||
666 | if (WARN(ptep == NULL || level != PG_LEVEL_4K, | 691 | if (WARN(ptep == NULL || level != PG_LEVEL_4K, |
667 | "m2p_add_override: pfn %lx not mapped", pfn)) | 692 | "m2p_add_override: pfn %lx not mapped", pfn)) |
668 | return -EINVAL; | 693 | return -EINVAL; |
@@ -674,18 +699,17 @@ int m2p_add_override(unsigned long mfn, struct page *page) | |||
674 | if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) | 699 | if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) |
675 | return -ENOMEM; | 700 | return -ENOMEM; |
676 | 701 | ||
677 | if (!PageHighMem(page)) | 702 | if (clear_pte && !PageHighMem(page)) |
678 | /* Just zap old mapping for now */ | 703 | /* Just zap old mapping for now */ |
679 | pte_clear(&init_mm, address, ptep); | 704 | pte_clear(&init_mm, address, ptep); |
680 | |||
681 | spin_lock_irqsave(&m2p_override_lock, flags); | 705 | spin_lock_irqsave(&m2p_override_lock, flags); |
682 | list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); | 706 | list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); |
683 | spin_unlock_irqrestore(&m2p_override_lock, flags); | 707 | spin_unlock_irqrestore(&m2p_override_lock, flags); |
684 | 708 | ||
685 | return 0; | 709 | return 0; |
686 | } | 710 | } |
687 | 711 | EXPORT_SYMBOL_GPL(m2p_add_override); | |
688 | int m2p_remove_override(struct page *page) | 712 | int m2p_remove_override(struct page *page, bool clear_pte) |
689 | { | 713 | { |
690 | unsigned long flags; | 714 | unsigned long flags; |
691 | unsigned long mfn; | 715 | unsigned long mfn; |
@@ -713,7 +737,7 @@ int m2p_remove_override(struct page *page) | |||
713 | spin_unlock_irqrestore(&m2p_override_lock, flags); | 737 | spin_unlock_irqrestore(&m2p_override_lock, flags); |
714 | set_phys_to_machine(pfn, page->index); | 738 | set_phys_to_machine(pfn, page->index); |
715 | 739 | ||
716 | if (!PageHighMem(page)) | 740 | if (clear_pte && !PageHighMem(page)) |
717 | set_pte_at(&init_mm, address, ptep, | 741 | set_pte_at(&init_mm, address, ptep, |
718 | pfn_pte(pfn, PAGE_KERNEL)); | 742 | pfn_pte(pfn, PAGE_KERNEL)); |
719 | /* No tlb flush necessary because the caller already | 743 | /* No tlb flush necessary because the caller already |
@@ -721,6 +745,7 @@ int m2p_remove_override(struct page *page) | |||
721 | 745 | ||
722 | return 0; | 746 | return 0; |
723 | } | 747 | } |
748 | EXPORT_SYMBOL_GPL(m2p_remove_override); | ||
724 | 749 | ||
725 | struct page *m2p_find_override(unsigned long mfn) | 750 | struct page *m2p_find_override(unsigned long mfn) |
726 | { | 751 | { |
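m2p_add_override()/m2p_remove_override() gain a clear_pte flag and are now exported, so modules mapping foreign frames can choose whether the kernel's lowmem PTE for the backing page is zapped while the override is active. A hedged caller sketch; the helper names and the decision to pass clear_pte=true are illustrative, and the prototypes are assumed to live in the x86 Xen page header. The real users are the Xen grant-mapping paths, which are outside this hunk.

    #include <linux/mm.h>
    #include <linux/errno.h>
    /* m2p_add_override()/m2p_remove_override() prototypes: x86 Xen page header (assumed). */

    /* Hypothetical helper: remember that `page` currently backs foreign frame `mfn`. */
    static int example_track_foreign_frame(struct page *page, unsigned long mfn)
    {
            /* clear_pte=true: zap the lowmem PTE while the override is live,
               matching the previous unconditional behaviour. */
            return m2p_add_override(mfn, page, true);
    }

    static int example_untrack_foreign_frame(struct page *page)
    {
            /* clear_pte=true restores the normal kernel mapping on removal. */
            return m2p_remove_override(page, true);
    }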
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 90bac0aac3a5..be1a464f6d66 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -50,7 +50,7 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size; | |||
50 | */ | 50 | */ |
51 | #define EXTRA_MEM_RATIO (10) | 51 | #define EXTRA_MEM_RATIO (10) |
52 | 52 | ||
53 | static __init void xen_add_extra_mem(unsigned long pages) | 53 | static void __init xen_add_extra_mem(unsigned long pages) |
54 | { | 54 | { |
55 | unsigned long pfn; | 55 | unsigned long pfn; |
56 | 56 | ||
@@ -166,7 +166,7 @@ static unsigned long __init xen_set_identity(const struct e820entry *list, | |||
166 | if (last > end) | 166 | if (last > end) |
167 | continue; | 167 | continue; |
168 | 168 | ||
169 | if (entry->type == E820_RAM) { | 169 | if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) { |
170 | if (start > start_pci) | 170 | if (start > start_pci) |
171 | identity += set_phys_range_identity( | 171 | identity += set_phys_range_identity( |
172 | PFN_UP(start_pci), PFN_DOWN(start)); | 172 | PFN_UP(start_pci), PFN_DOWN(start)); |
@@ -227,7 +227,11 @@ char * __init xen_memory_setup(void) | |||
227 | 227 | ||
228 | memcpy(map_raw, map, sizeof(map)); | 228 | memcpy(map_raw, map, sizeof(map)); |
229 | e820.nr_map = 0; | 229 | e820.nr_map = 0; |
230 | #ifdef CONFIG_X86_32 | ||
231 | xen_extra_mem_start = mem_end; | ||
232 | #else | ||
230 | xen_extra_mem_start = max((1ULL << 32), mem_end); | 233 | xen_extra_mem_start = max((1ULL << 32), mem_end); |
234 | #endif | ||
231 | for (i = 0; i < memmap.nr_entries; i++) { | 235 | for (i = 0; i < memmap.nr_entries; i++) { |
232 | unsigned long long end; | 236 | unsigned long long end; |
233 | 237 | ||
@@ -336,7 +340,7 @@ static void __init fiddle_vdso(void) | |||
336 | #endif | 340 | #endif |
337 | } | 341 | } |
338 | 342 | ||
339 | static __cpuinit int register_callback(unsigned type, const void *func) | 343 | static int __cpuinit register_callback(unsigned type, const void *func) |
340 | { | 344 | { |
341 | struct callback_register callback = { | 345 | struct callback_register callback = { |
342 | .type = type, | 346 | .type = type, |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 762b46ab14d5..41038c01de40 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -56,7 +56,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) | |||
56 | return IRQ_HANDLED; | 56 | return IRQ_HANDLED; |
57 | } | 57 | } |
58 | 58 | ||
59 | static __cpuinit void cpu_bringup(void) | 59 | static void __cpuinit cpu_bringup(void) |
60 | { | 60 | { |
61 | int cpu = smp_processor_id(); | 61 | int cpu = smp_processor_id(); |
62 | 62 | ||
@@ -84,7 +84,7 @@ static __cpuinit void cpu_bringup(void) | |||
84 | wmb(); /* make sure everything is out */ | 84 | wmb(); /* make sure everything is out */ |
85 | } | 85 | } |
86 | 86 | ||
87 | static __cpuinit void cpu_bringup_and_idle(void) | 87 | static void __cpuinit cpu_bringup_and_idle(void) |
88 | { | 88 | { |
89 | cpu_bringup(); | 89 | cpu_bringup(); |
90 | cpu_idle(); | 90 | cpu_idle(); |
@@ -241,7 +241,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) | |||
241 | } | 241 | } |
242 | } | 242 | } |
243 | 243 | ||
244 | static __cpuinit int | 244 | static int __cpuinit |
245 | cpu_initialize_context(unsigned int cpu, struct task_struct *idle) | 245 | cpu_initialize_context(unsigned int cpu, struct task_struct *idle) |
246 | { | 246 | { |
247 | struct vcpu_guest_context *ctxt; | 247 | struct vcpu_guest_context *ctxt; |
@@ -485,7 +485,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) | |||
485 | return IRQ_HANDLED; | 485 | return IRQ_HANDLED; |
486 | } | 486 | } |
487 | 487 | ||
488 | static const struct smp_ops xen_smp_ops __initdata = { | 488 | static const struct smp_ops xen_smp_ops __initconst = { |
489 | .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, | 489 | .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, |
490 | .smp_prepare_cpus = xen_smp_prepare_cpus, | 490 | .smp_prepare_cpus = xen_smp_prepare_cpus, |
491 | .smp_cpus_done = xen_smp_cpus_done, | 491 | .smp_cpus_done = xen_smp_cpus_done, |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 2e2d370a47b1..bd4ffd7d9589 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -439,11 +439,11 @@ void xen_timer_resume(void) | |||
439 | } | 439 | } |
440 | } | 440 | } |
441 | 441 | ||
442 | static const struct pv_time_ops xen_time_ops __initdata = { | 442 | static const struct pv_time_ops xen_time_ops __initconst = { |
443 | .sched_clock = xen_clocksource_read, | 443 | .sched_clock = xen_clocksource_read, |
444 | }; | 444 | }; |
445 | 445 | ||
446 | static __init void xen_time_init(void) | 446 | static void __init xen_time_init(void) |
447 | { | 447 | { |
448 | int cpu = smp_processor_id(); | 448 | int cpu = smp_processor_id(); |
449 | struct timespec tp; | 449 | struct timespec tp; |
@@ -468,7 +468,7 @@ static __init void xen_time_init(void) | |||
468 | xen_setup_cpu_clockevents(); | 468 | xen_setup_cpu_clockevents(); |
469 | } | 469 | } |
470 | 470 | ||
471 | __init void xen_init_time_ops(void) | 471 | void __init xen_init_time_ops(void) |
472 | { | 472 | { |
473 | pv_time_ops = xen_time_ops; | 473 | pv_time_ops = xen_time_ops; |
474 | 474 | ||
@@ -490,7 +490,7 @@ static void xen_hvm_setup_cpu_clockevents(void) | |||
490 | xen_setup_cpu_clockevents(); | 490 | xen_setup_cpu_clockevents(); |
491 | } | 491 | } |
492 | 492 | ||
493 | __init void xen_hvm_init_time_ops(void) | 493 | void __init xen_hvm_init_time_ops(void) |
494 | { | 494 | { |
495 | /* vector callback is needed otherwise we cannot receive interrupts | 495 | /* vector callback is needed otherwise we cannot receive interrupts |
496 | * on cpu > 0 and at this point we don't know how many cpus are | 496 | * on cpu > 0 and at this point we don't know how many cpus are |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 3112f55638c4..97dfdc8757b3 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -74,7 +74,7 @@ static inline void xen_hvm_smp_init(void) {} | |||
74 | 74 | ||
75 | #ifdef CONFIG_PARAVIRT_SPINLOCKS | 75 | #ifdef CONFIG_PARAVIRT_SPINLOCKS |
76 | void __init xen_init_spinlocks(void); | 76 | void __init xen_init_spinlocks(void); |
77 | __cpuinit void xen_init_lock_cpu(int cpu); | 77 | void __cpuinit xen_init_lock_cpu(int cpu); |
78 | void xen_uninit_lock_cpu(int cpu); | 78 | void xen_uninit_lock_cpu(int cpu); |
79 | #else | 79 | #else |
80 | static inline void xen_init_spinlocks(void) | 80 | static inline void xen_init_spinlocks(void) |
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index f0605ab2a761..471fdcc5df85 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -114,6 +114,13 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup) | |||
114 | } | 114 | } |
115 | EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); | 115 | EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); |
116 | 116 | ||
117 | struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk) | ||
118 | { | ||
119 | return container_of(task_subsys_state(tsk, blkio_subsys_id), | ||
120 | struct blkio_cgroup, css); | ||
121 | } | ||
122 | EXPORT_SYMBOL_GPL(task_blkio_cgroup); | ||
123 | |||
117 | static inline void | 124 | static inline void |
118 | blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) | 125 | blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) |
119 | { | 126 | { |
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 10919fae2d3a..c774930cc206 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h | |||
@@ -291,6 +291,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {} | |||
291 | #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) | 291 | #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) |
292 | extern struct blkio_cgroup blkio_root_cgroup; | 292 | extern struct blkio_cgroup blkio_root_cgroup; |
293 | extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); | 293 | extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); |
294 | extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk); | ||
294 | extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, | 295 | extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, |
295 | struct blkio_group *blkg, void *key, dev_t dev, | 296 | struct blkio_group *blkg, void *key, dev_t dev, |
296 | enum blkio_policy_id plid); | 297 | enum blkio_policy_id plid); |
@@ -314,6 +315,8 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg, | |||
314 | struct cgroup; | 315 | struct cgroup; |
315 | static inline struct blkio_cgroup * | 316 | static inline struct blkio_cgroup * |
316 | cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } | 317 | cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } |
318 | static inline struct blkio_cgroup * | ||
319 | task_blkio_cgroup(struct task_struct *tsk) { return NULL; } | ||
317 | 320 | ||
318 | static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, | 321 | static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, |
319 | struct blkio_group *blkg, void *key, dev_t dev, | 322 | struct blkio_group *blkg, void *key, dev_t dev, |
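task_blkio_cgroup() gives blk-throttle and CFQ a direct task-to-blkio-cgroup lookup, with a NULL-returning stub when the controller is compiled out. Because it is built on task_subsys_state(), callers must sit inside an RCU read-side section, exactly as the throtl_get_tg()/cfq_get_cfqg() hunks below do. A minimal sketch of the pattern; the wrapper function is hypothetical, only task_blkio_cgroup() and blkio_root_cgroup come from the headers shown above:

    #include <linux/types.h>
    #include <linux/rcupdate.h>
    #include <linux/sched.h>
    #include "blk-cgroup.h"

    /* Hypothetical query: is current running in the root blkio cgroup? */
    static bool example_current_in_root_blkcg(void)
    {
            struct blkio_cgroup *blkcg;
            bool root;

            rcu_read_lock();
            blkcg = task_blkio_cgroup(current);
            /* NULL means the blkio controller is compiled out; treat as root. */
            root = !blkcg || blkcg == &blkio_root_cgroup;
            rcu_read_unlock();
            return root;
    }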
diff --git a/block/blk-core.c b/block/blk-core.c index a2e58eeb3549..3fe00a14822a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -316,8 +316,10 @@ EXPORT_SYMBOL(__blk_run_queue); | |||
316 | */ | 316 | */ |
317 | void blk_run_queue_async(struct request_queue *q) | 317 | void blk_run_queue_async(struct request_queue *q) |
318 | { | 318 | { |
319 | if (likely(!blk_queue_stopped(q))) | 319 | if (likely(!blk_queue_stopped(q))) { |
320 | __cancel_delayed_work(&q->delay_work); | ||
320 | queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); | 321 | queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); |
322 | } | ||
321 | } | 323 | } |
322 | EXPORT_SYMBOL(blk_run_queue_async); | 324 | EXPORT_SYMBOL(blk_run_queue_async); |
323 | 325 | ||
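The blk-core.c change matters because queue_delayed_work() is a no-op while the work item is still pending: if blk_delay_queue() had parked q->delay_work with a long timeout, a later "run it now" request would silently keep the old delay. Cancelling the timer (non-sync, so safe from atomic context) first lets the zero-delay queueing take effect. A sketch of the pattern with a hypothetical work item; only the two workqueue calls are taken from the hunk above:

    #include <linux/workqueue.h>

    static void example_fn(struct work_struct *w) { /* do the deferred work */ }
    static DECLARE_DELAYED_WORK(example_work, example_fn);

    /* May be called from atomic context, hence the non-syncing cancel. */
    static void example_kick_now(struct workqueue_struct *wq)
    {
            /* Drop a pending timer so the immediate queueing below is not ignored. */
            __cancel_delayed_work(&example_work);
            queue_delayed_work(wq, &example_work, 0);
    }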
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 0475a22a420d..252a81a306f7 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -160,9 +160,8 @@ static void throtl_put_tg(struct throtl_grp *tg) | |||
160 | } | 160 | } |
161 | 161 | ||
162 | static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, | 162 | static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, |
163 | struct cgroup *cgroup) | 163 | struct blkio_cgroup *blkcg) |
164 | { | 164 | { |
165 | struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); | ||
166 | struct throtl_grp *tg = NULL; | 165 | struct throtl_grp *tg = NULL; |
167 | void *key = td; | 166 | void *key = td; |
168 | struct backing_dev_info *bdi = &td->queue->backing_dev_info; | 167 | struct backing_dev_info *bdi = &td->queue->backing_dev_info; |
@@ -229,12 +228,12 @@ done: | |||
229 | 228 | ||
230 | static struct throtl_grp * throtl_get_tg(struct throtl_data *td) | 229 | static struct throtl_grp * throtl_get_tg(struct throtl_data *td) |
231 | { | 230 | { |
232 | struct cgroup *cgroup; | ||
233 | struct throtl_grp *tg = NULL; | 231 | struct throtl_grp *tg = NULL; |
232 | struct blkio_cgroup *blkcg; | ||
234 | 233 | ||
235 | rcu_read_lock(); | 234 | rcu_read_lock(); |
236 | cgroup = task_cgroup(current, blkio_subsys_id); | 235 | blkcg = task_blkio_cgroup(current); |
237 | tg = throtl_find_alloc_tg(td, cgroup); | 236 | tg = throtl_find_alloc_tg(td, blkcg); |
238 | if (!tg) | 237 | if (!tg) |
239 | tg = &td->root_tg; | 238 | tg = &td->root_tg; |
240 | rcu_read_unlock(); | 239 | rcu_read_unlock(); |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 5b52011e3a40..ab7a9e6a9b1c 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -1014,10 +1014,9 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg, | |||
1014 | cfqg->needs_update = true; | 1014 | cfqg->needs_update = true; |
1015 | } | 1015 | } |
1016 | 1016 | ||
1017 | static struct cfq_group * | 1017 | static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd, |
1018 | cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) | 1018 | struct blkio_cgroup *blkcg, int create) |
1019 | { | 1019 | { |
1020 | struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); | ||
1021 | struct cfq_group *cfqg = NULL; | 1020 | struct cfq_group *cfqg = NULL; |
1022 | void *key = cfqd; | 1021 | void *key = cfqd; |
1023 | int i, j; | 1022 | int i, j; |
@@ -1079,12 +1078,12 @@ done: | |||
1079 | */ | 1078 | */ |
1080 | static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) | 1079 | static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) |
1081 | { | 1080 | { |
1082 | struct cgroup *cgroup; | 1081 | struct blkio_cgroup *blkcg; |
1083 | struct cfq_group *cfqg = NULL; | 1082 | struct cfq_group *cfqg = NULL; |
1084 | 1083 | ||
1085 | rcu_read_lock(); | 1084 | rcu_read_lock(); |
1086 | cgroup = task_cgroup(current, blkio_subsys_id); | 1085 | blkcg = task_blkio_cgroup(current); |
1087 | cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create); | 1086 | cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create); |
1088 | if (!cfqg && create) | 1087 | if (!cfqg && create) |
1089 | cfqg = &cfqd->root_group; | 1088 | cfqg = &cfqd->root_group; |
1090 | rcu_read_unlock(); | 1089 | rcu_read_unlock(); |
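Both the blk-throttle.c and cfq-iosched.c hunks replace a task_cgroup() + cgroup_to_blkio_cgroup() pair with a single task_blkio_cgroup() lookup, so the blkio cgroup is resolved once under rcu_read_lock() and passed straight to the allocation helper. Put back together, the throttling-side lookup now reads as below (a condensed sketch of the new throtl_get_tg() flow shown above):

static struct throtl_grp *throtl_lookup_group(struct throtl_data *td)
{
	struct blkio_cgroup *blkcg;
	struct throtl_grp *tg;

	rcu_read_lock();
	/* blkcg is only guaranteed to stay valid inside this RCU section */
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_alloc_tg(td, blkcg);
	if (!tg)
		tg = &td->root_tg;	/* fall back to the root group */
	rcu_read_unlock();
	return tg;
}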
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 3a73a93596e8..85b32376dad7 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -49,10 +49,6 @@ ACPI_MODULE_NAME("processor_perflib"); | |||
49 | 49 | ||
50 | static DEFINE_MUTEX(performance_mutex); | 50 | static DEFINE_MUTEX(performance_mutex); |
51 | 51 | ||
52 | /* Use cpufreq debug layer for _PPC changes. */ | ||
53 | #define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ | ||
54 | "cpufreq-core", msg) | ||
55 | |||
56 | /* | 52 | /* |
57 | * _PPC support is implemented as a CPUfreq policy notifier: | 53 | * _PPC support is implemented as a CPUfreq policy notifier: |
58 | * This means each time a CPUfreq driver registered also with | 54 | * This means each time a CPUfreq driver registered also with |
@@ -145,7 +141,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) | |||
145 | return -ENODEV; | 141 | return -ENODEV; |
146 | } | 142 | } |
147 | 143 | ||
148 | cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, | 144 | pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, |
149 | (int)ppc, ppc ? "" : "not"); | 145 | (int)ppc, ppc ? "" : "not"); |
150 | 146 | ||
151 | pr->performance_platform_limit = (int)ppc; | 147 | pr->performance_platform_limit = (int)ppc; |
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index ff9d832a163d..d38c40fe4ddb 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -561,27 +561,6 @@ void ahci_start_engine(struct ata_port *ap) | |||
561 | { | 561 | { |
562 | void __iomem *port_mmio = ahci_port_base(ap); | 562 | void __iomem *port_mmio = ahci_port_base(ap); |
563 | u32 tmp; | 563 | u32 tmp; |
564 | u8 status; | ||
565 | |||
566 | status = readl(port_mmio + PORT_TFDATA) & 0xFF; | ||
567 | |||
568 | /* | ||
569 | * At end of section 10.1 of AHCI spec (rev 1.3), it states | ||
570 | * Software shall not set PxCMD.ST to 1 until it is determined | ||
571 | that a functional device is present on the port as determined by | ||
572 | * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h | ||
573 | * | ||
574 | * Even though most AHCI host controllers work without this check, | ||
575 | * specific controller will fail under this condition | ||
576 | */ | ||
577 | if (status & (ATA_BUSY | ATA_DRQ)) | ||
578 | return; | ||
579 | else { | ||
580 | ahci_scr_read(&ap->link, SCR_STATUS, &tmp); | ||
581 | |||
582 | if ((tmp & 0xf) != 0x3) | ||
583 | return; | ||
584 | } | ||
585 | 564 | ||
586 | /* start DMA */ | 565 | /* start DMA */ |
587 | tmp = readl(port_mmio + PORT_CMD); | 566 | tmp = readl(port_mmio + PORT_CMD); |
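The libahci.c hunk drops the taskfile/PHY pre-check from ahci_start_engine() itself. For reference, the removed test reads as the standalone helper below, assembled only from the deleted lines (ahci_port_base() and ahci_scr_read() are the existing libahci helpers; the helper name is illustrative):

/*
 * AHCI 1.3, section 10.1: PxCMD.ST may only be set once PxTFD.STS.BSY and
 * PxTFD.STS.DRQ are clear and PxSSTS.DET reads 3h.
 */
static bool ahci_port_ready_for_start(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 sstatus;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;

	if (status & (ATA_BUSY | ATA_DRQ))
		return false;

	ahci_scr_read(&ap->link, SCR_STATUS, &sstatus);
	return (sstatus & 0xf) == 0x3;
}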
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index f26f2fe3480a..dad9fd660f37 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -3316,7 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, | |||
3316 | struct ata_eh_context *ehc = &link->eh_context; | 3316 | struct ata_eh_context *ehc = &link->eh_context; |
3317 | struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; | 3317 | struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; |
3318 | enum ata_lpm_policy old_policy = link->lpm_policy; | 3318 | enum ata_lpm_policy old_policy = link->lpm_policy; |
3319 | bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM; | 3319 | bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; |
3320 | unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; | 3320 | unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; |
3321 | unsigned int err_mask; | 3321 | unsigned int err_mask; |
3322 | int rc; | 3322 | int rc; |
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index bdd2719f3f68..bc9e702186dd 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c | |||
@@ -2643,16 +2643,19 @@ fore200e_init(struct fore200e* fore200e, struct device *parent) | |||
2643 | } | 2643 | } |
2644 | 2644 | ||
2645 | #ifdef CONFIG_SBUS | 2645 | #ifdef CONFIG_SBUS |
2646 | static const struct of_device_id fore200e_sba_match[]; | ||
2646 | static int __devinit fore200e_sba_probe(struct platform_device *op) | 2647 | static int __devinit fore200e_sba_probe(struct platform_device *op) |
2647 | { | 2648 | { |
2649 | const struct of_device_id *match; | ||
2648 | const struct fore200e_bus *bus; | 2650 | const struct fore200e_bus *bus; |
2649 | struct fore200e *fore200e; | 2651 | struct fore200e *fore200e; |
2650 | static int index = 0; | 2652 | static int index = 0; |
2651 | int err; | 2653 | int err; |
2652 | 2654 | ||
2653 | if (!op->dev.of_match) | 2655 | match = of_match_device(fore200e_sba_match, &op->dev); |
2656 | if (!match) | ||
2654 | return -EINVAL; | 2657 | return -EINVAL; |
2655 | bus = op->dev.of_match->data; | 2658 | bus = match->data; |
2656 | 2659 | ||
2657 | fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); | 2660 | fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); |
2658 | if (!fore200e) | 2661 | if (!fore200e) |
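The fore200e hunk replaces the removed op->dev.of_match pointer with an explicit of_match_device() lookup against a forward-declared match table. Reduced to its essentials, the pattern looks like this (the example_* names and the driver-data handling are placeholders):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id example_of_match[];	/* defined later in the file */

static int __devinit example_probe(struct platform_device *op)
{
	const struct of_device_id *match;
	const void *driver_data;

	match = of_match_device(example_of_match, &op->dev);
	if (!match)
		return -EINVAL;		/* no matching "compatible" entry */
	driver_data = match->data;	/* per-entry driver data */

	/* ... continue probing using driver_data ... */
	return 0;
}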
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index e9e5238f3106..d57e8d0fb823 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig | |||
@@ -168,11 +168,4 @@ config SYS_HYPERVISOR | |||
168 | bool | 168 | bool |
169 | default n | 169 | default n |
170 | 170 | ||
171 | config ARCH_NO_SYSDEV_OPS | ||
172 | bool | ||
173 | ---help--- | ||
174 | To be selected by architectures that don't use sysdev class or | ||
175 | sysdev driver power management (suspend/resume) and shutdown | ||
176 | operations. | ||
177 | |||
178 | endmenu | 171 | endmenu |
diff --git a/drivers/base/base.h b/drivers/base/base.h index 19f49e41ce5d..a34dca0ad041 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h | |||
@@ -111,8 +111,6 @@ static inline int driver_match_device(struct device_driver *drv, | |||
111 | return drv->bus->match ? drv->bus->match(dev, drv) : 1; | 111 | return drv->bus->match ? drv->bus->match(dev, drv) : 1; |
112 | } | 112 | } |
113 | 113 | ||
114 | extern void sysdev_shutdown(void); | ||
115 | |||
116 | extern char *make_class_name(const char *name, struct kobject *kobj); | 114 | extern char *make_class_name(const char *name, struct kobject *kobj); |
117 | 115 | ||
118 | extern int devres_release_all(struct device *dev); | 116 | extern int devres_release_all(struct device *dev); |
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index da57ee9d63fe..29917c7506cb 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c | |||
@@ -316,8 +316,7 @@ static void __device_release_driver(struct device *dev) | |||
316 | 316 | ||
317 | drv = dev->driver; | 317 | drv = dev->driver; |
318 | if (drv) { | 318 | if (drv) { |
319 | pm_runtime_get_noresume(dev); | 319 | pm_runtime_get_sync(dev); |
320 | pm_runtime_barrier(dev); | ||
321 | 320 | ||
322 | driver_sysfs_remove(dev); | 321 | driver_sysfs_remove(dev); |
323 | 322 | ||
@@ -326,6 +325,8 @@ static void __device_release_driver(struct device *dev) | |||
326 | BUS_NOTIFY_UNBIND_DRIVER, | 325 | BUS_NOTIFY_UNBIND_DRIVER, |
327 | dev); | 326 | dev); |
328 | 327 | ||
328 | pm_runtime_put_sync(dev); | ||
329 | |||
329 | if (dev->bus && dev->bus->remove) | 330 | if (dev->bus && dev->bus->remove) |
330 | dev->bus->remove(dev); | 331 | dev->bus->remove(dev); |
331 | else if (drv->remove) | 332 | else if (drv->remove) |
@@ -338,7 +339,6 @@ static void __device_release_driver(struct device *dev) | |||
338 | BUS_NOTIFY_UNBOUND_DRIVER, | 339 | BUS_NOTIFY_UNBOUND_DRIVER, |
339 | dev); | 340 | dev); |
340 | 341 | ||
341 | pm_runtime_put_sync(dev); | ||
342 | } | 342 | } |
343 | } | 343 | } |
344 | 344 | ||
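The dd.c hunk swaps the get_noresume/barrier pair for pm_runtime_get_sync() taken before the driver is detached, and moves the matching pm_runtime_put_sync() ahead of the bus/driver remove callbacks, so the device is guaranteed to be runtime-resumed while it is unbound. The balanced pattern in isolation (a sketch; the helper name and the elided steps are placeholders):

#include <linux/pm_runtime.h>

static void example_unbind(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* resume the device, block runtime suspend */

	/* remove sysfs links, send BUS_NOTIFY_UNBIND_DRIVER, ... */

	pm_runtime_put_sync(dev);	/* drop the reference before ->remove() runs */

	/* bus ->remove() or driver ->remove() executes here */
}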
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 8c798ef7f13f..bbb03e6f7255 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -521,6 +521,11 @@ static int _request_firmware(const struct firmware **firmware_p, | |||
521 | if (!firmware_p) | 521 | if (!firmware_p) |
522 | return -EINVAL; | 522 | return -EINVAL; |
523 | 523 | ||
524 | if (WARN_ON(usermodehelper_is_disabled())) { | ||
525 | dev_err(device, "firmware: %s will not be loaded\n", name); | ||
526 | return -EBUSY; | ||
527 | } | ||
528 | |||
524 | *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); | 529 | *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); |
525 | if (!firmware) { | 530 | if (!firmware) { |
526 | dev_err(device, "%s: kmalloc(struct firmware) failed\n", | 531 | dev_err(device, "%s: kmalloc(struct firmware) failed\n", |
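With the firmware_class.c hunk, _request_firmware() now fails fast with -EBUSY (plus a WARN) when the usermode helper is disabled, as it is during the late stages of suspend and resume. Callers just see an ordinary error return; a hedged sketch of a driver-side caller (the device, firmware name and programming step are illustrative):

#include <linux/firmware.h>

static int example_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "example/fw.bin", dev);
	if (err)	/* includes -EBUSY when user space cannot be invoked yet */
		return err;

	/* ... program fw->data / fw->size into the hardware ... */

	release_firmware(fw);
	return 0;
}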
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9e0e4fc24c46..48425f183029 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -667,7 +667,7 @@ static int platform_legacy_resume(struct device *dev) | |||
667 | return ret; | 667 | return ret; |
668 | } | 668 | } |
669 | 669 | ||
670 | static int platform_pm_prepare(struct device *dev) | 670 | int platform_pm_prepare(struct device *dev) |
671 | { | 671 | { |
672 | struct device_driver *drv = dev->driver; | 672 | struct device_driver *drv = dev->driver; |
673 | int ret = 0; | 673 | int ret = 0; |
@@ -678,7 +678,7 @@ static int platform_pm_prepare(struct device *dev) | |||
678 | return ret; | 678 | return ret; |
679 | } | 679 | } |
680 | 680 | ||
681 | static void platform_pm_complete(struct device *dev) | 681 | void platform_pm_complete(struct device *dev) |
682 | { | 682 | { |
683 | struct device_driver *drv = dev->driver; | 683 | struct device_driver *drv = dev->driver; |
684 | 684 | ||
@@ -686,16 +686,11 @@ static void platform_pm_complete(struct device *dev) | |||
686 | drv->pm->complete(dev); | 686 | drv->pm->complete(dev); |
687 | } | 687 | } |
688 | 688 | ||
689 | #else /* !CONFIG_PM_SLEEP */ | 689 | #endif /* CONFIG_PM_SLEEP */ |
690 | |||
691 | #define platform_pm_prepare NULL | ||
692 | #define platform_pm_complete NULL | ||
693 | |||
694 | #endif /* !CONFIG_PM_SLEEP */ | ||
695 | 690 | ||
696 | #ifdef CONFIG_SUSPEND | 691 | #ifdef CONFIG_SUSPEND |
697 | 692 | ||
698 | int __weak platform_pm_suspend(struct device *dev) | 693 | int platform_pm_suspend(struct device *dev) |
699 | { | 694 | { |
700 | struct device_driver *drv = dev->driver; | 695 | struct device_driver *drv = dev->driver; |
701 | int ret = 0; | 696 | int ret = 0; |
@@ -713,7 +708,7 @@ int __weak platform_pm_suspend(struct device *dev) | |||
713 | return ret; | 708 | return ret; |
714 | } | 709 | } |
715 | 710 | ||
716 | int __weak platform_pm_suspend_noirq(struct device *dev) | 711 | int platform_pm_suspend_noirq(struct device *dev) |
717 | { | 712 | { |
718 | struct device_driver *drv = dev->driver; | 713 | struct device_driver *drv = dev->driver; |
719 | int ret = 0; | 714 | int ret = 0; |
@@ -729,7 +724,7 @@ int __weak platform_pm_suspend_noirq(struct device *dev) | |||
729 | return ret; | 724 | return ret; |
730 | } | 725 | } |
731 | 726 | ||
732 | int __weak platform_pm_resume(struct device *dev) | 727 | int platform_pm_resume(struct device *dev) |
733 | { | 728 | { |
734 | struct device_driver *drv = dev->driver; | 729 | struct device_driver *drv = dev->driver; |
735 | int ret = 0; | 730 | int ret = 0; |
@@ -747,7 +742,7 @@ int __weak platform_pm_resume(struct device *dev) | |||
747 | return ret; | 742 | return ret; |
748 | } | 743 | } |
749 | 744 | ||
750 | int __weak platform_pm_resume_noirq(struct device *dev) | 745 | int platform_pm_resume_noirq(struct device *dev) |
751 | { | 746 | { |
752 | struct device_driver *drv = dev->driver; | 747 | struct device_driver *drv = dev->driver; |
753 | int ret = 0; | 748 | int ret = 0; |
@@ -763,18 +758,11 @@ int __weak platform_pm_resume_noirq(struct device *dev) | |||
763 | return ret; | 758 | return ret; |
764 | } | 759 | } |
765 | 760 | ||
766 | #else /* !CONFIG_SUSPEND */ | 761 | #endif /* CONFIG_SUSPEND */ |
767 | |||
768 | #define platform_pm_suspend NULL | ||
769 | #define platform_pm_resume NULL | ||
770 | #define platform_pm_suspend_noirq NULL | ||
771 | #define platform_pm_resume_noirq NULL | ||
772 | |||
773 | #endif /* !CONFIG_SUSPEND */ | ||
774 | 762 | ||
775 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 763 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
776 | 764 | ||
777 | static int platform_pm_freeze(struct device *dev) | 765 | int platform_pm_freeze(struct device *dev) |
778 | { | 766 | { |
779 | struct device_driver *drv = dev->driver; | 767 | struct device_driver *drv = dev->driver; |
780 | int ret = 0; | 768 | int ret = 0; |
@@ -792,7 +780,7 @@ static int platform_pm_freeze(struct device *dev) | |||
792 | return ret; | 780 | return ret; |
793 | } | 781 | } |
794 | 782 | ||
795 | static int platform_pm_freeze_noirq(struct device *dev) | 783 | int platform_pm_freeze_noirq(struct device *dev) |
796 | { | 784 | { |
797 | struct device_driver *drv = dev->driver; | 785 | struct device_driver *drv = dev->driver; |
798 | int ret = 0; | 786 | int ret = 0; |
@@ -808,7 +796,7 @@ static int platform_pm_freeze_noirq(struct device *dev) | |||
808 | return ret; | 796 | return ret; |
809 | } | 797 | } |
810 | 798 | ||
811 | static int platform_pm_thaw(struct device *dev) | 799 | int platform_pm_thaw(struct device *dev) |
812 | { | 800 | { |
813 | struct device_driver *drv = dev->driver; | 801 | struct device_driver *drv = dev->driver; |
814 | int ret = 0; | 802 | int ret = 0; |
@@ -826,7 +814,7 @@ static int platform_pm_thaw(struct device *dev) | |||
826 | return ret; | 814 | return ret; |
827 | } | 815 | } |
828 | 816 | ||
829 | static int platform_pm_thaw_noirq(struct device *dev) | 817 | int platform_pm_thaw_noirq(struct device *dev) |
830 | { | 818 | { |
831 | struct device_driver *drv = dev->driver; | 819 | struct device_driver *drv = dev->driver; |
832 | int ret = 0; | 820 | int ret = 0; |
@@ -842,7 +830,7 @@ static int platform_pm_thaw_noirq(struct device *dev) | |||
842 | return ret; | 830 | return ret; |
843 | } | 831 | } |
844 | 832 | ||
845 | static int platform_pm_poweroff(struct device *dev) | 833 | int platform_pm_poweroff(struct device *dev) |
846 | { | 834 | { |
847 | struct device_driver *drv = dev->driver; | 835 | struct device_driver *drv = dev->driver; |
848 | int ret = 0; | 836 | int ret = 0; |
@@ -860,7 +848,7 @@ static int platform_pm_poweroff(struct device *dev) | |||
860 | return ret; | 848 | return ret; |
861 | } | 849 | } |
862 | 850 | ||
863 | static int platform_pm_poweroff_noirq(struct device *dev) | 851 | int platform_pm_poweroff_noirq(struct device *dev) |
864 | { | 852 | { |
865 | struct device_driver *drv = dev->driver; | 853 | struct device_driver *drv = dev->driver; |
866 | int ret = 0; | 854 | int ret = 0; |
@@ -876,7 +864,7 @@ static int platform_pm_poweroff_noirq(struct device *dev) | |||
876 | return ret; | 864 | return ret; |
877 | } | 865 | } |
878 | 866 | ||
879 | static int platform_pm_restore(struct device *dev) | 867 | int platform_pm_restore(struct device *dev) |
880 | { | 868 | { |
881 | struct device_driver *drv = dev->driver; | 869 | struct device_driver *drv = dev->driver; |
882 | int ret = 0; | 870 | int ret = 0; |
@@ -894,7 +882,7 @@ static int platform_pm_restore(struct device *dev) | |||
894 | return ret; | 882 | return ret; |
895 | } | 883 | } |
896 | 884 | ||
897 | static int platform_pm_restore_noirq(struct device *dev) | 885 | int platform_pm_restore_noirq(struct device *dev) |
898 | { | 886 | { |
899 | struct device_driver *drv = dev->driver; | 887 | struct device_driver *drv = dev->driver; |
900 | int ret = 0; | 888 | int ret = 0; |
@@ -910,62 +898,13 @@ static int platform_pm_restore_noirq(struct device *dev) | |||
910 | return ret; | 898 | return ret; |
911 | } | 899 | } |
912 | 900 | ||
913 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ | 901 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
914 | |||
915 | #define platform_pm_freeze NULL | ||
916 | #define platform_pm_thaw NULL | ||
917 | #define platform_pm_poweroff NULL | ||
918 | #define platform_pm_restore NULL | ||
919 | #define platform_pm_freeze_noirq NULL | ||
920 | #define platform_pm_thaw_noirq NULL | ||
921 | #define platform_pm_poweroff_noirq NULL | ||
922 | #define platform_pm_restore_noirq NULL | ||
923 | |||
924 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ | ||
925 | |||
926 | #ifdef CONFIG_PM_RUNTIME | ||
927 | |||
928 | int __weak platform_pm_runtime_suspend(struct device *dev) | ||
929 | { | ||
930 | return pm_generic_runtime_suspend(dev); | ||
931 | }; | ||
932 | |||
933 | int __weak platform_pm_runtime_resume(struct device *dev) | ||
934 | { | ||
935 | return pm_generic_runtime_resume(dev); | ||
936 | }; | ||
937 | |||
938 | int __weak platform_pm_runtime_idle(struct device *dev) | ||
939 | { | ||
940 | return pm_generic_runtime_idle(dev); | ||
941 | }; | ||
942 | |||
943 | #else /* !CONFIG_PM_RUNTIME */ | ||
944 | |||
945 | #define platform_pm_runtime_suspend NULL | ||
946 | #define platform_pm_runtime_resume NULL | ||
947 | #define platform_pm_runtime_idle NULL | ||
948 | |||
949 | #endif /* !CONFIG_PM_RUNTIME */ | ||
950 | 902 | ||
951 | static const struct dev_pm_ops platform_dev_pm_ops = { | 903 | static const struct dev_pm_ops platform_dev_pm_ops = { |
952 | .prepare = platform_pm_prepare, | 904 | .runtime_suspend = pm_generic_runtime_suspend, |
953 | .complete = platform_pm_complete, | 905 | .runtime_resume = pm_generic_runtime_resume, |
954 | .suspend = platform_pm_suspend, | 906 | .runtime_idle = pm_generic_runtime_idle, |
955 | .resume = platform_pm_resume, | 907 | USE_PLATFORM_PM_SLEEP_OPS |
956 | .freeze = platform_pm_freeze, | ||
957 | .thaw = platform_pm_thaw, | ||
958 | .poweroff = platform_pm_poweroff, | ||
959 | .restore = platform_pm_restore, | ||
960 | .suspend_noirq = platform_pm_suspend_noirq, | ||
961 | .resume_noirq = platform_pm_resume_noirq, | ||
962 | .freeze_noirq = platform_pm_freeze_noirq, | ||
963 | .thaw_noirq = platform_pm_thaw_noirq, | ||
964 | .poweroff_noirq = platform_pm_poweroff_noirq, | ||
965 | .restore_noirq = platform_pm_restore_noirq, | ||
966 | .runtime_suspend = platform_pm_runtime_suspend, | ||
967 | .runtime_resume = platform_pm_runtime_resume, | ||
968 | .runtime_idle = platform_pm_runtime_idle, | ||
969 | }; | 908 | }; |
970 | 909 | ||
971 | struct bus_type platform_bus_type = { | 910 | struct bus_type platform_bus_type = { |
@@ -977,41 +916,6 @@ struct bus_type platform_bus_type = { | |||
977 | }; | 916 | }; |
978 | EXPORT_SYMBOL_GPL(platform_bus_type); | 917 | EXPORT_SYMBOL_GPL(platform_bus_type); |
979 | 918 | ||
980 | /** | ||
981 | * platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops | ||
982 | * | ||
983 | * This function can be used by platform code to get the current | ||
984 | * set of dev_pm_ops functions used by the platform_bus_type. | ||
985 | */ | ||
986 | const struct dev_pm_ops * __init platform_bus_get_pm_ops(void) | ||
987 | { | ||
988 | return platform_bus_type.pm; | ||
989 | } | ||
990 | |||
991 | /** | ||
992 | * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type | ||
993 | * | ||
994 | * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type | ||
995 | * | ||
996 | * Platform code can override the dev_pm_ops methods of | ||
997 | * platform_bus_type by using this function. It is expected that | ||
998 | * platform code will first do a platform_bus_get_pm_ops(), then | ||
999 | * kmemdup it, then customize selected methods and pass a pointer to | ||
1000 | * the new struct dev_pm_ops to this function. | ||
1001 | * | ||
1002 | * Since platform-specific code is customizing methods for *all* | ||
1003 | * devices (not just platform-specific devices) it is expected that | ||
1004 | * any custom overrides of these functions will keep existing behavior | ||
1005 | * and simply extend it. For example, any customization of the | ||
1006 | * runtime PM methods should continue to call the pm_generic_* | ||
1007 | * functions as the default ones do in addition to the | ||
1008 | * platform-specific behavior. | ||
1009 | */ | ||
1010 | void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm) | ||
1011 | { | ||
1012 | platform_bus_type.pm = pm; | ||
1013 | } | ||
1014 | |||
1015 | int __init platform_bus_init(void) | 919 | int __init platform_bus_init(void) |
1016 | { | 920 | { |
1017 | int error; | 921 | int error; |
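After the platform.c changes the sleep callbacks are plain exported functions and the bus builds its dev_pm_ops from the generic runtime helpers plus USE_PLATFORM_PM_SLEEP_OPS, instead of weak functions and per-configuration NULL defines. The macro itself is defined in the platform/PM headers, not in this hunk; presumably it expands to the sleep-callback initializers when CONFIG_PM_SLEEP is set and to nothing otherwise, roughly:

/* Presumed CONFIG_PM_SLEEP expansion of USE_PLATFORM_PM_SLEEP_OPS (sketch only). */
#define USE_PLATFORM_PM_SLEEP_OPS_SKETCH		\
	.prepare = platform_pm_prepare,			\
	.complete = platform_pm_complete,		\
	.suspend = platform_pm_suspend,			\
	.resume = platform_pm_resume,			\
	.freeze = platform_pm_freeze,			\
	.thaw = platform_pm_thaw,			\
	.poweroff = platform_pm_poweroff,		\
	.restore = platform_pm_restore,			\
	.suspend_noirq = platform_pm_suspend_noirq,	\
	.resume_noirq = platform_pm_resume_noirq,	\
	.freeze_noirq = platform_pm_freeze_noirq,	\
	.thaw_noirq = platform_pm_thaw_noirq,		\
	.poweroff_noirq = platform_pm_poweroff_noirq,	\
	.restore_noirq = platform_pm_restore_noirq,

Exporting these callbacks also lets power-domain code reuse the default platform bus behaviour now that the platform_bus_set_pm_ops() hook is gone.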
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 118c1b92a511..3647e114d0e7 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile | |||
@@ -3,6 +3,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o | |||
3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o | 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o |
4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o | 4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o |
5 | obj-$(CONFIG_PM_OPP) += opp.o | 5 | obj-$(CONFIG_PM_OPP) += opp.o |
6 | obj-$(CONFIG_HAVE_CLK) += clock_ops.o | ||
6 | 7 | ||
7 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG | 8 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file |
8 | ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG | ||
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c new file mode 100644 index 000000000000..c0dd09df7be8 --- /dev/null +++ b/drivers/base/power/clock_ops.c | |||
@@ -0,0 +1,431 @@ | |||
1 | /* | ||
2 | * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks | ||
3 | * | ||
4 | * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/io.h> | ||
12 | #include <linux/pm.h> | ||
13 | #include <linux/pm_runtime.h> | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/err.h> | ||
17 | |||
18 | #ifdef CONFIG_PM_RUNTIME | ||
19 | |||
20 | struct pm_runtime_clk_data { | ||
21 | struct list_head clock_list; | ||
22 | struct mutex lock; | ||
23 | }; | ||
24 | |||
25 | enum pce_status { | ||
26 | PCE_STATUS_NONE = 0, | ||
27 | PCE_STATUS_ACQUIRED, | ||
28 | PCE_STATUS_ENABLED, | ||
29 | PCE_STATUS_ERROR, | ||
30 | }; | ||
31 | |||
32 | struct pm_clock_entry { | ||
33 | struct list_head node; | ||
34 | char *con_id; | ||
35 | struct clk *clk; | ||
36 | enum pce_status status; | ||
37 | }; | ||
38 | |||
39 | static struct pm_runtime_clk_data *__to_prd(struct device *dev) | ||
40 | { | ||
41 | return dev ? dev->power.subsys_data : NULL; | ||
42 | } | ||
43 | |||
44 | /** | ||
45 | * pm_runtime_clk_add - Start using a device clock for runtime PM. | ||
46 | * @dev: Device whose clock is going to be used for runtime PM. | ||
47 | * @con_id: Connection ID of the clock. | ||
48 | * | ||
49 | * Add the clock represented by @con_id to the list of clocks used for | ||
50 | * the runtime PM of @dev. | ||
51 | */ | ||
52 | int pm_runtime_clk_add(struct device *dev, const char *con_id) | ||
53 | { | ||
54 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
55 | struct pm_clock_entry *ce; | ||
56 | |||
57 | if (!prd) | ||
58 | return -EINVAL; | ||
59 | |||
60 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); | ||
61 | if (!ce) { | ||
62 | dev_err(dev, "Not enough memory for clock entry.\n"); | ||
63 | return -ENOMEM; | ||
64 | } | ||
65 | |||
66 | if (con_id) { | ||
67 | ce->con_id = kstrdup(con_id, GFP_KERNEL); | ||
68 | if (!ce->con_id) { | ||
69 | dev_err(dev, | ||
70 | "Not enough memory for clock connection ID.\n"); | ||
71 | kfree(ce); | ||
72 | return -ENOMEM; | ||
73 | } | ||
74 | } | ||
75 | |||
76 | mutex_lock(&prd->lock); | ||
77 | list_add_tail(&ce->node, &prd->clock_list); | ||
78 | mutex_unlock(&prd->lock); | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | /** | ||
83 | * __pm_runtime_clk_remove - Destroy runtime PM clock entry. | ||
84 | * @ce: Runtime PM clock entry to destroy. | ||
85 | * | ||
86 | * This routine must be called under the mutex protecting the runtime PM list | ||
87 | * of clocks corresponding to the @ce's device. | ||
88 | */ | ||
89 | static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) | ||
90 | { | ||
91 | if (!ce) | ||
92 | return; | ||
93 | |||
94 | list_del(&ce->node); | ||
95 | |||
96 | if (ce->status < PCE_STATUS_ERROR) { | ||
97 | if (ce->status == PCE_STATUS_ENABLED) | ||
98 | clk_disable(ce->clk); | ||
99 | |||
100 | if (ce->status >= PCE_STATUS_ACQUIRED) | ||
101 | clk_put(ce->clk); | ||
102 | } | ||
103 | |||
104 | if (ce->con_id) | ||
105 | kfree(ce->con_id); | ||
106 | |||
107 | kfree(ce); | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * pm_runtime_clk_remove - Stop using a device clock for runtime PM. | ||
112 | * @dev: Device whose clock should not be used for runtime PM any more. | ||
113 | * @con_id: Connection ID of the clock. | ||
114 | * | ||
115 | * Remove the clock represented by @con_id from the list of clocks used for | ||
116 | * the runtime PM of @dev. | ||
117 | */ | ||
118 | void pm_runtime_clk_remove(struct device *dev, const char *con_id) | ||
119 | { | ||
120 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
121 | struct pm_clock_entry *ce; | ||
122 | |||
123 | if (!prd) | ||
124 | return; | ||
125 | |||
126 | mutex_lock(&prd->lock); | ||
127 | |||
128 | list_for_each_entry(ce, &prd->clock_list, node) { | ||
129 | if (!con_id && !ce->con_id) { | ||
130 | __pm_runtime_clk_remove(ce); | ||
131 | break; | ||
132 | } else if (!con_id || !ce->con_id) { | ||
133 | continue; | ||
134 | } else if (!strcmp(con_id, ce->con_id)) { | ||
135 | __pm_runtime_clk_remove(ce); | ||
136 | break; | ||
137 | } | ||
138 | } | ||
139 | |||
140 | mutex_unlock(&prd->lock); | ||
141 | } | ||
142 | |||
143 | /** | ||
144 | * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks. | ||
145 | * @dev: Device to initialize the list of runtime PM clocks for. | ||
146 | * | ||
147 | * Allocate a struct pm_runtime_clk_data object, initialize its lock member and | ||
148 | * make the @dev's power.subsys_data field point to it. | ||
149 | */ | ||
150 | int pm_runtime_clk_init(struct device *dev) | ||
151 | { | ||
152 | struct pm_runtime_clk_data *prd; | ||
153 | |||
154 | prd = kzalloc(sizeof(*prd), GFP_KERNEL); | ||
155 | if (!prd) { | ||
156 | dev_err(dev, "Not enough memory fo runtime PM data.\n"); | ||
157 | return -ENOMEM; | ||
158 | } | ||
159 | |||
160 | INIT_LIST_HEAD(&prd->clock_list); | ||
161 | mutex_init(&prd->lock); | ||
162 | dev->power.subsys_data = prd; | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks. | ||
168 | * @dev: Device to destroy the list of runtime PM clocks for. | ||
169 | * | ||
170 | * Clear the @dev's power.subsys_data field, remove the list of clock entries | ||
171 | * from the struct pm_runtime_clk_data object pointed to by it before and free | ||
172 | * that object. | ||
173 | */ | ||
174 | void pm_runtime_clk_destroy(struct device *dev) | ||
175 | { | ||
176 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
177 | struct pm_clock_entry *ce, *c; | ||
178 | |||
179 | if (!prd) | ||
180 | return; | ||
181 | |||
182 | dev->power.subsys_data = NULL; | ||
183 | |||
184 | mutex_lock(&prd->lock); | ||
185 | |||
186 | list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node) | ||
187 | __pm_runtime_clk_remove(ce); | ||
188 | |||
189 | mutex_unlock(&prd->lock); | ||
190 | |||
191 | kfree(prd); | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * pm_runtime_clk_acquire - Acquire a device clock. | ||
196 | * @dev: Device whose clock is to be acquired. | ||
197 | * @con_id: Connection ID of the clock. | ||
198 | */ | ||
199 | static void pm_runtime_clk_acquire(struct device *dev, | ||
200 | struct pm_clock_entry *ce) | ||
201 | { | ||
202 | ce->clk = clk_get(dev, ce->con_id); | ||
203 | if (IS_ERR(ce->clk)) { | ||
204 | ce->status = PCE_STATUS_ERROR; | ||
205 | } else { | ||
206 | ce->status = PCE_STATUS_ACQUIRED; | ||
207 | dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); | ||
208 | } | ||
209 | } | ||
210 | |||
211 | /** | ||
212 | * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list. | ||
213 | * @dev: Device to disable the clocks for. | ||
214 | */ | ||
215 | int pm_runtime_clk_suspend(struct device *dev) | ||
216 | { | ||
217 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
218 | struct pm_clock_entry *ce; | ||
219 | |||
220 | dev_dbg(dev, "%s()\n", __func__); | ||
221 | |||
222 | if (!prd) | ||
223 | return 0; | ||
224 | |||
225 | mutex_lock(&prd->lock); | ||
226 | |||
227 | list_for_each_entry_reverse(ce, &prd->clock_list, node) { | ||
228 | if (ce->status == PCE_STATUS_NONE) | ||
229 | pm_runtime_clk_acquire(dev, ce); | ||
230 | |||
231 | if (ce->status < PCE_STATUS_ERROR) { | ||
232 | clk_disable(ce->clk); | ||
233 | ce->status = PCE_STATUS_ACQUIRED; | ||
234 | } | ||
235 | } | ||
236 | |||
237 | mutex_unlock(&prd->lock); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list. | ||
244 | * @dev: Device to enable the clocks for. | ||
245 | */ | ||
246 | int pm_runtime_clk_resume(struct device *dev) | ||
247 | { | ||
248 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
249 | struct pm_clock_entry *ce; | ||
250 | |||
251 | dev_dbg(dev, "%s()\n", __func__); | ||
252 | |||
253 | if (!prd) | ||
254 | return 0; | ||
255 | |||
256 | mutex_lock(&prd->lock); | ||
257 | |||
258 | list_for_each_entry(ce, &prd->clock_list, node) { | ||
259 | if (ce->status == PCE_STATUS_NONE) | ||
260 | pm_runtime_clk_acquire(dev, ce); | ||
261 | |||
262 | if (ce->status < PCE_STATUS_ERROR) { | ||
263 | clk_enable(ce->clk); | ||
264 | ce->status = PCE_STATUS_ENABLED; | ||
265 | } | ||
266 | } | ||
267 | |||
268 | mutex_unlock(&prd->lock); | ||
269 | |||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | /** | ||
274 | * pm_runtime_clk_notify - Notify routine for device addition and removal. | ||
275 | * @nb: Notifier block object this function is a member of. | ||
276 | * @action: Operation being carried out by the caller. | ||
277 | * @data: Device the routine is being run for. | ||
278 | * | ||
279 | * For this function to work, @nb must be a member of an object of type | ||
280 | * struct pm_clk_notifier_block containing all of the requisite data. | ||
281 | * Specifically, the pwr_domain member of that object is copied to the device's | ||
282 | * pwr_domain field and its con_ids member is used to populate the device's list | ||
283 | * of runtime PM clocks, depending on @action. | ||
284 | * | ||
285 | * If the device's pwr_domain field is already populated with a value different | ||
286 | * from the one stored in the struct pm_clk_notifier_block object, the function | ||
287 | * does nothing. | ||
288 | */ | ||
289 | static int pm_runtime_clk_notify(struct notifier_block *nb, | ||
290 | unsigned long action, void *data) | ||
291 | { | ||
292 | struct pm_clk_notifier_block *clknb; | ||
293 | struct device *dev = data; | ||
294 | char *con_id; | ||
295 | int error; | ||
296 | |||
297 | dev_dbg(dev, "%s() %ld\n", __func__, action); | ||
298 | |||
299 | clknb = container_of(nb, struct pm_clk_notifier_block, nb); | ||
300 | |||
301 | switch (action) { | ||
302 | case BUS_NOTIFY_ADD_DEVICE: | ||
303 | if (dev->pwr_domain) | ||
304 | break; | ||
305 | |||
306 | error = pm_runtime_clk_init(dev); | ||
307 | if (error) | ||
308 | break; | ||
309 | |||
310 | dev->pwr_domain = clknb->pwr_domain; | ||
311 | if (clknb->con_ids[0]) { | ||
312 | for (con_id = clknb->con_ids[0]; *con_id; con_id++) | ||
313 | pm_runtime_clk_add(dev, con_id); | ||
314 | } else { | ||
315 | pm_runtime_clk_add(dev, NULL); | ||
316 | } | ||
317 | |||
318 | break; | ||
319 | case BUS_NOTIFY_DEL_DEVICE: | ||
320 | if (dev->pwr_domain != clknb->pwr_domain) | ||
321 | break; | ||
322 | |||
323 | dev->pwr_domain = NULL; | ||
324 | pm_runtime_clk_destroy(dev); | ||
325 | break; | ||
326 | } | ||
327 | |||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | #else /* !CONFIG_PM_RUNTIME */ | ||
332 | |||
333 | /** | ||
334 | * enable_clock - Enable a device clock. | ||
335 | * @dev: Device whose clock is to be enabled. | ||
336 | * @con_id: Connection ID of the clock. | ||
337 | */ | ||
338 | static void enable_clock(struct device *dev, const char *con_id) | ||
339 | { | ||
340 | struct clk *clk; | ||
341 | |||
342 | clk = clk_get(dev, con_id); | ||
343 | if (!IS_ERR(clk)) { | ||
344 | clk_enable(clk); | ||
345 | clk_put(clk); | ||
346 | dev_info(dev, "Runtime PM disabled, clock forced on.\n"); | ||
347 | } | ||
348 | } | ||
349 | |||
350 | /** | ||
351 | * disable_clock - Disable a device clock. | ||
352 | * @dev: Device whose clock is to be disabled. | ||
353 | * @con_id: Connection ID of the clock. | ||
354 | */ | ||
355 | static void disable_clock(struct device *dev, const char *con_id) | ||
356 | { | ||
357 | struct clk *clk; | ||
358 | |||
359 | clk = clk_get(dev, con_id); | ||
360 | if (!IS_ERR(clk)) { | ||
361 | clk_disable(clk); | ||
362 | clk_put(clk); | ||
363 | dev_info(dev, "Runtime PM disabled, clock forced off.\n"); | ||
364 | } | ||
365 | } | ||
366 | |||
367 | /** | ||
368 | * pm_runtime_clk_notify - Notify routine for device addition and removal. | ||
369 | * @nb: Notifier block object this function is a member of. | ||
370 | * @action: Operation being carried out by the caller. | ||
371 | * @data: Device the routine is being run for. | ||
372 | * | ||
373 | * For this function to work, @nb must be a member of an object of type | ||
374 | * struct pm_clk_notifier_block containing all of the requisite data. | ||
375 | * Specifically, the con_ids member of that object is used to enable or disable | ||
376 | * the device's clocks, depending on @action. | ||
377 | */ | ||
378 | static int pm_runtime_clk_notify(struct notifier_block *nb, | ||
379 | unsigned long action, void *data) | ||
380 | { | ||
381 | struct pm_clk_notifier_block *clknb; | ||
382 | struct device *dev = data; | ||
383 | char *con_id; | ||
384 | |||
385 | dev_dbg(dev, "%s() %ld\n", __func__, action); | ||
386 | |||
387 | clknb = container_of(nb, struct pm_clk_notifier_block, nb); | ||
388 | |||
389 | switch (action) { | ||
390 | case BUS_NOTIFY_ADD_DEVICE: | ||
391 | if (clknb->con_ids[0]) { | ||
392 | for (con_id = clknb->con_ids[0]; *con_id; con_id++) | ||
393 | enable_clock(dev, con_id); | ||
394 | } else { | ||
395 | enable_clock(dev, NULL); | ||
396 | } | ||
397 | break; | ||
398 | case BUS_NOTIFY_DEL_DEVICE: | ||
399 | if (clknb->con_ids[0]) { | ||
400 | for (con_id = clknb->con_ids[0]; *con_id; con_id++) | ||
401 | disable_clock(dev, con_id); | ||
402 | } else { | ||
403 | disable_clock(dev, NULL); | ||
404 | } | ||
405 | break; | ||
406 | } | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | #endif /* !CONFIG_PM_RUNTIME */ | ||
412 | |||
413 | /** | ||
414 | * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks. | ||
415 | * @bus: Bus type to add the notifier to. | ||
416 | * @clknb: Notifier to be added to the given bus type. | ||
417 | * | ||
418 | * The nb member of @clknb is not expected to be initialized and its | ||
419 | * notifier_call member will be replaced with pm_runtime_clk_notify(). However, | ||
420 | * the remaining members of @clknb should be populated prior to calling this | ||
421 | * routine. | ||
422 | */ | ||
423 | void pm_runtime_clk_add_notifier(struct bus_type *bus, | ||
424 | struct pm_clk_notifier_block *clknb) | ||
425 | { | ||
426 | if (!bus || !clknb) | ||
427 | return; | ||
428 | |||
429 | clknb->nb.notifier_call = pm_runtime_clk_notify; | ||
430 | bus_register_notifier(bus, &clknb->nb); | ||
431 | } | ||
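clock_ops.c is consumed through pm_runtime_clk_add_notifier(): a platform fills in a struct pm_clk_notifier_block and the notifier attaches the clock list (and, with CONFIG_PM_RUNTIME, a power domain) to every device added to the bus. A sketch of how a platform might wire this up, loosely modeled on the SH-Mobile use of this API; default_power_domain and the initcall are assumptions, not part of this patch:

#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

static struct dev_power_domain default_power_domain = {
	.ops = {
		.runtime_suspend = pm_runtime_clk_suspend,	/* gate the listed clocks */
		.runtime_resume = pm_runtime_clk_resume,	/* ungate them again */
		.runtime_idle = pm_generic_runtime_idle,
		USE_PLATFORM_PM_SLEEP_OPS
	},
};

static struct pm_clk_notifier_block platform_bus_notifier = {
	.pwr_domain = &default_power_domain,
	.con_ids = { NULL, },	/* NULL: manage each device's default clock */
};

static int __init board_pm_init(void)
{
	pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
	return 0;
}
core_initcall(board_pm_init);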
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 42f97f925629..cb3bb368681c 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c | |||
@@ -74,6 +74,23 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); | |||
74 | 74 | ||
75 | #ifdef CONFIG_PM_SLEEP | 75 | #ifdef CONFIG_PM_SLEEP |
76 | /** | 76 | /** |
77 | * pm_generic_prepare - Generic routine preparing a device for a power transition. | ||
78 | * @dev: Device to prepare. | ||
79 | * | ||
80 | * Prepare a device for a system-wide power transition. | ||
81 | */ | ||
82 | int pm_generic_prepare(struct device *dev) | ||
83 | { | ||
84 | struct device_driver *drv = dev->driver; | ||
85 | int ret = 0; | ||
86 | |||
87 | if (drv && drv->pm && drv->pm->prepare) | ||
88 | ret = drv->pm->prepare(dev); | ||
89 | |||
90 | return ret; | ||
91 | } | ||
92 | |||
93 | /** | ||
77 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. | 94 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. |
78 | * @dev: Device to handle. | 95 | * @dev: Device to handle. |
79 | * @event: PM transition of the system under way. | 96 | * @event: PM transition of the system under way. |
@@ -213,16 +230,38 @@ int pm_generic_restore(struct device *dev) | |||
213 | return __pm_generic_resume(dev, PM_EVENT_RESTORE); | 230 | return __pm_generic_resume(dev, PM_EVENT_RESTORE); |
214 | } | 231 | } |
215 | EXPORT_SYMBOL_GPL(pm_generic_restore); | 232 | EXPORT_SYMBOL_GPL(pm_generic_restore); |
233 | |||
234 | /** | ||
235 | * pm_generic_complete - Generic routine completing a device power transition. | ||
236 | * @dev: Device to handle. | ||
237 | * | ||
238 | * Complete a device power transition during a system-wide power transition. | ||
239 | */ | ||
240 | void pm_generic_complete(struct device *dev) | ||
241 | { | ||
242 | struct device_driver *drv = dev->driver; | ||
243 | |||
244 | if (drv && drv->pm && drv->pm->complete) | ||
245 | drv->pm->complete(dev); | ||
246 | |||
247 | /* | ||
248 | * Let runtime PM try to suspend devices that haven't been in use before | ||
249 | * going into the system-wide sleep state we're resuming from. | ||
250 | */ | ||
251 | pm_runtime_idle(dev); | ||
252 | } | ||
216 | #endif /* CONFIG_PM_SLEEP */ | 253 | #endif /* CONFIG_PM_SLEEP */ |
217 | 254 | ||
218 | struct dev_pm_ops generic_subsys_pm_ops = { | 255 | struct dev_pm_ops generic_subsys_pm_ops = { |
219 | #ifdef CONFIG_PM_SLEEP | 256 | #ifdef CONFIG_PM_SLEEP |
257 | .prepare = pm_generic_prepare, | ||
220 | .suspend = pm_generic_suspend, | 258 | .suspend = pm_generic_suspend, |
221 | .resume = pm_generic_resume, | 259 | .resume = pm_generic_resume, |
222 | .freeze = pm_generic_freeze, | 260 | .freeze = pm_generic_freeze, |
223 | .thaw = pm_generic_thaw, | 261 | .thaw = pm_generic_thaw, |
224 | .poweroff = pm_generic_poweroff, | 262 | .poweroff = pm_generic_poweroff, |
225 | .restore = pm_generic_restore, | 263 | .restore = pm_generic_restore, |
264 | .complete = pm_generic_complete, | ||
226 | #endif | 265 | #endif |
227 | #ifdef CONFIG_PM_RUNTIME | 266 | #ifdef CONFIG_PM_RUNTIME |
228 | .runtime_suspend = pm_generic_runtime_suspend, | 267 | .runtime_suspend = pm_generic_runtime_suspend, |
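pm_generic_prepare() and pm_generic_complete() simply forward to the bound driver's own ->prepare()/->complete() methods, with the latter also giving runtime PM a chance to suspend the device again. A subsystem that adopts generic_subsys_pm_ops therefore only needs drivers to fill in their dev_pm_ops; a minimal driver-side sketch (the example_* callbacks are placeholders):

#include <linux/pm.h>

static int example_prepare(struct device *dev)
{
	/* defer or refuse new activity before the system transition starts */
	return 0;
}

static void example_complete(struct device *dev)
{
	/* undo whatever example_prepare() set up */
}

static const struct dev_pm_ops example_pm_ops = {
	.prepare	= example_prepare,	/* reached via pm_generic_prepare() */
	.complete	= example_complete,	/* reached via pm_generic_complete() */
	/* .suspend/.resume etc. are forwarded by the other pm_generic_* helpers */
};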
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index abe3ab709e87..aa6320207745 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -426,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
426 | 426 | ||
427 | if (dev->pwr_domain) { | 427 | if (dev->pwr_domain) { |
428 | pm_dev_dbg(dev, state, "EARLY power domain "); | 428 | pm_dev_dbg(dev, state, "EARLY power domain "); |
429 | pm_noirq_op(dev, &dev->pwr_domain->ops, state); | 429 | error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); |
430 | } | 430 | } else if (dev->type && dev->type->pm) { |
431 | |||
432 | if (dev->type && dev->type->pm) { | ||
433 | pm_dev_dbg(dev, state, "EARLY type "); | 431 | pm_dev_dbg(dev, state, "EARLY type "); |
434 | error = pm_noirq_op(dev, dev->type->pm, state); | 432 | error = pm_noirq_op(dev, dev->type->pm, state); |
435 | } else if (dev->class && dev->class->pm) { | 433 | } else if (dev->class && dev->class->pm) { |
@@ -517,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
517 | 515 | ||
518 | if (dev->pwr_domain) { | 516 | if (dev->pwr_domain) { |
519 | pm_dev_dbg(dev, state, "power domain "); | 517 | pm_dev_dbg(dev, state, "power domain "); |
520 | pm_op(dev, &dev->pwr_domain->ops, state); | 518 | error = pm_op(dev, &dev->pwr_domain->ops, state); |
519 | goto End; | ||
521 | } | 520 | } |
522 | 521 | ||
523 | if (dev->type && dev->type->pm) { | 522 | if (dev->type && dev->type->pm) { |
@@ -580,11 +579,13 @@ static bool is_async(struct device *dev) | |||
580 | * Execute the appropriate "resume" callback for all devices whose status | 579 | * Execute the appropriate "resume" callback for all devices whose status |
581 | * indicates that they are suspended. | 580 | * indicates that they are suspended. |
582 | */ | 581 | */ |
583 | static void dpm_resume(pm_message_t state) | 582 | void dpm_resume(pm_message_t state) |
584 | { | 583 | { |
585 | struct device *dev; | 584 | struct device *dev; |
586 | ktime_t starttime = ktime_get(); | 585 | ktime_t starttime = ktime_get(); |
587 | 586 | ||
587 | might_sleep(); | ||
588 | |||
588 | mutex_lock(&dpm_list_mtx); | 589 | mutex_lock(&dpm_list_mtx); |
589 | pm_transition = state; | 590 | pm_transition = state; |
590 | async_error = 0; | 591 | async_error = 0; |
@@ -629,12 +630,11 @@ static void device_complete(struct device *dev, pm_message_t state) | |||
629 | { | 630 | { |
630 | device_lock(dev); | 631 | device_lock(dev); |
631 | 632 | ||
632 | if (dev->pwr_domain && dev->pwr_domain->ops.complete) { | 633 | if (dev->pwr_domain) { |
633 | pm_dev_dbg(dev, state, "completing power domain "); | 634 | pm_dev_dbg(dev, state, "completing power domain "); |
634 | dev->pwr_domain->ops.complete(dev); | 635 | if (dev->pwr_domain->ops.complete) |
635 | } | 636 | dev->pwr_domain->ops.complete(dev); |
636 | 637 | } else if (dev->type && dev->type->pm) { | |
637 | if (dev->type && dev->type->pm) { | ||
638 | pm_dev_dbg(dev, state, "completing type "); | 638 | pm_dev_dbg(dev, state, "completing type "); |
639 | if (dev->type->pm->complete) | 639 | if (dev->type->pm->complete) |
640 | dev->type->pm->complete(dev); | 640 | dev->type->pm->complete(dev); |
@@ -658,10 +658,12 @@ static void device_complete(struct device *dev, pm_message_t state) | |||
658 | * Execute the ->complete() callbacks for all devices whose PM status is not | 658 | * Execute the ->complete() callbacks for all devices whose PM status is not |
659 | * DPM_ON (this allows new devices to be registered). | 659 | * DPM_ON (this allows new devices to be registered). |
660 | */ | 660 | */ |
661 | static void dpm_complete(pm_message_t state) | 661 | void dpm_complete(pm_message_t state) |
662 | { | 662 | { |
663 | struct list_head list; | 663 | struct list_head list; |
664 | 664 | ||
665 | might_sleep(); | ||
666 | |||
665 | INIT_LIST_HEAD(&list); | 667 | INIT_LIST_HEAD(&list); |
666 | mutex_lock(&dpm_list_mtx); | 668 | mutex_lock(&dpm_list_mtx); |
667 | while (!list_empty(&dpm_prepared_list)) { | 669 | while (!list_empty(&dpm_prepared_list)) { |
@@ -690,7 +692,6 @@ static void dpm_complete(pm_message_t state) | |||
690 | */ | 692 | */ |
691 | void dpm_resume_end(pm_message_t state) | 693 | void dpm_resume_end(pm_message_t state) |
692 | { | 694 | { |
693 | might_sleep(); | ||
694 | dpm_resume(state); | 695 | dpm_resume(state); |
695 | dpm_complete(state); | 696 | dpm_complete(state); |
696 | } | 697 | } |
@@ -732,7 +733,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
732 | { | 733 | { |
733 | int error; | 734 | int error; |
734 | 735 | ||
735 | if (dev->type && dev->type->pm) { | 736 | if (dev->pwr_domain) { |
737 | pm_dev_dbg(dev, state, "LATE power domain "); | ||
738 | error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); | ||
739 | if (error) | ||
740 | return error; | ||
741 | } else if (dev->type && dev->type->pm) { | ||
736 | pm_dev_dbg(dev, state, "LATE type "); | 742 | pm_dev_dbg(dev, state, "LATE type "); |
737 | error = pm_noirq_op(dev, dev->type->pm, state); | 743 | error = pm_noirq_op(dev, dev->type->pm, state); |
738 | if (error) | 744 | if (error) |
@@ -749,11 +755,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
749 | return error; | 755 | return error; |
750 | } | 756 | } |
751 | 757 | ||
752 | if (dev->pwr_domain) { | ||
753 | pm_dev_dbg(dev, state, "LATE power domain "); | ||
754 | pm_noirq_op(dev, &dev->pwr_domain->ops, state); | ||
755 | } | ||
756 | |||
757 | return 0; | 758 | return 0; |
758 | } | 759 | } |
759 | 760 | ||
@@ -841,21 +842,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
841 | goto End; | 842 | goto End; |
842 | } | 843 | } |
843 | 844 | ||
845 | if (dev->pwr_domain) { | ||
846 | pm_dev_dbg(dev, state, "power domain "); | ||
847 | error = pm_op(dev, &dev->pwr_domain->ops, state); | ||
848 | goto End; | ||
849 | } | ||
850 | |||
844 | if (dev->type && dev->type->pm) { | 851 | if (dev->type && dev->type->pm) { |
845 | pm_dev_dbg(dev, state, "type "); | 852 | pm_dev_dbg(dev, state, "type "); |
846 | error = pm_op(dev, dev->type->pm, state); | 853 | error = pm_op(dev, dev->type->pm, state); |
847 | goto Domain; | 854 | goto End; |
848 | } | 855 | } |
849 | 856 | ||
850 | if (dev->class) { | 857 | if (dev->class) { |
851 | if (dev->class->pm) { | 858 | if (dev->class->pm) { |
852 | pm_dev_dbg(dev, state, "class "); | 859 | pm_dev_dbg(dev, state, "class "); |
853 | error = pm_op(dev, dev->class->pm, state); | 860 | error = pm_op(dev, dev->class->pm, state); |
854 | goto Domain; | 861 | goto End; |
855 | } else if (dev->class->suspend) { | 862 | } else if (dev->class->suspend) { |
856 | pm_dev_dbg(dev, state, "legacy class "); | 863 | pm_dev_dbg(dev, state, "legacy class "); |
857 | error = legacy_suspend(dev, state, dev->class->suspend); | 864 | error = legacy_suspend(dev, state, dev->class->suspend); |
858 | goto Domain; | 865 | goto End; |
859 | } | 866 | } |
860 | } | 867 | } |
861 | 868 | ||
@@ -869,12 +876,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
869 | } | 876 | } |
870 | } | 877 | } |
871 | 878 | ||
872 | Domain: | ||
873 | if (!error && dev->pwr_domain) { | ||
874 | pm_dev_dbg(dev, state, "power domain "); | ||
875 | pm_op(dev, &dev->pwr_domain->ops, state); | ||
876 | } | ||
877 | |||
878 | End: | 879 | End: |
879 | device_unlock(dev); | 880 | device_unlock(dev); |
880 | complete_all(&dev->power.completion); | 881 | complete_all(&dev->power.completion); |
@@ -914,11 +915,13 @@ static int device_suspend(struct device *dev) | |||
914 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. | 915 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. |
915 | * @state: PM transition of the system being carried out. | 916 | * @state: PM transition of the system being carried out. |
916 | */ | 917 | */ |
917 | static int dpm_suspend(pm_message_t state) | 918 | int dpm_suspend(pm_message_t state) |
918 | { | 919 | { |
919 | ktime_t starttime = ktime_get(); | 920 | ktime_t starttime = ktime_get(); |
920 | int error = 0; | 921 | int error = 0; |
921 | 922 | ||
923 | might_sleep(); | ||
924 | |||
922 | mutex_lock(&dpm_list_mtx); | 925 | mutex_lock(&dpm_list_mtx); |
923 | pm_transition = state; | 926 | pm_transition = state; |
924 | async_error = 0; | 927 | async_error = 0; |
@@ -965,7 +968,14 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
965 | 968 | ||
966 | device_lock(dev); | 969 | device_lock(dev); |
967 | 970 | ||
968 | if (dev->type && dev->type->pm) { | 971 | if (dev->pwr_domain) { |
972 | pm_dev_dbg(dev, state, "preparing power domain "); | ||
973 | if (dev->pwr_domain->ops.prepare) | ||
974 | error = dev->pwr_domain->ops.prepare(dev); | ||
975 | suspend_report_result(dev->pwr_domain->ops.prepare, error); | ||
976 | if (error) | ||
977 | goto End; | ||
978 | } else if (dev->type && dev->type->pm) { | ||
969 | pm_dev_dbg(dev, state, "preparing type "); | 979 | pm_dev_dbg(dev, state, "preparing type "); |
970 | if (dev->type->pm->prepare) | 980 | if (dev->type->pm->prepare) |
971 | error = dev->type->pm->prepare(dev); | 981 | error = dev->type->pm->prepare(dev); |
@@ -984,13 +994,6 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
984 | if (dev->bus->pm->prepare) | 994 | if (dev->bus->pm->prepare) |
985 | error = dev->bus->pm->prepare(dev); | 995 | error = dev->bus->pm->prepare(dev); |
986 | suspend_report_result(dev->bus->pm->prepare, error); | 996 | suspend_report_result(dev->bus->pm->prepare, error); |
987 | if (error) | ||
988 | goto End; | ||
989 | } | ||
990 | |||
991 | if (dev->pwr_domain && dev->pwr_domain->ops.prepare) { | ||
992 | pm_dev_dbg(dev, state, "preparing power domain "); | ||
993 | dev->pwr_domain->ops.prepare(dev); | ||
994 | } | 997 | } |
995 | 998 | ||
996 | End: | 999 | End: |
@@ -1005,10 +1008,12 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
1005 | * | 1008 | * |
1006 | * Execute the ->prepare() callback(s) for all devices. | 1009 | * Execute the ->prepare() callback(s) for all devices. |
1007 | */ | 1010 | */ |
1008 | static int dpm_prepare(pm_message_t state) | 1011 | int dpm_prepare(pm_message_t state) |
1009 | { | 1012 | { |
1010 | int error = 0; | 1013 | int error = 0; |
1011 | 1014 | ||
1015 | might_sleep(); | ||
1016 | |||
1012 | mutex_lock(&dpm_list_mtx); | 1017 | mutex_lock(&dpm_list_mtx); |
1013 | while (!list_empty(&dpm_list)) { | 1018 | while (!list_empty(&dpm_list)) { |
1014 | struct device *dev = to_device(dpm_list.next); | 1019 | struct device *dev = to_device(dpm_list.next); |
@@ -1057,7 +1062,6 @@ int dpm_suspend_start(pm_message_t state) | |||
1057 | { | 1062 | { |
1058 | int error; | 1063 | int error; |
1059 | 1064 | ||
1060 | might_sleep(); | ||
1061 | error = dpm_prepare(state); | 1065 | error = dpm_prepare(state); |
1062 | if (!error) | 1066 | if (!error) |
1063 | error = dpm_suspend(state); | 1067 | error = dpm_suspend(state); |
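Taken together, the main.c hunks make a device's power-domain callbacks replace, rather than run alongside, the type/class/bus callbacks, and they apply the same ordering in the prepare, suspend, suspend_noirq, resume, resume_noirq and complete paths. Condensed into one illustrative helper (no such function exists in main.c; legacy class/bus suspend hooks are ignored for brevity):

/* Callback selection order after this patch: domain, then type, class, bus. */
static const struct dev_pm_ops *pm_ops_for(struct device *dev)
{
	if (dev->pwr_domain)
		return &dev->pwr_domain->ops;
	if (dev->type && dev->type->pm)
		return dev->type->pm;
	if (dev->class && dev->class->pm)
		return dev->class->pm;
	if (dev->bus && dev->bus->pm)
		return dev->bus->pm;
	return NULL;
}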
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 3172c60d23a9..0d4587b15c55 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev) | |||
168 | static int rpm_idle(struct device *dev, int rpmflags) | 168 | static int rpm_idle(struct device *dev, int rpmflags) |
169 | { | 169 | { |
170 | int (*callback)(struct device *); | 170 | int (*callback)(struct device *); |
171 | int (*domain_callback)(struct device *); | ||
172 | int retval; | 171 | int retval; |
173 | 172 | ||
174 | retval = rpm_check_suspend_allowed(dev); | 173 | retval = rpm_check_suspend_allowed(dev); |
@@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
214 | 213 | ||
215 | dev->power.idle_notification = true; | 214 | dev->power.idle_notification = true; |
216 | 215 | ||
217 | if (dev->type && dev->type->pm) | 216 | if (dev->pwr_domain) |
217 | callback = dev->pwr_domain->ops.runtime_idle; | ||
218 | else if (dev->type && dev->type->pm) | ||
218 | callback = dev->type->pm->runtime_idle; | 219 | callback = dev->type->pm->runtime_idle; |
219 | else if (dev->class && dev->class->pm) | 220 | else if (dev->class && dev->class->pm) |
220 | callback = dev->class->pm->runtime_idle; | 221 | callback = dev->class->pm->runtime_idle; |
@@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
223 | else | 224 | else |
224 | callback = NULL; | 225 | callback = NULL; |
225 | 226 | ||
226 | if (dev->pwr_domain) | 227 | if (callback) { |
227 | domain_callback = dev->pwr_domain->ops.runtime_idle; | ||
228 | else | ||
229 | domain_callback = NULL; | ||
230 | |||
231 | if (callback || domain_callback) { | ||
232 | spin_unlock_irq(&dev->power.lock); | 228 | spin_unlock_irq(&dev->power.lock); |
233 | 229 | ||
234 | if (domain_callback) | 230 | callback(dev); |
235 | retval = domain_callback(dev); | ||
236 | |||
237 | if (!retval && callback) | ||
238 | callback(dev); | ||
239 | 231 | ||
240 | spin_lock_irq(&dev->power.lock); | 232 | spin_lock_irq(&dev->power.lock); |
241 | } | 233 | } |
@@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
382 | 374 | ||
383 | __update_runtime_status(dev, RPM_SUSPENDING); | 375 | __update_runtime_status(dev, RPM_SUSPENDING); |
384 | 376 | ||
385 | if (dev->type && dev->type->pm) | 377 | if (dev->pwr_domain) |
378 | callback = dev->pwr_domain->ops.runtime_suspend; | ||
379 | else if (dev->type && dev->type->pm) | ||
386 | callback = dev->type->pm->runtime_suspend; | 380 | callback = dev->type->pm->runtime_suspend; |
387 | else if (dev->class && dev->class->pm) | 381 | else if (dev->class && dev->class->pm) |
388 | callback = dev->class->pm->runtime_suspend; | 382 | callback = dev->class->pm->runtime_suspend; |
@@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
400 | else | 394 | else |
401 | pm_runtime_cancel_pending(dev); | 395 | pm_runtime_cancel_pending(dev); |
402 | } else { | 396 | } else { |
403 | if (dev->pwr_domain) | ||
404 | rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev); | ||
405 | no_callback: | 397 | no_callback: |
406 | __update_runtime_status(dev, RPM_SUSPENDED); | 398 | __update_runtime_status(dev, RPM_SUSPENDED); |
407 | pm_runtime_deactivate_timer(dev); | 399 | pm_runtime_deactivate_timer(dev); |
@@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
582 | __update_runtime_status(dev, RPM_RESUMING); | 574 | __update_runtime_status(dev, RPM_RESUMING); |
583 | 575 | ||
584 | if (dev->pwr_domain) | 576 | if (dev->pwr_domain) |
585 | rpm_callback(dev->pwr_domain->ops.runtime_resume, dev); | 577 | callback = dev->pwr_domain->ops.runtime_resume; |
586 | 578 | else if (dev->type && dev->type->pm) | |
587 | if (dev->type && dev->type->pm) | ||
588 | callback = dev->type->pm->runtime_resume; | 579 | callback = dev->type->pm->runtime_resume; |
589 | else if (dev->class && dev->class->pm) | 580 | else if (dev->class && dev->class->pm) |
590 | callback = dev->class->pm->runtime_resume; | 581 | callback = dev->class->pm->runtime_resume; |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index fff49bee781d..a9f5b8979611 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -212,8 +212,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, | |||
212 | static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, | 212 | static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, |
213 | autosuspend_delay_ms_store); | 213 | autosuspend_delay_ms_store); |
214 | 214 | ||
215 | #endif | 215 | #endif /* CONFIG_PM_RUNTIME */ |
216 | 216 | ||
217 | #ifdef CONFIG_PM_SLEEP | ||
217 | static ssize_t | 218 | static ssize_t |
218 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) | 219 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) |
219 | { | 220 | { |
@@ -248,7 +249,6 @@ wake_store(struct device * dev, struct device_attribute *attr, | |||
248 | 249 | ||
249 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); | 250 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); |
250 | 251 | ||
251 | #ifdef CONFIG_PM_SLEEP | ||
252 | static ssize_t wakeup_count_show(struct device *dev, | 252 | static ssize_t wakeup_count_show(struct device *dev, |
253 | struct device_attribute *attr, char *buf) | 253 | struct device_attribute *attr, char *buf) |
254 | { | 254 | { |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index abbbd33e8d8a..84f7c7d5a098 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -110,7 +110,6 @@ void wakeup_source_add(struct wakeup_source *ws) | |||
110 | spin_lock_irq(&events_lock); | 110 | spin_lock_irq(&events_lock); |
111 | list_add_rcu(&ws->entry, &wakeup_sources); | 111 | list_add_rcu(&ws->entry, &wakeup_sources); |
112 | spin_unlock_irq(&events_lock); | 112 | spin_unlock_irq(&events_lock); |
113 | synchronize_rcu(); | ||
114 | } | 113 | } |
115 | EXPORT_SYMBOL_GPL(wakeup_source_add); | 114 | EXPORT_SYMBOL_GPL(wakeup_source_add); |
116 | 115 | ||
diff --git a/drivers/base/sys.c b/drivers/base/sys.c index acde9b5ee131..9dff77bfe1e3 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c | |||
@@ -328,203 +328,8 @@ void sysdev_unregister(struct sys_device *sysdev) | |||
328 | kobject_put(&sysdev->kobj); | 328 | kobject_put(&sysdev->kobj); |
329 | } | 329 | } |
330 | 330 | ||
331 | 331 | EXPORT_SYMBOL_GPL(sysdev_register); | |
332 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | 332 | EXPORT_SYMBOL_GPL(sysdev_unregister); |
333 | /** | ||
334 | * sysdev_shutdown - Shut down all system devices. | ||
335 | * | ||
336 | * Loop over each class of system devices, and the devices in each | ||
337 | * of those classes. For each device, we call the shutdown method for | ||
338 | * each driver registered for the device - the auxiliaries, | ||
339 | * and the class driver. | ||
340 | * | ||
341 | * Note: The list is iterated in reverse order, so that we shut down | ||
342 | * child devices before we shut down their parents. The list ordering | ||
343 | * is guaranteed by virtue of the fact that child devices are registered | ||
344 | * after their parents. | ||
345 | */ | ||
346 | void sysdev_shutdown(void) | ||
347 | { | ||
348 | struct sysdev_class *cls; | ||
349 | |||
350 | pr_debug("Shutting Down System Devices\n"); | ||
351 | |||
352 | mutex_lock(&sysdev_drivers_lock); | ||
353 | list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { | ||
354 | struct sys_device *sysdev; | ||
355 | |||
356 | pr_debug("Shutting down type '%s':\n", | ||
357 | kobject_name(&cls->kset.kobj)); | ||
358 | |||
359 | list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { | ||
360 | struct sysdev_driver *drv; | ||
361 | pr_debug(" %s\n", kobject_name(&sysdev->kobj)); | ||
362 | |||
363 | /* Call auxiliary drivers first */ | ||
364 | list_for_each_entry(drv, &cls->drivers, entry) { | ||
365 | if (drv->shutdown) | ||
366 | drv->shutdown(sysdev); | ||
367 | } | ||
368 | |||
369 | /* Now call the generic one */ | ||
370 | if (cls->shutdown) | ||
371 | cls->shutdown(sysdev); | ||
372 | } | ||
373 | } | ||
374 | mutex_unlock(&sysdev_drivers_lock); | ||
375 | } | ||
376 | |||
377 | static void __sysdev_resume(struct sys_device *dev) | ||
378 | { | ||
379 | struct sysdev_class *cls = dev->cls; | ||
380 | struct sysdev_driver *drv; | ||
381 | |||
382 | /* First, call the class-specific one */ | ||
383 | if (cls->resume) | ||
384 | cls->resume(dev); | ||
385 | WARN_ONCE(!irqs_disabled(), | ||
386 | "Interrupts enabled after %pF\n", cls->resume); | ||
387 | |||
388 | /* Call auxiliary drivers next. */ | ||
389 | list_for_each_entry(drv, &cls->drivers, entry) { | ||
390 | if (drv->resume) | ||
391 | drv->resume(dev); | ||
392 | WARN_ONCE(!irqs_disabled(), | ||
393 | "Interrupts enabled after %pF\n", drv->resume); | ||
394 | } | ||
395 | } | ||
396 | |||
397 | /** | ||
398 | * sysdev_suspend - Suspend all system devices. | ||
399 | * @state: Power state to enter. | ||
400 | * | ||
401 | * We perform an almost identical operation as sysdev_shutdown() | ||
402 | * above, though calling ->suspend() instead. Interrupts are disabled | ||
403 | * when this called. Devices are responsible for both saving state and | ||
404 | * quiescing or powering down the device. | ||
405 | * | ||
406 | * This is only called by the device PM core, so we let them handle | ||
407 | * all synchronization. | ||
408 | */ | ||
409 | int sysdev_suspend(pm_message_t state) | ||
410 | { | ||
411 | struct sysdev_class *cls; | ||
412 | struct sys_device *sysdev, *err_dev; | ||
413 | struct sysdev_driver *drv, *err_drv; | ||
414 | int ret; | ||
415 | |||
416 | pr_debug("Checking wake-up interrupts\n"); | ||
417 | |||
418 | /* Return error code if there are any wake-up interrupts pending */ | ||
419 | ret = check_wakeup_irqs(); | ||
420 | if (ret) | ||
421 | return ret; | ||
422 | |||
423 | WARN_ONCE(!irqs_disabled(), | ||
424 | "Interrupts enabled while suspending system devices\n"); | ||
425 | |||
426 | pr_debug("Suspending System Devices\n"); | ||
427 | |||
428 | list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { | ||
429 | pr_debug("Suspending type '%s':\n", | ||
430 | kobject_name(&cls->kset.kobj)); | ||
431 | |||
432 | list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { | ||
433 | pr_debug(" %s\n", kobject_name(&sysdev->kobj)); | ||
434 | |||
435 | /* Call auxiliary drivers first */ | ||
436 | list_for_each_entry(drv, &cls->drivers, entry) { | ||
437 | if (drv->suspend) { | ||
438 | ret = drv->suspend(sysdev, state); | ||
439 | if (ret) | ||
440 | goto aux_driver; | ||
441 | } | ||
442 | WARN_ONCE(!irqs_disabled(), | ||
443 | "Interrupts enabled after %pF\n", | ||
444 | drv->suspend); | ||
445 | } | ||
446 | |||
447 | /* Now call the generic one */ | ||
448 | if (cls->suspend) { | ||
449 | ret = cls->suspend(sysdev, state); | ||
450 | if (ret) | ||
451 | goto cls_driver; | ||
452 | WARN_ONCE(!irqs_disabled(), | ||
453 | "Interrupts enabled after %pF\n", | ||
454 | cls->suspend); | ||
455 | } | ||
456 | } | ||
457 | } | ||
458 | return 0; | ||
459 | /* resume current sysdev */ | ||
460 | cls_driver: | ||
461 | drv = NULL; | ||
462 | printk(KERN_ERR "Class suspend failed for %s: %d\n", | ||
463 | kobject_name(&sysdev->kobj), ret); | ||
464 | |||
465 | aux_driver: | ||
466 | if (drv) | ||
467 | printk(KERN_ERR "Class driver suspend failed for %s: %d\n", | ||
468 | kobject_name(&sysdev->kobj), ret); | ||
469 | list_for_each_entry(err_drv, &cls->drivers, entry) { | ||
470 | if (err_drv == drv) | ||
471 | break; | ||
472 | if (err_drv->resume) | ||
473 | err_drv->resume(sysdev); | ||
474 | } | ||
475 | |||
476 | /* resume other sysdevs in current class */ | ||
477 | list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { | ||
478 | if (err_dev == sysdev) | ||
479 | break; | ||
480 | pr_debug(" %s\n", kobject_name(&err_dev->kobj)); | ||
481 | __sysdev_resume(err_dev); | ||
482 | } | ||
483 | |||
484 | /* resume other classes */ | ||
485 | list_for_each_entry_continue(cls, &system_kset->list, kset.kobj.entry) { | ||
486 | list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { | ||
487 | pr_debug(" %s\n", kobject_name(&err_dev->kobj)); | ||
488 | __sysdev_resume(err_dev); | ||
489 | } | ||
490 | } | ||
491 | return ret; | ||
492 | } | ||
493 | EXPORT_SYMBOL_GPL(sysdev_suspend); | ||
494 | |||
495 | /** | ||
496 | * sysdev_resume - Bring system devices back to life. | ||
497 | * | ||
498 | * Similar to sysdev_suspend(), but we iterate the list forwards | ||
499 | * to guarantee that parent devices are resumed before their children. | ||
500 | * | ||
501 | * Note: Interrupts are disabled when called. | ||
502 | */ | ||
503 | int sysdev_resume(void) | ||
504 | { | ||
505 | struct sysdev_class *cls; | ||
506 | |||
507 | WARN_ONCE(!irqs_disabled(), | ||
508 | "Interrupts enabled while resuming system devices\n"); | ||
509 | |||
510 | pr_debug("Resuming System Devices\n"); | ||
511 | |||
512 | list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { | ||
513 | struct sys_device *sysdev; | ||
514 | |||
515 | pr_debug("Resuming type '%s':\n", | ||
516 | kobject_name(&cls->kset.kobj)); | ||
517 | |||
518 | list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { | ||
519 | pr_debug(" %s\n", kobject_name(&sysdev->kobj)); | ||
520 | |||
521 | __sysdev_resume(sysdev); | ||
522 | } | ||
523 | } | ||
524 | return 0; | ||
525 | } | ||
526 | EXPORT_SYMBOL_GPL(sysdev_resume); | ||
527 | #endif /* CONFIG_ARCH_NO_SYSDEV_OPS */ | ||
528 | 333 | ||
529 | int __init system_bus_init(void) | 334 | int __init system_bus_init(void) |
530 | { | 335 | { |
@@ -534,9 +339,6 @@ int __init system_bus_init(void) | |||
534 | return 0; | 339 | return 0; |
535 | } | 340 | } |
536 | 341 | ||
537 | EXPORT_SYMBOL_GPL(sysdev_register); | ||
538 | EXPORT_SYMBOL_GPL(sysdev_unregister); | ||
539 | |||
540 | #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) | 342 | #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) |
541 | 343 | ||
542 | ssize_t sysdev_store_ulong(struct sys_device *sysdev, | 344 | ssize_t sysdev_store_ulong(struct sys_device *sysdev, |
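The large deletion above removes sysdev_shutdown(), sysdev_suspend() and sysdev_resume() for configurations that select CONFIG_ARCH_NO_SYSDEV_OPS; note the care the removed code took to walk classes in reverse for suspend/shutdown and to roll already-suspended devices back on error. In this kernel series the late, interrupts-disabled hooks those routines provided are normally expressed through struct syscore_ops instead; a minimal, hedged sketch of that interface (names and callback bodies are placeholders, not code from this patch):

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int example_syscore_suspend(void)
{
	/* runs late, with interrupts disabled and one CPU online */
	return 0;	/* a non-zero return aborts the transition */
}

static void example_syscore_resume(void)
{
	/* undo example_syscore_suspend(); interrupts are still disabled */
}

static struct syscore_ops example_syscore_ops = {
	.suspend	= example_syscore_suspend,
	.resume		= example_syscore_resume,
};

static int __init example_init(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}
device_initcall(example_init);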
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 8066d086578a..e086fbbbe853 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c | |||
@@ -2547,7 +2547,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) | |||
2547 | disk->major = MajorNumber; | 2547 | disk->major = MajorNumber; |
2548 | disk->first_minor = n << DAC960_MaxPartitionsBits; | 2548 | disk->first_minor = n << DAC960_MaxPartitionsBits; |
2549 | disk->fops = &DAC960_BlockDeviceOperations; | 2549 | disk->fops = &DAC960_BlockDeviceOperations; |
2550 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
2551 | } | 2550 | } |
2552 | /* | 2551 | /* |
2553 | Indicate the Block Device Registration completed successfully, | 2552 | Indicate the Block Device Registration completed successfully, |
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 456c0cc90dcf..8eba86bba599 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c | |||
@@ -1736,7 +1736,6 @@ static int __init fd_probe_drives(void) | |||
1736 | disk->major = FLOPPY_MAJOR; | 1736 | disk->major = FLOPPY_MAJOR; |
1737 | disk->first_minor = drive; | 1737 | disk->first_minor = drive; |
1738 | disk->fops = &floppy_fops; | 1738 | disk->fops = &floppy_fops; |
1739 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
1740 | sprintf(disk->disk_name, "fd%d", drive); | 1739 | sprintf(disk->disk_name, "fd%d", drive); |
1741 | disk->private_data = &unit[drive]; | 1740 | disk->private_data = &unit[drive]; |
1742 | set_capacity(disk, 880*2); | 1741 | set_capacity(disk, 880*2); |
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index c871eae14120..ede16c64ff07 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c | |||
@@ -1964,7 +1964,6 @@ static int __init atari_floppy_init (void) | |||
1964 | unit[i].disk->first_minor = i; | 1964 | unit[i].disk->first_minor = i; |
1965 | sprintf(unit[i].disk->disk_name, "fd%d", i); | 1965 | sprintf(unit[i].disk->disk_name, "fd%d", i); |
1966 | unit[i].disk->fops = &floppy_fops; | 1966 | unit[i].disk->fops = &floppy_fops; |
1967 | unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
1968 | unit[i].disk->private_data = &unit[i]; | 1967 | unit[i].disk->private_data = &unit[i]; |
1969 | unit[i].disk->queue = blk_init_queue(do_fd_request, | 1968 | unit[i].disk->queue = blk_init_queue(do_fd_request, |
1970 | &ataflop_lock); | 1969 | &ataflop_lock); |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 301d7a9a41a6..db8f88586c8d 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4205,7 +4205,6 @@ static int __init floppy_init(void) | |||
4205 | disks[dr]->major = FLOPPY_MAJOR; | 4205 | disks[dr]->major = FLOPPY_MAJOR; |
4206 | disks[dr]->first_minor = TOMINOR(dr); | 4206 | disks[dr]->first_minor = TOMINOR(dr); |
4207 | disks[dr]->fops = &floppy_fops; | 4207 | disks[dr]->fops = &floppy_fops; |
4208 | disks[dr]->events = DISK_EVENT_MEDIA_CHANGE; | ||
4209 | sprintf(disks[dr]->disk_name, "fd%d", dr); | 4208 | sprintf(disks[dr]->disk_name, "fd%d", dr); |
4210 | 4209 | ||
4211 | init_timer(&motor_off_timer[dr]); | 4210 | init_timer(&motor_off_timer[dr]); |
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 2f2ccf686251..8690e31d9932 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c | |||
@@ -320,7 +320,6 @@ static void pcd_init_units(void) | |||
320 | disk->first_minor = unit; | 320 | disk->first_minor = unit; |
321 | strcpy(disk->disk_name, cd->name); /* umm... */ | 321 | strcpy(disk->disk_name, cd->name); /* umm... */ |
322 | disk->fops = &pcd_bdops; | 322 | disk->fops = &pcd_bdops; |
323 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
324 | } | 323 | } |
325 | } | 324 | } |
326 | 325 | ||
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 21dfdb776869..869e7676d46f 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
@@ -837,7 +837,6 @@ static void pd_probe_drive(struct pd_unit *disk) | |||
837 | p->fops = &pd_fops; | 837 | p->fops = &pd_fops; |
838 | p->major = major; | 838 | p->major = major; |
839 | p->first_minor = (disk - pd) << PD_BITS; | 839 | p->first_minor = (disk - pd) << PD_BITS; |
840 | p->events = DISK_EVENT_MEDIA_CHANGE; | ||
841 | disk->gd = p; | 840 | disk->gd = p; |
842 | p->private_data = disk; | 841 | p->private_data = disk; |
843 | p->queue = pd_queue; | 842 | p->queue = pd_queue; |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 7adeb1edbf43..f21b520ef419 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
@@ -294,7 +294,6 @@ static void __init pf_init_units(void) | |||
294 | disk->first_minor = unit; | 294 | disk->first_minor = unit; |
295 | strcpy(disk->disk_name, pf->name); | 295 | strcpy(disk->disk_name, pf->name); |
296 | disk->fops = &pf_fops; | 296 | disk->fops = &pf_fops; |
297 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
298 | if (!(*drives[unit])[D_PRT]) | 297 | if (!(*drives[unit])[D_PRT]) |
299 | pf_drive_count++; | 298 | pf_drive_count++; |
300 | } | 299 | } |
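The DAC960, amiflop, ataflop, floppy, pcd, pd and pf hunks all delete the same line: these drivers stop advertising DISK_EVENT_MEDIA_CHANGE in gendisk->events. That field is only useful together with a ->check_events() method the block layer can poll, which none of these drivers implement, so the advertisement is presumably being withdrawn rather than media-change handling itself. For contrast, a hedged sketch of a driver that does use the events interface (all names hypothetical):

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/module.h>

static unsigned int example_check_events(struct gendisk *disk,
					 unsigned int clearing)
{
	bool changed = false;	/* a real driver would query the hardware */

	return changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.check_events	= example_check_events,
};

static void example_register(struct gendisk *disk)
{
	disk->fops   = &example_fops;
	disk->events = DISK_EVENT_MEDIA_CHANGE;	/* paired with check_events */
	add_disk(disk);
}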
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3e904717c1c0..9712fad82bc6 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -92,6 +92,8 @@ struct rbd_client { | |||
92 | struct list_head node; | 92 | struct list_head node; |
93 | }; | 93 | }; |
94 | 94 | ||
95 | struct rbd_req_coll; | ||
96 | |||
95 | /* | 97 | /* |
96 | * a single io request | 98 | * a single io request |
97 | */ | 99 | */ |
@@ -100,6 +102,24 @@ struct rbd_request { | |||
100 | struct bio *bio; /* cloned bio */ | 102 | struct bio *bio; /* cloned bio */ |
101 | struct page **pages; /* list of used pages */ | 103 | struct page **pages; /* list of used pages */ |
102 | u64 len; | 104 | u64 len; |
105 | int coll_index; | ||
106 | struct rbd_req_coll *coll; | ||
107 | }; | ||
108 | |||
109 | struct rbd_req_status { | ||
110 | int done; | ||
111 | int rc; | ||
112 | u64 bytes; | ||
113 | }; | ||
114 | |||
115 | /* | ||
116 | * a collection of requests | ||
117 | */ | ||
118 | struct rbd_req_coll { | ||
119 | int total; | ||
120 | int num_done; | ||
121 | struct kref kref; | ||
122 | struct rbd_req_status status[0]; | ||
103 | }; | 123 | }; |
104 | 124 | ||
105 | struct rbd_snap { | 125 | struct rbd_snap { |
@@ -416,6 +436,17 @@ static void rbd_put_client(struct rbd_device *rbd_dev) | |||
416 | rbd_dev->client = NULL; | 436 | rbd_dev->client = NULL; |
417 | } | 437 | } |
418 | 438 | ||
439 | /* | ||
440 | * Destroy requests collection | ||
441 | */ | ||
442 | static void rbd_coll_release(struct kref *kref) | ||
443 | { | ||
444 | struct rbd_req_coll *coll = | ||
445 | container_of(kref, struct rbd_req_coll, kref); | ||
446 | |||
447 | dout("rbd_coll_release %p\n", coll); | ||
448 | kfree(coll); | ||
449 | } | ||
419 | 450 | ||
420 | /* | 451 | /* |
421 | * Create a new header structure, translate header format from the on-disk | 452 | * Create a new header structure, translate header format from the on-disk |
@@ -590,6 +621,14 @@ static u64 rbd_get_segment(struct rbd_image_header *header, | |||
590 | return len; | 621 | return len; |
591 | } | 622 | } |
592 | 623 | ||
624 | static int rbd_get_num_segments(struct rbd_image_header *header, | ||
625 | u64 ofs, u64 len) | ||
626 | { | ||
627 | u64 start_seg = ofs >> header->obj_order; | ||
628 | u64 end_seg = (ofs + len - 1) >> header->obj_order; | ||
629 | return end_seg - start_seg + 1; | ||
630 | } | ||
631 | |||
593 | /* | 632 | /* |
594 | * bio helpers | 633 | * bio helpers |
595 | */ | 634 | */ |
@@ -735,6 +774,50 @@ static void rbd_destroy_ops(struct ceph_osd_req_op *ops) | |||
735 | kfree(ops); | 774 | kfree(ops); |
736 | } | 775 | } |
737 | 776 | ||
777 | static void rbd_coll_end_req_index(struct request *rq, | ||
778 | struct rbd_req_coll *coll, | ||
779 | int index, | ||
780 | int ret, u64 len) | ||
781 | { | ||
782 | struct request_queue *q; | ||
783 | int min, max, i; | ||
784 | |||
785 | dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n", | ||
786 | coll, index, ret, len); | ||
787 | |||
788 | if (!rq) | ||
789 | return; | ||
790 | |||
791 | if (!coll) { | ||
792 | blk_end_request(rq, ret, len); | ||
793 | return; | ||
794 | } | ||
795 | |||
796 | q = rq->q; | ||
797 | |||
798 | spin_lock_irq(q->queue_lock); | ||
799 | coll->status[index].done = 1; | ||
800 | coll->status[index].rc = ret; | ||
801 | coll->status[index].bytes = len; | ||
802 | max = min = coll->num_done; | ||
803 | while (max < coll->total && coll->status[max].done) | ||
804 | max++; | ||
805 | |||
806 | for (i = min; i<max; i++) { | ||
807 | __blk_end_request(rq, coll->status[i].rc, | ||
808 | coll->status[i].bytes); | ||
809 | coll->num_done++; | ||
810 | kref_put(&coll->kref, rbd_coll_release); | ||
811 | } | ||
812 | spin_unlock_irq(q->queue_lock); | ||
813 | } | ||
814 | |||
815 | static void rbd_coll_end_req(struct rbd_request *req, | ||
816 | int ret, u64 len) | ||
817 | { | ||
818 | rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len); | ||
819 | } | ||
820 | |||
738 | /* | 821 | /* |
739 | * Send ceph osd request | 822 | * Send ceph osd request |
740 | */ | 823 | */ |
@@ -749,6 +832,8 @@ static int rbd_do_request(struct request *rq, | |||
749 | int flags, | 832 | int flags, |
750 | struct ceph_osd_req_op *ops, | 833 | struct ceph_osd_req_op *ops, |
751 | int num_reply, | 834 | int num_reply, |
835 | struct rbd_req_coll *coll, | ||
836 | int coll_index, | ||
752 | void (*rbd_cb)(struct ceph_osd_request *req, | 837 | void (*rbd_cb)(struct ceph_osd_request *req, |
753 | struct ceph_msg *msg), | 838 | struct ceph_msg *msg), |
754 | struct ceph_osd_request **linger_req, | 839 | struct ceph_osd_request **linger_req, |
@@ -763,12 +848,20 @@ static int rbd_do_request(struct request *rq, | |||
763 | struct ceph_osd_request_head *reqhead; | 848 | struct ceph_osd_request_head *reqhead; |
764 | struct rbd_image_header *header = &dev->header; | 849 | struct rbd_image_header *header = &dev->header; |
765 | 850 | ||
766 | ret = -ENOMEM; | ||
767 | req_data = kzalloc(sizeof(*req_data), GFP_NOIO); | 851 | req_data = kzalloc(sizeof(*req_data), GFP_NOIO); |
768 | if (!req_data) | 852 | if (!req_data) { |
769 | goto done; | 853 | if (coll) |
854 | rbd_coll_end_req_index(rq, coll, coll_index, | ||
855 | -ENOMEM, len); | ||
856 | return -ENOMEM; | ||
857 | } | ||
770 | 858 | ||
771 | dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); | 859 | if (coll) { |
860 | req_data->coll = coll; | ||
861 | req_data->coll_index = coll_index; | ||
862 | } | ||
863 | |||
864 | dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); | ||
772 | 865 | ||
773 | down_read(&header->snap_rwsem); | 866 | down_read(&header->snap_rwsem); |
774 | 867 | ||
@@ -828,7 +921,8 @@ static int rbd_do_request(struct request *rq, | |||
828 | ret = ceph_osdc_wait_request(&dev->client->osdc, req); | 921 | ret = ceph_osdc_wait_request(&dev->client->osdc, req); |
829 | if (ver) | 922 | if (ver) |
830 | *ver = le64_to_cpu(req->r_reassert_version.version); | 923 | *ver = le64_to_cpu(req->r_reassert_version.version); |
831 | dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version)); | 924 | dout("reassert_ver=%lld\n", |
925 | le64_to_cpu(req->r_reassert_version.version)); | ||
832 | ceph_osdc_put_request(req); | 926 | ceph_osdc_put_request(req); |
833 | } | 927 | } |
834 | return ret; | 928 | return ret; |
@@ -837,10 +931,8 @@ done_err: | |||
837 | bio_chain_put(req_data->bio); | 931 | bio_chain_put(req_data->bio); |
838 | ceph_osdc_put_request(req); | 932 | ceph_osdc_put_request(req); |
839 | done_pages: | 933 | done_pages: |
934 | rbd_coll_end_req(req_data, ret, len); | ||
840 | kfree(req_data); | 935 | kfree(req_data); |
841 | done: | ||
842 | if (rq) | ||
843 | blk_end_request(rq, ret, len); | ||
844 | return ret; | 936 | return ret; |
845 | } | 937 | } |
846 | 938 | ||
@@ -874,7 +966,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) | |||
874 | bytes = req_data->len; | 966 | bytes = req_data->len; |
875 | } | 967 | } |
876 | 968 | ||
877 | blk_end_request(req_data->rq, rc, bytes); | 969 | rbd_coll_end_req(req_data, rc, bytes); |
878 | 970 | ||
879 | if (req_data->bio) | 971 | if (req_data->bio) |
880 | bio_chain_put(req_data->bio); | 972 | bio_chain_put(req_data->bio); |
@@ -934,6 +1026,7 @@ static int rbd_req_sync_op(struct rbd_device *dev, | |||
934 | flags, | 1026 | flags, |
935 | ops, | 1027 | ops, |
936 | 2, | 1028 | 2, |
1029 | NULL, 0, | ||
937 | NULL, | 1030 | NULL, |
938 | linger_req, ver); | 1031 | linger_req, ver); |
939 | if (ret < 0) | 1032 | if (ret < 0) |
@@ -959,7 +1052,9 @@ static int rbd_do_op(struct request *rq, | |||
959 | u64 snapid, | 1052 | u64 snapid, |
960 | int opcode, int flags, int num_reply, | 1053 | int opcode, int flags, int num_reply, |
961 | u64 ofs, u64 len, | 1054 | u64 ofs, u64 len, |
962 | struct bio *bio) | 1055 | struct bio *bio, |
1056 | struct rbd_req_coll *coll, | ||
1057 | int coll_index) | ||
963 | { | 1058 | { |
964 | char *seg_name; | 1059 | char *seg_name; |
965 | u64 seg_ofs; | 1060 | u64 seg_ofs; |
@@ -995,7 +1090,10 @@ static int rbd_do_op(struct request *rq, | |||
995 | flags, | 1090 | flags, |
996 | ops, | 1091 | ops, |
997 | num_reply, | 1092 | num_reply, |
1093 | coll, coll_index, | ||
998 | rbd_req_cb, 0, NULL); | 1094 | rbd_req_cb, 0, NULL); |
1095 | |||
1096 | rbd_destroy_ops(ops); | ||
999 | done: | 1097 | done: |
1000 | kfree(seg_name); | 1098 | kfree(seg_name); |
1001 | return ret; | 1099 | return ret; |
@@ -1008,13 +1106,15 @@ static int rbd_req_write(struct request *rq, | |||
1008 | struct rbd_device *rbd_dev, | 1106 | struct rbd_device *rbd_dev, |
1009 | struct ceph_snap_context *snapc, | 1107 | struct ceph_snap_context *snapc, |
1010 | u64 ofs, u64 len, | 1108 | u64 ofs, u64 len, |
1011 | struct bio *bio) | 1109 | struct bio *bio, |
1110 | struct rbd_req_coll *coll, | ||
1111 | int coll_index) | ||
1012 | { | 1112 | { |
1013 | return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, | 1113 | return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, |
1014 | CEPH_OSD_OP_WRITE, | 1114 | CEPH_OSD_OP_WRITE, |
1015 | CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, | 1115 | CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, |
1016 | 2, | 1116 | 2, |
1017 | ofs, len, bio); | 1117 | ofs, len, bio, coll, coll_index); |
1018 | } | 1118 | } |
1019 | 1119 | ||
1020 | /* | 1120 | /* |
@@ -1024,14 +1124,16 @@ static int rbd_req_read(struct request *rq, | |||
1024 | struct rbd_device *rbd_dev, | 1124 | struct rbd_device *rbd_dev, |
1025 | u64 snapid, | 1125 | u64 snapid, |
1026 | u64 ofs, u64 len, | 1126 | u64 ofs, u64 len, |
1027 | struct bio *bio) | 1127 | struct bio *bio, |
1128 | struct rbd_req_coll *coll, | ||
1129 | int coll_index) | ||
1028 | { | 1130 | { |
1029 | return rbd_do_op(rq, rbd_dev, NULL, | 1131 | return rbd_do_op(rq, rbd_dev, NULL, |
1030 | (snapid ? snapid : CEPH_NOSNAP), | 1132 | (snapid ? snapid : CEPH_NOSNAP), |
1031 | CEPH_OSD_OP_READ, | 1133 | CEPH_OSD_OP_READ, |
1032 | CEPH_OSD_FLAG_READ, | 1134 | CEPH_OSD_FLAG_READ, |
1033 | 2, | 1135 | 2, |
1034 | ofs, len, bio); | 1136 | ofs, len, bio, coll, coll_index); |
1035 | } | 1137 | } |
1036 | 1138 | ||
1037 | /* | 1139 | /* |
@@ -1063,7 +1165,9 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, | |||
1063 | { | 1165 | { |
1064 | struct ceph_osd_req_op *ops; | 1166 | struct ceph_osd_req_op *ops; |
1065 | struct page **pages = NULL; | 1167 | struct page **pages = NULL; |
1066 | int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); | 1168 | int ret; |
1169 | |||
1170 | ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); | ||
1067 | if (ret < 0) | 1171 | if (ret < 0) |
1068 | return ret; | 1172 | return ret; |
1069 | 1173 | ||
@@ -1077,6 +1181,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, | |||
1077 | CEPH_OSD_FLAG_READ, | 1181 | CEPH_OSD_FLAG_READ, |
1078 | ops, | 1182 | ops, |
1079 | 1, | 1183 | 1, |
1184 | NULL, 0, | ||
1080 | rbd_simple_req_cb, 0, NULL); | 1185 | rbd_simple_req_cb, 0, NULL); |
1081 | 1186 | ||
1082 | rbd_destroy_ops(ops); | 1187 | rbd_destroy_ops(ops); |
@@ -1274,6 +1379,20 @@ static int rbd_req_sync_exec(struct rbd_device *dev, | |||
1274 | return ret; | 1379 | return ret; |
1275 | } | 1380 | } |
1276 | 1381 | ||
1382 | static struct rbd_req_coll *rbd_alloc_coll(int num_reqs) | ||
1383 | { | ||
1384 | struct rbd_req_coll *coll = | ||
1385 | kzalloc(sizeof(struct rbd_req_coll) + | ||
1386 | sizeof(struct rbd_req_status) * num_reqs, | ||
1387 | GFP_ATOMIC); | ||
1388 | |||
1389 | if (!coll) | ||
1390 | return NULL; | ||
1391 | coll->total = num_reqs; | ||
1392 | kref_init(&coll->kref); | ||
1393 | return coll; | ||
1394 | } | ||
1395 | |||
1277 | /* | 1396 | /* |
1278 | * block device queue callback | 1397 | * block device queue callback |
1279 | */ | 1398 | */ |
@@ -1291,6 +1410,8 @@ static void rbd_rq_fn(struct request_queue *q) | |||
1291 | bool do_write; | 1410 | bool do_write; |
1292 | int size, op_size = 0; | 1411 | int size, op_size = 0; |
1293 | u64 ofs; | 1412 | u64 ofs; |
1413 | int num_segs, cur_seg = 0; | ||
1414 | struct rbd_req_coll *coll; | ||
1294 | 1415 | ||
1295 | /* peek at request from block layer */ | 1416 | /* peek at request from block layer */ |
1296 | if (!rq) | 1417 | if (!rq) |
@@ -1321,6 +1442,14 @@ static void rbd_rq_fn(struct request_queue *q) | |||
1321 | do_write ? "write" : "read", | 1442 | do_write ? "write" : "read", |
1322 | size, blk_rq_pos(rq) * 512ULL); | 1443 | size, blk_rq_pos(rq) * 512ULL); |
1323 | 1444 | ||
1445 | num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); | ||
1446 | coll = rbd_alloc_coll(num_segs); | ||
1447 | if (!coll) { | ||
1448 | spin_lock_irq(q->queue_lock); | ||
1449 | __blk_end_request_all(rq, -ENOMEM); | ||
1450 | goto next; | ||
1451 | } | ||
1452 | |||
1324 | do { | 1453 | do { |
1325 | /* a bio clone to be passed down to OSD req */ | 1454 | /* a bio clone to be passed down to OSD req */ |
1326 | dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); | 1455 | dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); |
@@ -1328,35 +1457,41 @@ static void rbd_rq_fn(struct request_queue *q) | |||
1328 | rbd_dev->header.block_name, | 1457 | rbd_dev->header.block_name, |
1329 | ofs, size, | 1458 | ofs, size, |
1330 | NULL, NULL); | 1459 | NULL, NULL); |
1460 | kref_get(&coll->kref); | ||
1331 | bio = bio_chain_clone(&rq_bio, &next_bio, &bp, | 1461 | bio = bio_chain_clone(&rq_bio, &next_bio, &bp, |
1332 | op_size, GFP_ATOMIC); | 1462 | op_size, GFP_ATOMIC); |
1333 | if (!bio) { | 1463 | if (!bio) { |
1334 | spin_lock_irq(q->queue_lock); | 1464 | rbd_coll_end_req_index(rq, coll, cur_seg, |
1335 | __blk_end_request_all(rq, -ENOMEM); | 1465 | -ENOMEM, op_size); |
1336 | goto next; | 1466 | goto next_seg; |
1337 | } | 1467 | } |
1338 | 1468 | ||
1469 | |||
1339 | /* init OSD command: write or read */ | 1470 | /* init OSD command: write or read */ |
1340 | if (do_write) | 1471 | if (do_write) |
1341 | rbd_req_write(rq, rbd_dev, | 1472 | rbd_req_write(rq, rbd_dev, |
1342 | rbd_dev->header.snapc, | 1473 | rbd_dev->header.snapc, |
1343 | ofs, | 1474 | ofs, |
1344 | op_size, bio); | 1475 | op_size, bio, |
1476 | coll, cur_seg); | ||
1345 | else | 1477 | else |
1346 | rbd_req_read(rq, rbd_dev, | 1478 | rbd_req_read(rq, rbd_dev, |
1347 | cur_snap_id(rbd_dev), | 1479 | cur_snap_id(rbd_dev), |
1348 | ofs, | 1480 | ofs, |
1349 | op_size, bio); | 1481 | op_size, bio, |
1482 | coll, cur_seg); | ||
1350 | 1483 | ||
1484 | next_seg: | ||
1351 | size -= op_size; | 1485 | size -= op_size; |
1352 | ofs += op_size; | 1486 | ofs += op_size; |
1353 | 1487 | ||
1488 | cur_seg++; | ||
1354 | rq_bio = next_bio; | 1489 | rq_bio = next_bio; |
1355 | } while (size > 0); | 1490 | } while (size > 0); |
1491 | kref_put(&coll->kref, rbd_coll_release); | ||
1356 | 1492 | ||
1357 | if (bp) | 1493 | if (bp) |
1358 | bio_pair_release(bp); | 1494 | bio_pair_release(bp); |
1359 | |||
1360 | spin_lock_irq(q->queue_lock); | 1495 | spin_lock_irq(q->queue_lock); |
1361 | next: | 1496 | next: |
1362 | rq = blk_fetch_request(q); | 1497 | rq = blk_fetch_request(q); |
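The rbd changes above split each block-layer request into one OSD operation per object segment, track the pieces in an rbd_req_coll (reference-counted via kref), and complete the request strictly in segment order as the per-segment statuses arrive. The segment count that sizes the collection comes from shift arithmetic on the object order; a self-contained sketch with a worked example (obj_order = 22 is just an illustrative value):

#include <linux/types.h>

/* Same arithmetic as rbd_get_num_segments() in the hunk above. */
static int num_segments(u8 obj_order, u64 ofs, u64 len)
{
	u64 start_seg = ofs >> obj_order;		/* first object touched */
	u64 end_seg   = (ofs + len - 1) >> obj_order;	/* last object touched */

	return end_seg - start_seg + 1;
}

/*
 * Worked example for obj_order = 22 (4 MiB objects): a 6 MiB request
 * starting 1 MiB into the image gives
 *	start_seg = (1 << 20) >> 22 = 0
 *	end_seg   = ((7 << 20) - 1) >> 22 = 1
 * so num_segments() = 2, rbd_alloc_coll(2) reserves two rbd_req_status
 * slots, and rbd_coll_end_req_index() later folds the two completions
 * back into the original request in order.
 */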
diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 24a482f2fbd6..fd5adcd55944 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c | |||
@@ -858,7 +858,6 @@ static int __devinit swim_floppy_init(struct swim_priv *swd) | |||
858 | swd->unit[drive].disk->first_minor = drive; | 858 | swd->unit[drive].disk->first_minor = drive; |
859 | sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); | 859 | sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); |
860 | swd->unit[drive].disk->fops = &floppy_fops; | 860 | swd->unit[drive].disk->fops = &floppy_fops; |
861 | swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
862 | swd->unit[drive].disk->private_data = &swd->unit[drive]; | 861 | swd->unit[drive].disk->private_data = &swd->unit[drive]; |
863 | swd->unit[drive].disk->queue = swd->queue; | 862 | swd->unit[drive].disk->queue = swd->queue; |
864 | set_capacity(swd->unit[drive].disk, 2880); | 863 | set_capacity(swd->unit[drive].disk, 2880); |
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 4c10f56facbf..773bfa792777 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c | |||
@@ -1163,7 +1163,6 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device | |||
1163 | disk->major = FLOPPY_MAJOR; | 1163 | disk->major = FLOPPY_MAJOR; |
1164 | disk->first_minor = i; | 1164 | disk->first_minor = i; |
1165 | disk->fops = &floppy_fops; | 1165 | disk->fops = &floppy_fops; |
1166 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
1167 | disk->private_data = &floppy_states[i]; | 1166 | disk->private_data = &floppy_states[i]; |
1168 | disk->queue = swim3_queue; | 1167 | disk->queue = swim3_queue; |
1169 | disk->flags |= GENHD_FL_REMOVABLE; | 1168 | disk->flags |= GENHD_FL_REMOVABLE; |
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 68b9430c7cfe..0e376d46bdd1 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -2334,7 +2334,6 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) | |||
2334 | disk->major = UB_MAJOR; | 2334 | disk->major = UB_MAJOR; |
2335 | disk->first_minor = lun->id * UB_PARTS_PER_LUN; | 2335 | disk->first_minor = lun->id * UB_PARTS_PER_LUN; |
2336 | disk->fops = &ub_bd_fops; | 2336 | disk->fops = &ub_bd_fops; |
2337 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
2338 | disk->private_data = lun; | 2337 | disk->private_data = lun; |
2339 | disk->driverfs_dev = &sc->intf->dev; | 2338 | disk->driverfs_dev = &sc->intf->dev; |
2340 | 2339 | ||
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 645ff765cd12..6c7fd7db6dff 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
@@ -1005,7 +1005,6 @@ static int __devinit ace_setup(struct ace_device *ace) | |||
1005 | ace->gd->major = ace_major; | 1005 | ace->gd->major = ace_major; |
1006 | ace->gd->first_minor = ace->id * ACE_NUM_MINORS; | 1006 | ace->gd->first_minor = ace->id * ACE_NUM_MINORS; |
1007 | ace->gd->fops = &ace_fops; | 1007 | ace->gd->fops = &ace_fops; |
1008 | ace->gd->events = DISK_EVENT_MEDIA_CHANGE; | ||
1009 | ace->gd->queue = ace->queue; | 1008 | ace->gd->queue = ace->queue; |
1010 | ace->gd->private_data = ace; | 1009 | ace->gd->private_data = ace; |
1011 | snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); | 1010 | snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 514dd8efaf73..75fb965b8f72 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -986,6 +986,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t | |||
986 | 986 | ||
987 | cdinfo(CD_OPEN, "entering cdrom_open\n"); | 987 | cdinfo(CD_OPEN, "entering cdrom_open\n"); |
988 | 988 | ||
989 | /* open is event synchronization point, check events first */ | ||
990 | check_disk_change(bdev); | ||
991 | |||
989 | /* if this was a O_NONBLOCK open and we should honor the flags, | 992 | /* if this was a O_NONBLOCK open and we should honor the flags, |
990 | * do a quick open without drive/disc integrity checks. */ | 993 | * do a quick open without drive/disc integrity checks. */ |
991 | cdi->use_count++; | 994 | cdi->use_count++; |
@@ -1012,9 +1015,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t | |||
1012 | 1015 | ||
1013 | cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", | 1016 | cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", |
1014 | cdi->name, cdi->use_count); | 1017 | cdi->name, cdi->use_count); |
1015 | /* Do this on open. Don't wait for mount, because they might | ||
1016 | not be mounting, but opening with O_NONBLOCK */ | ||
1017 | check_disk_change(bdev); | ||
1018 | return 0; | 1018 | return 0; |
1019 | err_release: | 1019 | err_release: |
1020 | if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { | 1020 | if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { |
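The cdrom_open() hunks move check_disk_change() from the tail of the function to its very first action, so the open path acts as an event-synchronization point even for O_NONBLOCK opens that return through the quick path and never reached the old call site. A hedged sketch of the resulting flow (the two open helpers are hypothetical stand-ins for the existing open paths):

#include <linux/blkdev.h>
#include <linux/cdrom.h>
#include <linux/fs.h>

static int example_open_quick(struct cdrom_device_info *cdi)
{
	return 0;	/* placeholder: open without integrity checks */
}

static int example_open_full(struct cdrom_device_info *cdi)
{
	return 0;	/* placeholder: full drive/disc checks */
}

static int example_cdrom_open(struct cdrom_device_info *cdi,
			      struct block_device *bdev, fmode_t mode)
{
	/* open is the event synchronization point: check events first */
	check_disk_change(bdev);

	if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS))
		return example_open_quick(cdi);

	return example_open_full(cdi);
}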
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index b2b034fea34e..3ceaf006e7f0 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c | |||
@@ -803,7 +803,6 @@ static int __devinit probe_gdrom(struct platform_device *devptr) | |||
803 | goto probe_fail_cdrom_register; | 803 | goto probe_fail_cdrom_register; |
804 | } | 804 | } |
805 | gd.disk->fops = &gdrom_bdops; | 805 | gd.disk->fops = &gdrom_bdops; |
806 | gd.disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
807 | /* latch on to the interrupt */ | 806 | /* latch on to the interrupt */ |
808 | err = gdrom_set_interrupt_handlers(); | 807 | err = gdrom_set_interrupt_handlers(); |
809 | if (err) | 808 | if (err) |
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 4e874c5fa605..e427fbe45999 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c | |||
@@ -626,7 +626,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
626 | gendisk->queue = q; | 626 | gendisk->queue = q; |
627 | gendisk->fops = &viocd_fops; | 627 | gendisk->fops = &viocd_fops; |
628 | gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; | 628 | gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; |
629 | gendisk->events = DISK_EVENT_MEDIA_CHANGE; | ||
630 | set_capacity(gendisk, 0); | 629 | set_capacity(gendisk, 0); |
631 | gendisk->private_data = d; | 630 | gendisk->private_data = d; |
632 | d->viocd_disk = gendisk; | 631 | d->viocd_disk = gendisk; |
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 43ac61978d8b..ac6739e085e3 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c | |||
@@ -619,15 +619,18 @@ static void __devinit n2rng_driver_version(void) | |||
619 | pr_info("%s", version); | 619 | pr_info("%s", version); |
620 | } | 620 | } |
621 | 621 | ||
622 | static const struct of_device_id n2rng_match[]; | ||
622 | static int __devinit n2rng_probe(struct platform_device *op) | 623 | static int __devinit n2rng_probe(struct platform_device *op) |
623 | { | 624 | { |
625 | const struct of_device_id *match; | ||
624 | int victoria_falls; | 626 | int victoria_falls; |
625 | int err = -ENOMEM; | 627 | int err = -ENOMEM; |
626 | struct n2rng *np; | 628 | struct n2rng *np; |
627 | 629 | ||
628 | if (!op->dev.of_match) | 630 | match = of_match_device(n2rng_match, &op->dev); |
631 | if (!match) | ||
629 | return -EINVAL; | 632 | return -EINVAL; |
630 | victoria_falls = (op->dev.of_match->data != NULL); | 633 | victoria_falls = (match->data != NULL); |
631 | 634 | ||
632 | n2rng_driver_version(); | 635 | n2rng_driver_version(); |
633 | np = kzalloc(sizeof(*np), GFP_KERNEL); | 636 | np = kzalloc(sizeof(*np), GFP_KERNEL); |
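The n2rng hunk replaces the removed op->dev.of_match pointer with an explicit of_match_device() lookup; the match table is forward-declared so the probe routine can reference it before its initializer appears. A hedged sketch of the pattern (driver names, compatible strings and the setup helper are hypothetical):

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id example_match[];	/* defined below */

static int example_setup(struct platform_device *op, const void *data)
{
	return data ? 0 : -ENODEV;	/* placeholder for variant setup */
}

static int example_probe(struct platform_device *op)
{
	const struct of_device_id *match;

	match = of_match_device(example_match, &op->dev);
	if (!match)
		return -EINVAL;

	/* per-variant driver data hangs off the matched table entry */
	return example_setup(op, match->data);
}

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example",    .data = (void *)0 },
	{ .compatible = "vendor,example-v2", .data = (void *)1 },
	{ /* sentinel */ },
};

The same conversion is repeated below for ipmi_si and xilinx_hwicap, with match->data carrying an enum value or a configuration structure instead of a flag.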
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index cc6c9b2546a3..64c6b8530615 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -2554,9 +2554,11 @@ static struct pci_driver ipmi_pci_driver = { | |||
2554 | }; | 2554 | }; |
2555 | #endif /* CONFIG_PCI */ | 2555 | #endif /* CONFIG_PCI */ |
2556 | 2556 | ||
2557 | static struct of_device_id ipmi_match[]; | ||
2557 | static int __devinit ipmi_probe(struct platform_device *dev) | 2558 | static int __devinit ipmi_probe(struct platform_device *dev) |
2558 | { | 2559 | { |
2559 | #ifdef CONFIG_OF | 2560 | #ifdef CONFIG_OF |
2561 | const struct of_device_id *match; | ||
2560 | struct smi_info *info; | 2562 | struct smi_info *info; |
2561 | struct resource resource; | 2563 | struct resource resource; |
2562 | const __be32 *regsize, *regspacing, *regshift; | 2564 | const __be32 *regsize, *regspacing, *regshift; |
@@ -2566,7 +2568,8 @@ static int __devinit ipmi_probe(struct platform_device *dev) | |||
2566 | 2568 | ||
2567 | dev_info(&dev->dev, "probing via device tree\n"); | 2569 | dev_info(&dev->dev, "probing via device tree\n"); |
2568 | 2570 | ||
2569 | if (!dev->dev.of_match) | 2571 | match = of_match_device(ipmi_match, &dev->dev); |
2572 | if (!match) | ||
2570 | return -EINVAL; | 2573 | return -EINVAL; |
2571 | 2574 | ||
2572 | ret = of_address_to_resource(np, 0, &resource); | 2575 | ret = of_address_to_resource(np, 0, &resource); |
@@ -2601,7 +2604,7 @@ static int __devinit ipmi_probe(struct platform_device *dev) | |||
2601 | return -ENOMEM; | 2604 | return -ENOMEM; |
2602 | } | 2605 | } |
2603 | 2606 | ||
2604 | info->si_type = (enum si_type) dev->dev.of_match->data; | 2607 | info->si_type = (enum si_type) match->data; |
2605 | info->addr_source = SI_DEVICETREE; | 2608 | info->addr_source = SI_DEVICETREE; |
2606 | info->irq_setup = std_irq_setup; | 2609 | info->irq_setup = std_irq_setup; |
2607 | 2610 | ||
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index d6412c16385f..39ccdeada791 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c | |||
@@ -715,13 +715,13 @@ static int __devexit hwicap_remove(struct device *dev) | |||
715 | } | 715 | } |
716 | 716 | ||
717 | #ifdef CONFIG_OF | 717 | #ifdef CONFIG_OF |
718 | static int __devinit hwicap_of_probe(struct platform_device *op) | 718 | static int __devinit hwicap_of_probe(struct platform_device *op, |
719 | const struct hwicap_driver_config *config) | ||
719 | { | 720 | { |
720 | struct resource res; | 721 | struct resource res; |
721 | const unsigned int *id; | 722 | const unsigned int *id; |
722 | const char *family; | 723 | const char *family; |
723 | int rc; | 724 | int rc; |
724 | const struct hwicap_driver_config *config = op->dev.of_match->data; | ||
725 | const struct config_registers *regs; | 725 | const struct config_registers *regs; |
726 | 726 | ||
727 | 727 | ||
@@ -751,20 +751,24 @@ static int __devinit hwicap_of_probe(struct platform_device *op) | |||
751 | regs); | 751 | regs); |
752 | } | 752 | } |
753 | #else | 753 | #else |
754 | static inline int hwicap_of_probe(struct platform_device *op) | 754 | static inline int hwicap_of_probe(struct platform_device *op, |
755 | const struct hwicap_driver_config *config) | ||
755 | { | 756 | { |
756 | return -EINVAL; | 757 | return -EINVAL; |
757 | } | 758 | } |
758 | #endif /* CONFIG_OF */ | 759 | #endif /* CONFIG_OF */ |
759 | 760 | ||
761 | static const struct of_device_id __devinitconst hwicap_of_match[]; | ||
760 | static int __devinit hwicap_drv_probe(struct platform_device *pdev) | 762 | static int __devinit hwicap_drv_probe(struct platform_device *pdev) |
761 | { | 763 | { |
764 | const struct of_device_id *match; | ||
762 | struct resource *res; | 765 | struct resource *res; |
763 | const struct config_registers *regs; | 766 | const struct config_registers *regs; |
764 | const char *family; | 767 | const char *family; |
765 | 768 | ||
766 | if (pdev->dev.of_match) | 769 | match = of_match_device(hwicap_of_match, &pdev->dev); |
767 | return hwicap_of_probe(pdev); | 770 | if (match) |
771 | return hwicap_of_probe(pdev, match->data); | ||
768 | 772 | ||
769 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 773 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
770 | if (!res) | 774 | if (!res) |
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index ca8ee8093d6c..9fb84853d8e3 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig | |||
@@ -1,3 +1,5 @@ | |||
1 | menu "CPU Frequency scaling" | ||
2 | |||
1 | config CPU_FREQ | 3 | config CPU_FREQ |
2 | bool "CPU Frequency scaling" | 4 | bool "CPU Frequency scaling" |
3 | help | 5 | help |
@@ -18,19 +20,6 @@ if CPU_FREQ | |||
18 | config CPU_FREQ_TABLE | 20 | config CPU_FREQ_TABLE |
19 | tristate | 21 | tristate |
20 | 22 | ||
21 | config CPU_FREQ_DEBUG | ||
22 | bool "Enable CPUfreq debugging" | ||
23 | help | ||
24 | Say Y here to enable CPUfreq subsystem (including drivers) | ||
25 | debugging. You will need to activate it via the kernel | ||
26 | command line by passing | ||
27 | cpufreq.debug=<value> | ||
28 | |||
29 | To get <value>, add | ||
30 | 1 to activate CPUfreq core debugging, | ||
31 | 2 to activate CPUfreq drivers debugging, and | ||
32 | 4 to activate CPUfreq governor debugging | ||
33 | |||
34 | config CPU_FREQ_STAT | 23 | config CPU_FREQ_STAT |
35 | tristate "CPU frequency translation statistics" | 24 | tristate "CPU frequency translation statistics" |
36 | select CPU_FREQ_TABLE | 25 | select CPU_FREQ_TABLE |
@@ -190,4 +179,10 @@ config CPU_FREQ_GOV_CONSERVATIVE | |||
190 | 179 | ||
191 | If in doubt, say N. | 180 | If in doubt, say N. |
192 | 181 | ||
193 | endif # CPU_FREQ | 182 | menu "x86 CPU frequency scaling drivers" |
183 | depends on X86 | ||
184 | source "drivers/cpufreq/Kconfig.x86" | ||
185 | endmenu | ||
186 | |||
187 | endif | ||
188 | endmenu | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/drivers/cpufreq/Kconfig.x86 index 870e6cc6ad28..343f84760487 100644 --- a/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig.x86 | |||
@@ -1,15 +1,7 @@ | |||
1 | # | 1 | # |
2 | # CPU Frequency scaling | 2 | # x86 CPU Frequency scaling drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | menu "CPU Frequency scaling" | ||
6 | |||
7 | source "drivers/cpufreq/Kconfig" | ||
8 | |||
9 | if CPU_FREQ | ||
10 | |||
11 | comment "CPUFreq processor drivers" | ||
12 | |||
13 | config X86_PCC_CPUFREQ | 5 | config X86_PCC_CPUFREQ |
14 | tristate "Processor Clocking Control interface driver" | 6 | tristate "Processor Clocking Control interface driver" |
15 | depends on ACPI && ACPI_PROCESSOR | 7 | depends on ACPI && ACPI_PROCESSOR |
@@ -261,6 +253,3 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK | |||
261 | option lets the probing code bypass some of those checks if the | 253 | option lets the probing code bypass some of those checks if the |
262 | parameter "relaxed_check=1" is passed to the module. | 254 | parameter "relaxed_check=1" is passed to the module. |
263 | 255 | ||
264 | endif # CPU_FREQ | ||
265 | |||
266 | endmenu | ||
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 71fc3b4173f1..c7f1a6f16b6e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile | |||
@@ -13,3 +13,29 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o | |||
13 | # CPUfreq cross-arch helpers | 13 | # CPUfreq cross-arch helpers |
14 | obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o | 14 | obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o |
15 | 15 | ||
16 | ##################################################################################d | ||
17 | # x86 drivers. | ||
18 | # Link order matters. K8 is preferred to ACPI because of firmware bugs in early | ||
19 | # K8 systems. ACPI is preferred to all other hardware-specific drivers. | ||
20 | # speedstep-* is preferred over p4-clockmod. | ||
21 | |||
22 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o | ||
23 | obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o | ||
24 | obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o | ||
25 | obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o | ||
26 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o | ||
27 | obj-$(CONFIG_X86_LONGHAUL) += longhaul.o | ||
28 | obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o | ||
29 | obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o | ||
30 | obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o | ||
31 | obj-$(CONFIG_X86_LONGRUN) += longrun.o | ||
32 | obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o | ||
33 | obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o | ||
34 | obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o | ||
35 | obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o | ||
36 | obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o | ||
37 | obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o | ||
38 | obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o | ||
39 | |||
40 | ##################################################################################d | ||
41 | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index a2baafb2fe6d..4e04e1274388 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c | |||
@@ -47,9 +47,6 @@ | |||
47 | #include <asm/cpufeature.h> | 47 | #include <asm/cpufeature.h> |
48 | #include "mperf.h" | 48 | #include "mperf.h" |
49 | 49 | ||
50 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
51 | "acpi-cpufreq", msg) | ||
52 | |||
53 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); | 50 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); |
54 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); | 51 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); |
55 | MODULE_LICENSE("GPL"); | 52 | MODULE_LICENSE("GPL"); |
@@ -233,7 +230,7 @@ static u32 get_cur_val(const struct cpumask *mask) | |||
233 | cmd.mask = mask; | 230 | cmd.mask = mask; |
234 | drv_read(&cmd); | 231 | drv_read(&cmd); |
235 | 232 | ||
236 | dprintk("get_cur_val = %u\n", cmd.val); | 233 | pr_debug("get_cur_val = %u\n", cmd.val); |
237 | 234 | ||
238 | return cmd.val; | 235 | return cmd.val; |
239 | } | 236 | } |
@@ -244,7 +241,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | |||
244 | unsigned int freq; | 241 | unsigned int freq; |
245 | unsigned int cached_freq; | 242 | unsigned int cached_freq; |
246 | 243 | ||
247 | dprintk("get_cur_freq_on_cpu (%d)\n", cpu); | 244 | pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); |
248 | 245 | ||
249 | if (unlikely(data == NULL || | 246 | if (unlikely(data == NULL || |
250 | data->acpi_data == NULL || data->freq_table == NULL)) { | 247 | data->acpi_data == NULL || data->freq_table == NULL)) { |
@@ -261,7 +258,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | |||
261 | data->resume = 1; | 258 | data->resume = 1; |
262 | } | 259 | } |
263 | 260 | ||
264 | dprintk("cur freq = %u\n", freq); | 261 | pr_debug("cur freq = %u\n", freq); |
265 | 262 | ||
266 | return freq; | 263 | return freq; |
267 | } | 264 | } |
@@ -293,7 +290,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
293 | unsigned int i; | 290 | unsigned int i; |
294 | int result = 0; | 291 | int result = 0; |
295 | 292 | ||
296 | dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); | 293 | pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); |
297 | 294 | ||
298 | if (unlikely(data == NULL || | 295 | if (unlikely(data == NULL || |
299 | data->acpi_data == NULL || data->freq_table == NULL)) { | 296 | data->acpi_data == NULL || data->freq_table == NULL)) { |
@@ -313,11 +310,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
313 | next_perf_state = data->freq_table[next_state].index; | 310 | next_perf_state = data->freq_table[next_state].index; |
314 | if (perf->state == next_perf_state) { | 311 | if (perf->state == next_perf_state) { |
315 | if (unlikely(data->resume)) { | 312 | if (unlikely(data->resume)) { |
316 | dprintk("Called after resume, resetting to P%d\n", | 313 | pr_debug("Called after resume, resetting to P%d\n", |
317 | next_perf_state); | 314 | next_perf_state); |
318 | data->resume = 0; | 315 | data->resume = 0; |
319 | } else { | 316 | } else { |
320 | dprintk("Already at target state (P%d)\n", | 317 | pr_debug("Already at target state (P%d)\n", |
321 | next_perf_state); | 318 | next_perf_state); |
322 | goto out; | 319 | goto out; |
323 | } | 320 | } |
@@ -357,7 +354,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
357 | 354 | ||
358 | if (acpi_pstate_strict) { | 355 | if (acpi_pstate_strict) { |
359 | if (!check_freqs(cmd.mask, freqs.new, data)) { | 356 | if (!check_freqs(cmd.mask, freqs.new, data)) { |
360 | dprintk("acpi_cpufreq_target failed (%d)\n", | 357 | pr_debug("acpi_cpufreq_target failed (%d)\n", |
361 | policy->cpu); | 358 | policy->cpu); |
362 | result = -EAGAIN; | 359 | result = -EAGAIN; |
363 | goto out; | 360 | goto out; |
@@ -378,7 +375,7 @@ static int acpi_cpufreq_verify(struct cpufreq_policy *policy) | |||
378 | { | 375 | { |
379 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 376 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
380 | 377 | ||
381 | dprintk("acpi_cpufreq_verify\n"); | 378 | pr_debug("acpi_cpufreq_verify\n"); |
382 | 379 | ||
383 | return cpufreq_frequency_table_verify(policy, data->freq_table); | 380 | return cpufreq_frequency_table_verify(policy, data->freq_table); |
384 | } | 381 | } |
@@ -433,11 +430,11 @@ static void free_acpi_perf_data(void) | |||
433 | static int __init acpi_cpufreq_early_init(void) | 430 | static int __init acpi_cpufreq_early_init(void) |
434 | { | 431 | { |
435 | unsigned int i; | 432 | unsigned int i; |
436 | dprintk("acpi_cpufreq_early_init\n"); | 433 | pr_debug("acpi_cpufreq_early_init\n"); |
437 | 434 | ||
438 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); | 435 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); |
439 | if (!acpi_perf_data) { | 436 | if (!acpi_perf_data) { |
440 | dprintk("Memory allocation error for acpi_perf_data.\n"); | 437 | pr_debug("Memory allocation error for acpi_perf_data.\n"); |
441 | return -ENOMEM; | 438 | return -ENOMEM; |
442 | } | 439 | } |
443 | for_each_possible_cpu(i) { | 440 | for_each_possible_cpu(i) { |
@@ -519,7 +516,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
519 | static int blacklisted; | 516 | static int blacklisted; |
520 | #endif | 517 | #endif |
521 | 518 | ||
522 | dprintk("acpi_cpufreq_cpu_init\n"); | 519 | pr_debug("acpi_cpufreq_cpu_init\n"); |
523 | 520 | ||
524 | #ifdef CONFIG_SMP | 521 | #ifdef CONFIG_SMP |
525 | if (blacklisted) | 522 | if (blacklisted) |
@@ -566,7 +563,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
566 | 563 | ||
567 | /* capability check */ | 564 | /* capability check */ |
568 | if (perf->state_count <= 1) { | 565 | if (perf->state_count <= 1) { |
569 | dprintk("No P-States\n"); | 566 | pr_debug("No P-States\n"); |
570 | result = -ENODEV; | 567 | result = -ENODEV; |
571 | goto err_unreg; | 568 | goto err_unreg; |
572 | } | 569 | } |
@@ -578,11 +575,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
578 | 575 | ||
579 | switch (perf->control_register.space_id) { | 576 | switch (perf->control_register.space_id) { |
580 | case ACPI_ADR_SPACE_SYSTEM_IO: | 577 | case ACPI_ADR_SPACE_SYSTEM_IO: |
581 | dprintk("SYSTEM IO addr space\n"); | 578 | pr_debug("SYSTEM IO addr space\n"); |
582 | data->cpu_feature = SYSTEM_IO_CAPABLE; | 579 | data->cpu_feature = SYSTEM_IO_CAPABLE; |
583 | break; | 580 | break; |
584 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | 581 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
585 | dprintk("HARDWARE addr space\n"); | 582 | pr_debug("HARDWARE addr space\n"); |
586 | if (!check_est_cpu(cpu)) { | 583 | if (!check_est_cpu(cpu)) { |
587 | result = -ENODEV; | 584 | result = -ENODEV; |
588 | goto err_unreg; | 585 | goto err_unreg; |
@@ -590,7 +587,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
590 | data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; | 587 | data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; |
591 | break; | 588 | break; |
592 | default: | 589 | default: |
593 | dprintk("Unknown addr space %d\n", | 590 | pr_debug("Unknown addr space %d\n", |
594 | (u32) (perf->control_register.space_id)); | 591 | (u32) (perf->control_register.space_id)); |
595 | result = -ENODEV; | 592 | result = -ENODEV; |
596 | goto err_unreg; | 593 | goto err_unreg; |
@@ -661,9 +658,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
661 | if (cpu_has(c, X86_FEATURE_APERFMPERF)) | 658 | if (cpu_has(c, X86_FEATURE_APERFMPERF)) |
662 | acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; | 659 | acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; |
663 | 660 | ||
664 | dprintk("CPU%u - ACPI performance management activated.\n", cpu); | 661 | pr_debug("CPU%u - ACPI performance management activated.\n", cpu); |
665 | for (i = 0; i < perf->state_count; i++) | 662 | for (i = 0; i < perf->state_count; i++) |
666 | dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", | 663 | pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", |
667 | (i == perf->state ? '*' : ' '), i, | 664 | (i == perf->state ? '*' : ' '), i, |
668 | (u32) perf->states[i].core_frequency, | 665 | (u32) perf->states[i].core_frequency, |
669 | (u32) perf->states[i].power, | 666 | (u32) perf->states[i].power, |
@@ -694,7 +691,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
694 | { | 691 | { |
695 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 692 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
696 | 693 | ||
697 | dprintk("acpi_cpufreq_cpu_exit\n"); | 694 | pr_debug("acpi_cpufreq_cpu_exit\n"); |
698 | 695 | ||
699 | if (data) { | 696 | if (data) { |
700 | cpufreq_frequency_table_put_attr(policy->cpu); | 697 | cpufreq_frequency_table_put_attr(policy->cpu); |
@@ -712,7 +709,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy) | |||
712 | { | 709 | { |
713 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 710 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
714 | 711 | ||
715 | dprintk("acpi_cpufreq_resume\n"); | 712 | pr_debug("acpi_cpufreq_resume\n"); |
716 | 713 | ||
717 | data->resume = 1; | 714 | data->resume = 1; |
718 | 715 | ||
@@ -743,7 +740,7 @@ static int __init acpi_cpufreq_init(void) | |||
743 | if (acpi_disabled) | 740 | if (acpi_disabled) |
744 | return 0; | 741 | return 0; |
745 | 742 | ||
746 | dprintk("acpi_cpufreq_init\n"); | 743 | pr_debug("acpi_cpufreq_init\n"); |
747 | 744 | ||
748 | ret = acpi_cpufreq_early_init(); | 745 | ret = acpi_cpufreq_early_init(); |
749 | if (ret) | 746 | if (ret) |
@@ -758,7 +755,7 @@ static int __init acpi_cpufreq_init(void) | |||
758 | 755 | ||
759 | static void __exit acpi_cpufreq_exit(void) | 756 | static void __exit acpi_cpufreq_exit(void) |
760 | { | 757 | { |
761 | dprintk("acpi_cpufreq_exit\n"); | 758 | pr_debug("acpi_cpufreq_exit\n"); |
762 | 759 | ||
763 | cpufreq_unregister_driver(&acpi_cpufreq_driver); | 760 | cpufreq_unregister_driver(&acpi_cpufreq_driver); |
764 | 761 | ||
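The acpi-cpufreq hunks drop the driver's private dprintk() macro (which depended on the CONFIG_CPU_FREQ_DEBUG machinery removed from Kconfig and cpufreq.c elsewhere in this patch) and use plain pr_debug(), which is compiled away unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG enables it at run time. A minimal sketch of the usual pattern; the pr_fmt prefix is an assumption for illustration, not something these hunks show:

/* must precede any include that pulls in <linux/printk.h> */
#define pr_fmt(fmt) "acpi-cpufreq: " fmt

#include <linux/kernel.h>
#include <linux/printk.h>

static void example(unsigned int cpu, unsigned int freq)
{
	/* silent in normal builds; enable via DEBUG or dynamic debug */
	pr_debug("get_cur_freq_on_cpu (%d) = %u\n", cpu, freq);
}

With dynamic debug the same messages can be switched on at run time, e.g. echo 'file acpi-cpufreq.c +p' > /sys/kernel/debug/dynamic_debug/control, taking over from the cpufreq.debug=<value> boot parameter that the removed Kconfig help text described.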
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index 141abebc4516..7bac808804f3 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c | |||
@@ -57,8 +57,6 @@ MODULE_PARM_DESC(min_fsb, | |||
57 | "Minimum FSB to use, if not defined: current FSB - 50"); | 57 | "Minimum FSB to use, if not defined: current FSB - 50"); |
58 | 58 | ||
59 | #define PFX "cpufreq-nforce2: " | 59 | #define PFX "cpufreq-nforce2: " |
60 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
61 | "cpufreq-nforce2", msg) | ||
62 | 60 | ||
63 | /** | 61 | /** |
64 | * nforce2_calc_fsb - calculate FSB | 62 | * nforce2_calc_fsb - calculate FSB |
@@ -270,7 +268,7 @@ static int nforce2_target(struct cpufreq_policy *policy, | |||
270 | if (freqs.old == freqs.new) | 268 | if (freqs.old == freqs.new) |
271 | return 0; | 269 | return 0; |
272 | 270 | ||
273 | dprintk("Old CPU frequency %d kHz, new %d kHz\n", | 271 | pr_debug("Old CPU frequency %d kHz, new %d kHz\n", |
274 | freqs.old, freqs.new); | 272 | freqs.old, freqs.new); |
275 | 273 | ||
276 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 274 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
@@ -282,7 +280,7 @@ static int nforce2_target(struct cpufreq_policy *policy, | |||
282 | printk(KERN_ERR PFX "Changing FSB to %d failed\n", | 280 | printk(KERN_ERR PFX "Changing FSB to %d failed\n", |
283 | target_fsb); | 281 | target_fsb); |
284 | else | 282 | else |
285 | dprintk("Changed FSB successfully to %d\n", | 283 | pr_debug("Changed FSB successfully to %d\n", |
286 | target_fsb); | 284 | target_fsb); |
287 | 285 | ||
288 | /* Enable IRQs */ | 286 | /* Enable IRQs */ |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 2dafc5c38ae7..0a5bea9e3585 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -32,9 +32,6 @@ | |||
32 | 32 | ||
33 | #include <trace/events/power.h> | 33 | #include <trace/events/power.h> |
34 | 34 | ||
35 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ | ||
36 | "cpufreq-core", msg) | ||
37 | |||
38 | /** | 35 | /** |
39 | * The "cpufreq driver" - the arch- or hardware-dependent low | 36 | * The "cpufreq driver" - the arch- or hardware-dependent low |
40 | * level driver of CPUFreq support, and its spinlock. This lock | 37 | * level driver of CPUFreq support, and its spinlock. This lock |
@@ -181,93 +178,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_put); | |||
181 | 178 | ||
182 | 179 | ||
183 | /********************************************************************* | 180 | /********************************************************************* |
184 | * UNIFIED DEBUG HELPERS * | ||
185 | *********************************************************************/ | ||
186 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
187 | |||
188 | /* what part(s) of the CPUfreq subsystem are debugged? */ | ||
189 | static unsigned int debug; | ||
190 | |||
191 | /* is the debug output ratelimit'ed using printk_ratelimit? User can | ||
192 | * set or modify this value. | ||
193 | */ | ||
194 | static unsigned int debug_ratelimit = 1; | ||
195 | |||
196 | /* is the printk_ratelimit'ing enabled? It's enabled after a successful | ||
197 | * loading of a cpufreq driver, temporarily disabled when a new policy | ||
198 | * is set, and disabled upon cpufreq driver removal | ||
199 | */ | ||
200 | static unsigned int disable_ratelimit = 1; | ||
201 | static DEFINE_SPINLOCK(disable_ratelimit_lock); | ||
202 | |||
203 | static void cpufreq_debug_enable_ratelimit(void) | ||
204 | { | ||
205 | unsigned long flags; | ||
206 | |||
207 | spin_lock_irqsave(&disable_ratelimit_lock, flags); | ||
208 | if (disable_ratelimit) | ||
209 | disable_ratelimit--; | ||
210 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
211 | } | ||
212 | |||
213 | static void cpufreq_debug_disable_ratelimit(void) | ||
214 | { | ||
215 | unsigned long flags; | ||
216 | |||
217 | spin_lock_irqsave(&disable_ratelimit_lock, flags); | ||
218 | disable_ratelimit++; | ||
219 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
220 | } | ||
221 | |||
222 | void cpufreq_debug_printk(unsigned int type, const char *prefix, | ||
223 | const char *fmt, ...) | ||
224 | { | ||
225 | char s[256]; | ||
226 | va_list args; | ||
227 | unsigned int len; | ||
228 | unsigned long flags; | ||
229 | |||
230 | WARN_ON(!prefix); | ||
231 | if (type & debug) { | ||
232 | spin_lock_irqsave(&disable_ratelimit_lock, flags); | ||
233 | if (!disable_ratelimit && debug_ratelimit | ||
234 | && !printk_ratelimit()) { | ||
235 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
236 | return; | ||
237 | } | ||
238 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
239 | |||
240 | len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix); | ||
241 | |||
242 | va_start(args, fmt); | ||
243 | len += vsnprintf(&s[len], (256 - len), fmt, args); | ||
244 | va_end(args); | ||
245 | |||
246 | printk(s); | ||
247 | |||
248 | WARN_ON(len < 5); | ||
249 | } | ||
250 | } | ||
251 | EXPORT_SYMBOL(cpufreq_debug_printk); | ||
252 | |||
253 | |||
254 | module_param(debug, uint, 0644); | ||
255 | MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core," | ||
256 | " 2 to debug drivers, and 4 to debug governors."); | ||
257 | |||
258 | module_param(debug_ratelimit, uint, 0644); | ||
259 | MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:" | ||
260 | " set to 0 to disable ratelimiting."); | ||
261 | |||
262 | #else /* !CONFIG_CPU_FREQ_DEBUG */ | ||
263 | |||
264 | static inline void cpufreq_debug_enable_ratelimit(void) { return; } | ||
265 | static inline void cpufreq_debug_disable_ratelimit(void) { return; } | ||
266 | |||
267 | #endif /* CONFIG_CPU_FREQ_DEBUG */ | ||
268 | |||
269 | |||
270 | /********************************************************************* | ||
271 | * EXTERNALLY AFFECTING FREQUENCY CHANGES * | 181 | * EXTERNALLY AFFECTING FREQUENCY CHANGES * |
272 | *********************************************************************/ | 182 | *********************************************************************/ |
273 | 183 | ||
@@ -291,7 +201,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | |||
291 | if (!l_p_j_ref_freq) { | 201 | if (!l_p_j_ref_freq) { |
292 | l_p_j_ref = loops_per_jiffy; | 202 | l_p_j_ref = loops_per_jiffy; |
293 | l_p_j_ref_freq = ci->old; | 203 | l_p_j_ref_freq = ci->old; |
294 | dprintk("saving %lu as reference value for loops_per_jiffy; " | 204 | pr_debug("saving %lu as reference value for loops_per_jiffy; " |
295 | "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); | 205 | "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); |
296 | } | 206 | } |
297 | if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || | 207 | if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || |
@@ -299,7 +209,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | |||
299 | (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { | 209 | (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { |
300 | loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, | 210 | loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, |
301 | ci->new); | 211 | ci->new); |
302 | dprintk("scaling loops_per_jiffy to %lu " | 212 | pr_debug("scaling loops_per_jiffy to %lu " |
303 | "for frequency %u kHz\n", loops_per_jiffy, ci->new); | 213 | "for frequency %u kHz\n", loops_per_jiffy, ci->new); |
304 | } | 214 | } |
305 | } | 215 | } |
@@ -326,7 +236,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) | |||
326 | BUG_ON(irqs_disabled()); | 236 | BUG_ON(irqs_disabled()); |
327 | 237 | ||
328 | freqs->flags = cpufreq_driver->flags; | 238 | freqs->flags = cpufreq_driver->flags; |
329 | dprintk("notification %u of frequency transition to %u kHz\n", | 239 | pr_debug("notification %u of frequency transition to %u kHz\n", |
330 | state, freqs->new); | 240 | state, freqs->new); |
331 | 241 | ||
332 | policy = per_cpu(cpufreq_cpu_data, freqs->cpu); | 242 | policy = per_cpu(cpufreq_cpu_data, freqs->cpu); |
@@ -340,7 +250,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) | |||
340 | if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { | 250 | if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { |
341 | if ((policy) && (policy->cpu == freqs->cpu) && | 251 | if ((policy) && (policy->cpu == freqs->cpu) && |
342 | (policy->cur) && (policy->cur != freqs->old)) { | 252 | (policy->cur) && (policy->cur != freqs->old)) { |
343 | dprintk("Warning: CPU frequency is" | 253 | pr_debug("Warning: CPU frequency is" |
344 | " %u, cpufreq assumed %u kHz.\n", | 254 | " %u, cpufreq assumed %u kHz.\n", |
345 | freqs->old, policy->cur); | 255 | freqs->old, policy->cur); |
346 | freqs->old = policy->cur; | 256 | freqs->old = policy->cur; |
@@ -353,7 +263,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) | |||
353 | 263 | ||
354 | case CPUFREQ_POSTCHANGE: | 264 | case CPUFREQ_POSTCHANGE: |
355 | adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); | 265 | adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); |
356 | dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, | 266 | pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, |
357 | (unsigned long)freqs->cpu); | 267 | (unsigned long)freqs->cpu); |
358 | trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); | 268 | trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); |
359 | trace_cpu_frequency(freqs->new, freqs->cpu); | 269 | trace_cpu_frequency(freqs->new, freqs->cpu); |
@@ -411,21 +321,14 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, | |||
411 | t = __find_governor(str_governor); | 321 | t = __find_governor(str_governor); |
412 | 322 | ||
413 | if (t == NULL) { | 323 | if (t == NULL) { |
414 | char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", | 324 | int ret; |
415 | str_governor); | ||
416 | |||
417 | if (name) { | ||
418 | int ret; | ||
419 | 325 | ||
420 | mutex_unlock(&cpufreq_governor_mutex); | 326 | mutex_unlock(&cpufreq_governor_mutex); |
421 | ret = request_module("%s", name); | 327 | ret = request_module("cpufreq_%s", str_governor); |
422 | mutex_lock(&cpufreq_governor_mutex); | 328 | mutex_lock(&cpufreq_governor_mutex); |
423 | 329 | ||
424 | if (ret == 0) | 330 | if (ret == 0) |
425 | t = __find_governor(str_governor); | 331 | t = __find_governor(str_governor); |
426 | } | ||
427 | |||
428 | kfree(name); | ||
429 | } | 332 | } |
430 | 333 | ||
431 | if (t != NULL) { | 334 | if (t != NULL) { |
@@ -753,7 +656,7 @@ no_policy: | |||
753 | static void cpufreq_sysfs_release(struct kobject *kobj) | 656 | static void cpufreq_sysfs_release(struct kobject *kobj) |
754 | { | 657 | { |
755 | struct cpufreq_policy *policy = to_policy(kobj); | 658 | struct cpufreq_policy *policy = to_policy(kobj); |
756 | dprintk("last reference is dropped\n"); | 659 | pr_debug("last reference is dropped\n"); |
757 | complete(&policy->kobj_unregister); | 660 | complete(&policy->kobj_unregister); |
758 | } | 661 | } |
759 | 662 | ||
@@ -788,7 +691,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, | |||
788 | gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); | 691 | gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); |
789 | if (gov) { | 692 | if (gov) { |
790 | policy->governor = gov; | 693 | policy->governor = gov; |
791 | dprintk("Restoring governor %s for cpu %d\n", | 694 | pr_debug("Restoring governor %s for cpu %d\n", |
792 | policy->governor->name, cpu); | 695 | policy->governor->name, cpu); |
793 | } | 696 | } |
794 | #endif | 697 | #endif |
@@ -824,7 +727,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, | |||
824 | per_cpu(cpufreq_cpu_data, cpu) = managed_policy; | 727 | per_cpu(cpufreq_cpu_data, cpu) = managed_policy; |
825 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 728 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
826 | 729 | ||
827 | dprintk("CPU already managed, adding link\n"); | 730 | pr_debug("CPU already managed, adding link\n"); |
828 | ret = sysfs_create_link(&sys_dev->kobj, | 731 | ret = sysfs_create_link(&sys_dev->kobj, |
829 | &managed_policy->kobj, | 732 | &managed_policy->kobj, |
830 | "cpufreq"); | 733 | "cpufreq"); |
@@ -865,7 +768,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu, | |||
865 | if (!cpu_online(j)) | 768 | if (!cpu_online(j)) |
866 | continue; | 769 | continue; |
867 | 770 | ||
868 | dprintk("CPU %u already managed, adding link\n", j); | 771 | pr_debug("CPU %u already managed, adding link\n", j); |
869 | managed_policy = cpufreq_cpu_get(cpu); | 772 | managed_policy = cpufreq_cpu_get(cpu); |
870 | cpu_sys_dev = get_cpu_sysdev(j); | 773 | cpu_sys_dev = get_cpu_sysdev(j); |
871 | ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, | 774 | ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, |
@@ -941,7 +844,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu, | |||
941 | policy->user_policy.governor = policy->governor; | 844 | policy->user_policy.governor = policy->governor; |
942 | 845 | ||
943 | if (ret) { | 846 | if (ret) { |
944 | dprintk("setting policy failed\n"); | 847 | pr_debug("setting policy failed\n"); |
945 | if (cpufreq_driver->exit) | 848 | if (cpufreq_driver->exit) |
946 | cpufreq_driver->exit(policy); | 849 | cpufreq_driver->exit(policy); |
947 | } | 850 | } |
@@ -977,8 +880,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
977 | if (cpu_is_offline(cpu)) | 880 | if (cpu_is_offline(cpu)) |
978 | return 0; | 881 | return 0; |
979 | 882 | ||
980 | cpufreq_debug_disable_ratelimit(); | 883 | pr_debug("adding CPU %u\n", cpu); |
981 | dprintk("adding CPU %u\n", cpu); | ||
982 | 884 | ||
983 | #ifdef CONFIG_SMP | 885 | #ifdef CONFIG_SMP |
984 | /* check whether a different CPU already registered this | 886 | /* check whether a different CPU already registered this |
@@ -986,7 +888,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
986 | policy = cpufreq_cpu_get(cpu); | 888 | policy = cpufreq_cpu_get(cpu); |
987 | if (unlikely(policy)) { | 889 | if (unlikely(policy)) { |
988 | cpufreq_cpu_put(policy); | 890 | cpufreq_cpu_put(policy); |
989 | cpufreq_debug_enable_ratelimit(); | ||
990 | return 0; | 891 | return 0; |
991 | } | 892 | } |
992 | #endif | 893 | #endif |
@@ -1037,7 +938,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
1037 | */ | 938 | */ |
1038 | ret = cpufreq_driver->init(policy); | 939 | ret = cpufreq_driver->init(policy); |
1039 | if (ret) { | 940 | if (ret) { |
1040 | dprintk("initialization failed\n"); | 941 | pr_debug("initialization failed\n"); |
1041 | goto err_unlock_policy; | 942 | goto err_unlock_policy; |
1042 | } | 943 | } |
1043 | policy->user_policy.min = policy->min; | 944 | policy->user_policy.min = policy->min; |
@@ -1063,8 +964,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
1063 | 964 | ||
1064 | kobject_uevent(&policy->kobj, KOBJ_ADD); | 965 | kobject_uevent(&policy->kobj, KOBJ_ADD); |
1065 | module_put(cpufreq_driver->owner); | 966 | module_put(cpufreq_driver->owner); |
1066 | dprintk("initialization complete\n"); | 967 | pr_debug("initialization complete\n"); |
1067 | cpufreq_debug_enable_ratelimit(); | ||
1068 | 968 | ||
1069 | return 0; | 969 | return 0; |
1070 | 970 | ||
@@ -1088,7 +988,6 @@ err_free_policy: | |||
1088 | nomem_out: | 988 | nomem_out: |
1089 | module_put(cpufreq_driver->owner); | 989 | module_put(cpufreq_driver->owner); |
1090 | module_out: | 990 | module_out: |
1091 | cpufreq_debug_enable_ratelimit(); | ||
1092 | return ret; | 991 | return ret; |
1093 | } | 992 | } |
1094 | 993 | ||
@@ -1112,15 +1011,13 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1112 | unsigned int j; | 1011 | unsigned int j; |
1113 | #endif | 1012 | #endif |
1114 | 1013 | ||
1115 | cpufreq_debug_disable_ratelimit(); | 1014 | pr_debug("unregistering CPU %u\n", cpu); |
1116 | dprintk("unregistering CPU %u\n", cpu); | ||
1117 | 1015 | ||
1118 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 1016 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
1119 | data = per_cpu(cpufreq_cpu_data, cpu); | 1017 | data = per_cpu(cpufreq_cpu_data, cpu); |
1120 | 1018 | ||
1121 | if (!data) { | 1019 | if (!data) { |
1122 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1020 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1123 | cpufreq_debug_enable_ratelimit(); | ||
1124 | unlock_policy_rwsem_write(cpu); | 1021 | unlock_policy_rwsem_write(cpu); |
1125 | return -EINVAL; | 1022 | return -EINVAL; |
1126 | } | 1023 | } |
@@ -1132,12 +1029,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1132 | * only need to unlink, put and exit | 1029 | * only need to unlink, put and exit |
1133 | */ | 1030 | */ |
1134 | if (unlikely(cpu != data->cpu)) { | 1031 | if (unlikely(cpu != data->cpu)) { |
1135 | dprintk("removing link\n"); | 1032 | pr_debug("removing link\n"); |
1136 | cpumask_clear_cpu(cpu, data->cpus); | 1033 | cpumask_clear_cpu(cpu, data->cpus); |
1137 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1034 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1138 | kobj = &sys_dev->kobj; | 1035 | kobj = &sys_dev->kobj; |
1139 | cpufreq_cpu_put(data); | 1036 | cpufreq_cpu_put(data); |
1140 | cpufreq_debug_enable_ratelimit(); | ||
1141 | unlock_policy_rwsem_write(cpu); | 1037 | unlock_policy_rwsem_write(cpu); |
1142 | sysfs_remove_link(kobj, "cpufreq"); | 1038 | sysfs_remove_link(kobj, "cpufreq"); |
1143 | return 0; | 1039 | return 0; |
@@ -1170,7 +1066,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1170 | for_each_cpu(j, data->cpus) { | 1066 | for_each_cpu(j, data->cpus) { |
1171 | if (j == cpu) | 1067 | if (j == cpu) |
1172 | continue; | 1068 | continue; |
1173 | dprintk("removing link for cpu %u\n", j); | 1069 | pr_debug("removing link for cpu %u\n", j); |
1174 | #ifdef CONFIG_HOTPLUG_CPU | 1070 | #ifdef CONFIG_HOTPLUG_CPU |
1175 | strncpy(per_cpu(cpufreq_cpu_governor, j), | 1071 | strncpy(per_cpu(cpufreq_cpu_governor, j), |
1176 | data->governor->name, CPUFREQ_NAME_LEN); | 1072 | data->governor->name, CPUFREQ_NAME_LEN); |
@@ -1199,21 +1095,35 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1199 | * not referenced anymore by anybody before we proceed with | 1095 | * not referenced anymore by anybody before we proceed with |
1200 | * unloading. | 1096 | * unloading. |
1201 | */ | 1097 | */ |
1202 | dprintk("waiting for dropping of refcount\n"); | 1098 | pr_debug("waiting for dropping of refcount\n"); |
1203 | wait_for_completion(cmp); | 1099 | wait_for_completion(cmp); |
1204 | dprintk("wait complete\n"); | 1100 | pr_debug("wait complete\n"); |
1205 | 1101 | ||
1206 | lock_policy_rwsem_write(cpu); | 1102 | lock_policy_rwsem_write(cpu); |
1207 | if (cpufreq_driver->exit) | 1103 | if (cpufreq_driver->exit) |
1208 | cpufreq_driver->exit(data); | 1104 | cpufreq_driver->exit(data); |
1209 | unlock_policy_rwsem_write(cpu); | 1105 | unlock_policy_rwsem_write(cpu); |
1210 | 1106 | ||
1107 | #ifdef CONFIG_HOTPLUG_CPU | ||
1108 | /* when the CPU which is the parent of the kobj is hotplugged | ||
1109 | * offline, check for siblings, and create cpufreq sysfs interface | ||
1110 | * and symlinks | ||
1111 | */ | ||
1112 | if (unlikely(cpumask_weight(data->cpus) > 1)) { | ||
1113 | /* first sibling now owns the new sysfs dir */ | ||
1114 | cpumask_clear_cpu(cpu, data->cpus); | ||
1115 | cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus))); | ||
1116 | |||
1117 | /* finally remove our own symlink */ | ||
1118 | lock_policy_rwsem_write(cpu); | ||
1119 | __cpufreq_remove_dev(sys_dev); | ||
1120 | } | ||
1121 | #endif | ||
1122 | |||
1211 | free_cpumask_var(data->related_cpus); | 1123 | free_cpumask_var(data->related_cpus); |
1212 | free_cpumask_var(data->cpus); | 1124 | free_cpumask_var(data->cpus); |
1213 | kfree(data); | 1125 | kfree(data); |
1214 | per_cpu(cpufreq_cpu_data, cpu) = NULL; | ||
1215 | 1126 | ||
1216 | cpufreq_debug_enable_ratelimit(); | ||
1217 | return 0; | 1127 | return 0; |
1218 | } | 1128 | } |
1219 | 1129 | ||
@@ -1239,7 +1149,7 @@ static void handle_update(struct work_struct *work) | |||
1239 | struct cpufreq_policy *policy = | 1149 | struct cpufreq_policy *policy = |
1240 | container_of(work, struct cpufreq_policy, update); | 1150 | container_of(work, struct cpufreq_policy, update); |
1241 | unsigned int cpu = policy->cpu; | 1151 | unsigned int cpu = policy->cpu; |
1242 | dprintk("handle_update for cpu %u called\n", cpu); | 1152 | pr_debug("handle_update for cpu %u called\n", cpu); |
1243 | cpufreq_update_policy(cpu); | 1153 | cpufreq_update_policy(cpu); |
1244 | } | 1154 | } |
1245 | 1155 | ||
@@ -1257,7 +1167,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, | |||
1257 | { | 1167 | { |
1258 | struct cpufreq_freqs freqs; | 1168 | struct cpufreq_freqs freqs; |
1259 | 1169 | ||
1260 | dprintk("Warning: CPU frequency out of sync: cpufreq and timing " | 1170 | pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " |
1261 | "core thinks of %u, is %u kHz.\n", old_freq, new_freq); | 1171 | "core thinks of %u, is %u kHz.\n", old_freq, new_freq); |
1262 | 1172 | ||
1263 | freqs.cpu = cpu; | 1173 | freqs.cpu = cpu; |
@@ -1360,7 +1270,7 @@ static int cpufreq_bp_suspend(void) | |||
1360 | int cpu = smp_processor_id(); | 1270 | int cpu = smp_processor_id(); |
1361 | struct cpufreq_policy *cpu_policy; | 1271 | struct cpufreq_policy *cpu_policy; |
1362 | 1272 | ||
1363 | dprintk("suspending cpu %u\n", cpu); | 1273 | pr_debug("suspending cpu %u\n", cpu); |
1364 | 1274 | ||
1365 | /* If there's no policy for the boot CPU, we have nothing to do. */ | 1275 | /* If there's no policy for the boot CPU, we have nothing to do. */ |
1366 | cpu_policy = cpufreq_cpu_get(cpu); | 1276 | cpu_policy = cpufreq_cpu_get(cpu); |
@@ -1398,7 +1308,7 @@ static void cpufreq_bp_resume(void) | |||
1398 | int cpu = smp_processor_id(); | 1308 | int cpu = smp_processor_id(); |
1399 | struct cpufreq_policy *cpu_policy; | 1309 | struct cpufreq_policy *cpu_policy; |
1400 | 1310 | ||
1401 | dprintk("resuming cpu %u\n", cpu); | 1311 | pr_debug("resuming cpu %u\n", cpu); |
1402 | 1312 | ||
1403 | /* If there's no policy for the boot CPU, we have nothing to do. */ | 1313 | /* If there's no policy for the boot CPU, we have nothing to do. */ |
1404 | cpu_policy = cpufreq_cpu_get(cpu); | 1314 | cpu_policy = cpufreq_cpu_get(cpu); |
@@ -1510,7 +1420,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1510 | { | 1420 | { |
1511 | int retval = -EINVAL; | 1421 | int retval = -EINVAL; |
1512 | 1422 | ||
1513 | dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, | 1423 | pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, |
1514 | target_freq, relation); | 1424 | target_freq, relation); |
1515 | if (cpu_online(policy->cpu) && cpufreq_driver->target) | 1425 | if (cpu_online(policy->cpu) && cpufreq_driver->target) |
1516 | retval = cpufreq_driver->target(policy, target_freq, relation); | 1426 | retval = cpufreq_driver->target(policy, target_freq, relation); |
@@ -1596,7 +1506,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, | |||
1596 | if (!try_module_get(policy->governor->owner)) | 1506 | if (!try_module_get(policy->governor->owner)) |
1597 | return -EINVAL; | 1507 | return -EINVAL; |
1598 | 1508 | ||
1599 | dprintk("__cpufreq_governor for CPU %u, event %u\n", | 1509 | pr_debug("__cpufreq_governor for CPU %u, event %u\n", |
1600 | policy->cpu, event); | 1510 | policy->cpu, event); |
1601 | ret = policy->governor->governor(policy, event); | 1511 | ret = policy->governor->governor(policy, event); |
1602 | 1512 | ||
@@ -1697,8 +1607,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, | |||
1697 | { | 1607 | { |
1698 | int ret = 0; | 1608 | int ret = 0; |
1699 | 1609 | ||
1700 | cpufreq_debug_disable_ratelimit(); | 1610 | pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, |
1701 | dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, | ||
1702 | policy->min, policy->max); | 1611 | policy->min, policy->max); |
1703 | 1612 | ||
1704 | memcpy(&policy->cpuinfo, &data->cpuinfo, | 1613 | memcpy(&policy->cpuinfo, &data->cpuinfo, |
@@ -1735,19 +1644,19 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, | |||
1735 | data->min = policy->min; | 1644 | data->min = policy->min; |
1736 | data->max = policy->max; | 1645 | data->max = policy->max; |
1737 | 1646 | ||
1738 | dprintk("new min and max freqs are %u - %u kHz\n", | 1647 | pr_debug("new min and max freqs are %u - %u kHz\n", |
1739 | data->min, data->max); | 1648 | data->min, data->max); |
1740 | 1649 | ||
1741 | if (cpufreq_driver->setpolicy) { | 1650 | if (cpufreq_driver->setpolicy) { |
1742 | data->policy = policy->policy; | 1651 | data->policy = policy->policy; |
1743 | dprintk("setting range\n"); | 1652 | pr_debug("setting range\n"); |
1744 | ret = cpufreq_driver->setpolicy(policy); | 1653 | ret = cpufreq_driver->setpolicy(policy); |
1745 | } else { | 1654 | } else { |
1746 | if (policy->governor != data->governor) { | 1655 | if (policy->governor != data->governor) { |
1747 | /* save old, working values */ | 1656 | /* save old, working values */ |
1748 | struct cpufreq_governor *old_gov = data->governor; | 1657 | struct cpufreq_governor *old_gov = data->governor; |
1749 | 1658 | ||
1750 | dprintk("governor switch\n"); | 1659 | pr_debug("governor switch\n"); |
1751 | 1660 | ||
1752 | /* end old governor */ | 1661 | /* end old governor */ |
1753 | if (data->governor) | 1662 | if (data->governor) |
@@ -1757,7 +1666,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, | |||
1757 | data->governor = policy->governor; | 1666 | data->governor = policy->governor; |
1758 | if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { | 1667 | if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { |
1759 | /* new governor failed, so re-start old one */ | 1668 | /* new governor failed, so re-start old one */ |
1760 | dprintk("starting governor %s failed\n", | 1669 | pr_debug("starting governor %s failed\n", |
1761 | data->governor->name); | 1670 | data->governor->name); |
1762 | if (old_gov) { | 1671 | if (old_gov) { |
1763 | data->governor = old_gov; | 1672 | data->governor = old_gov; |
@@ -1769,12 +1678,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, | |||
1769 | } | 1678 | } |
1770 | /* might be a policy change, too, so fall through */ | 1679 | /* might be a policy change, too, so fall through */ |
1771 | } | 1680 | } |
1772 | dprintk("governor: change or update limits\n"); | 1681 | pr_debug("governor: change or update limits\n"); |
1773 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); | 1682 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); |
1774 | } | 1683 | } |
1775 | 1684 | ||
1776 | error_out: | 1685 | error_out: |
1777 | cpufreq_debug_enable_ratelimit(); | ||
1778 | return ret; | 1686 | return ret; |
1779 | } | 1687 | } |
1780 | 1688 | ||
@@ -1801,7 +1709,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1801 | goto fail; | 1709 | goto fail; |
1802 | } | 1710 | } |
1803 | 1711 | ||
1804 | dprintk("updating policy for CPU %u\n", cpu); | 1712 | pr_debug("updating policy for CPU %u\n", cpu); |
1805 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); | 1713 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); |
1806 | policy.min = data->user_policy.min; | 1714 | policy.min = data->user_policy.min; |
1807 | policy.max = data->user_policy.max; | 1715 | policy.max = data->user_policy.max; |
@@ -1813,7 +1721,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1813 | if (cpufreq_driver->get) { | 1721 | if (cpufreq_driver->get) { |
1814 | policy.cur = cpufreq_driver->get(cpu); | 1722 | policy.cur = cpufreq_driver->get(cpu); |
1815 | if (!data->cur) { | 1723 | if (!data->cur) { |
1816 | dprintk("Driver did not initialize current freq"); | 1724 | pr_debug("Driver did not initialize current freq"); |
1817 | data->cur = policy.cur; | 1725 | data->cur = policy.cur; |
1818 | } else { | 1726 | } else { |
1819 | if (data->cur != policy.cur) | 1727 | if (data->cur != policy.cur) |
@@ -1889,7 +1797,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
1889 | ((!driver_data->setpolicy) && (!driver_data->target))) | 1797 | ((!driver_data->setpolicy) && (!driver_data->target))) |
1890 | return -EINVAL; | 1798 | return -EINVAL; |
1891 | 1799 | ||
1892 | dprintk("trying to register driver %s\n", driver_data->name); | 1800 | pr_debug("trying to register driver %s\n", driver_data->name); |
1893 | 1801 | ||
1894 | if (driver_data->setpolicy) | 1802 | if (driver_data->setpolicy) |
1895 | driver_data->flags |= CPUFREQ_CONST_LOOPS; | 1803 | driver_data->flags |= CPUFREQ_CONST_LOOPS; |
@@ -1920,15 +1828,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
1920 | 1828 | ||
1921 | /* if all ->init() calls failed, unregister */ | 1829 | /* if all ->init() calls failed, unregister */ |
1922 | if (ret) { | 1830 | if (ret) { |
1923 | dprintk("no CPU initialized for driver %s\n", | 1831 | pr_debug("no CPU initialized for driver %s\n", |
1924 | driver_data->name); | 1832 | driver_data->name); |
1925 | goto err_sysdev_unreg; | 1833 | goto err_sysdev_unreg; |
1926 | } | 1834 | } |
1927 | } | 1835 | } |
1928 | 1836 | ||
1929 | register_hotcpu_notifier(&cpufreq_cpu_notifier); | 1837 | register_hotcpu_notifier(&cpufreq_cpu_notifier); |
1930 | dprintk("driver %s up and running\n", driver_data->name); | 1838 | pr_debug("driver %s up and running\n", driver_data->name); |
1931 | cpufreq_debug_enable_ratelimit(); | ||
1932 | 1839 | ||
1933 | return 0; | 1840 | return 0; |
1934 | err_sysdev_unreg: | 1841 | err_sysdev_unreg: |
@@ -1955,14 +1862,10 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) | |||
1955 | { | 1862 | { |
1956 | unsigned long flags; | 1863 | unsigned long flags; |
1957 | 1864 | ||
1958 | cpufreq_debug_disable_ratelimit(); | 1865 | if (!cpufreq_driver || (driver != cpufreq_driver)) |
1959 | |||
1960 | if (!cpufreq_driver || (driver != cpufreq_driver)) { | ||
1961 | cpufreq_debug_enable_ratelimit(); | ||
1962 | return -EINVAL; | 1866 | return -EINVAL; |
1963 | } | ||
1964 | 1867 | ||
1965 | dprintk("unregistering driver %s\n", driver->name); | 1868 | pr_debug("unregistering driver %s\n", driver->name); |
1966 | 1869 | ||
1967 | sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); | 1870 | sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); |
1968 | unregister_hotcpu_notifier(&cpufreq_cpu_notifier); | 1871 | unregister_hotcpu_notifier(&cpufreq_cpu_notifier); |
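The bulk of this file's change is the removal of the CONFIG_CPU_FREQ_DEBUG block: cpufreq_debug_printk(), its ratelimit spinlock, and the "debug"/"debug_ratelimit" module parameters all go away, and every call site becomes a plain pr_debug(), which is compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled. A minimal sketch of the pr_debug() pattern the call sites now rely on, assuming a hypothetical out-of-tree module named "demo" (whether cpufreq.c also adds a pr_fmt() prefix is not visible in the hunks above):

/* pr_fmt() must be defined before the first include so every pr_*() call
 * automatically picks up the "demo: " prefix. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>

static int __init demo_init(void)
{
        pr_debug("driver %s up and running\n", "demo");
        return 0;
}

static void __exit demo_exit(void)
{
        pr_debug("unregistering driver %s\n", "demo");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With CONFIG_DYNAMIC_DEBUG the messages can then be switched on at run time, e.g. echo 'module demo +p' > /sys/kernel/debug/dynamic_debug/control, which takes over the role of the old per-subsystem debug mask and ratelimit knobs.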
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c index 7e2e515087f8..f13a8a9af6a1 100644 --- a/drivers/cpufreq/cpufreq_performance.c +++ b/drivers/cpufreq/cpufreq_performance.c | |||
@@ -15,9 +15,6 @@ | |||
15 | #include <linux/cpufreq.h> | 15 | #include <linux/cpufreq.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | 17 | ||
18 | #define dprintk(msg...) \ | ||
19 | cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg) | ||
20 | |||
21 | 18 | ||
22 | static int cpufreq_governor_performance(struct cpufreq_policy *policy, | 19 | static int cpufreq_governor_performance(struct cpufreq_policy *policy, |
23 | unsigned int event) | 20 | unsigned int event) |
@@ -25,7 +22,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy, | |||
25 | switch (event) { | 22 | switch (event) { |
26 | case CPUFREQ_GOV_START: | 23 | case CPUFREQ_GOV_START: |
27 | case CPUFREQ_GOV_LIMITS: | 24 | case CPUFREQ_GOV_LIMITS: |
28 | dprintk("setting to %u kHz because of event %u\n", | 25 | pr_debug("setting to %u kHz because of event %u\n", |
29 | policy->max, event); | 26 | policy->max, event); |
30 | __cpufreq_driver_target(policy, policy->max, | 27 | __cpufreq_driver_target(policy, policy->max, |
31 | CPUFREQ_RELATION_H); | 28 | CPUFREQ_RELATION_H); |
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c index e6db5faf3eb1..4c2eb512f2bc 100644 --- a/drivers/cpufreq/cpufreq_powersave.c +++ b/drivers/cpufreq/cpufreq_powersave.c | |||
@@ -15,16 +15,13 @@ | |||
15 | #include <linux/cpufreq.h> | 15 | #include <linux/cpufreq.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | 17 | ||
18 | #define dprintk(msg...) \ | ||
19 | cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg) | ||
20 | |||
21 | static int cpufreq_governor_powersave(struct cpufreq_policy *policy, | 18 | static int cpufreq_governor_powersave(struct cpufreq_policy *policy, |
22 | unsigned int event) | 19 | unsigned int event) |
23 | { | 20 | { |
24 | switch (event) { | 21 | switch (event) { |
25 | case CPUFREQ_GOV_START: | 22 | case CPUFREQ_GOV_START: |
26 | case CPUFREQ_GOV_LIMITS: | 23 | case CPUFREQ_GOV_LIMITS: |
27 | dprintk("setting to %u kHz because of event %u\n", | 24 | pr_debug("setting to %u kHz because of event %u\n", |
28 | policy->min, event); | 25 | policy->min, event); |
29 | __cpufreq_driver_target(policy, policy->min, | 26 | __cpufreq_driver_target(policy, policy->min, |
30 | CPUFREQ_RELATION_L); | 27 | CPUFREQ_RELATION_L); |
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 00d73fc8e4e2..b60a4c263686 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -165,17 +165,27 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) | |||
165 | return -1; | 165 | return -1; |
166 | } | 166 | } |
167 | 167 | ||
168 | /* should be called late in the CPU removal sequence so that the stats | ||
169 | * memory is still available in case someone tries to use it. | ||
170 | */ | ||
168 | static void cpufreq_stats_free_table(unsigned int cpu) | 171 | static void cpufreq_stats_free_table(unsigned int cpu) |
169 | { | 172 | { |
170 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); | 173 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); |
171 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | ||
172 | if (policy && policy->cpu == cpu) | ||
173 | sysfs_remove_group(&policy->kobj, &stats_attr_group); | ||
174 | if (stat) { | 174 | if (stat) { |
175 | kfree(stat->time_in_state); | 175 | kfree(stat->time_in_state); |
176 | kfree(stat); | 176 | kfree(stat); |
177 | } | 177 | } |
178 | per_cpu(cpufreq_stats_table, cpu) = NULL; | 178 | per_cpu(cpufreq_stats_table, cpu) = NULL; |
179 | } | ||
180 | |||
181 | /* must be called early in the CPU removal sequence (before | ||
182 | * cpufreq_remove_dev) so that policy is still valid. | ||
183 | */ | ||
184 | static void cpufreq_stats_free_sysfs(unsigned int cpu) | ||
185 | { | ||
186 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | ||
187 | if (policy && policy->cpu == cpu) | ||
188 | sysfs_remove_group(&policy->kobj, &stats_attr_group); | ||
179 | if (policy) | 189 | if (policy) |
180 | cpufreq_cpu_put(policy); | 190 | cpufreq_cpu_put(policy); |
181 | } | 191 | } |
@@ -316,6 +326,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, | |||
316 | case CPU_ONLINE_FROZEN: | 326 | case CPU_ONLINE_FROZEN: |
317 | cpufreq_update_policy(cpu); | 327 | cpufreq_update_policy(cpu); |
318 | break; | 328 | break; |
329 | case CPU_DOWN_PREPARE: | ||
330 | cpufreq_stats_free_sysfs(cpu); | ||
331 | break; | ||
319 | case CPU_DEAD: | 332 | case CPU_DEAD: |
320 | case CPU_DEAD_FROZEN: | 333 | case CPU_DEAD_FROZEN: |
321 | cpufreq_stats_free_table(cpu); | 334 | cpufreq_stats_free_table(cpu); |
@@ -324,9 +337,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, | |||
324 | return NOTIFY_OK; | 337 | return NOTIFY_OK; |
325 | } | 338 | } |
326 | 339 | ||
327 | static struct notifier_block cpufreq_stat_cpu_notifier __refdata = | 340 | /* priority=1 so this will get called before cpufreq_remove_dev */ |
328 | { | 341 | static struct notifier_block cpufreq_stat_cpu_notifier __refdata = { |
329 | .notifier_call = cpufreq_stat_cpu_callback, | 342 | .notifier_call = cpufreq_stat_cpu_callback, |
343 | .priority = 1, | ||
330 | }; | 344 | }; |
331 | 345 | ||
332 | static struct notifier_block notifier_policy_block = { | 346 | static struct notifier_block notifier_policy_block = { |
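The comments added above encode an ordering requirement: the stats sysfs group has to be removed at CPU_DOWN_PREPARE, while the policy kobject still exists, whereas the stats memory may only be freed at CPU_DEAD; giving the notifier .priority = 1 makes it run before cpufreq's own default-priority hotplug notifier for the same event. A hedged sketch of that early/late split using the hotplug-notifier API of this series (the "demo" names are made up):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit demo_cpu_callback(struct notifier_block *nfb,
                                       unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_DOWN_PREPARE:
                /* early: the cpufreq policy/kobject is still alive */
                pr_debug("cpu %u going down, tear down sysfs now\n", cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                /* late: safe to free per-cpu bookkeeping */
                pr_debug("cpu %u dead, free per-cpu state\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

/* priority 1 beats the default priority 0, so this callback is invoked
 * before cpufreq's own notifier for the same hotplug event */
static struct notifier_block demo_cpu_notifier __refdata = {
        .notifier_call  = demo_cpu_callback,
        .priority       = 1,
};

static int __init demo_init(void)
{
        register_hotcpu_notifier(&demo_cpu_notifier);
        return 0;
}

static void __exit demo_exit(void)
{
        unregister_hotcpu_notifier(&demo_cpu_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");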
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 66d2d1d6c80f..f231015904c0 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
@@ -37,9 +37,6 @@ static DEFINE_PER_CPU(unsigned int, cpu_is_managed); | |||
37 | static DEFINE_MUTEX(userspace_mutex); | 37 | static DEFINE_MUTEX(userspace_mutex); |
38 | static int cpus_using_userspace_governor; | 38 | static int cpus_using_userspace_governor; |
39 | 39 | ||
40 | #define dprintk(msg...) \ | ||
41 | cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) | ||
42 | |||
43 | /* keep track of frequency transitions */ | 40 | /* keep track of frequency transitions */ |
44 | static int | 41 | static int |
45 | userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | 42 | userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, |
@@ -50,7 +47,7 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | |||
50 | if (!per_cpu(cpu_is_managed, freq->cpu)) | 47 | if (!per_cpu(cpu_is_managed, freq->cpu)) |
51 | return 0; | 48 | return 0; |
52 | 49 | ||
53 | dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", | 50 | pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", |
54 | freq->cpu, freq->new); | 51 | freq->cpu, freq->new); |
55 | per_cpu(cpu_cur_freq, freq->cpu) = freq->new; | 52 | per_cpu(cpu_cur_freq, freq->cpu) = freq->new; |
56 | 53 | ||
@@ -73,7 +70,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) | |||
73 | { | 70 | { |
74 | int ret = -EINVAL; | 71 | int ret = -EINVAL; |
75 | 72 | ||
76 | dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); | 73 | pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); |
77 | 74 | ||
78 | mutex_lock(&userspace_mutex); | 75 | mutex_lock(&userspace_mutex); |
79 | if (!per_cpu(cpu_is_managed, policy->cpu)) | 76 | if (!per_cpu(cpu_is_managed, policy->cpu)) |
@@ -134,7 +131,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, | |||
134 | per_cpu(cpu_max_freq, cpu) = policy->max; | 131 | per_cpu(cpu_max_freq, cpu) = policy->max; |
135 | per_cpu(cpu_cur_freq, cpu) = policy->cur; | 132 | per_cpu(cpu_cur_freq, cpu) = policy->cur; |
136 | per_cpu(cpu_set_freq, cpu) = policy->cur; | 133 | per_cpu(cpu_set_freq, cpu) = policy->cur; |
137 | dprintk("managing cpu %u started " | 134 | pr_debug("managing cpu %u started " |
138 | "(%u - %u kHz, currently %u kHz)\n", | 135 | "(%u - %u kHz, currently %u kHz)\n", |
139 | cpu, | 136 | cpu, |
140 | per_cpu(cpu_min_freq, cpu), | 137 | per_cpu(cpu_min_freq, cpu), |
@@ -156,12 +153,12 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, | |||
156 | per_cpu(cpu_min_freq, cpu) = 0; | 153 | per_cpu(cpu_min_freq, cpu) = 0; |
157 | per_cpu(cpu_max_freq, cpu) = 0; | 154 | per_cpu(cpu_max_freq, cpu) = 0; |
158 | per_cpu(cpu_set_freq, cpu) = 0; | 155 | per_cpu(cpu_set_freq, cpu) = 0; |
159 | dprintk("managing cpu %u stopped\n", cpu); | 156 | pr_debug("managing cpu %u stopped\n", cpu); |
160 | mutex_unlock(&userspace_mutex); | 157 | mutex_unlock(&userspace_mutex); |
161 | break; | 158 | break; |
162 | case CPUFREQ_GOV_LIMITS: | 159 | case CPUFREQ_GOV_LIMITS: |
163 | mutex_lock(&userspace_mutex); | 160 | mutex_lock(&userspace_mutex); |
164 | dprintk("limit event for cpu %u: %u - %u kHz, " | 161 | pr_debug("limit event for cpu %u: %u - %u kHz, " |
165 | "currently %u kHz, last set to %u kHz\n", | 162 | "currently %u kHz, last set to %u kHz\n", |
166 | cpu, policy->min, policy->max, | 163 | cpu, policy->min, policy->max, |
167 | per_cpu(cpu_cur_freq, cpu), | 164 | per_cpu(cpu_cur_freq, cpu), |
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index 35a257dd4bb7..35a257dd4bb7 100644 --- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index c587db472a75..c587db472a75 100644 --- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c | |||
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 05432216e224..90431cb92804 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c | |||
@@ -14,9 +14,6 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/cpufreq.h> | 15 | #include <linux/cpufreq.h> |
16 | 16 | ||
17 | #define dprintk(msg...) \ | ||
18 | cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg) | ||
19 | |||
20 | /********************************************************************* | 17 | /********************************************************************* |
21 | * FREQUENCY TABLE HELPERS * | 18 | * FREQUENCY TABLE HELPERS * |
22 | *********************************************************************/ | 19 | *********************************************************************/ |
@@ -31,11 +28,11 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, | |||
31 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { | 28 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { |
32 | unsigned int freq = table[i].frequency; | 29 | unsigned int freq = table[i].frequency; |
33 | if (freq == CPUFREQ_ENTRY_INVALID) { | 30 | if (freq == CPUFREQ_ENTRY_INVALID) { |
34 | dprintk("table entry %u is invalid, skipping\n", i); | 31 | pr_debug("table entry %u is invalid, skipping\n", i); |
35 | 32 | ||
36 | continue; | 33 | continue; |
37 | } | 34 | } |
38 | dprintk("table entry %u: %u kHz, %u index\n", | 35 | pr_debug("table entry %u: %u kHz, %u index\n", |
39 | i, freq, table[i].index); | 36 | i, freq, table[i].index); |
40 | if (freq < min_freq) | 37 | if (freq < min_freq) |
41 | min_freq = freq; | 38 | min_freq = freq; |
@@ -61,7 +58,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | |||
61 | unsigned int i; | 58 | unsigned int i; |
62 | unsigned int count = 0; | 59 | unsigned int count = 0; |
63 | 60 | ||
64 | dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", | 61 | pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", |
65 | policy->min, policy->max, policy->cpu); | 62 | policy->min, policy->max, policy->cpu); |
66 | 63 | ||
67 | if (!cpu_online(policy->cpu)) | 64 | if (!cpu_online(policy->cpu)) |
@@ -86,7 +83,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | |||
86 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 83 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, |
87 | policy->cpuinfo.max_freq); | 84 | policy->cpuinfo.max_freq); |
88 | 85 | ||
89 | dprintk("verification lead to (%u - %u kHz) for cpu %u\n", | 86 | pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", |
90 | policy->min, policy->max, policy->cpu); | 87 | policy->min, policy->max, policy->cpu); |
91 | 88 | ||
92 | return 0; | 89 | return 0; |
@@ -110,7 +107,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | |||
110 | }; | 107 | }; |
111 | unsigned int i; | 108 | unsigned int i; |
112 | 109 | ||
113 | dprintk("request for target %u kHz (relation: %u) for cpu %u\n", | 110 | pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", |
114 | target_freq, relation, policy->cpu); | 111 | target_freq, relation, policy->cpu); |
115 | 112 | ||
116 | switch (relation) { | 113 | switch (relation) { |
@@ -167,7 +164,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | |||
167 | } else | 164 | } else |
168 | *index = optimal.index; | 165 | *index = optimal.index; |
169 | 166 | ||
170 | dprintk("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, | 167 | pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, |
171 | table[*index].index); | 168 | table[*index].index); |
172 | 169 | ||
173 | return 0; | 170 | return 0; |
@@ -216,14 +213,14 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); | |||
216 | void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, | 213 | void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, |
217 | unsigned int cpu) | 214 | unsigned int cpu) |
218 | { | 215 | { |
219 | dprintk("setting show_table for cpu %u to %p\n", cpu, table); | 216 | pr_debug("setting show_table for cpu %u to %p\n", cpu, table); |
220 | per_cpu(cpufreq_show_table, cpu) = table; | 217 | per_cpu(cpufreq_show_table, cpu) = table; |
221 | } | 218 | } |
222 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); | 219 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); |
223 | 220 | ||
224 | void cpufreq_frequency_table_put_attr(unsigned int cpu) | 221 | void cpufreq_frequency_table_put_attr(unsigned int cpu) |
225 | { | 222 | { |
226 | dprintk("clearing show_table for cpu %u\n", cpu); | 223 | pr_debug("clearing show_table for cpu %u\n", cpu); |
227 | per_cpu(cpufreq_show_table, cpu) = NULL; | 224 | per_cpu(cpufreq_show_table, cpu) = NULL; |
228 | } | 225 | } |
229 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); | 226 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); |
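The freq_table helpers above all walk a driver-supplied array terminated by CPUFREQ_TABLE_END, skipping CPUFREQ_ENTRY_INVALID slots, whether they are deriving the cpuinfo limits, verifying a policy, or picking a target index. A stand-alone user-space sketch of the min/max walk done by cpufreq_frequency_table_cpuinfo(); the struct layout and the ~0/~1 sentinels mirror <linux/cpufreq.h> of this series, while the table contents are invented for illustration:

#include <stdio.h>
#include <limits.h>

#define CPUFREQ_ENTRY_INVALID	(~0u)	/* slot present but unusable */
#define CPUFREQ_TABLE_END	(~1u)	/* terminator */

struct cpufreq_frequency_table {
	unsigned int index;		/* driver-private cookie */
	unsigned int frequency;		/* kHz */
};

int main(void)
{
	struct cpufreq_frequency_table table[] = {
		{ 0, 800000 },
		{ 1, CPUFREQ_ENTRY_INVALID },	/* skipped, as above */
		{ 2, 1600000 },
		{ 3, 2400000 },
		{ 0, CPUFREQ_TABLE_END },
	};
	unsigned int min_freq = UINT_MAX, max_freq = 0;
	unsigned int i;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq < min_freq)
			min_freq = freq;
		if (freq > max_freq)
			max_freq = freq;
	}
	printf("cpuinfo: %u - %u kHz\n", min_freq, max_freq);
	return 0;
}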
diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 32974cf84232..ffe1f2c92ed3 100644 --- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c | |||
@@ -142,9 +142,6 @@ module_param(max_duration, int, 0444); | |||
142 | #define POLICY_MIN_DIV 20 | 142 | #define POLICY_MIN_DIV 20 |
143 | 143 | ||
144 | 144 | ||
145 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
146 | "gx-suspmod", msg) | ||
147 | |||
148 | /** | 145 | /** |
149 | * we can detect a core multiplier from dir0_lsb | 146 | * we can detect a core multiplier from dir0_lsb |
150 | * from GX1 datasheet p.56, | 147 | * from GX1 datasheet p.56, |
@@ -191,7 +188,7 @@ static __init struct pci_dev *gx_detect_chipset(void) | |||
191 | /* check if CPU is a MediaGX or a Geode. */ | 188 | /* check if CPU is a MediaGX or a Geode. */ |
192 | if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) && | 189 | if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) && |
193 | (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { | 190 | (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { |
194 | dprintk("error: no MediaGX/Geode processor found!\n"); | 191 | pr_debug("error: no MediaGX/Geode processor found!\n"); |
195 | return NULL; | 192 | return NULL; |
196 | } | 193 | } |
197 | 194 | ||
@@ -201,7 +198,7 @@ static __init struct pci_dev *gx_detect_chipset(void) | |||
201 | return gx_pci; | 198 | return gx_pci; |
202 | } | 199 | } |
203 | 200 | ||
204 | dprintk("error: no supported chipset found!\n"); | 201 | pr_debug("error: no supported chipset found!\n"); |
205 | return NULL; | 202 | return NULL; |
206 | } | 203 | } |
207 | 204 | ||
@@ -305,14 +302,14 @@ static void gx_set_cpuspeed(unsigned int khz) | |||
305 | break; | 302 | break; |
306 | default: | 303 | default: |
307 | local_irq_restore(flags); | 304 | local_irq_restore(flags); |
308 | dprintk("fatal: try to set unknown chipset.\n"); | 305 | pr_debug("fatal: try to set unknown chipset.\n"); |
309 | return; | 306 | return; |
310 | } | 307 | } |
311 | } else { | 308 | } else { |
312 | suscfg = gx_params->pci_suscfg & ~(SUSMOD); | 309 | suscfg = gx_params->pci_suscfg & ~(SUSMOD); |
313 | gx_params->off_duration = 0; | 310 | gx_params->off_duration = 0; |
314 | gx_params->on_duration = 0; | 311 | gx_params->on_duration = 0; |
315 | dprintk("suspend modulation disabled: cpu runs 100%% speed.\n"); | 312 | pr_debug("suspend modulation disabled: cpu runs 100%% speed.\n"); |
316 | } | 313 | } |
317 | 314 | ||
318 | gx_write_byte(PCI_MODOFF, gx_params->off_duration); | 315 | gx_write_byte(PCI_MODOFF, gx_params->off_duration); |
@@ -327,9 +324,9 @@ static void gx_set_cpuspeed(unsigned int khz) | |||
327 | 324 | ||
328 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 325 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
329 | 326 | ||
330 | dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", | 327 | pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", |
331 | gx_params->on_duration * 32, gx_params->off_duration * 32); | 328 | gx_params->on_duration * 32, gx_params->off_duration * 32); |
332 | dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); | 329 | pr_debug("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); |
333 | } | 330 | } |
334 | 331 | ||
335 | /**************************************************************** | 332 | /**************************************************************** |
@@ -428,8 +425,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | |||
428 | stock_freq = maxfreq; | 425 | stock_freq = maxfreq; |
429 | curfreq = gx_get_cpuspeed(0); | 426 | curfreq = gx_get_cpuspeed(0); |
430 | 427 | ||
431 | dprintk("cpu max frequency is %d.\n", maxfreq); | 428 | pr_debug("cpu max frequency is %d.\n", maxfreq); |
432 | dprintk("cpu current frequency is %dkHz.\n", curfreq); | 429 | pr_debug("cpu current frequency is %dkHz.\n", curfreq); |
433 | 430 | ||
434 | /* setup basic struct for cpufreq API */ | 431 | /* setup basic struct for cpufreq API */ |
435 | policy->cpu = 0; | 432 | policy->cpu = 0; |
@@ -475,7 +472,7 @@ static int __init cpufreq_gx_init(void) | |||
475 | if (max_duration > 0xff) | 472 | if (max_duration > 0xff) |
476 | max_duration = 0xff; | 473 | max_duration = 0xff; |
477 | 474 | ||
478 | dprintk("geode suspend modulation available.\n"); | 475 | pr_debug("geode suspend modulation available.\n"); |
479 | 476 | ||
480 | params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); | 477 | params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); |
481 | if (params == NULL) | 478 | if (params == NULL) |
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index cf48cdd6907d..f47d26e2a135 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c | |||
@@ -77,9 +77,6 @@ static int scale_voltage; | |||
77 | static int disable_acpi_c3; | 77 | static int disable_acpi_c3; |
78 | static int revid_errata; | 78 | static int revid_errata; |
79 | 79 | ||
80 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
81 | "longhaul", msg) | ||
82 | |||
83 | 80 | ||
84 | /* Clock ratios multiplied by 10 */ | 81 | /* Clock ratios multiplied by 10 */ |
85 | static int mults[32]; | 82 | static int mults[32]; |
@@ -87,7 +84,6 @@ static int eblcr[32]; | |||
87 | static int longhaul_version; | 84 | static int longhaul_version; |
88 | static struct cpufreq_frequency_table *longhaul_table; | 85 | static struct cpufreq_frequency_table *longhaul_table; |
89 | 86 | ||
90 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
91 | static char speedbuffer[8]; | 87 | static char speedbuffer[8]; |
92 | 88 | ||
93 | static char *print_speed(int speed) | 89 | static char *print_speed(int speed) |
@@ -106,7 +102,6 @@ static char *print_speed(int speed) | |||
106 | 102 | ||
107 | return speedbuffer; | 103 | return speedbuffer; |
108 | } | 104 | } |
109 | #endif | ||
110 | 105 | ||
111 | 106 | ||
112 | static unsigned int calc_speed(int mult) | 107 | static unsigned int calc_speed(int mult) |
@@ -275,7 +270,7 @@ static void longhaul_setstate(unsigned int table_index) | |||
275 | 270 | ||
276 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 271 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
277 | 272 | ||
278 | dprintk("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", | 273 | pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", |
279 | fsb, mult/10, mult%10, print_speed(speed/1000)); | 274 | fsb, mult/10, mult%10, print_speed(speed/1000)); |
280 | retry_loop: | 275 | retry_loop: |
281 | preempt_disable(); | 276 | preempt_disable(); |
@@ -460,12 +455,12 @@ static int __cpuinit longhaul_get_ranges(void) | |||
460 | break; | 455 | break; |
461 | } | 456 | } |
462 | 457 | ||
463 | dprintk("MinMult:%d.%dx MaxMult:%d.%dx\n", | 458 | pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n", |
464 | minmult/10, minmult%10, maxmult/10, maxmult%10); | 459 | minmult/10, minmult%10, maxmult/10, maxmult%10); |
465 | 460 | ||
466 | highest_speed = calc_speed(maxmult); | 461 | highest_speed = calc_speed(maxmult); |
467 | lowest_speed = calc_speed(minmult); | 462 | lowest_speed = calc_speed(minmult); |
468 | dprintk("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, | 463 | pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, |
469 | print_speed(lowest_speed/1000), | 464 | print_speed(lowest_speed/1000), |
470 | print_speed(highest_speed/1000)); | 465 | print_speed(highest_speed/1000)); |
471 | 466 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h index cbf48fbca881..cbf48fbca881 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.h +++ b/drivers/cpufreq/longhaul.h | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index d9f51367666b..34ea359b370e 100644 --- a/arch/x86/kernel/cpu/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c | |||
@@ -15,9 +15,6 @@ | |||
15 | #include <asm/msr.h> | 15 | #include <asm/msr.h> |
16 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
17 | 17 | ||
18 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
19 | "longrun", msg) | ||
20 | |||
21 | static struct cpufreq_driver longrun_driver; | 18 | static struct cpufreq_driver longrun_driver; |
22 | 19 | ||
23 | /** | 20 | /** |
@@ -40,14 +37,14 @@ static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy) | |||
40 | u32 msr_lo, msr_hi; | 37 | u32 msr_lo, msr_hi; |
41 | 38 | ||
42 | rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); | 39 | rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); |
43 | dprintk("longrun flags are %x - %x\n", msr_lo, msr_hi); | 40 | pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi); |
44 | if (msr_lo & 0x01) | 41 | if (msr_lo & 0x01) |
45 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; | 42 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; |
46 | else | 43 | else |
47 | policy->policy = CPUFREQ_POLICY_POWERSAVE; | 44 | policy->policy = CPUFREQ_POLICY_POWERSAVE; |
48 | 45 | ||
49 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); | 46 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); |
50 | dprintk("longrun ctrl is %x - %x\n", msr_lo, msr_hi); | 47 | pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi); |
51 | msr_lo &= 0x0000007F; | 48 | msr_lo &= 0x0000007F; |
52 | msr_hi &= 0x0000007F; | 49 | msr_hi &= 0x0000007F; |
53 | 50 | ||
@@ -150,7 +147,7 @@ static unsigned int longrun_get(unsigned int cpu) | |||
150 | return 0; | 147 | return 0; |
151 | 148 | ||
152 | cpuid(0x80860007, &eax, &ebx, &ecx, &edx); | 149 | cpuid(0x80860007, &eax, &ebx, &ecx, &edx); |
153 | dprintk("cpuid eax is %u\n", eax); | 150 | pr_debug("cpuid eax is %u\n", eax); |
154 | 151 | ||
155 | return eax * 1000; | 152 | return eax * 1000; |
156 | } | 153 | } |
@@ -196,7 +193,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, | |||
196 | rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); | 193 | rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); |
197 | *high_freq = msr_lo * 1000; /* to kHz */ | 194 | *high_freq = msr_lo * 1000; /* to kHz */ |
198 | 195 | ||
199 | dprintk("longrun table interface told %u - %u kHz\n", | 196 | pr_debug("longrun table interface told %u - %u kHz\n", |
200 | *low_freq, *high_freq); | 197 | *low_freq, *high_freq); |
201 | 198 | ||
202 | if (*low_freq > *high_freq) | 199 | if (*low_freq > *high_freq) |
@@ -207,7 +204,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, | |||
207 | /* set the upper border to the value determined during TSC init */ | 204 | /* set the upper border to the value determined during TSC init */ |
208 | *high_freq = (cpu_khz / 1000); | 205 | *high_freq = (cpu_khz / 1000); |
209 | *high_freq = *high_freq * 1000; | 206 | *high_freq = *high_freq * 1000; |
210 | dprintk("high frequency is %u kHz\n", *high_freq); | 207 | pr_debug("high frequency is %u kHz\n", *high_freq); |
211 | 208 | ||
212 | /* get current borders */ | 209 | /* get current borders */ |
213 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); | 210 | rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); |
@@ -233,7 +230,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, | |||
233 | /* restore values */ | 230 | /* restore values */ |
234 | wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); | 231 | wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); |
235 | } | 232 | } |
236 | dprintk("percentage is %u %%, freq is %u MHz\n", ecx, eax); | 233 | pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax); |
237 | 234 | ||
238 | /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) | 235 | /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) |
239 | * equals | 236 | * equals |
@@ -249,7 +246,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, | |||
249 | edx = ((eax - ebx) * 100) / (100 - ecx); | 246 | edx = ((eax - ebx) * 100) / (100 - ecx); |
250 | *low_freq = edx * 1000; /* back to kHz */ | 247 | *low_freq = edx * 1000; /* back to kHz */ |
251 | 248 | ||
252 | dprintk("low frequency is %u kHz\n", *low_freq); | 249 | pr_debug("low frequency is %u kHz\n", *low_freq); |
253 | 250 | ||
254 | if (*low_freq > *high_freq) | 251 | if (*low_freq > *high_freq) |
255 | *low_freq = *high_freq; | 252 | *low_freq = *high_freq; |
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.c b/drivers/cpufreq/mperf.c index 911e193018ae..911e193018ae 100644 --- a/arch/x86/kernel/cpu/cpufreq/mperf.c +++ b/drivers/cpufreq/mperf.c | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.h b/drivers/cpufreq/mperf.h index 5dbf2950dc22..5dbf2950dc22 100644 --- a/arch/x86/kernel/cpu/cpufreq/mperf.h +++ b/drivers/cpufreq/mperf.h | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index 52c93648e492..6be3e0760c26 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c | |||
@@ -35,8 +35,6 @@ | |||
35 | #include "speedstep-lib.h" | 35 | #include "speedstep-lib.h" |
36 | 36 | ||
37 | #define PFX "p4-clockmod: " | 37 | #define PFX "p4-clockmod: " |
38 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
39 | "p4-clockmod", msg) | ||
40 | 38 | ||
41 | /* | 39 | /* |
42 | * Duty Cycle (3bits), note DC_DISABLE is not specified in | 40 | * Duty Cycle (3bits), note DC_DISABLE is not specified in |
@@ -66,7 +64,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) | |||
66 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); | 64 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); |
67 | 65 | ||
68 | if (l & 0x01) | 66 | if (l & 0x01) |
69 | dprintk("CPU#%d currently thermal throttled\n", cpu); | 67 | pr_debug("CPU#%d currently thermal throttled\n", cpu); |
70 | 68 | ||
71 | if (has_N44_O17_errata[cpu] && | 69 | if (has_N44_O17_errata[cpu] && |
72 | (newstate == DC_25PT || newstate == DC_DFLT)) | 70 | (newstate == DC_25PT || newstate == DC_DFLT)) |
@@ -74,10 +72,10 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) | |||
74 | 72 | ||
75 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); | 73 | rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); |
76 | if (newstate == DC_DISABLE) { | 74 | if (newstate == DC_DISABLE) { |
77 | dprintk("CPU#%d disabling modulation\n", cpu); | 75 | pr_debug("CPU#%d disabling modulation\n", cpu); |
78 | wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); | 76 | wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); |
79 | } else { | 77 | } else { |
80 | dprintk("CPU#%d setting duty cycle to %d%%\n", | 78 | pr_debug("CPU#%d setting duty cycle to %d%%\n", |
81 | cpu, ((125 * newstate) / 10)); | 79 | cpu, ((125 * newstate) / 10)); |
82 | /* bits 63 - 5 : reserved | 80 | /* bits 63 - 5 : reserved |
83 | * bit 4 : enable/disable | 81 | * bit 4 : enable/disable |
@@ -217,7 +215,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) | |||
217 | case 0x0f11: | 215 | case 0x0f11: |
218 | case 0x0f12: | 216 | case 0x0f12: |
219 | has_N44_O17_errata[policy->cpu] = 1; | 217 | has_N44_O17_errata[policy->cpu] = 1; |
220 | dprintk("has errata -- disabling low frequencies\n"); | 218 | pr_debug("has errata -- disabling low frequencies\n"); |
221 | } | 219 | } |
222 | 220 | ||
223 | if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && | 221 | if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && |
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 755a31e0f5b0..7b0603eb0129 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | #include <acpi/processor.h> | 40 | #include <acpi/processor.h> |
41 | 41 | ||
42 | #define PCC_VERSION "1.00.00" | 42 | #define PCC_VERSION "1.10.00" |
43 | #define POLL_LOOPS 300 | 43 | #define POLL_LOOPS 300 |
44 | 44 | ||
45 | #define CMD_COMPLETE 0x1 | 45 | #define CMD_COMPLETE 0x1 |
@@ -48,9 +48,6 @@ | |||
48 | 48 | ||
49 | #define BUF_SZ 4 | 49 | #define BUF_SZ 4 |
50 | 50 | ||
51 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
52 | "pcc-cpufreq", msg) | ||
53 | |||
54 | struct pcc_register_resource { | 51 | struct pcc_register_resource { |
55 | u8 descriptor; | 52 | u8 descriptor; |
56 | u16 length; | 53 | u16 length; |
@@ -102,7 +99,7 @@ static struct acpi_generic_address doorbell; | |||
102 | static u64 doorbell_preserve; | 99 | static u64 doorbell_preserve; |
103 | static u64 doorbell_write; | 100 | static u64 doorbell_write; |
104 | 101 | ||
105 | static u8 OSC_UUID[16] = {0x63, 0x9B, 0x2C, 0x9F, 0x70, 0x91, 0x49, 0x1f, | 102 | static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49, |
106 | 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46}; | 103 | 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46}; |
107 | 104 | ||
108 | struct pcc_cpu { | 105 | struct pcc_cpu { |
@@ -152,7 +149,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) | |||
152 | 149 | ||
153 | spin_lock(&pcc_lock); | 150 | spin_lock(&pcc_lock); |
154 | 151 | ||
155 | dprintk("get: get_freq for CPU %d\n", cpu); | 152 | pr_debug("get: get_freq for CPU %d\n", cpu); |
156 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); | 153 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); |
157 | 154 | ||
158 | input_buffer = 0x1; | 155 | input_buffer = 0x1; |
@@ -170,7 +167,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) | |||
170 | 167 | ||
171 | status = ioread16(&pcch_hdr->status); | 168 | status = ioread16(&pcch_hdr->status); |
172 | if (status != CMD_COMPLETE) { | 169 | if (status != CMD_COMPLETE) { |
173 | dprintk("get: FAILED: for CPU %d, status is %d\n", | 170 | pr_debug("get: FAILED: for CPU %d, status is %d\n", |
174 | cpu, status); | 171 | cpu, status); |
175 | goto cmd_incomplete; | 172 | goto cmd_incomplete; |
176 | } | 173 | } |
@@ -178,14 +175,14 @@ static unsigned int pcc_get_freq(unsigned int cpu) | |||
178 | curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff)) | 175 | curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff)) |
179 | / 100) * 1000); | 176 | / 100) * 1000); |
180 | 177 | ||
181 | dprintk("get: SUCCESS: (virtual) output_offset for cpu %d is " | 178 | pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is " |
182 | "0x%x, contains a value of: 0x%x. Speed is: %d MHz\n", | 179 | "0x%p, contains a value of: 0x%x. Speed is: %d MHz\n", |
183 | cpu, (pcch_virt_addr + pcc_cpu_data->output_offset), | 180 | cpu, (pcch_virt_addr + pcc_cpu_data->output_offset), |
184 | output_buffer, curr_freq); | 181 | output_buffer, curr_freq); |
185 | 182 | ||
186 | freq_limit = (output_buffer >> 8) & 0xff; | 183 | freq_limit = (output_buffer >> 8) & 0xff; |
187 | if (freq_limit != 0xff) { | 184 | if (freq_limit != 0xff) { |
188 | dprintk("get: frequency for cpu %d is being temporarily" | 185 | pr_debug("get: frequency for cpu %d is being temporarily" |
189 | " capped at %d\n", cpu, curr_freq); | 186 | " capped at %d\n", cpu, curr_freq); |
190 | } | 187 | } |
191 | 188 | ||
@@ -212,8 +209,8 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, | |||
212 | cpu = policy->cpu; | 209 | cpu = policy->cpu; |
213 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); | 210 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); |
214 | 211 | ||
215 | dprintk("target: CPU %d should go to target freq: %d " | 212 | pr_debug("target: CPU %d should go to target freq: %d " |
216 | "(virtual) input_offset is 0x%x\n", | 213 | "(virtual) input_offset is 0x%p\n", |
217 | cpu, target_freq, | 214 | cpu, target_freq, |
218 | (pcch_virt_addr + pcc_cpu_data->input_offset)); | 215 | (pcch_virt_addr + pcc_cpu_data->input_offset)); |
219 | 216 | ||
@@ -234,14 +231,14 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, | |||
234 | 231 | ||
235 | status = ioread16(&pcch_hdr->status); | 232 | status = ioread16(&pcch_hdr->status); |
236 | if (status != CMD_COMPLETE) { | 233 | if (status != CMD_COMPLETE) { |
237 | dprintk("target: FAILED for cpu %d, with status: 0x%x\n", | 234 | pr_debug("target: FAILED for cpu %d, with status: 0x%x\n", |
238 | cpu, status); | 235 | cpu, status); |
239 | goto cmd_incomplete; | 236 | goto cmd_incomplete; |
240 | } | 237 | } |
241 | iowrite16(0, &pcch_hdr->status); | 238 | iowrite16(0, &pcch_hdr->status); |
242 | 239 | ||
243 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 240 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
244 | dprintk("target: was SUCCESSFUL for cpu %d\n", cpu); | 241 | pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); |
245 | spin_unlock(&pcc_lock); | 242 | spin_unlock(&pcc_lock); |
246 | 243 | ||
247 | return 0; | 244 | return 0; |
@@ -293,7 +290,7 @@ static int pcc_get_offset(int cpu) | |||
293 | memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); | 290 | memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); |
294 | memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ); | 291 | memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ); |
295 | 292 | ||
296 | dprintk("pcc_get_offset: for CPU %d: pcc_cpu_data " | 293 | pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data " |
297 | "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n", | 294 | "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n", |
298 | cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset); | 295 | cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset); |
299 | out_free: | 296 | out_free: |
@@ -410,7 +407,7 @@ static int __init pcc_cpufreq_probe(void) | |||
410 | if (ACPI_SUCCESS(status)) { | 407 | if (ACPI_SUCCESS(status)) { |
411 | ret = pcc_cpufreq_do_osc(&osc_handle); | 408 | ret = pcc_cpufreq_do_osc(&osc_handle); |
412 | if (ret) | 409 | if (ret) |
413 | dprintk("probe: _OSC evaluation did not succeed\n"); | 410 | pr_debug("probe: _OSC evaluation did not succeed\n"); |
414 | /* Firmware's use of _OSC is optional */ | 411 | /* Firmware's use of _OSC is optional */ |
415 | ret = 0; | 412 | ret = 0; |
416 | } | 413 | } |
@@ -433,7 +430,7 @@ static int __init pcc_cpufreq_probe(void) | |||
433 | 430 | ||
434 | mem_resource = (struct pcc_memory_resource *)member->buffer.pointer; | 431 | mem_resource = (struct pcc_memory_resource *)member->buffer.pointer; |
435 | 432 | ||
436 | dprintk("probe: mem_resource descriptor: 0x%x," | 433 | pr_debug("probe: mem_resource descriptor: 0x%x," |
437 | " length: %d, space_id: %d, resource_usage: %d," | 434 | " length: %d, space_id: %d, resource_usage: %d," |
438 | " type_specific: %d, granularity: 0x%llx," | 435 | " type_specific: %d, granularity: 0x%llx," |
439 | " minimum: 0x%llx, maximum: 0x%llx," | 436 | " minimum: 0x%llx, maximum: 0x%llx," |
@@ -453,13 +450,13 @@ static int __init pcc_cpufreq_probe(void) | |||
453 | pcch_virt_addr = ioremap_nocache(mem_resource->minimum, | 450 | pcch_virt_addr = ioremap_nocache(mem_resource->minimum, |
454 | mem_resource->address_length); | 451 | mem_resource->address_length); |
455 | if (pcch_virt_addr == NULL) { | 452 | if (pcch_virt_addr == NULL) { |
456 | dprintk("probe: could not map shared mem region\n"); | 453 | pr_debug("probe: could not map shared mem region\n"); |
457 | goto out_free; | 454 | goto out_free; |
458 | } | 455 | } |
459 | pcch_hdr = pcch_virt_addr; | 456 | pcch_hdr = pcch_virt_addr; |
460 | 457 | ||
461 | dprintk("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); | 458 | pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); |
462 | dprintk("probe: PCCH header is at physical address: 0x%llx," | 459 | pr_debug("probe: PCCH header is at physical address: 0x%llx," |
463 | " signature: 0x%x, length: %d bytes, major: %d, minor: %d," | 460 | " signature: 0x%x, length: %d bytes, major: %d, minor: %d," |
464 | " supported features: 0x%x, command field: 0x%x," | 461 | " supported features: 0x%x, command field: 0x%x," |
465 | " status field: 0x%x, nominal latency: %d us\n", | 462 | " status field: 0x%x, nominal latency: %d us\n", |
@@ -469,7 +466,7 @@ static int __init pcc_cpufreq_probe(void) | |||
469 | ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status), | 466 | ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status), |
470 | ioread32(&pcch_hdr->latency)); | 467 | ioread32(&pcch_hdr->latency)); |
471 | 468 | ||
472 | dprintk("probe: min time between commands: %d us," | 469 | pr_debug("probe: min time between commands: %d us," |
473 | " max time between commands: %d us," | 470 | " max time between commands: %d us," |
474 | " nominal CPU frequency: %d MHz," | 471 | " nominal CPU frequency: %d MHz," |
475 | " minimum CPU frequency: %d MHz," | 472 | " minimum CPU frequency: %d MHz," |
@@ -494,7 +491,7 @@ static int __init pcc_cpufreq_probe(void) | |||
494 | doorbell.access_width = 64; | 491 | doorbell.access_width = 64; |
495 | doorbell.address = reg_resource->address; | 492 | doorbell.address = reg_resource->address; |
496 | 493 | ||
497 | dprintk("probe: doorbell: space_id is %d, bit_width is %d, " | 494 | pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " |
498 | "bit_offset is %d, access_width is %d, address is 0x%llx\n", | 495 | "bit_offset is %d, access_width is %d, address is 0x%llx\n", |
499 | doorbell.space_id, doorbell.bit_width, doorbell.bit_offset, | 496 | doorbell.space_id, doorbell.bit_width, doorbell.bit_offset, |
500 | doorbell.access_width, reg_resource->address); | 497 | doorbell.access_width, reg_resource->address); |
@@ -515,7 +512,7 @@ static int __init pcc_cpufreq_probe(void) | |||
515 | 512 | ||
516 | doorbell_write = member->integer.value; | 513 | doorbell_write = member->integer.value; |
517 | 514 | ||
518 | dprintk("probe: doorbell_preserve: 0x%llx," | 515 | pr_debug("probe: doorbell_preserve: 0x%llx," |
519 | " doorbell_write: 0x%llx\n", | 516 | " doorbell_write: 0x%llx\n", |
520 | doorbell_preserve, doorbell_write); | 517 | doorbell_preserve, doorbell_write); |
521 | 518 | ||
@@ -550,7 +547,7 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
550 | 547 | ||
551 | result = pcc_get_offset(cpu); | 548 | result = pcc_get_offset(cpu); |
552 | if (result) { | 549 | if (result) { |
553 | dprintk("init: PCCP evaluation failed\n"); | 550 | pr_debug("init: PCCP evaluation failed\n"); |
554 | goto out; | 551 | goto out; |
555 | } | 552 | } |
556 | 553 | ||
@@ -561,12 +558,12 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
561 | policy->cur = pcc_get_freq(cpu); | 558 | policy->cur = pcc_get_freq(cpu); |
562 | 559 | ||
563 | if (!policy->cur) { | 560 | if (!policy->cur) { |
564 | dprintk("init: Unable to get current CPU frequency\n"); | 561 | pr_debug("init: Unable to get current CPU frequency\n"); |
565 | result = -EINVAL; | 562 | result = -EINVAL; |
566 | goto out; | 563 | goto out; |
567 | } | 564 | } |
568 | 565 | ||
569 | dprintk("init: policy->max is %d, policy->min is %d\n", | 566 | pr_debug("init: policy->max is %d, policy->min is %d\n", |
570 | policy->max, policy->min); | 567 | policy->max, policy->min); |
571 | out: | 568 | out: |
572 | return result; | 569 | return result; |
@@ -597,7 +594,7 @@ static int __init pcc_cpufreq_init(void) | |||
597 | 594 | ||
598 | ret = pcc_cpufreq_probe(); | 595 | ret = pcc_cpufreq_probe(); |
599 | if (ret) { | 596 | if (ret) { |
600 | dprintk("pcc_cpufreq_init: PCCH evaluation failed\n"); | 597 | pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n"); |
601 | return ret; | 598 | return ret; |
602 | } | 599 | } |
603 | 600 | ||
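Note: besides the dprintk() conversion, the pcc-cpufreq hunks above also fix the format specifiers used for the computed I/O addresses (0x%x becomes 0x%p). A sketch of the corrected idiom, with placeholder names (pcch_base stands in for the driver's mapped region):

#include <linux/io.h>
#include <linux/printk.h>

static void __iomem *pcch_base;	/* assumed to be set up elsewhere via ioremap() */

static void show_output_buffer(unsigned int cpu, u32 offset)
{
	/*
	 * The argument is a pointer, so it is printed with %p;
	 * %x would truncate it on 64-bit and trigger a format warning.
	 */
	pr_debug("cpu %u: (virtual) output buffer at %p\n",
		 cpu, pcch_base + offset);
}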
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index b3379d6a5c57..b3379d6a5c57 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 4a45fd6e41ba..d71d9f372359 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c | |||
@@ -68,7 +68,6 @@ union powernow_acpi_control_t { | |||
68 | }; | 68 | }; |
69 | #endif | 69 | #endif |
70 | 70 | ||
71 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
72 | /* divide by 1000 to get VCore voltage in V. */ | 71 | /* divide by 1000 to get VCore voltage in V. */ |
73 | static const int mobile_vid_table[32] = { | 72 | static const int mobile_vid_table[32] = { |
74 | 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, | 73 | 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, |
@@ -76,7 +75,6 @@ static const int mobile_vid_table[32] = { | |||
76 | 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, | 75 | 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, |
77 | 1075, 1050, 1025, 1000, 975, 950, 925, 0, | 76 | 1075, 1050, 1025, 1000, 975, 950, 925, 0, |
78 | }; | 77 | }; |
79 | #endif | ||
80 | 78 | ||
81 | /* divide by 10 to get FID. */ | 79 | /* divide by 10 to get FID. */ |
82 | static const int fid_codes[32] = { | 80 | static const int fid_codes[32] = { |
@@ -103,9 +101,6 @@ static unsigned int fsb; | |||
103 | static unsigned int latency; | 101 | static unsigned int latency; |
104 | static char have_a0; | 102 | static char have_a0; |
105 | 103 | ||
106 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
107 | "powernow-k7", msg) | ||
108 | |||
109 | static int check_fsb(unsigned int fsbspeed) | 104 | static int check_fsb(unsigned int fsbspeed) |
110 | { | 105 | { |
111 | int delta; | 106 | int delta; |
@@ -209,7 +204,7 @@ static int get_ranges(unsigned char *pst) | |||
209 | vid = *pst++; | 204 | vid = *pst++; |
210 | powernow_table[j].index |= (vid << 8); /* upper 8 bits */ | 205 | powernow_table[j].index |= (vid << 8); /* upper 8 bits */ |
211 | 206 | ||
212 | dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " | 207 | pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " |
213 | "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, | 208 | "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, |
214 | fid_codes[fid] % 10, speed/1000, vid, | 209 | fid_codes[fid] % 10, speed/1000, vid, |
215 | mobile_vid_table[vid]/1000, | 210 | mobile_vid_table[vid]/1000, |
@@ -367,7 +362,7 @@ static int powernow_acpi_init(void) | |||
367 | unsigned int speed, speed_mhz; | 362 | unsigned int speed, speed_mhz; |
368 | 363 | ||
369 | pc.val = (unsigned long) state->control; | 364 | pc.val = (unsigned long) state->control; |
370 | dprintk("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", | 365 | pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", |
371 | i, | 366 | i, |
372 | (u32) state->core_frequency, | 367 | (u32) state->core_frequency, |
373 | (u32) state->power, | 368 | (u32) state->power, |
@@ -401,7 +396,7 @@ static int powernow_acpi_init(void) | |||
401 | invalidate_entry(i); | 396 | invalidate_entry(i); |
402 | } | 397 | } |
403 | 398 | ||
404 | dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " | 399 | pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " |
405 | "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, | 400 | "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, |
406 | fid_codes[fid] % 10, speed_mhz, vid, | 401 | fid_codes[fid] % 10, speed_mhz, vid, |
407 | mobile_vid_table[vid]/1000, | 402 | mobile_vid_table[vid]/1000, |
@@ -409,7 +404,7 @@ static int powernow_acpi_init(void) | |||
409 | 404 | ||
410 | if (state->core_frequency != speed_mhz) { | 405 | if (state->core_frequency != speed_mhz) { |
411 | state->core_frequency = speed_mhz; | 406 | state->core_frequency = speed_mhz; |
412 | dprintk(" Corrected ACPI frequency to %d\n", | 407 | pr_debug(" Corrected ACPI frequency to %d\n", |
413 | speed_mhz); | 408 | speed_mhz); |
414 | } | 409 | } |
415 | 410 | ||
@@ -453,8 +448,8 @@ static int powernow_acpi_init(void) | |||
453 | 448 | ||
454 | static void print_pst_entry(struct pst_s *pst, unsigned int j) | 449 | static void print_pst_entry(struct pst_s *pst, unsigned int j) |
455 | { | 450 | { |
456 | dprintk("PST:%d (@%p)\n", j, pst); | 451 | pr_debug("PST:%d (@%p)\n", j, pst); |
457 | dprintk(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", | 452 | pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", |
458 | pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); | 453 | pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); |
459 | } | 454 | } |
460 | 455 | ||
@@ -474,20 +469,20 @@ static int powernow_decode_bios(int maxfid, int startvid) | |||
474 | p = phys_to_virt(i); | 469 | p = phys_to_virt(i); |
475 | 470 | ||
476 | if (memcmp(p, "AMDK7PNOW!", 10) == 0) { | 471 | if (memcmp(p, "AMDK7PNOW!", 10) == 0) { |
477 | dprintk("Found PSB header at %p\n", p); | 472 | pr_debug("Found PSB header at %p\n", p); |
478 | psb = (struct psb_s *) p; | 473 | psb = (struct psb_s *) p; |
479 | dprintk("Table version: 0x%x\n", psb->tableversion); | 474 | pr_debug("Table version: 0x%x\n", psb->tableversion); |
480 | if (psb->tableversion != 0x12) { | 475 | if (psb->tableversion != 0x12) { |
481 | printk(KERN_INFO PFX "Sorry, only v1.2 tables" | 476 | printk(KERN_INFO PFX "Sorry, only v1.2 tables" |
482 | " supported right now\n"); | 477 | " supported right now\n"); |
483 | return -ENODEV; | 478 | return -ENODEV; |
484 | } | 479 | } |
485 | 480 | ||
486 | dprintk("Flags: 0x%x\n", psb->flags); | 481 | pr_debug("Flags: 0x%x\n", psb->flags); |
487 | if ((psb->flags & 1) == 0) | 482 | if ((psb->flags & 1) == 0) |
488 | dprintk("Mobile voltage regulator\n"); | 483 | pr_debug("Mobile voltage regulator\n"); |
489 | else | 484 | else |
490 | dprintk("Desktop voltage regulator\n"); | 485 | pr_debug("Desktop voltage regulator\n"); |
491 | 486 | ||
492 | latency = psb->settlingtime; | 487 | latency = psb->settlingtime; |
493 | if (latency < 100) { | 488 | if (latency < 100) { |
@@ -497,9 +492,9 @@ static int powernow_decode_bios(int maxfid, int startvid) | |||
497 | "Correcting.\n", latency); | 492 | "Correcting.\n", latency); |
498 | latency = 100; | 493 | latency = 100; |
499 | } | 494 | } |
500 | dprintk("Settling Time: %d microseconds.\n", | 495 | pr_debug("Settling Time: %d microseconds.\n", |
501 | psb->settlingtime); | 496 | psb->settlingtime); |
502 | dprintk("Has %d PST tables. (Only dumping ones " | 497 | pr_debug("Has %d PST tables. (Only dumping ones " |
503 | "relevant to this CPU).\n", | 498 | "relevant to this CPU).\n", |
504 | psb->numpst); | 499 | psb->numpst); |
505 | 500 | ||
@@ -650,7 +645,7 @@ static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy) | |||
650 | printk(KERN_WARNING PFX "can not determine bus frequency\n"); | 645 | printk(KERN_WARNING PFX "can not determine bus frequency\n"); |
651 | return -EINVAL; | 646 | return -EINVAL; |
652 | } | 647 | } |
653 | dprintk("FSB: %3dMHz\n", fsb/1000); | 648 | pr_debug("FSB: %3dMHz\n", fsb/1000); |
654 | 649 | ||
655 | if (dmi_check_system(powernow_dmi_table) || acpi_force) { | 650 | if (dmi_check_system(powernow_dmi_table) || acpi_force) { |
656 | printk(KERN_INFO PFX "PSB/PST known to be broken. " | 651 | printk(KERN_INFO PFX "PSB/PST known to be broken. " |
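Note: the powernow-k7 hunks above also drop the CONFIG_CPU_FREQ_DEBUG guard around mobile_vid_table. Unlike the old dprintk(), pr_debug() still references and type-checks its arguments even when it compiles away to a no-op, so data used only in debug output has to stay built in unconditionally. A simplified sketch of that constraint (the helper is hypothetical, the first table row is from the driver):

#include <linux/printk.h>

static const int mobile_vid_table[32] = {
	2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
	/* remaining entries as in the driver */
};

static void show_vid(unsigned int vid)
{
	/*
	 * Even when pr_debug() expands to nothing at run time, the
	 * arguments are still compiled against, so the table must exist.
	 */
	pr_debug("VID: 0x%x (%d.%03dV)\n", vid,
		 mobile_vid_table[vid] / 1000, mobile_vid_table[vid] % 1000);
}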
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.h b/drivers/cpufreq/powernow-k7.h index 35fb4eaf6e1c..35fb4eaf6e1c 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.h +++ b/drivers/cpufreq/powernow-k7.h | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 2368e38327b3..83479b6fb9a1 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c | |||
@@ -139,7 +139,7 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data) | |||
139 | } | 139 | } |
140 | do { | 140 | do { |
141 | if (i++ > 10000) { | 141 | if (i++ > 10000) { |
142 | dprintk("detected change pending stuck\n"); | 142 | pr_debug("detected change pending stuck\n"); |
143 | return 1; | 143 | return 1; |
144 | } | 144 | } |
145 | rdmsr(MSR_FIDVID_STATUS, lo, hi); | 145 | rdmsr(MSR_FIDVID_STATUS, lo, hi); |
@@ -176,7 +176,7 @@ static void fidvid_msr_init(void) | |||
176 | fid = lo & MSR_S_LO_CURRENT_FID; | 176 | fid = lo & MSR_S_LO_CURRENT_FID; |
177 | lo = fid | (vid << MSR_C_LO_VID_SHIFT); | 177 | lo = fid | (vid << MSR_C_LO_VID_SHIFT); |
178 | hi = MSR_C_HI_STP_GNT_BENIGN; | 178 | hi = MSR_C_HI_STP_GNT_BENIGN; |
179 | dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); | 179 | pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); |
180 | wrmsr(MSR_FIDVID_CTL, lo, hi); | 180 | wrmsr(MSR_FIDVID_CTL, lo, hi); |
181 | } | 181 | } |
182 | 182 | ||
@@ -196,7 +196,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) | |||
196 | lo |= (data->currvid << MSR_C_LO_VID_SHIFT); | 196 | lo |= (data->currvid << MSR_C_LO_VID_SHIFT); |
197 | lo |= MSR_C_LO_INIT_FID_VID; | 197 | lo |= MSR_C_LO_INIT_FID_VID; |
198 | 198 | ||
199 | dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n", | 199 | pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n", |
200 | fid, lo, data->plllock * PLL_LOCK_CONVERSION); | 200 | fid, lo, data->plllock * PLL_LOCK_CONVERSION); |
201 | 201 | ||
202 | do { | 202 | do { |
@@ -244,7 +244,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) | |||
244 | lo |= (vid << MSR_C_LO_VID_SHIFT); | 244 | lo |= (vid << MSR_C_LO_VID_SHIFT); |
245 | lo |= MSR_C_LO_INIT_FID_VID; | 245 | lo |= MSR_C_LO_INIT_FID_VID; |
246 | 246 | ||
247 | dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n", | 247 | pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n", |
248 | vid, lo, STOP_GRANT_5NS); | 248 | vid, lo, STOP_GRANT_5NS); |
249 | 249 | ||
250 | do { | 250 | do { |
@@ -325,7 +325,7 @@ static int transition_fid_vid(struct powernow_k8_data *data, | |||
325 | return 1; | 325 | return 1; |
326 | } | 326 | } |
327 | 327 | ||
328 | dprintk("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", | 328 | pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", |
329 | smp_processor_id(), data->currfid, data->currvid); | 329 | smp_processor_id(), data->currfid, data->currvid); |
330 | 330 | ||
331 | return 0; | 331 | return 0; |
@@ -339,7 +339,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, | |||
339 | u32 savefid = data->currfid; | 339 | u32 savefid = data->currfid; |
340 | u32 maxvid, lo, rvomult = 1; | 340 | u32 maxvid, lo, rvomult = 1; |
341 | 341 | ||
342 | dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " | 342 | pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " |
343 | "reqvid 0x%x, rvo 0x%x\n", | 343 | "reqvid 0x%x, rvo 0x%x\n", |
344 | smp_processor_id(), | 344 | smp_processor_id(), |
345 | data->currfid, data->currvid, reqvid, data->rvo); | 345 | data->currfid, data->currvid, reqvid, data->rvo); |
@@ -349,12 +349,12 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, | |||
349 | rvosteps *= rvomult; | 349 | rvosteps *= rvomult; |
350 | rdmsr(MSR_FIDVID_STATUS, lo, maxvid); | 350 | rdmsr(MSR_FIDVID_STATUS, lo, maxvid); |
351 | maxvid = 0x1f & (maxvid >> 16); | 351 | maxvid = 0x1f & (maxvid >> 16); |
352 | dprintk("ph1 maxvid=0x%x\n", maxvid); | 352 | pr_debug("ph1 maxvid=0x%x\n", maxvid); |
353 | if (reqvid < maxvid) /* lower numbers are higher voltages */ | 353 | if (reqvid < maxvid) /* lower numbers are higher voltages */ |
354 | reqvid = maxvid; | 354 | reqvid = maxvid; |
355 | 355 | ||
356 | while (data->currvid > reqvid) { | 356 | while (data->currvid > reqvid) { |
357 | dprintk("ph1: curr 0x%x, req vid 0x%x\n", | 357 | pr_debug("ph1: curr 0x%x, req vid 0x%x\n", |
358 | data->currvid, reqvid); | 358 | data->currvid, reqvid); |
359 | if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) | 359 | if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) |
360 | return 1; | 360 | return 1; |
@@ -365,7 +365,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, | |||
365 | if (data->currvid == maxvid) { | 365 | if (data->currvid == maxvid) { |
366 | rvosteps = 0; | 366 | rvosteps = 0; |
367 | } else { | 367 | } else { |
368 | dprintk("ph1: changing vid for rvo, req 0x%x\n", | 368 | pr_debug("ph1: changing vid for rvo, req 0x%x\n", |
369 | data->currvid - 1); | 369 | data->currvid - 1); |
370 | if (decrease_vid_code_by_step(data, data->currvid-1, 1)) | 370 | if (decrease_vid_code_by_step(data, data->currvid-1, 1)) |
371 | return 1; | 371 | return 1; |
@@ -382,7 +382,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, | |||
382 | return 1; | 382 | return 1; |
383 | } | 383 | } |
384 | 384 | ||
385 | dprintk("ph1 complete, currfid 0x%x, currvid 0x%x\n", | 385 | pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n", |
386 | data->currfid, data->currvid); | 386 | data->currfid, data->currvid); |
387 | 387 | ||
388 | return 0; | 388 | return 0; |
@@ -400,7 +400,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) | |||
400 | return 0; | 400 | return 0; |
401 | } | 401 | } |
402 | 402 | ||
403 | dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " | 403 | pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " |
404 | "reqfid 0x%x\n", | 404 | "reqfid 0x%x\n", |
405 | smp_processor_id(), | 405 | smp_processor_id(), |
406 | data->currfid, data->currvid, reqfid); | 406 | data->currfid, data->currvid, reqfid); |
@@ -457,7 +457,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) | |||
457 | return 1; | 457 | return 1; |
458 | } | 458 | } |
459 | 459 | ||
460 | dprintk("ph2 complete, currfid 0x%x, currvid 0x%x\n", | 460 | pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n", |
461 | data->currfid, data->currvid); | 461 | data->currfid, data->currvid); |
462 | 462 | ||
463 | return 0; | 463 | return 0; |
@@ -470,7 +470,7 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, | |||
470 | u32 savefid = data->currfid; | 470 | u32 savefid = data->currfid; |
471 | u32 savereqvid = reqvid; | 471 | u32 savereqvid = reqvid; |
472 | 472 | ||
473 | dprintk("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", | 473 | pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", |
474 | smp_processor_id(), | 474 | smp_processor_id(), |
475 | data->currfid, data->currvid); | 475 | data->currfid, data->currvid); |
476 | 476 | ||
@@ -498,17 +498,17 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, | |||
498 | return 1; | 498 | return 1; |
499 | 499 | ||
500 | if (savereqvid != data->currvid) { | 500 | if (savereqvid != data->currvid) { |
501 | dprintk("ph3 failed, currvid 0x%x\n", data->currvid); | 501 | pr_debug("ph3 failed, currvid 0x%x\n", data->currvid); |
502 | return 1; | 502 | return 1; |
503 | } | 503 | } |
504 | 504 | ||
505 | if (savefid != data->currfid) { | 505 | if (savefid != data->currfid) { |
506 | dprintk("ph3 failed, currfid changed 0x%x\n", | 506 | pr_debug("ph3 failed, currfid changed 0x%x\n", |
507 | data->currfid); | 507 | data->currfid); |
508 | return 1; | 508 | return 1; |
509 | } | 509 | } |
510 | 510 | ||
511 | dprintk("ph3 complete, currfid 0x%x, currvid 0x%x\n", | 511 | pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n", |
512 | data->currfid, data->currvid); | 512 | data->currfid, data->currvid); |
513 | 513 | ||
514 | return 0; | 514 | return 0; |
@@ -707,7 +707,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, | |||
707 | return -EIO; | 707 | return -EIO; |
708 | } | 708 | } |
709 | 709 | ||
710 | dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); | 710 | pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); |
711 | data->powernow_table = powernow_table; | 711 | data->powernow_table = powernow_table; |
712 | if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) | 712 | if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) |
713 | print_basics(data); | 713 | print_basics(data); |
@@ -717,7 +717,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, | |||
717 | (pst[j].vid == data->currvid)) | 717 | (pst[j].vid == data->currvid)) |
718 | return 0; | 718 | return 0; |
719 | 719 | ||
720 | dprintk("currfid/vid do not match PST, ignoring\n"); | 720 | pr_debug("currfid/vid do not match PST, ignoring\n"); |
721 | return 0; | 721 | return 0; |
722 | } | 722 | } |
723 | 723 | ||
@@ -739,36 +739,36 @@ static int find_psb_table(struct powernow_k8_data *data) | |||
739 | if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) | 739 | if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) |
740 | continue; | 740 | continue; |
741 | 741 | ||
742 | dprintk("found PSB header at 0x%p\n", psb); | 742 | pr_debug("found PSB header at 0x%p\n", psb); |
743 | 743 | ||
744 | dprintk("table vers: 0x%x\n", psb->tableversion); | 744 | pr_debug("table vers: 0x%x\n", psb->tableversion); |
745 | if (psb->tableversion != PSB_VERSION_1_4) { | 745 | if (psb->tableversion != PSB_VERSION_1_4) { |
746 | printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); | 746 | printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); |
747 | return -ENODEV; | 747 | return -ENODEV; |
748 | } | 748 | } |
749 | 749 | ||
750 | dprintk("flags: 0x%x\n", psb->flags1); | 750 | pr_debug("flags: 0x%x\n", psb->flags1); |
751 | if (psb->flags1) { | 751 | if (psb->flags1) { |
752 | printk(KERN_ERR FW_BUG PFX "unknown flags\n"); | 752 | printk(KERN_ERR FW_BUG PFX "unknown flags\n"); |
753 | return -ENODEV; | 753 | return -ENODEV; |
754 | } | 754 | } |
755 | 755 | ||
756 | data->vstable = psb->vstable; | 756 | data->vstable = psb->vstable; |
757 | dprintk("voltage stabilization time: %d(*20us)\n", | 757 | pr_debug("voltage stabilization time: %d(*20us)\n", |
758 | data->vstable); | 758 | data->vstable); |
759 | 759 | ||
760 | dprintk("flags2: 0x%x\n", psb->flags2); | 760 | pr_debug("flags2: 0x%x\n", psb->flags2); |
761 | data->rvo = psb->flags2 & 3; | 761 | data->rvo = psb->flags2 & 3; |
762 | data->irt = ((psb->flags2) >> 2) & 3; | 762 | data->irt = ((psb->flags2) >> 2) & 3; |
763 | mvs = ((psb->flags2) >> 4) & 3; | 763 | mvs = ((psb->flags2) >> 4) & 3; |
764 | data->vidmvs = 1 << mvs; | 764 | data->vidmvs = 1 << mvs; |
765 | data->batps = ((psb->flags2) >> 6) & 3; | 765 | data->batps = ((psb->flags2) >> 6) & 3; |
766 | 766 | ||
767 | dprintk("ramp voltage offset: %d\n", data->rvo); | 767 | pr_debug("ramp voltage offset: %d\n", data->rvo); |
768 | dprintk("isochronous relief time: %d\n", data->irt); | 768 | pr_debug("isochronous relief time: %d\n", data->irt); |
769 | dprintk("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); | 769 | pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); |
770 | 770 | ||
771 | dprintk("numpst: 0x%x\n", psb->num_tables); | 771 | pr_debug("numpst: 0x%x\n", psb->num_tables); |
772 | cpst = psb->num_tables; | 772 | cpst = psb->num_tables; |
773 | if ((psb->cpuid == 0x00000fc0) || | 773 | if ((psb->cpuid == 0x00000fc0) || |
774 | (psb->cpuid == 0x00000fe0)) { | 774 | (psb->cpuid == 0x00000fe0)) { |
@@ -783,13 +783,13 @@ static int find_psb_table(struct powernow_k8_data *data) | |||
783 | } | 783 | } |
784 | 784 | ||
785 | data->plllock = psb->plllocktime; | 785 | data->plllock = psb->plllocktime; |
786 | dprintk("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); | 786 | pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); |
787 | dprintk("maxfid: 0x%x\n", psb->maxfid); | 787 | pr_debug("maxfid: 0x%x\n", psb->maxfid); |
788 | dprintk("maxvid: 0x%x\n", psb->maxvid); | 788 | pr_debug("maxvid: 0x%x\n", psb->maxvid); |
789 | maxvid = psb->maxvid; | 789 | maxvid = psb->maxvid; |
790 | 790 | ||
791 | data->numps = psb->numps; | 791 | data->numps = psb->numps; |
792 | dprintk("numpstates: 0x%x\n", data->numps); | 792 | pr_debug("numpstates: 0x%x\n", data->numps); |
793 | return fill_powernow_table(data, | 793 | return fill_powernow_table(data, |
794 | (struct pst_s *)(psb+1), maxvid); | 794 | (struct pst_s *)(psb+1), maxvid); |
795 | } | 795 | } |
@@ -834,13 +834,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
834 | u64 control, status; | 834 | u64 control, status; |
835 | 835 | ||
836 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { | 836 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { |
837 | dprintk("register performance failed: bad ACPI data\n"); | 837 | pr_debug("register performance failed: bad ACPI data\n"); |
838 | return -EIO; | 838 | return -EIO; |
839 | } | 839 | } |
840 | 840 | ||
841 | /* verify the data contained in the ACPI structures */ | 841 | /* verify the data contained in the ACPI structures */ |
842 | if (data->acpi_data.state_count <= 1) { | 842 | if (data->acpi_data.state_count <= 1) { |
843 | dprintk("No ACPI P-States\n"); | 843 | pr_debug("No ACPI P-States\n"); |
844 | goto err_out; | 844 | goto err_out; |
845 | } | 845 | } |
846 | 846 | ||
@@ -849,7 +849,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
849 | 849 | ||
850 | if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || | 850 | if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || |
851 | (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | 851 | (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { |
852 | dprintk("Invalid control/status registers (%x - %x)\n", | 852 | pr_debug("Invalid control/status registers (%llx - %llx)\n", |
853 | control, status); | 853 | control, status); |
854 | goto err_out; | 854 | goto err_out; |
855 | } | 855 | } |
@@ -858,7 +858,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
858 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) | 858 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) |
859 | * (data->acpi_data.state_count + 1)), GFP_KERNEL); | 859 | * (data->acpi_data.state_count + 1)), GFP_KERNEL); |
860 | if (!powernow_table) { | 860 | if (!powernow_table) { |
861 | dprintk("powernow_table memory alloc failure\n"); | 861 | pr_debug("powernow_table memory alloc failure\n"); |
862 | goto err_out; | 862 | goto err_out; |
863 | } | 863 | } |
864 | 864 | ||
@@ -928,7 +928,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, | |||
928 | } | 928 | } |
929 | rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); | 929 | rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); |
930 | if (!(hi & HW_PSTATE_VALID_MASK)) { | 930 | if (!(hi & HW_PSTATE_VALID_MASK)) { |
931 | dprintk("invalid pstate %d, ignoring\n", index); | 931 | pr_debug("invalid pstate %d, ignoring\n", index); |
932 | invalidate_entry(powernow_table, i); | 932 | invalidate_entry(powernow_table, i); |
933 | continue; | 933 | continue; |
934 | } | 934 | } |
@@ -968,7 +968,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, | |||
968 | vid = (control >> VID_SHIFT) & VID_MASK; | 968 | vid = (control >> VID_SHIFT) & VID_MASK; |
969 | } | 969 | } |
970 | 970 | ||
971 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); | 971 | pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); |
972 | 972 | ||
973 | index = fid | (vid<<8); | 973 | index = fid | (vid<<8); |
974 | powernow_table[i].index = index; | 974 | powernow_table[i].index = index; |
@@ -978,7 +978,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, | |||
978 | 978 | ||
979 | /* verify frequency is OK */ | 979 | /* verify frequency is OK */ |
980 | if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { | 980 | if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { |
981 | dprintk("invalid freq %u kHz, ignoring\n", freq); | 981 | pr_debug("invalid freq %u kHz, ignoring\n", freq); |
982 | invalidate_entry(powernow_table, i); | 982 | invalidate_entry(powernow_table, i); |
983 | continue; | 983 | continue; |
984 | } | 984 | } |
@@ -986,7 +986,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, | |||
986 | /* verify voltage is OK - | 986 | /* verify voltage is OK - |
987 | * BIOSs are using "off" to indicate invalid */ | 987 | * BIOSs are using "off" to indicate invalid */ |
988 | if (vid == VID_OFF) { | 988 | if (vid == VID_OFF) { |
989 | dprintk("invalid vid %u, ignoring\n", vid); | 989 | pr_debug("invalid vid %u, ignoring\n", vid); |
990 | invalidate_entry(powernow_table, i); | 990 | invalidate_entry(powernow_table, i); |
991 | continue; | 991 | continue; |
992 | } | 992 | } |
@@ -1047,7 +1047,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, | |||
1047 | int res, i; | 1047 | int res, i; |
1048 | struct cpufreq_freqs freqs; | 1048 | struct cpufreq_freqs freqs; |
1049 | 1049 | ||
1050 | dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); | 1050 | pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); |
1051 | 1051 | ||
1052 | /* fid/vid correctness check for k8 */ | 1052 | /* fid/vid correctness check for k8 */ |
1053 | /* fid are the lower 8 bits of the index we stored into | 1053 | /* fid are the lower 8 bits of the index we stored into |
@@ -1057,18 +1057,18 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, | |||
1057 | fid = data->powernow_table[index].index & 0xFF; | 1057 | fid = data->powernow_table[index].index & 0xFF; |
1058 | vid = (data->powernow_table[index].index & 0xFF00) >> 8; | 1058 | vid = (data->powernow_table[index].index & 0xFF00) >> 8; |
1059 | 1059 | ||
1060 | dprintk("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); | 1060 | pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); |
1061 | 1061 | ||
1062 | if (query_current_values_with_pending_wait(data)) | 1062 | if (query_current_values_with_pending_wait(data)) |
1063 | return 1; | 1063 | return 1; |
1064 | 1064 | ||
1065 | if ((data->currvid == vid) && (data->currfid == fid)) { | 1065 | if ((data->currvid == vid) && (data->currfid == fid)) { |
1066 | dprintk("target matches current values (fid 0x%x, vid 0x%x)\n", | 1066 | pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n", |
1067 | fid, vid); | 1067 | fid, vid); |
1068 | return 0; | 1068 | return 0; |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n", | 1071 | pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n", |
1072 | smp_processor_id(), fid, vid); | 1072 | smp_processor_id(), fid, vid); |
1073 | freqs.old = find_khz_freq_from_fid(data->currfid); | 1073 | freqs.old = find_khz_freq_from_fid(data->currfid); |
1074 | freqs.new = find_khz_freq_from_fid(fid); | 1074 | freqs.new = find_khz_freq_from_fid(fid); |
@@ -1096,7 +1096,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, | |||
1096 | int res, i; | 1096 | int res, i; |
1097 | struct cpufreq_freqs freqs; | 1097 | struct cpufreq_freqs freqs; |
1098 | 1098 | ||
1099 | dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); | 1099 | pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); |
1100 | 1100 | ||
1101 | /* get MSR index for hardware pstate transition */ | 1101 | /* get MSR index for hardware pstate transition */ |
1102 | pstate = index & HW_PSTATE_MASK; | 1102 | pstate = index & HW_PSTATE_MASK; |
@@ -1156,14 +1156,14 @@ static int powernowk8_target(struct cpufreq_policy *pol, | |||
1156 | goto err_out; | 1156 | goto err_out; |
1157 | } | 1157 | } |
1158 | 1158 | ||
1159 | dprintk("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", | 1159 | pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", |
1160 | pol->cpu, targfreq, pol->min, pol->max, relation); | 1160 | pol->cpu, targfreq, pol->min, pol->max, relation); |
1161 | 1161 | ||
1162 | if (query_current_values_with_pending_wait(data)) | 1162 | if (query_current_values_with_pending_wait(data)) |
1163 | goto err_out; | 1163 | goto err_out; |
1164 | 1164 | ||
1165 | if (cpu_family != CPU_HW_PSTATE) { | 1165 | if (cpu_family != CPU_HW_PSTATE) { |
1166 | dprintk("targ: curr fid 0x%x, vid 0x%x\n", | 1166 | pr_debug("targ: curr fid 0x%x, vid 0x%x\n", |
1167 | data->currfid, data->currvid); | 1167 | data->currfid, data->currvid); |
1168 | 1168 | ||
1169 | if ((checkvid != data->currvid) || | 1169 | if ((checkvid != data->currvid) || |
@@ -1319,7 +1319,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1319 | data->currpstate); | 1319 | data->currpstate); |
1320 | else | 1320 | else |
1321 | pol->cur = find_khz_freq_from_fid(data->currfid); | 1321 | pol->cur = find_khz_freq_from_fid(data->currfid); |
1322 | dprintk("policy current frequency %d kHz\n", pol->cur); | 1322 | pr_debug("policy current frequency %d kHz\n", pol->cur); |
1323 | 1323 | ||
1324 | /* min/max the cpu is capable of */ | 1324 | /* min/max the cpu is capable of */ |
1325 | if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { | 1325 | if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { |
@@ -1337,10 +1337,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1337 | cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); | 1337 | cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); |
1338 | 1338 | ||
1339 | if (cpu_family == CPU_HW_PSTATE) | 1339 | if (cpu_family == CPU_HW_PSTATE) |
1340 | dprintk("cpu_init done, current pstate 0x%x\n", | 1340 | pr_debug("cpu_init done, current pstate 0x%x\n", |
1341 | data->currpstate); | 1341 | data->currpstate); |
1342 | else | 1342 | else |
1343 | dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n", | 1343 | pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", |
1344 | data->currfid, data->currvid); | 1344 | data->currfid, data->currvid); |
1345 | 1345 | ||
1346 | per_cpu(powernow_data, pol->cpu) = data; | 1346 | per_cpu(powernow_data, pol->cpu) = data; |
@@ -1586,7 +1586,7 @@ static int __cpuinit powernowk8_init(void) | |||
1586 | /* driver entry point for term */ | 1586 | /* driver entry point for term */ |
1587 | static void __exit powernowk8_exit(void) | 1587 | static void __exit powernowk8_exit(void) |
1588 | { | 1588 | { |
1589 | dprintk("exit\n"); | 1589 | pr_debug("exit\n"); |
1590 | 1590 | ||
1591 | if (boot_cpu_has(X86_FEATURE_CPB)) { | 1591 | if (boot_cpu_has(X86_FEATURE_CPB)) { |
1592 | msrs_free(msrs); | 1592 | msrs_free(msrs); |
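Note: the powernow-k8 hunks above also correct a format mismatch caught during the conversion: control and status are u64, so they are now printed with %llx rather than %x. A standalone sketch (the wrapper function is a placeholder; the message string is the one from the driver):

#include <linux/printk.h>
#include <linux/types.h>

static void report_invalid_regs(u64 control, u64 status)
{
	/* u64 needs the ll length modifier; plain %x consumes only 32 bits */
	pr_debug("Invalid control/status registers (%llx - %llx)\n",
		 control, status);
}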
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h index df3529b1c02d..3744d26cdc2b 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/drivers/cpufreq/powernow-k8.h | |||
@@ -211,8 +211,6 @@ struct pst_s { | |||
211 | u8 vid; | 211 | u8 vid; |
212 | }; | 212 | }; |
213 | 213 | ||
214 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg) | ||
215 | |||
216 | static int core_voltage_pre_transition(struct powernow_k8_data *data, | 214 | static int core_voltage_pre_transition(struct powernow_k8_data *data, |
217 | u32 reqvid, u32 regfid); | 215 | u32 reqvid, u32 regfid); |
218 | static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid); | 216 | static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid); |
diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index 435a996a613a..1e205e6b1727 100644 --- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c | |||
@@ -29,8 +29,6 @@ | |||
29 | 29 | ||
30 | static __u8 __iomem *cpuctl; | 30 | static __u8 __iomem *cpuctl; |
31 | 31 | ||
32 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
33 | "sc520_freq", msg) | ||
34 | #define PFX "sc520_freq: " | 32 | #define PFX "sc520_freq: " |
35 | 33 | ||
36 | static struct cpufreq_frequency_table sc520_freq_table[] = { | 34 | static struct cpufreq_frequency_table sc520_freq_table[] = { |
@@ -66,7 +64,7 @@ static void sc520_freq_set_cpu_state(unsigned int state) | |||
66 | 64 | ||
67 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 65 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
68 | 66 | ||
69 | dprintk("attempting to set frequency to %i kHz\n", | 67 | pr_debug("attempting to set frequency to %i kHz\n", |
70 | sc520_freq_table[state].frequency); | 68 | sc520_freq_table[state].frequency); |
71 | 69 | ||
72 | local_irq_disable(); | 70 | local_irq_disable(); |
@@ -161,7 +159,7 @@ static int __init sc520_freq_init(void) | |||
161 | /* Test if we have the right hardware */ | 159 | /* Test if we have the right hardware */ |
162 | if (c->x86_vendor != X86_VENDOR_AMD || | 160 | if (c->x86_vendor != X86_VENDOR_AMD || |
163 | c->x86 != 4 || c->x86_model != 9) { | 161 | c->x86 != 4 || c->x86_model != 9) { |
164 | dprintk("no Elan SC520 processor found!\n"); | 162 | pr_debug("no Elan SC520 processor found!\n"); |
165 | return -ENODEV; | 163 | return -ENODEV; |
166 | } | 164 | } |
167 | cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); | 165 | cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 9b1ff37de46a..6ea3455def21 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c | |||
@@ -29,9 +29,6 @@ | |||
29 | #define PFX "speedstep-centrino: " | 29 | #define PFX "speedstep-centrino: " |
30 | #define MAINTAINER "cpufreq@vger.kernel.org" | 30 | #define MAINTAINER "cpufreq@vger.kernel.org" |
31 | 31 | ||
32 | #define dprintk(msg...) \ | ||
33 | cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) | ||
34 | |||
35 | #define INTEL_MSR_RANGE (0xffff) | 32 | #define INTEL_MSR_RANGE (0xffff) |
36 | 33 | ||
37 | struct cpu_id | 34 | struct cpu_id |
@@ -244,7 +241,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) | |||
244 | 241 | ||
245 | if (model->cpu_id == NULL) { | 242 | if (model->cpu_id == NULL) { |
246 | /* No match at all */ | 243 | /* No match at all */ |
247 | dprintk("no support for CPU model \"%s\": " | 244 | pr_debug("no support for CPU model \"%s\": " |
248 | "send /proc/cpuinfo to " MAINTAINER "\n", | 245 | "send /proc/cpuinfo to " MAINTAINER "\n", |
249 | cpu->x86_model_id); | 246 | cpu->x86_model_id); |
250 | return -ENOENT; | 247 | return -ENOENT; |
@@ -252,15 +249,15 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) | |||
252 | 249 | ||
253 | if (model->op_points == NULL) { | 250 | if (model->op_points == NULL) { |
254 | /* Matched a non-match */ | 251 | /* Matched a non-match */ |
255 | dprintk("no table support for CPU model \"%s\"\n", | 252 | pr_debug("no table support for CPU model \"%s\"\n", |
256 | cpu->x86_model_id); | 253 | cpu->x86_model_id); |
257 | dprintk("try using the acpi-cpufreq driver\n"); | 254 | pr_debug("try using the acpi-cpufreq driver\n"); |
258 | return -ENOENT; | 255 | return -ENOENT; |
259 | } | 256 | } |
260 | 257 | ||
261 | per_cpu(centrino_model, policy->cpu) = model; | 258 | per_cpu(centrino_model, policy->cpu) = model; |
262 | 259 | ||
263 | dprintk("found \"%s\": max frequency: %dkHz\n", | 260 | pr_debug("found \"%s\": max frequency: %dkHz\n", |
264 | model->model_name, model->max_freq); | 261 | model->model_name, model->max_freq); |
265 | 262 | ||
266 | return 0; | 263 | return 0; |
@@ -369,7 +366,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
369 | per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; | 366 | per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; |
370 | 367 | ||
371 | if (!per_cpu(centrino_cpu, policy->cpu)) { | 368 | if (!per_cpu(centrino_cpu, policy->cpu)) { |
372 | dprintk("found unsupported CPU with " | 369 | pr_debug("found unsupported CPU with " |
373 | "Enhanced SpeedStep: send /proc/cpuinfo to " | 370 | "Enhanced SpeedStep: send /proc/cpuinfo to " |
374 | MAINTAINER "\n"); | 371 | MAINTAINER "\n"); |
375 | return -ENODEV; | 372 | return -ENODEV; |
@@ -385,7 +382,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
385 | 382 | ||
386 | if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { | 383 | if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { |
387 | l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; | 384 | l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; |
388 | dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); | 385 | pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l); |
389 | wrmsr(MSR_IA32_MISC_ENABLE, l, h); | 386 | wrmsr(MSR_IA32_MISC_ENABLE, l, h); |
390 | 387 | ||
391 | /* check to see if it stuck */ | 388 | /* check to see if it stuck */ |
@@ -402,7 +399,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
402 | /* 10uS transition latency */ | 399 | /* 10uS transition latency */ |
403 | policy->cur = freq; | 400 | policy->cur = freq; |
404 | 401 | ||
405 | dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur); | 402 | pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur); |
406 | 403 | ||
407 | ret = cpufreq_frequency_table_cpuinfo(policy, | 404 | ret = cpufreq_frequency_table_cpuinfo(policy, |
408 | per_cpu(centrino_model, policy->cpu)->op_points); | 405 | per_cpu(centrino_model, policy->cpu)->op_points); |
@@ -498,7 +495,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
498 | good_cpu = j; | 495 | good_cpu = j; |
499 | 496 | ||
500 | if (good_cpu >= nr_cpu_ids) { | 497 | if (good_cpu >= nr_cpu_ids) { |
501 | dprintk("couldn't limit to CPUs in this domain\n"); | 498 | pr_debug("couldn't limit to CPUs in this domain\n"); |
502 | retval = -EAGAIN; | 499 | retval = -EAGAIN; |
503 | if (first_cpu) { | 500 | if (first_cpu) { |
504 | /* We haven't started the transition yet. */ | 501 | /* We haven't started the transition yet. */ |
@@ -512,7 +509,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
512 | if (first_cpu) { | 509 | if (first_cpu) { |
513 | rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); | 510 | rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); |
514 | if (msr == (oldmsr & 0xffff)) { | 511 | if (msr == (oldmsr & 0xffff)) { |
515 | dprintk("no change needed - msr was and needs " | 512 | pr_debug("no change needed - msr was and needs " |
516 | "to be %x\n", oldmsr); | 513 | "to be %x\n", oldmsr); |
517 | retval = 0; | 514 | retval = 0; |
518 | goto out; | 515 | goto out; |
@@ -521,7 +518,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
521 | freqs.old = extract_clock(oldmsr, cpu, 0); | 518 | freqs.old = extract_clock(oldmsr, cpu, 0); |
522 | freqs.new = extract_clock(msr, cpu, 0); | 519 | freqs.new = extract_clock(msr, cpu, 0); |
523 | 520 | ||
524 | dprintk("target=%dkHz old=%d new=%d msr=%04x\n", | 521 | pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", |
525 | target_freq, freqs.old, freqs.new, msr); | 522 | target_freq, freqs.old, freqs.new, msr); |
526 | 523 | ||
527 | for_each_cpu(k, policy->cpus) { | 524 | for_each_cpu(k, policy->cpus) { |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index 561758e95180..a748ce782fee 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c | |||
@@ -53,10 +53,6 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | 55 | ||
56 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
57 | "speedstep-ich", msg) | ||
58 | |||
59 | |||
60 | /** | 56 | /** |
61 | * speedstep_find_register - read the PMBASE address | 57 | * speedstep_find_register - read the PMBASE address |
62 | * | 58 | * |
@@ -80,7 +76,7 @@ static int speedstep_find_register(void) | |||
80 | return -ENODEV; | 76 | return -ENODEV; |
81 | } | 77 | } |
82 | 78 | ||
83 | dprintk("pmbase is 0x%x\n", pmbase); | 79 | pr_debug("pmbase is 0x%x\n", pmbase); |
84 | return 0; | 80 | return 0; |
85 | } | 81 | } |
86 | 82 | ||
@@ -106,13 +102,13 @@ static void speedstep_set_state(unsigned int state) | |||
106 | /* read state */ | 102 | /* read state */ |
107 | value = inb(pmbase + 0x50); | 103 | value = inb(pmbase + 0x50); |
108 | 104 | ||
109 | dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); | 105 | pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); |
110 | 106 | ||
111 | /* write new state */ | 107 | /* write new state */ |
112 | value &= 0xFE; | 108 | value &= 0xFE; |
113 | value |= state; | 109 | value |= state; |
114 | 110 | ||
115 | dprintk("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); | 111 | pr_debug("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); |
116 | 112 | ||
117 | /* Disable bus master arbitration */ | 113 | /* Disable bus master arbitration */ |
118 | pm2_blk = inb(pmbase + 0x20); | 114 | pm2_blk = inb(pmbase + 0x20); |
@@ -132,10 +128,10 @@ static void speedstep_set_state(unsigned int state) | |||
132 | /* Enable IRQs */ | 128 | /* Enable IRQs */ |
133 | local_irq_restore(flags); | 129 | local_irq_restore(flags); |
134 | 130 | ||
135 | dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); | 131 | pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); |
136 | 132 | ||
137 | if (state == (value & 0x1)) | 133 | if (state == (value & 0x1)) |
138 | dprintk("change to %u MHz succeeded\n", | 134 | pr_debug("change to %u MHz succeeded\n", |
139 | speedstep_get_frequency(speedstep_processor) / 1000); | 135 | speedstep_get_frequency(speedstep_processor) / 1000); |
140 | else | 136 | else |
141 | printk(KERN_ERR "cpufreq: change failed - I/O error\n"); | 137 | printk(KERN_ERR "cpufreq: change failed - I/O error\n"); |
@@ -165,7 +161,7 @@ static int speedstep_activate(void) | |||
165 | pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value); | 161 | pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value); |
166 | if (!(value & 0x08)) { | 162 | if (!(value & 0x08)) { |
167 | value |= 0x08; | 163 | value |= 0x08; |
168 | dprintk("activating SpeedStep (TM) registers\n"); | 164 | pr_debug("activating SpeedStep (TM) registers\n"); |
169 | pci_write_config_word(speedstep_chipset_dev, 0x00A0, value); | 165 | pci_write_config_word(speedstep_chipset_dev, 0x00A0, value); |
170 | } | 166 | } |
171 | 167 | ||
@@ -218,7 +214,7 @@ static unsigned int speedstep_detect_chipset(void) | |||
218 | return 2; /* 2-M */ | 214 | return 2; /* 2-M */ |
219 | 215 | ||
220 | if (hostbridge->revision < 5) { | 216 | if (hostbridge->revision < 5) { |
221 | dprintk("hostbridge does not support speedstep\n"); | 217 | pr_debug("hostbridge does not support speedstep\n"); |
222 | speedstep_chipset_dev = NULL; | 218 | speedstep_chipset_dev = NULL; |
223 | pci_dev_put(hostbridge); | 219 | pci_dev_put(hostbridge); |
224 | return 0; | 220 | return 0; |
@@ -246,7 +242,7 @@ static unsigned int speedstep_get(unsigned int cpu) | |||
246 | if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) | 242 | if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) |
247 | BUG(); | 243 | BUG(); |
248 | 244 | ||
249 | dprintk("detected %u kHz as current frequency\n", speed); | 245 | pr_debug("detected %u kHz as current frequency\n", speed); |
250 | return speed; | 246 | return speed; |
251 | } | 247 | } |
252 | 248 | ||
@@ -276,7 +272,7 @@ static int speedstep_target(struct cpufreq_policy *policy, | |||
276 | freqs.new = speedstep_freqs[newstate].frequency; | 272 | freqs.new = speedstep_freqs[newstate].frequency; |
277 | freqs.cpu = policy->cpu; | 273 | freqs.cpu = policy->cpu; |
278 | 274 | ||
279 | dprintk("transiting from %u to %u kHz\n", freqs.old, freqs.new); | 275 | pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); |
280 | 276 | ||
281 | /* no transition necessary */ | 277 | /* no transition necessary */ |
282 | if (freqs.old == freqs.new) | 278 | if (freqs.old == freqs.new) |
@@ -351,7 +347,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
351 | if (!speed) | 347 | if (!speed) |
352 | return -EIO; | 348 | return -EIO; |
353 | 349 | ||
354 | dprintk("currently at %s speed setting - %i MHz\n", | 350 | pr_debug("currently at %s speed setting - %i MHz\n", |
355 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) | 351 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) |
356 | ? "low" : "high", | 352 | ? "low" : "high", |
357 | (speed / 1000)); | 353 | (speed / 1000)); |
@@ -405,14 +401,14 @@ static int __init speedstep_init(void) | |||
405 | /* detect processor */ | 401 | /* detect processor */ |
406 | speedstep_processor = speedstep_detect_processor(); | 402 | speedstep_processor = speedstep_detect_processor(); |
407 | if (!speedstep_processor) { | 403 | if (!speedstep_processor) { |
408 | dprintk("Intel(R) SpeedStep(TM) capable processor " | 404 | pr_debug("Intel(R) SpeedStep(TM) capable processor " |
409 | "not found\n"); | 405 | "not found\n"); |
410 | return -ENODEV; | 406 | return -ENODEV; |
411 | } | 407 | } |
412 | 408 | ||
413 | /* detect chipset */ | 409 | /* detect chipset */ |
414 | if (!speedstep_detect_chipset()) { | 410 | if (!speedstep_detect_chipset()) { |
415 | dprintk("Intel(R) SpeedStep(TM) for this chipset not " | 411 | pr_debug("Intel(R) SpeedStep(TM) for this chipset not " |
416 | "(yet) available.\n"); | 412 | "(yet) available.\n"); |
417 | return -ENODEV; | 413 | return -ENODEV; |
418 | } | 414 | } |
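The speedstep-ich, speedstep-lib and speedstep-smi hunks above and below all make the same substitution: the driver-local dprintk() macro built on cpufreq_debug_printk() is dropped and every call site switches to pr_debug(), which is compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled. A minimal sketch of the resulting pattern for a hypothetical module ("mydrv" and its init/exit functions are invented, not part of this patch); the pr_fmt define is the usual way to keep a per-driver prefix once the custom macro is gone:

/* Sketch only: the dprintk() -> pr_debug() conversion applied to a
 * hypothetical module called "mydrv"; nothing here is from the patch. */
#define pr_fmt(fmt) "mydrv: " fmt      /* keeps the per-driver prefix */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init mydrv_init(void)
{
        /* Compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is set; with
         * dynamic debug it can be enabled at runtime through
         * /sys/kernel/debug/dynamic_debug/control. */
        pr_debug("probing, example value 0x%x\n", 0x1000);
        return 0;
}

static void __exit mydrv_exit(void)
{
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");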
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index a94ec6be69fa..8af2d2fd9d51 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c | |||
@@ -18,9 +18,6 @@ | |||
18 | #include <asm/tsc.h> | 18 | #include <asm/tsc.h> |
19 | #include "speedstep-lib.h" | 19 | #include "speedstep-lib.h" |
20 | 20 | ||
21 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
22 | "speedstep-lib", msg) | ||
23 | |||
24 | #define PFX "speedstep-lib: " | 21 | #define PFX "speedstep-lib: " |
25 | 22 | ||
26 | #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK | 23 | #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK |
@@ -75,7 +72,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) | |||
75 | 72 | ||
76 | /* read MSR 0x2a - we only need the low 32 bits */ | 73 | /* read MSR 0x2a - we only need the low 32 bits */ |
77 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); | 74 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); |
78 | dprintk("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); | 75 | pr_debug("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); |
79 | msr_tmp = msr_lo; | 76 | msr_tmp = msr_lo; |
80 | 77 | ||
81 | /* decode the FSB */ | 78 | /* decode the FSB */ |
@@ -89,7 +86,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) | |||
89 | 86 | ||
90 | /* decode the multiplier */ | 87 | /* decode the multiplier */ |
91 | if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { | 88 | if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { |
92 | dprintk("workaround for early PIIIs\n"); | 89 | pr_debug("workaround for early PIIIs\n"); |
93 | msr_lo &= 0x03c00000; | 90 | msr_lo &= 0x03c00000; |
94 | } else | 91 | } else |
95 | msr_lo &= 0x0bc00000; | 92 | msr_lo &= 0x0bc00000; |
@@ -100,7 +97,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) | |||
100 | j++; | 97 | j++; |
101 | } | 98 | } |
102 | 99 | ||
103 | dprintk("speed is %u\n", | 100 | pr_debug("speed is %u\n", |
104 | (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); | 101 | (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); |
105 | 102 | ||
106 | return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; | 103 | return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; |
@@ -112,7 +109,7 @@ static unsigned int pentiumM_get_frequency(void) | |||
112 | u32 msr_lo, msr_tmp; | 109 | u32 msr_lo, msr_tmp; |
113 | 110 | ||
114 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); | 111 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); |
115 | dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); | 112 | pr_debug("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); |
116 | 113 | ||
117 | /* see table B-2 of 24547212.pdf */ | 114 | /* see table B-2 of 24547212.pdf */ |
118 | if (msr_lo & 0x00040000) { | 115 | if (msr_lo & 0x00040000) { |
@@ -122,7 +119,7 @@ static unsigned int pentiumM_get_frequency(void) | |||
122 | } | 119 | } |
123 | 120 | ||
124 | msr_tmp = (msr_lo >> 22) & 0x1f; | 121 | msr_tmp = (msr_lo >> 22) & 0x1f; |
125 | dprintk("bits 22-26 are 0x%x, speed is %u\n", | 122 | pr_debug("bits 22-26 are 0x%x, speed is %u\n", |
126 | msr_tmp, (msr_tmp * 100 * 1000)); | 123 | msr_tmp, (msr_tmp * 100 * 1000)); |
127 | 124 | ||
128 | return msr_tmp * 100 * 1000; | 125 | return msr_tmp * 100 * 1000; |
@@ -160,11 +157,11 @@ static unsigned int pentium_core_get_frequency(void) | |||
160 | } | 157 | } |
161 | 158 | ||
162 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); | 159 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); |
163 | dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", | 160 | pr_debug("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", |
164 | msr_lo, msr_tmp); | 161 | msr_lo, msr_tmp); |
165 | 162 | ||
166 | msr_tmp = (msr_lo >> 22) & 0x1f; | 163 | msr_tmp = (msr_lo >> 22) & 0x1f; |
167 | dprintk("bits 22-26 are 0x%x, speed is %u\n", | 164 | pr_debug("bits 22-26 are 0x%x, speed is %u\n", |
168 | msr_tmp, (msr_tmp * fsb)); | 165 | msr_tmp, (msr_tmp * fsb)); |
169 | 166 | ||
170 | ret = (msr_tmp * fsb); | 167 | ret = (msr_tmp * fsb); |
@@ -190,7 +187,7 @@ static unsigned int pentium4_get_frequency(void) | |||
190 | 187 | ||
191 | rdmsr(0x2c, msr_lo, msr_hi); | 188 | rdmsr(0x2c, msr_lo, msr_hi); |
192 | 189 | ||
193 | dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); | 190 | pr_debug("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); |
194 | 191 | ||
195 | /* decode the FSB: see IA-32 Intel (C) Architecture Software | 192 | /* decode the FSB: see IA-32 Intel (C) Architecture Software |
196 | * Developer's Manual, Volume 3: System Programming Guide, | 193 |
@@ -217,7 +214,7 @@ static unsigned int pentium4_get_frequency(void) | |||
217 | /* Multiplier. */ | 214 | /* Multiplier. */ |
218 | mult = msr_lo >> 24; | 215 | mult = msr_lo >> 24; |
219 | 216 | ||
220 | dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", | 217 | pr_debug("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", |
221 | fsb, mult, (fsb * mult)); | 218 | fsb, mult, (fsb * mult)); |
222 | 219 | ||
223 | ret = (fsb * mult); | 220 | ret = (fsb * mult); |
@@ -257,7 +254,7 @@ unsigned int speedstep_detect_processor(void) | |||
257 | struct cpuinfo_x86 *c = &cpu_data(0); | 254 | struct cpuinfo_x86 *c = &cpu_data(0); |
258 | u32 ebx, msr_lo, msr_hi; | 255 | u32 ebx, msr_lo, msr_hi; |
259 | 256 | ||
260 | dprintk("x86: %x, model: %x\n", c->x86, c->x86_model); | 257 | pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model); |
261 | 258 | ||
262 | if ((c->x86_vendor != X86_VENDOR_INTEL) || | 259 | if ((c->x86_vendor != X86_VENDOR_INTEL) || |
263 | ((c->x86 != 6) && (c->x86 != 0xF))) | 260 | ((c->x86 != 6) && (c->x86 != 0xF))) |
@@ -272,7 +269,7 @@ unsigned int speedstep_detect_processor(void) | |||
272 | ebx = cpuid_ebx(0x00000001); | 269 | ebx = cpuid_ebx(0x00000001); |
273 | ebx &= 0x000000FF; | 270 | ebx &= 0x000000FF; |
274 | 271 | ||
275 | dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); | 272 | pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); |
276 | 273 | ||
277 | switch (c->x86_mask) { | 274 | switch (c->x86_mask) { |
278 | case 4: | 275 | case 4: |
@@ -327,7 +324,7 @@ unsigned int speedstep_detect_processor(void) | |||
327 | /* cpuid_ebx(1) is 0x04 for desktop PIII, | 324 | /* cpuid_ebx(1) is 0x04 for desktop PIII, |
328 | * 0x06 for mobile PIII-M */ | 325 | * 0x06 for mobile PIII-M */ |
329 | ebx = cpuid_ebx(0x00000001); | 326 | ebx = cpuid_ebx(0x00000001); |
330 | dprintk("ebx is %x\n", ebx); | 327 | pr_debug("ebx is %x\n", ebx); |
331 | 328 | ||
332 | ebx &= 0x000000FF; | 329 | ebx &= 0x000000FF; |
333 | 330 | ||
@@ -344,7 +341,7 @@ unsigned int speedstep_detect_processor(void) | |||
344 | /* all mobile PIII Coppermines have FSB 100 MHz | 341 | /* all mobile PIII Coppermines have FSB 100 MHz |
345 | * ==> sort out a few desktop PIIIs. */ | 342 | * ==> sort out a few desktop PIIIs. */ |
346 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); | 343 | rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); |
347 | dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", | 344 | pr_debug("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", |
348 | msr_lo, msr_hi); | 345 | msr_lo, msr_hi); |
349 | msr_lo &= 0x00c0000; | 346 | msr_lo &= 0x00c0000; |
350 | if (msr_lo != 0x0080000) | 347 | if (msr_lo != 0x0080000) |
@@ -357,12 +354,12 @@ unsigned int speedstep_detect_processor(void) | |||
357 | * bit 56 or 57 is set | 354 | * bit 56 or 57 is set |
358 | */ | 355 | */ |
359 | rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); | 356 | rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); |
360 | dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", | 357 | pr_debug("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", |
361 | msr_lo, msr_hi); | 358 | msr_lo, msr_hi); |
362 | if ((msr_hi & (1<<18)) && | 359 | if ((msr_hi & (1<<18)) && |
363 | (relaxed_check ? 1 : (msr_hi & (3<<24)))) { | 360 | (relaxed_check ? 1 : (msr_hi & (3<<24)))) { |
364 | if (c->x86_mask == 0x01) { | 361 | if (c->x86_mask == 0x01) { |
365 | dprintk("early PIII version\n"); | 362 | pr_debug("early PIII version\n"); |
366 | return SPEEDSTEP_CPU_PIII_C_EARLY; | 363 | return SPEEDSTEP_CPU_PIII_C_EARLY; |
367 | } else | 364 | } else |
368 | return SPEEDSTEP_CPU_PIII_C; | 365 | return SPEEDSTEP_CPU_PIII_C; |
@@ -393,14 +390,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, | |||
393 | if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) | 390 | if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) |
394 | return -EINVAL; | 391 | return -EINVAL; |
395 | 392 | ||
396 | dprintk("trying to determine both speeds\n"); | 393 | pr_debug("trying to determine both speeds\n"); |
397 | 394 | ||
398 | /* get current speed */ | 395 | /* get current speed */ |
399 | prev_speed = speedstep_get_frequency(processor); | 396 | prev_speed = speedstep_get_frequency(processor); |
400 | if (!prev_speed) | 397 | if (!prev_speed) |
401 | return -EIO; | 398 | return -EIO; |
402 | 399 | ||
403 | dprintk("previous speed is %u\n", prev_speed); | 400 | pr_debug("previous speed is %u\n", prev_speed); |
404 | 401 | ||
405 | local_irq_save(flags); | 402 | local_irq_save(flags); |
406 | 403 | ||
@@ -412,7 +409,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, | |||
412 | goto out; | 409 | goto out; |
413 | } | 410 | } |
414 | 411 | ||
415 | dprintk("low speed is %u\n", *low_speed); | 412 | pr_debug("low speed is %u\n", *low_speed); |
416 | 413 | ||
417 | /* start latency measurement */ | 414 | /* start latency measurement */ |
418 | if (transition_latency) | 415 | if (transition_latency) |
@@ -431,7 +428,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, | |||
431 | goto out; | 428 | goto out; |
432 | } | 429 | } |
433 | 430 | ||
434 | dprintk("high speed is %u\n", *high_speed); | 431 | pr_debug("high speed is %u\n", *high_speed); |
435 | 432 | ||
436 | if (*low_speed == *high_speed) { | 433 | if (*low_speed == *high_speed) { |
437 | ret = -ENODEV; | 434 | ret = -ENODEV; |
@@ -445,7 +442,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, | |||
445 | if (transition_latency) { | 442 | if (transition_latency) { |
446 | *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + | 443 | *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + |
447 | tv2.tv_usec - tv1.tv_usec; | 444 | tv2.tv_usec - tv1.tv_usec; |
448 | dprintk("transition latency is %u uSec\n", *transition_latency); | 445 | pr_debug("transition latency is %u uSec\n", *transition_latency); |
449 | 446 | ||
450 | /* convert uSec to nSec and add 20% for safety reasons */ | 447 | /* convert uSec to nSec and add 20% for safety reasons */ |
451 | *transition_latency *= 1200; | 448 | *transition_latency *= 1200; |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h index 70d9cea1219d..70d9cea1219d 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h +++ b/drivers/cpufreq/speedstep-lib.h | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index 91bc25b67bc1..c76ead3490bf 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c | |||
@@ -55,9 +55,6 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { | |||
55 | * of DMA activity going on? */ | 55 | * of DMA activity going on? */ |
56 | #define SMI_TRIES 5 | 56 | #define SMI_TRIES 5 |
57 | 57 | ||
58 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
59 | "speedstep-smi", msg) | ||
60 | |||
61 | /** | 58 | /** |
62 | * speedstep_smi_ownership | 59 | * speedstep_smi_ownership |
63 | */ | 60 | */ |
@@ -70,7 +67,7 @@ static int speedstep_smi_ownership(void) | |||
70 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | 67 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); |
71 | magic = virt_to_phys(magic_data); | 68 | magic = virt_to_phys(magic_data); |
72 | 69 | ||
73 | dprintk("trying to obtain ownership with command %x at port %x\n", | 70 | pr_debug("trying to obtain ownership with command %x at port %x\n", |
74 | command, smi_port); | 71 | command, smi_port); |
75 | 72 | ||
76 | __asm__ __volatile__( | 73 | __asm__ __volatile__( |
@@ -85,7 +82,7 @@ static int speedstep_smi_ownership(void) | |||
85 | : "memory" | 82 | : "memory" |
86 | ); | 83 | ); |
87 | 84 | ||
88 | dprintk("result is %x\n", result); | 85 | pr_debug("result is %x\n", result); |
89 | 86 | ||
90 | return result; | 87 | return result; |
91 | } | 88 | } |
@@ -106,13 +103,13 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) | |||
106 | u32 function = GET_SPEEDSTEP_FREQS; | 103 | u32 function = GET_SPEEDSTEP_FREQS; |
107 | 104 | ||
108 | if (!(ist_info.event & 0xFFFF)) { | 105 | if (!(ist_info.event & 0xFFFF)) { |
109 | dprintk("bug #1422 -- can't read freqs from BIOS\n"); | 106 | pr_debug("bug #1422 -- can't read freqs from BIOS\n"); |
110 | return -ENODEV; | 107 | return -ENODEV; |
111 | } | 108 | } |
112 | 109 | ||
113 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | 110 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); |
114 | 111 | ||
115 | dprintk("trying to determine frequencies with command %x at port %x\n", | 112 | pr_debug("trying to determine frequencies with command %x at port %x\n", |
116 | command, smi_port); | 113 | command, smi_port); |
117 | 114 | ||
118 | __asm__ __volatile__( | 115 | __asm__ __volatile__( |
@@ -129,7 +126,7 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) | |||
129 | "d" (smi_port), "S" (0), "D" (0) | 126 | "d" (smi_port), "S" (0), "D" (0) |
130 | ); | 127 | ); |
131 | 128 | ||
132 | dprintk("result %x, low_freq %u, high_freq %u\n", | 129 | pr_debug("result %x, low_freq %u, high_freq %u\n", |
133 | result, low_mhz, high_mhz); | 130 | result, low_mhz, high_mhz); |
134 | 131 | ||
135 | /* abort if results are obviously incorrect... */ | 132 | /* abort if results are obviously incorrect... */ |
@@ -154,7 +151,7 @@ static int speedstep_get_state(void) | |||
154 | 151 | ||
155 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | 152 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); |
156 | 153 | ||
157 | dprintk("trying to determine current setting with command %x " | 154 | pr_debug("trying to determine current setting with command %x " |
158 | "at port %x\n", command, smi_port); | 155 | "at port %x\n", command, smi_port); |
159 | 156 | ||
160 | __asm__ __volatile__( | 157 | __asm__ __volatile__( |
@@ -168,7 +165,7 @@ static int speedstep_get_state(void) | |||
168 | "d" (smi_port), "S" (0), "D" (0) | 165 | "d" (smi_port), "S" (0), "D" (0) |
169 | ); | 166 | ); |
170 | 167 | ||
171 | dprintk("state is %x, result is %x\n", state, result); | 168 | pr_debug("state is %x, result is %x\n", state, result); |
172 | 169 | ||
173 | return state & 1; | 170 | return state & 1; |
174 | } | 171 | } |
@@ -194,13 +191,13 @@ static void speedstep_set_state(unsigned int state) | |||
194 | 191 | ||
195 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | 192 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); |
196 | 193 | ||
197 | dprintk("trying to set frequency to state %u " | 194 | pr_debug("trying to set frequency to state %u " |
198 | "with command %x at port %x\n", | 195 | "with command %x at port %x\n", |
199 | state, command, smi_port); | 196 | state, command, smi_port); |
200 | 197 | ||
201 | do { | 198 | do { |
202 | if (retry) { | 199 | if (retry) { |
203 | dprintk("retry %u, previous result %u, waiting...\n", | 200 | pr_debug("retry %u, previous result %u, waiting...\n", |
204 | retry, result); | 201 | retry, result); |
205 | mdelay(retry * 50); | 202 | mdelay(retry * 50); |
206 | } | 203 | } |
@@ -221,7 +218,7 @@ static void speedstep_set_state(unsigned int state) | |||
221 | local_irq_restore(flags); | 218 | local_irq_restore(flags); |
222 | 219 | ||
223 | if (new_state == state) | 220 | if (new_state == state) |
224 | dprintk("change to %u MHz succeeded after %u tries " | 221 | pr_debug("change to %u MHz succeeded after %u tries " |
225 | "with result %u\n", | 222 | "with result %u\n", |
226 | (speedstep_freqs[new_state].frequency / 1000), | 223 | (speedstep_freqs[new_state].frequency / 1000), |
227 | retry, result); | 224 | retry, result); |
@@ -292,7 +289,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
292 | 289 | ||
293 | result = speedstep_smi_ownership(); | 290 | result = speedstep_smi_ownership(); |
294 | if (result) { | 291 | if (result) { |
295 | dprintk("fails in acquiring ownership of a SMI interface.\n"); | 292 | pr_debug("fails in acquiring ownership of a SMI interface.\n"); |
296 | return -EINVAL; | 293 | return -EINVAL; |
297 | } | 294 | } |
298 | 295 | ||
@@ -304,7 +301,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
304 | if (result) { | 301 | if (result) { |
305 | /* fall back to speedstep_lib.c detection mechanism: | 302 |
306 | * try both states out */ | 303 | * try both states out */ |
307 | dprintk("could not detect low and high frequencies " | 304 | pr_debug("could not detect low and high frequencies " |
308 | "by SMI call.\n"); | 305 | "by SMI call.\n"); |
309 | result = speedstep_get_freqs(speedstep_processor, | 306 | result = speedstep_get_freqs(speedstep_processor, |
310 | low, high, | 307 | low, high, |
@@ -312,18 +309,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
312 | &speedstep_set_state); | 309 | &speedstep_set_state); |
313 | 310 | ||
314 | if (result) { | 311 | if (result) { |
315 | dprintk("could not detect two different speeds" | 312 | pr_debug("could not detect two different speeds" |
316 | " -- aborting.\n"); | 313 | " -- aborting.\n"); |
317 | return result; | 314 | return result; |
318 | } else | 315 | } else |
319 | dprintk("workaround worked.\n"); | 316 | pr_debug("workaround worked.\n"); |
320 | } | 317 | } |
321 | 318 | ||
322 | /* get current speed setting */ | 319 | /* get current speed setting */ |
323 | state = speedstep_get_state(); | 320 | state = speedstep_get_state(); |
324 | speed = speedstep_freqs[state].frequency; | 321 | speed = speedstep_freqs[state].frequency; |
325 | 322 | ||
326 | dprintk("currently at %s speed setting - %i MHz\n", | 323 | pr_debug("currently at %s speed setting - %i MHz\n", |
327 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) | 324 | (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) |
328 | ? "low" : "high", | 325 | ? "low" : "high", |
329 | (speed / 1000)); | 326 | (speed / 1000)); |
@@ -360,7 +357,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) | |||
360 | int result = speedstep_smi_ownership(); | 357 | int result = speedstep_smi_ownership(); |
361 | 358 | ||
362 | if (result) | 359 | if (result) |
363 | dprintk("fails in re-acquiring ownership of a SMI interface.\n"); | 360 | pr_debug("fails in re-acquiring ownership of a SMI interface.\n"); |
364 | 361 | ||
365 | return result; | 362 | return result; |
366 | } | 363 | } |
@@ -403,12 +400,12 @@ static int __init speedstep_init(void) | |||
403 | } | 400 | } |
404 | 401 | ||
405 | if (!speedstep_processor) { | 402 | if (!speedstep_processor) { |
406 | dprintk("No supported Intel CPU detected.\n"); | 403 | pr_debug("No supported Intel CPU detected.\n"); |
407 | return -ENODEV; | 404 | return -ENODEV; |
408 | } | 405 | } |
409 | 406 | ||
410 | dprintk("signature:0x%.8lx, command:0x%.8lx, " | 407 | pr_debug("signature:0x%.8ulx, command:0x%.8ulx, " |
411 | "event:0x%.8lx, perf_level:0x%.8lx.\n", | 408 | "event:0x%.8ulx, perf_level:0x%.8ulx.\n", |
412 | ist_info.signature, ist_info.command, | 409 | ist_info.signature, ist_info.command, |
413 | ist_info.event, ist_info.perf_level); | 410 | ist_info.event, ist_info.perf_level); |
414 | 411 | ||
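A side note on the last speedstep-smi hunk: printk parses the new "0x%.8ulx" as a "%.8u" conversion followed by the literal text "lx", which is unlikely to be the intended output; printing a value as zero-padded hex takes "%.8x" or "%.8lx", matching the argument type (the replaced line used "%.8lx"). A small user-space illustration of how the two format strings behave, not taken from the patch:

/* Illustration only: what "%.8ulx" produces versus a hex conversion. */
#include <stdio.h>

int main(void)
{
        unsigned int v = 0x12345678u;

        printf("%.8ulx\n", v);   /* "305419896lx": %u conversion, then literal "lx" */
        printf("0x%.8x\n", v);   /* "0x12345678": the usual zero-padded hex form */
        return 0;
}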
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index c1f0045ceb8e..af8e7b1aa290 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c | |||
@@ -1019,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci, | |||
1019 | struct ppc4xx_edac_pdata *pdata = NULL; | 1019 | struct ppc4xx_edac_pdata *pdata = NULL; |
1020 | const struct device_node *np = op->dev.of_node; | 1020 | const struct device_node *np = op->dev.of_node; |
1021 | 1021 | ||
1022 | if (op->dev.of_match == NULL) | 1022 | if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL) |
1023 | return -EINVAL; | 1023 | return -EINVAL; |
1024 | 1024 | ||
1025 | /* Initial driver pointers and private data */ | 1025 | /* Initial driver pointers and private data */ |
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index 2192456dfd68..f032e446fc11 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c | |||
@@ -42,7 +42,20 @@ | |||
42 | struct acpi_table_ibft *ibft_addr; | 42 | struct acpi_table_ibft *ibft_addr; |
43 | EXPORT_SYMBOL_GPL(ibft_addr); | 43 | EXPORT_SYMBOL_GPL(ibft_addr); |
44 | 44 | ||
45 | #define IBFT_SIGN "iBFT" | 45 | static const struct { |
46 | char *sign; | ||
47 | } ibft_signs[] = { | ||
48 | #ifdef CONFIG_ACPI | ||
49 | /* | ||
50 | * One spec says "IBFT", the other says "iBFT". We have to check | ||
51 | * for both. | ||
52 | */ | ||
53 | { ACPI_SIG_IBFT }, | ||
54 | #endif | ||
55 | { "iBFT" }, | ||
56 | { "BIFT" }, /* Broadcom iSCSI Offload */ | ||
57 | }; | ||
58 | |||
46 | #define IBFT_SIGN_LEN 4 | 59 | #define IBFT_SIGN_LEN 4 |
47 | #define IBFT_START 0x80000 /* 512kB */ | 60 | #define IBFT_START 0x80000 /* 512kB */ |
48 | #define IBFT_END 0x100000 /* 1MB */ | 61 | #define IBFT_END 0x100000 /* 1MB */ |
@@ -62,6 +75,7 @@ static int __init find_ibft_in_mem(void) | |||
62 | unsigned long pos; | 75 | unsigned long pos; |
63 | unsigned int len = 0; | 76 | unsigned int len = 0; |
64 | void *virt; | 77 | void *virt; |
78 | int i; | ||
65 | 79 | ||
66 | for (pos = IBFT_START; pos < IBFT_END; pos += 16) { | 80 | for (pos = IBFT_START; pos < IBFT_END; pos += 16) { |
67 | /* The table can't be inside the VGA BIOS reserved space, | 81 | /* The table can't be inside the VGA BIOS reserved space, |
@@ -69,18 +83,23 @@ static int __init find_ibft_in_mem(void) | |||
69 | if (pos == VGA_MEM) | 83 | if (pos == VGA_MEM) |
70 | pos += VGA_SIZE; | 84 | pos += VGA_SIZE; |
71 | virt = isa_bus_to_virt(pos); | 85 | virt = isa_bus_to_virt(pos); |
72 | if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { | 86 | |
73 | unsigned long *addr = | 87 | for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) { |
74 | (unsigned long *)isa_bus_to_virt(pos + 4); | 88 | if (memcmp(virt, ibft_signs[i].sign, IBFT_SIGN_LEN) == |
75 | len = *addr; | 89 | 0) { |
76 | /* if the length of the table extends past 1M, | 90 | unsigned long *addr = |
77 | * the table cannot be valid. */ | 91 | (unsigned long *)isa_bus_to_virt(pos + 4); |
78 | if (pos + len <= (IBFT_END-1)) { | 92 | len = *addr; |
79 | ibft_addr = (struct acpi_table_ibft *)virt; | 93 | /* if the length of the table extends past 1M, |
80 | break; | 94 | * the table cannot be valid. */ |
95 | if (pos + len <= (IBFT_END-1)) { | ||
96 | ibft_addr = (struct acpi_table_ibft *)virt; | ||
97 | goto done; | ||
98 | } | ||
81 | } | 99 | } |
82 | } | 100 | } |
83 | } | 101 | } |
102 | done: | ||
84 | return len; | 103 | return len; |
85 | } | 104 | } |
86 | /* | 105 | /* |
@@ -89,18 +108,12 @@ static int __init find_ibft_in_mem(void) | |||
89 | */ | 108 | */ |
90 | unsigned long __init find_ibft_region(unsigned long *sizep) | 109 | unsigned long __init find_ibft_region(unsigned long *sizep) |
91 | { | 110 | { |
92 | 111 | int i; | |
93 | ibft_addr = NULL; | 112 | ibft_addr = NULL; |
94 | 113 | ||
95 | #ifdef CONFIG_ACPI | 114 | #ifdef CONFIG_ACPI |
96 | /* | 115 | for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) |
97 | * One spec says "IBFT", the other says "iBFT". We have to check | 116 | acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft); |
98 | * for both. | ||
99 | */ | ||
100 | if (!ibft_addr) | ||
101 | acpi_table_parse(ACPI_SIG_IBFT, acpi_find_ibft); | ||
102 | if (!ibft_addr) | ||
103 | acpi_table_parse(IBFT_SIGN, acpi_find_ibft); | ||
104 | #endif /* CONFIG_ACPI */ | 117 | #endif /* CONFIG_ACPI */ |
105 | 118 | ||
106 | /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will | 119 | /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will |
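The iscsi_ibft_find.c change replaces a single hard-coded signature with a small table (the ACPI "IBFT" spelling, "iBFT", and Broadcom's "BIFT") and has find_ibft_in_mem() compare every 16-byte-aligned candidate in the 512 kB-1 MB window against each entry, jumping to the done label on the first valid hit. A stand-alone sketch of that table-driven scan over an ordinary buffer; the buffer and its contents are invented:

/* Sketch: scanning a byte range for any of several 4-byte signatures.
 * The buffer and its contents are placeholders, not firmware memory. */
#include <stdio.h>
#include <string.h>

#define SIGN_LEN      4
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const signs[] = { "IBFT", "iBFT", "BIFT" };

static long find_sign(const unsigned char *buf, size_t len)
{
        size_t pos, i;

        for (pos = 0; pos + SIGN_LEN <= len; pos += 16) {  /* 16-byte steps, as in the patch */
                for (i = 0; i < ARRAY_SIZE(signs); i++)
                        if (!memcmp(buf + pos, signs[i], SIGN_LEN))
                                return (long)pos;          /* first match wins */
        }
        return -1;
}

int main(void)
{
        unsigned char mem[64] = { 0 };

        memcpy(mem + 32, "BIFT", SIGN_LEN);
        printf("signature found at offset %ld\n", find_sign(mem, sizeof(mem)));
        return 0;
}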
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 11d7a72c22d9..140b9525b48a 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -1516,17 +1516,33 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) | |||
1516 | } | 1516 | } |
1517 | EXPORT_SYMBOL(drm_fb_helper_initial_config); | 1517 | EXPORT_SYMBOL(drm_fb_helper_initial_config); |
1518 | 1518 | ||
1519 | bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | 1519 | /** |
1520 | * drm_fb_helper_hotplug_event - respond to a hotplug notification by | ||
1521 | * probing all the outputs attached to the fb. | ||
1522 | * @fb_helper: the drm_fb_helper | ||
1523 | * | ||
1524 | * LOCKING: | ||
1525 | * Called at runtime, must take mode config lock. | ||
1526 | * | ||
1527 | * Scan the connectors attached to the fb_helper and try to put together a | ||
1528 | * setup after notification of a change in output configuration. | ||
1529 | * | ||
1530 | * RETURNS: | ||
1531 | * 0 on success and a non-zero error code otherwise. | ||
1532 | */ | ||
1533 | int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | ||
1520 | { | 1534 | { |
1535 | struct drm_device *dev = fb_helper->dev; | ||
1521 | int count = 0; | 1536 | int count = 0; |
1522 | u32 max_width, max_height, bpp_sel; | 1537 | u32 max_width, max_height, bpp_sel; |
1523 | bool bound = false, crtcs_bound = false; | 1538 | bool bound = false, crtcs_bound = false; |
1524 | struct drm_crtc *crtc; | 1539 | struct drm_crtc *crtc; |
1525 | 1540 | ||
1526 | if (!fb_helper->fb) | 1541 | if (!fb_helper->fb) |
1527 | return false; | 1542 | return 0; |
1528 | 1543 | ||
1529 | list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { | 1544 | mutex_lock(&dev->mode_config.mutex); |
1545 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
1530 | if (crtc->fb) | 1546 | if (crtc->fb) |
1531 | crtcs_bound = true; | 1547 | crtcs_bound = true; |
1532 | if (crtc->fb == fb_helper->fb) | 1548 | if (crtc->fb == fb_helper->fb) |
@@ -1535,7 +1551,8 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | |||
1535 | 1551 | ||
1536 | if (!bound && crtcs_bound) { | 1552 | if (!bound && crtcs_bound) { |
1537 | fb_helper->delayed_hotplug = true; | 1553 | fb_helper->delayed_hotplug = true; |
1538 | return false; | 1554 | mutex_unlock(&dev->mode_config.mutex); |
1555 | return 0; | ||
1539 | } | 1556 | } |
1540 | DRM_DEBUG_KMS("\n"); | 1557 | DRM_DEBUG_KMS("\n"); |
1541 | 1558 | ||
@@ -1546,6 +1563,7 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | |||
1546 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, | 1563 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, |
1547 | max_height); | 1564 | max_height); |
1548 | drm_setup_crtcs(fb_helper); | 1565 | drm_setup_crtcs(fb_helper); |
1566 | mutex_unlock(&dev->mode_config.mutex); | ||
1549 | 1567 | ||
1550 | return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); | 1568 | return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); |
1551 | } | 1569 | } |
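Besides the kernel-doc block and the bool-to-int return change, the drm_fb_helper_hotplug_event() hunk makes the function take dev->mode_config.mutex itself and drop it on every exit path, so the final drm_fb_helper_single_fb_probe() call runs unlocked. The shape of that lock/early-return discipline, reduced to a self-contained user-space sketch with pthreads standing in for the kernel mutex (all names invented):

/* Sketch of the lock/early-return discipline only; all names are invented
 * and pthreads stand in for the DRM mode_config mutex. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct helper {
        pthread_mutex_t mode_config_lock;
        bool have_fb;
        bool must_defer;
        bool delayed_hotplug;
};

static int hotplug_event(struct helper *h)
{
        if (!h->have_fb)                        /* cheap check, no lock needed */
                return 0;

        pthread_mutex_lock(&h->mode_config_lock);
        if (h->must_defer) {
                h->delayed_hotplug = true;
                pthread_mutex_unlock(&h->mode_config_lock);  /* drop the lock on every exit */
                return 0;
        }
        /* ... probe connectors and set up CRTCs while holding the lock ... */
        pthread_mutex_unlock(&h->mode_config_lock);

        return 0;       /* the real function re-probes the framebuffer here, unlocked */
}

int main(void)
{
        struct helper h = { PTHREAD_MUTEX_INITIALIZER, true, false, false };

        printf("hotplug_event -> %d\n", hotplug_event(&h));
        return 0;
}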
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c34a8dd31d02..32d1b3e829c8 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); | |||
49 | unsigned int i915_powersave = 1; | 49 | unsigned int i915_powersave = 1; |
50 | module_param_named(powersave, i915_powersave, int, 0600); | 50 | module_param_named(powersave, i915_powersave, int, 0600); |
51 | 51 | ||
52 | unsigned int i915_semaphores = 1; | 52 | unsigned int i915_semaphores = 0; |
53 | module_param_named(semaphores, i915_semaphores, int, 0600); | 53 | module_param_named(semaphores, i915_semaphores, int, 0600); |
54 | 54 | ||
55 | unsigned int i915_enable_rc6 = 0; | 55 | unsigned int i915_enable_rc6 = 0; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 373c2a005ec1..2166ee071ddb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -5154,6 +5154,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
5154 | 5154 | ||
5155 | I915_WRITE(DSPCNTR(plane), dspcntr); | 5155 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5156 | POSTING_READ(DSPCNTR(plane)); | 5156 | POSTING_READ(DSPCNTR(plane)); |
5157 | if (!HAS_PCH_SPLIT(dev)) | ||
5158 | intel_enable_plane(dev_priv, plane, pipe); | ||
5157 | 5159 | ||
5158 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 5160 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
5159 | 5161 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 4bce801bc588..c77111eca6ac 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -42,7 +42,8 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, | |||
42 | 42 | ||
43 | nvbe->nr_pages = 0; | 43 | nvbe->nr_pages = 0; |
44 | while (num_pages--) { | 44 | while (num_pages--) { |
45 | if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) { | 45 | /* this code path isn't called and is incorrect anyways */ |
46 | if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/ | ||
46 | nvbe->pages[nvbe->nr_pages] = | 47 | nvbe->pages[nvbe->nr_pages] = |
47 | dma_addrs[nvbe->nr_pages]; | 48 | dma_addrs[nvbe->nr_pages]; |
48 | nvbe->ttm_alloced[nvbe->nr_pages] = true; | 49 | nvbe->ttm_alloced[nvbe->nr_pages] = true; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index c20eac3379e6..9073e3bfb08c 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -1780,7 +1780,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1780 | 1780 | ||
1781 | 1781 | ||
1782 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | 1782 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); |
1783 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 1783 | if (rdev->flags & RADEON_IS_IGP) |
1784 | mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG); | ||
1785 | else | ||
1786 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | ||
1784 | 1787 | ||
1785 | switch (rdev->config.evergreen.max_tile_pipes) { | 1788 | switch (rdev->config.evergreen.max_tile_pipes) { |
1786 | case 1: | 1789 | case 1: |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 94533849927e..fc40e0cc3451 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -200,6 +200,7 @@ | |||
200 | #define BURSTLENGTH_SHIFT 9 | 200 | #define BURSTLENGTH_SHIFT 9 |
201 | #define BURSTLENGTH_MASK 0x00000200 | 201 | #define BURSTLENGTH_MASK 0x00000200 |
202 | #define CHANSIZE_OVERRIDE (1 << 11) | 202 | #define CHANSIZE_OVERRIDE (1 << 11) |
203 | #define FUS_MC_ARB_RAMCFG 0x2768 | ||
203 | #define MC_VM_AGP_TOP 0x2028 | 204 | #define MC_VM_AGP_TOP 0x2028 |
204 | #define MC_VM_AGP_BOT 0x202C | 205 | #define MC_VM_AGP_BOT 0x202C |
205 | #define MC_VM_AGP_BASE 0x2030 | 206 | #define MC_VM_AGP_BASE 0x2030 |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 7aade20f63a8..3d8a7634bbe9 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -674,7 +674,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
674 | 674 | ||
675 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); | 675 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); |
676 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); | 676 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); |
677 | cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE); | 677 | cgts_tcc_disable = 0xff000000; |
678 | gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); | 678 | gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); |
679 | gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); | 679 | gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); |
680 | cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); | 680 | cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); |
@@ -871,7 +871,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
871 | 871 | ||
872 | smx_dc_ctl0 = RREG32(SMX_DC_CTL0); | 872 | smx_dc_ctl0 = RREG32(SMX_DC_CTL0); |
873 | smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); | 873 | smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); |
874 | smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); | 874 | smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets); |
875 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); | 875 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); |
876 | 876 | ||
877 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); | 877 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); |
@@ -887,20 +887,20 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
887 | 887 | ||
888 | WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); | 888 | WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); |
889 | 889 | ||
890 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | | 890 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) | |
891 | POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | | 891 | POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) | |
892 | SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); | 892 | SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1))); |
893 | 893 | ||
894 | WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | | 894 | WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) | |
895 | SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | | 895 | SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) | |
896 | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); | 896 | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size))); |
897 | 897 | ||
898 | 898 | ||
899 | WREG32(VGT_NUM_INSTANCES, 1); | 899 | WREG32(VGT_NUM_INSTANCES, 1); |
900 | 900 | ||
901 | WREG32(CP_PERFMON_CNTL, 0); | 901 | WREG32(CP_PERFMON_CNTL, 0); |
902 | 902 | ||
903 | WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | | 903 | WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) | |
904 | FETCH_FIFO_HIWATER(0x4) | | 904 | FETCH_FIFO_HIWATER(0x4) | |
905 | DONE_FIFO_HIWATER(0xe0) | | 905 | DONE_FIFO_HIWATER(0xe0) | |
906 | ALU_UPDATE_FIFO_HIWATER(0x8))); | 906 | ALU_UPDATE_FIFO_HIWATER(0x8))); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index dd881d035f09..90dfb2b8cf03 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -1574,9 +1574,17 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1574 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; | 1574 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; |
1575 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; | 1575 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; |
1576 | bool bad_record = false; | 1576 | bool bad_record = false; |
1577 | u8 *record = (u8 *)(mode_info->atom_context->bios + | 1577 | u8 *record; |
1578 | data_offset + | 1578 | |
1579 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); | 1579 | if ((frev == 1) && (crev < 2)) |
1580 | /* absolute */ | ||
1581 | record = (u8 *)(mode_info->atom_context->bios + | ||
1582 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); | ||
1583 | else | ||
1584 | /* relative */ | ||
1585 | record = (u8 *)(mode_info->atom_context->bios + | ||
1586 | data_offset + | ||
1587 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); | ||
1580 | while (*record != ATOM_RECORD_END_TYPE) { | 1588 | while (*record != ATOM_RECORD_END_TYPE) { |
1581 | switch (*record) { | 1589 | switch (*record) { |
1582 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: | 1590 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: |
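The radeon_atombios.c hunk makes the meaning of usModePatchTableOffset depend on the LVDS table revision: for frev 1 with crev below 2 it is treated as an absolute offset from the start of the BIOS image, otherwise as relative to data_offset, as before. A tiny stand-alone sketch of that "one stored offset, two bases" decision; the buffer and numbers are invented:

/* Sketch: one stored offset interpreted against two possible bases.
 * Revision checks follow the hunk; buffer and offsets are invented. */
#include <stdint.h>
#include <stdio.h>

static const uint8_t *patch_record(const uint8_t *bios, uint32_t data_offset,
                                   uint16_t table_offset, int frev, int crev)
{
        if (frev == 1 && crev < 2)
                return bios + table_offset;              /* absolute from the BIOS start */
        return bios + data_offset + table_offset;        /* relative to the LVDS info block */
}

int main(void)
{
        static const uint8_t bios[256] = { 0 };

        printf("absolute: offset %ld\n", (long)(patch_record(bios, 0x40, 0x10, 1, 1) - bios));
        printf("relative: offset %ld\n", (long)(patch_record(bios, 0x40, 0x10, 1, 2) - bios));
        return 0;
}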
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 8a955bbdb608..a533f52fd163 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -181,9 +181,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
181 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); | 181 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
182 | 182 | ||
183 | for (i = 0; i < pages; i++, p++) { | 183 | for (i = 0; i < pages; i++, p++) { |
184 | /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32 | 184 | /* we reverted the patch using dma_addr in TTM for now but this |
185 | * is requested. */ | 185 | * code stops building on alpha so just comment it out for now */ |
186 | if (dma_addr[i] != DMA_ERROR_CODE) { | 186 | if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */ |
187 | rdev->gart.ttm_alloced[p] = true; | 187 | rdev->gart.ttm_alloced[p] = true; |
188 | rdev->gart.pages_addr[p] = dma_addr[i]; | 188 | rdev->gart.pages_addr[p] = dma_addr[i]; |
189 | } else { | 189 | } else { |
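Both the nouveau_sgdma.c and radeon_gart.c hunks neuter the dma_addr-based path with an explicit if (0) plus a comment rather than deleting it or hiding it behind #if 0, so the branch still compiles and type-checks against the surrounding code while never executing. A small illustration of that idiom with invented helpers:

/* Illustration of disabling a branch with "if (0)" instead of "#if 0":
 * the dead arm is still parsed and type-checked, then optimized away. */
#include <stdio.h>

static void use_dma_path(unsigned long addr) { printf("dma 0x%lx\n", addr); }
static void use_page_path(int page)          { printf("page %d\n", page); }

int main(void)
{
        unsigned long dma_addr = 0x1000;
        int page = 7;

        if (0) {                /* never taken; re-enabling is a one-character edit */
                use_dma_path(dma_addr);
        } else {
                use_page_path(page);
        }
        return 0;
}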
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 6334f8ac1209..0aa8e85a9457 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman | |||
@@ -33,6 +33,7 @@ cayman 0x9400 | |||
33 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS | 33 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS |
34 | 0x00009100 SPI_CONFIG_CNTL | 34 | 0x00009100 SPI_CONFIG_CNTL |
35 | 0x0000913C SPI_CONFIG_CNTL_1 | 35 | 0x0000913C SPI_CONFIG_CNTL_1 |
36 | 0x00009508 TA_CNTL_AUX | ||
36 | 0x00009830 DB_DEBUG | 37 | 0x00009830 DB_DEBUG |
37 | 0x00009834 DB_DEBUG2 | 38 | 0x00009834 DB_DEBUG2 |
38 | 0x00009838 DB_DEBUG3 | 39 | 0x00009838 DB_DEBUG3 |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index 7e1637176e08..0e28cae7ea43 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
@@ -46,6 +46,7 @@ evergreen 0x9400 | |||
46 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS | 46 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS |
47 | 0x00009100 SPI_CONFIG_CNTL | 47 | 0x00009100 SPI_CONFIG_CNTL |
48 | 0x0000913C SPI_CONFIG_CNTL_1 | 48 | 0x0000913C SPI_CONFIG_CNTL_1 |
49 | 0x00009508 TA_CNTL_AUX | ||
49 | 0x00009700 VC_CNTL | 50 | 0x00009700 VC_CNTL |
50 | 0x00009714 VC_ENHANCE | 51 | 0x00009714 VC_ENHANCE |
51 | 0x00009830 DB_DEBUG | 52 | 0x00009830 DB_DEBUG |
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index e01cacba685f..498b284e5ef9 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c | |||
@@ -219,9 +219,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) | |||
219 | int i; | 219 | int i; |
220 | struct vga_switcheroo_client *active = NULL; | 220 | struct vga_switcheroo_client *active = NULL; |
221 | 221 | ||
222 | if (new_client->active == true) | ||
223 | return 0; | ||
224 | |||
225 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { | 222 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { |
226 | if (vgasr_priv.clients[i].active == true) { | 223 | if (vgasr_priv.clients[i].active == true) { |
227 | active = &vgasr_priv.clients[i]; | 224 | active = &vgasr_priv.clients[i]; |
@@ -372,6 +369,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
372 | goto out; | 369 | goto out; |
373 | } | 370 | } |
374 | 371 | ||
372 | if (client->active == true) | ||
373 | goto out; | ||
374 | |||
375 | /* okay we want a switch - test if devices are willing to switch */ | 375 | /* okay we want a switch - test if devices are willing to switch */ |
376 | can_switch = true; | 376 | can_switch = true; |
377 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { | 377 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { |
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 75b984c519ac..107397a606b4 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c | |||
@@ -560,15 +560,18 @@ static struct i2c_adapter mpc_ops = { | |||
560 | .timeout = HZ, | 560 | .timeout = HZ, |
561 | }; | 561 | }; |
562 | 562 | ||
563 | static const struct of_device_id mpc_i2c_of_match[]; | ||
563 | static int __devinit fsl_i2c_probe(struct platform_device *op) | 564 | static int __devinit fsl_i2c_probe(struct platform_device *op) |
564 | { | 565 | { |
566 | const struct of_device_id *match; | ||
565 | struct mpc_i2c *i2c; | 567 | struct mpc_i2c *i2c; |
566 | const u32 *prop; | 568 | const u32 *prop; |
567 | u32 clock = MPC_I2C_CLOCK_LEGACY; | 569 | u32 clock = MPC_I2C_CLOCK_LEGACY; |
568 | int result = 0; | 570 | int result = 0; |
569 | int plen; | 571 | int plen; |
570 | 572 | ||
571 | if (!op->dev.of_match) | 573 | match = of_match_device(mpc_i2c_of_match, &op->dev); |
574 | if (!match) | ||
572 | return -EINVAL; | 575 | return -EINVAL; |
573 | 576 | ||
574 | i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); | 577 | i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); |
@@ -605,8 +608,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op) | |||
605 | clock = *prop; | 608 | clock = *prop; |
606 | } | 609 | } |
607 | 610 | ||
608 | if (op->dev.of_match->data) { | 611 | if (match->data) { |
609 | struct mpc_i2c_data *data = op->dev.of_match->data; | 612 | struct mpc_i2c_data *data = match->data; |
610 | data->setup(op->dev.of_node, i2c, clock, data->prescaler); | 613 | data->setup(op->dev.of_node, i2c, clock, data->prescaler); |
611 | } else { | 614 | } else { |
612 | /* Backwards compatibility */ | 615 | /* Backwards compatibility */ |
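The ppc4xx_edac and i2c-mpc probes stop dereferencing the removed op->dev.of_match pointer and instead call of_match_device() against the driver's own match table at probe time, returning -EINVAL when nothing matches and using match->data for per-variant setup. A reduced, self-contained sketch of that control flow; the device-tree lookup is simulated and the table entries are placeholders:

/* Sketch of the "look up the match entry at probe time" pattern; the
 * device-tree plumbing is simulated, only the control flow mirrors the patch. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct of_device_id { const char *compatible; const char *data; };

static const struct of_device_id mpc_i2c_of_match[] = {
        { .compatible = "fsl,mpc5200-i2c", .data = NULL },
        { .compatible = "fsl,mpc8544-i2c", .data = "mpc8xxx setup" },
        { /* sentinel */ }
};

/* stand-in for of_match_device(): match by compatible string */
static const struct of_device_id *match_device(const struct of_device_id *tbl,
                                               const char *compat)
{
        for (; tbl->compatible; tbl++)
                if (!strcmp(tbl->compatible, compat))
                        return tbl;
        return NULL;
}

static int probe(const char *compat)
{
        const struct of_device_id *match = match_device(mpc_i2c_of_match, compat);

        if (!match)
                return -EINVAL;                 /* as in both probe functions */
        if (match->data)                        /* stands in for data->setup(...) */
                printf("running %s\n", match->data);
        return 0;
}

int main(void)
{
        printf("known board:   %d\n", probe("fsl,mpc8544-i2c"));
        printf("unknown board: %d\n", probe("acme,unknown"));
        return 0;
}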
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index a97e3fec8148..04be9f82e14b 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c | |||
@@ -65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) | |||
65 | jiffies, expires); | 65 | jiffies, expires); |
66 | 66 | ||
67 | timer->expires = jiffies + expires; | 67 | timer->expires = jiffies + expires; |
68 | timer->data = (unsigned long)&alg_data; | 68 | timer->data = (unsigned long)alg_data; |
69 | 69 | ||
70 | add_timer(timer); | 70 | add_timer(timer); |
71 | } | 71 | } |
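The single-line i2c-pnx.c change is a pointer-level bug fix: alg_data is already a pointer, so the old (unsigned long)&alg_data stored the address of a local variable in timer->data, leaving the timer callback to cast a dead stack slot back to the driver structure. A stand-alone illustration of the difference; the "timer" here is just a stored callback argument, not the kernel timer API:

/* Illustration of the &pointer vs. pointer mix-up fixed in i2c-pnx.c;
 * the "timer" is just a stored callback argument, not the kernel API. */
#include <stdio.h>

struct alg_data { int bus_id; };

static unsigned long timer_data;                /* stands in for timer->data */

static void timeout_cb(unsigned long data)
{
        struct alg_data *alg = (struct alg_data *)data;

        printf("timeout on bus %d\n", alg->bus_id);
}

static void arm_timer(struct alg_data *alg_data)
{
        /* The old code stored (unsigned long)&alg_data - the address of this
         * local pointer variable, which is gone once arm_timer() returns.
         * Storing the pointer itself is what the callback expects. */
        timer_data = (unsigned long)alg_data;
}

int main(void)
{
        struct alg_data d = { .bus_id = 1 };

        arm_timer(&d);
        timeout_cb(timer_data);
        return 0;
}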
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 5ed9d25d021a..99dde874fbbd 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -148,6 +148,7 @@ struct rdma_id_private { | |||
148 | u32 qp_num; | 148 | u32 qp_num; |
149 | u8 srq; | 149 | u8 srq; |
150 | u8 tos; | 150 | u8 tos; |
151 | u8 reuseaddr; | ||
151 | }; | 152 | }; |
152 | 153 | ||
153 | struct cma_multicast { | 154 | struct cma_multicast { |
@@ -712,6 +713,21 @@ static inline int cma_any_addr(struct sockaddr *addr) | |||
712 | return cma_zero_addr(addr) || cma_loopback_addr(addr); | 713 | return cma_zero_addr(addr) || cma_loopback_addr(addr); |
713 | } | 714 | } |
714 | 715 | ||
716 | static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) | ||
717 | { | ||
718 | if (src->sa_family != dst->sa_family) | ||
719 | return -1; | ||
720 | |||
721 | switch (src->sa_family) { | ||
722 | case AF_INET: | ||
723 | return ((struct sockaddr_in *) src)->sin_addr.s_addr != | ||
724 | ((struct sockaddr_in *) dst)->sin_addr.s_addr; | ||
725 | default: | ||
726 | return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr, | ||
727 | &((struct sockaddr_in6 *) dst)->sin6_addr); | ||
728 | } | ||
729 | } | ||
730 | |||
715 | static inline __be16 cma_port(struct sockaddr *addr) | 731 | static inline __be16 cma_port(struct sockaddr *addr) |
716 | { | 732 | { |
717 | if (addr->sa_family == AF_INET) | 733 | if (addr->sa_family == AF_INET) |
@@ -1564,50 +1580,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv) | |||
1564 | mutex_unlock(&lock); | 1580 | mutex_unlock(&lock); |
1565 | } | 1581 | } |
1566 | 1582 | ||
1567 | int rdma_listen(struct rdma_cm_id *id, int backlog) | ||
1568 | { | ||
1569 | struct rdma_id_private *id_priv; | ||
1570 | int ret; | ||
1571 | |||
1572 | id_priv = container_of(id, struct rdma_id_private, id); | ||
1573 | if (id_priv->state == CMA_IDLE) { | ||
1574 | ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; | ||
1575 | ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); | ||
1576 | if (ret) | ||
1577 | return ret; | ||
1578 | } | ||
1579 | |||
1580 | if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) | ||
1581 | return -EINVAL; | ||
1582 | |||
1583 | id_priv->backlog = backlog; | ||
1584 | if (id->device) { | ||
1585 | switch (rdma_node_get_transport(id->device->node_type)) { | ||
1586 | case RDMA_TRANSPORT_IB: | ||
1587 | ret = cma_ib_listen(id_priv); | ||
1588 | if (ret) | ||
1589 | goto err; | ||
1590 | break; | ||
1591 | case RDMA_TRANSPORT_IWARP: | ||
1592 | ret = cma_iw_listen(id_priv, backlog); | ||
1593 | if (ret) | ||
1594 | goto err; | ||
1595 | break; | ||
1596 | default: | ||
1597 | ret = -ENOSYS; | ||
1598 | goto err; | ||
1599 | } | ||
1600 | } else | ||
1601 | cma_listen_on_all(id_priv); | ||
1602 | |||
1603 | return 0; | ||
1604 | err: | ||
1605 | id_priv->backlog = 0; | ||
1606 | cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); | ||
1607 | return ret; | ||
1608 | } | ||
1609 | EXPORT_SYMBOL(rdma_listen); | ||
1610 | |||
1611 | void rdma_set_service_type(struct rdma_cm_id *id, int tos) | 1583 | void rdma_set_service_type(struct rdma_cm_id *id, int tos) |
1612 | { | 1584 | { |
1613 | struct rdma_id_private *id_priv; | 1585 | struct rdma_id_private *id_priv; |
@@ -2090,6 +2062,25 @@ err: | |||
2090 | } | 2062 | } |
2091 | EXPORT_SYMBOL(rdma_resolve_addr); | 2063 | EXPORT_SYMBOL(rdma_resolve_addr); |
2092 | 2064 | ||
2065 | int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) | ||
2066 | { | ||
2067 | struct rdma_id_private *id_priv; | ||
2068 | unsigned long flags; | ||
2069 | int ret; | ||
2070 | |||
2071 | id_priv = container_of(id, struct rdma_id_private, id); | ||
2072 | spin_lock_irqsave(&id_priv->lock, flags); | ||
2073 | if (id_priv->state == CMA_IDLE) { | ||
2074 | id_priv->reuseaddr = reuse; | ||
2075 | ret = 0; | ||
2076 | } else { | ||
2077 | ret = -EINVAL; | ||
2078 | } | ||
2079 | spin_unlock_irqrestore(&id_priv->lock, flags); | ||
2080 | return ret; | ||
2081 | } | ||
2082 | EXPORT_SYMBOL(rdma_set_reuseaddr); | ||
2083 | |||
2093 | static void cma_bind_port(struct rdma_bind_list *bind_list, | 2084 | static void cma_bind_port(struct rdma_bind_list *bind_list, |
2094 | struct rdma_id_private *id_priv) | 2085 | struct rdma_id_private *id_priv) |
2095 | { | 2086 | { |
@@ -2165,41 +2156,71 @@ retry: | |||
2165 | return -EADDRNOTAVAIL; | 2156 | return -EADDRNOTAVAIL; |
2166 | } | 2157 | } |
2167 | 2158 | ||
2168 | static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) | 2159 | /* |
2160 | * Check that the requested port is available. This is called when trying to | ||
2161 | * bind to a specific port, or when trying to listen on a bound port. In | ||
2162 | * the latter case, the provided id_priv may already be on the bind_list, but | ||
2163 | * we still need to check that it's okay to start listening. | ||
2164 | */ | ||
2165 | static int cma_check_port(struct rdma_bind_list *bind_list, | ||
2166 | struct rdma_id_private *id_priv, uint8_t reuseaddr) | ||
2169 | { | 2167 | { |
2170 | struct rdma_id_private *cur_id; | 2168 | struct rdma_id_private *cur_id; |
2171 | struct sockaddr_in *sin, *cur_sin; | 2169 | struct sockaddr *addr, *cur_addr; |
2172 | struct rdma_bind_list *bind_list; | ||
2173 | struct hlist_node *node; | 2170 | struct hlist_node *node; |
2171 | |||
2172 | addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; | ||
2173 | if (cma_any_addr(addr) && !reuseaddr) | ||
2174 | return -EADDRNOTAVAIL; | ||
2175 | |||
2176 | hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { | ||
2177 | if (id_priv == cur_id) | ||
2178 | continue; | ||
2179 | |||
2180 | if ((cur_id->state == CMA_LISTEN) || | ||
2181 | !reuseaddr || !cur_id->reuseaddr) { | ||
2182 | cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; | ||
2183 | if (cma_any_addr(cur_addr)) | ||
2184 | return -EADDRNOTAVAIL; | ||
2185 | |||
2186 | if (!cma_addr_cmp(addr, cur_addr)) | ||
2187 | return -EADDRINUSE; | ||
2188 | } | ||
2189 | } | ||
2190 | return 0; | ||
2191 | } | ||
2192 | |||
2193 | static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) | ||
2194 | { | ||
2195 | struct rdma_bind_list *bind_list; | ||
2174 | unsigned short snum; | 2196 | unsigned short snum; |
2197 | int ret; | ||
2175 | 2198 | ||
2176 | sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; | 2199 | snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)); |
2177 | snum = ntohs(sin->sin_port); | ||
2178 | if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) | 2200 | if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) |
2179 | return -EACCES; | 2201 | return -EACCES; |
2180 | 2202 | ||
2181 | bind_list = idr_find(ps, snum); | 2203 | bind_list = idr_find(ps, snum); |
2182 | if (!bind_list) | 2204 | if (!bind_list) { |
2183 | return cma_alloc_port(ps, id_priv, snum); | 2205 | ret = cma_alloc_port(ps, id_priv, snum); |
2184 | 2206 | } else { | |
2185 | /* | 2207 | ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); |
2186 | * We don't support binding to any address if anyone is bound to | 2208 | if (!ret) |
2187 | * a specific address on the same port. | 2209 | cma_bind_port(bind_list, id_priv); |
2188 | */ | ||
2189 | if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) | ||
2190 | return -EADDRNOTAVAIL; | ||
2191 | |||
2192 | hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { | ||
2193 | if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr)) | ||
2194 | return -EADDRNOTAVAIL; | ||
2195 | |||
2196 | cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; | ||
2197 | if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) | ||
2198 | return -EADDRINUSE; | ||
2199 | } | 2210 | } |
2211 | return ret; | ||
2212 | } | ||
2200 | 2213 | ||
2201 | cma_bind_port(bind_list, id_priv); | 2214 | static int cma_bind_listen(struct rdma_id_private *id_priv) |
2202 | return 0; | 2215 | { |
2216 | struct rdma_bind_list *bind_list = id_priv->bind_list; | ||
2217 | int ret = 0; | ||
2218 | |||
2219 | mutex_lock(&lock); | ||
2220 | if (bind_list->owners.first->next) | ||
2221 | ret = cma_check_port(bind_list, id_priv, 0); | ||
2222 | mutex_unlock(&lock); | ||
2223 | return ret; | ||
2203 | } | 2224 | } |
2204 | 2225 | ||
2205 | static int cma_get_port(struct rdma_id_private *id_priv) | 2226 | static int cma_get_port(struct rdma_id_private *id_priv) |
@@ -2253,6 +2274,56 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, | |||
2253 | return 0; | 2274 | return 0; |
2254 | } | 2275 | } |
2255 | 2276 | ||
2277 | int rdma_listen(struct rdma_cm_id *id, int backlog) | ||
2278 | { | ||
2279 | struct rdma_id_private *id_priv; | ||
2280 | int ret; | ||
2281 | |||
2282 | id_priv = container_of(id, struct rdma_id_private, id); | ||
2283 | if (id_priv->state == CMA_IDLE) { | ||
2284 | ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; | ||
2285 | ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); | ||
2286 | if (ret) | ||
2287 | return ret; | ||
2288 | } | ||
2289 | |||
2290 | if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) | ||
2291 | return -EINVAL; | ||
2292 | |||
2293 | if (id_priv->reuseaddr) { | ||
2294 | ret = cma_bind_listen(id_priv); | ||
2295 | if (ret) | ||
2296 | goto err; | ||
2297 | } | ||
2298 | |||
2299 | id_priv->backlog = backlog; | ||
2300 | if (id->device) { | ||
2301 | switch (rdma_node_get_transport(id->device->node_type)) { | ||
2302 | case RDMA_TRANSPORT_IB: | ||
2303 | ret = cma_ib_listen(id_priv); | ||
2304 | if (ret) | ||
2305 | goto err; | ||
2306 | break; | ||
2307 | case RDMA_TRANSPORT_IWARP: | ||
2308 | ret = cma_iw_listen(id_priv, backlog); | ||
2309 | if (ret) | ||
2310 | goto err; | ||
2311 | break; | ||
2312 | default: | ||
2313 | ret = -ENOSYS; | ||
2314 | goto err; | ||
2315 | } | ||
2316 | } else | ||
2317 | cma_listen_on_all(id_priv); | ||
2318 | |||
2319 | return 0; | ||
2320 | err: | ||
2321 | id_priv->backlog = 0; | ||
2322 | cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); | ||
2323 | return ret; | ||
2324 | } | ||
2325 | EXPORT_SYMBOL(rdma_listen); | ||
2326 | |||
2256 | int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) | 2327 | int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) |
2257 | { | 2328 | { |
2258 | struct rdma_id_private *id_priv; | 2329 | struct rdma_id_private *id_priv; |
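The rdma/cma changes replace the old "nobody may share a port with a specific bind" rule with cma_check_port(): a wildcard bind is only allowed to share when the caller set reuseaddr, and an existing owner blocks the bind if it is already listening or either side did not opt in; rdma_listen() re-runs the check via cma_bind_listen() when the port is shared. A compact sketch of just that decision table, with addresses reduced to integers and 0 standing in for the wildcard address:

/* Sketch of the port-sharing rules added by cma_check_port(); addresses are
 * plain integers here and 0 plays the role of the wildcard address. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct owner { unsigned int addr; bool listening; bool reuseaddr; };

static int check_port(const struct owner *owners, int n,
                      unsigned int addr, bool reuseaddr)
{
        int i;

        if (addr == 0 && !reuseaddr)            /* wildcard bind needs opt-in */
                return -EADDRNOTAVAIL;

        for (i = 0; i < n; i++) {
                if (owners[i].listening || !reuseaddr || !owners[i].reuseaddr) {
                        if (owners[i].addr == 0)        /* wildcard owner blocks */
                                return -EADDRNOTAVAIL;
                        if (owners[i].addr == addr)     /* exact clash */
                                return -EADDRINUSE;
                }
        }
        return 0;
}

int main(void)
{
        struct owner owners[] = { { 0x0a000001, false, true } };

        printf("%d\n", check_port(owners, 1, 0x0a000001, true));   /* 0: both opted in */
        printf("%d\n", check_port(owners, 1, 0x0a000001, false));  /* -EADDRINUSE */
        return 0;
}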
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 2a1e9ae134b4..a9c042345c6f 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
@@ -725,7 +725,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv, | |||
725 | */ | 725 | */ |
726 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | 726 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); |
727 | BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); | 727 | BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); |
728 | if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) { | 728 | if (iw_event->status == 0) { |
729 | cm_id_priv->id.local_addr = iw_event->local_addr; | 729 | cm_id_priv->id.local_addr = iw_event->local_addr; |
730 | cm_id_priv->id.remote_addr = iw_event->remote_addr; | 730 | cm_id_priv->id.remote_addr = iw_event->remote_addr; |
731 | cm_id_priv->state = IW_CM_STATE_ESTABLISHED; | 731 | cm_id_priv->state = IW_CM_STATE_ESTABLISHED; |
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index ec1e9da1488b..b3fa798525b2 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -883,6 +883,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname, | |||
883 | } | 883 | } |
884 | rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); | 884 | rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); |
885 | break; | 885 | break; |
886 | case RDMA_OPTION_ID_REUSEADDR: | ||
887 | if (optlen != sizeof(int)) { | ||
888 | ret = -EINVAL; | ||
889 | break; | ||
890 | } | ||
891 | ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); | ||
892 | break; | ||
886 | default: | 893 | default: |
887 | ret = -ENOSYS; | 894 | ret = -ENOSYS; |
888 | } | 895 | } |
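The ucma hunk above exposes the new reuseaddr knob to user space as an option on the CM ID. A hedged user-space sketch using librdmacm's rdma_set_option(); the helper name is invented, and the option constants are assumed to mirror the case added above:

    #include <rdma/rdma_cma.h>

    static int enable_reuseaddr(struct rdma_cm_id *id)
    {
            int one = 1;

            /* Maps onto the RDMA_OPTION_ID_REUSEADDR case handled in ucma. */
            return rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
                                   &one, sizeof(one));
    }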
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 9d8dcfab2b38..d7ee70fc9173 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -1198,9 +1198,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1198 | } | 1198 | } |
1199 | PDBG("%s ep %p status %d error %d\n", __func__, ep, | 1199 | PDBG("%s ep %p status %d error %d\n", __func__, ep, |
1200 | rpl->status, status2errno(rpl->status)); | 1200 | rpl->status, status2errno(rpl->status)); |
1201 | ep->com.wr_wait.ret = status2errno(rpl->status); | 1201 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
1202 | ep->com.wr_wait.done = 1; | ||
1203 | wake_up(&ep->com.wr_wait.wait); | ||
1204 | 1202 | ||
1205 | return 0; | 1203 | return 0; |
1206 | } | 1204 | } |
@@ -1234,9 +1232,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1234 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); | 1232 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); |
1235 | 1233 | ||
1236 | PDBG("%s ep %p\n", __func__, ep); | 1234 | PDBG("%s ep %p\n", __func__, ep); |
1237 | ep->com.wr_wait.ret = status2errno(rpl->status); | 1235 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
1238 | ep->com.wr_wait.done = 1; | ||
1239 | wake_up(&ep->com.wr_wait.wait); | ||
1240 | return 0; | 1236 | return 0; |
1241 | } | 1237 | } |
1242 | 1238 | ||
@@ -1466,7 +1462,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1466 | struct c4iw_qp_attributes attrs; | 1462 | struct c4iw_qp_attributes attrs; |
1467 | int disconnect = 1; | 1463 | int disconnect = 1; |
1468 | int release = 0; | 1464 | int release = 0; |
1469 | int closing = 0; | 1465 | int abort = 0; |
1470 | struct tid_info *t = dev->rdev.lldi.tids; | 1466 | struct tid_info *t = dev->rdev.lldi.tids; |
1471 | unsigned int tid = GET_TID(hdr); | 1467 | unsigned int tid = GET_TID(hdr); |
1472 | 1468 | ||
@@ -1492,23 +1488,22 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1492 | * in rdma connection migration (see c4iw_accept_cr()). | 1488 | * in rdma connection migration (see c4iw_accept_cr()). |
1493 | */ | 1489 | */ |
1494 | __state_set(&ep->com, CLOSING); | 1490 | __state_set(&ep->com, CLOSING); |
1495 | ep->com.wr_wait.done = 1; | ||
1496 | ep->com.wr_wait.ret = -ECONNRESET; | ||
1497 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); | 1491 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
1498 | wake_up(&ep->com.wr_wait.wait); | 1492 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
1499 | break; | 1493 | break; |
1500 | case MPA_REP_SENT: | 1494 | case MPA_REP_SENT: |
1501 | __state_set(&ep->com, CLOSING); | 1495 | __state_set(&ep->com, CLOSING); |
1502 | ep->com.wr_wait.done = 1; | ||
1503 | ep->com.wr_wait.ret = -ECONNRESET; | ||
1504 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); | 1496 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
1505 | wake_up(&ep->com.wr_wait.wait); | 1497 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
1506 | break; | 1498 | break; |
1507 | case FPDU_MODE: | 1499 | case FPDU_MODE: |
1508 | start_ep_timer(ep); | 1500 | start_ep_timer(ep); |
1509 | __state_set(&ep->com, CLOSING); | 1501 | __state_set(&ep->com, CLOSING); |
1510 | closing = 1; | 1502 | attrs.next_state = C4IW_QP_STATE_CLOSING; |
1503 | abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
1504 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | ||
1511 | peer_close_upcall(ep); | 1505 | peer_close_upcall(ep); |
1506 | disconnect = 1; | ||
1512 | break; | 1507 | break; |
1513 | case ABORTING: | 1508 | case ABORTING: |
1514 | disconnect = 0; | 1509 | disconnect = 0; |
@@ -1536,11 +1531,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1536 | BUG_ON(1); | 1531 | BUG_ON(1); |
1537 | } | 1532 | } |
1538 | mutex_unlock(&ep->com.mutex); | 1533 | mutex_unlock(&ep->com.mutex); |
1539 | if (closing) { | ||
1540 | attrs.next_state = C4IW_QP_STATE_CLOSING; | ||
1541 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
1542 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | ||
1543 | } | ||
1544 | if (disconnect) | 1534 | if (disconnect) |
1545 | c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | 1535 | c4iw_ep_disconnect(ep, 0, GFP_KERNEL); |
1546 | if (release) | 1536 | if (release) |
@@ -1581,9 +1571,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1581 | /* | 1571 | /* |
1582 | * Wake up any threads in rdma_init() or rdma_fini(). | 1572 | * Wake up any threads in rdma_init() or rdma_fini(). |
1583 | */ | 1573 | */ |
1584 | ep->com.wr_wait.done = 1; | 1574 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
1585 | ep->com.wr_wait.ret = -ECONNRESET; | ||
1586 | wake_up(&ep->com.wr_wait.wait); | ||
1587 | 1575 | ||
1588 | mutex_lock(&ep->com.mutex); | 1576 | mutex_lock(&ep->com.mutex); |
1589 | switch (ep->com.state) { | 1577 | switch (ep->com.state) { |
@@ -1710,14 +1698,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1710 | ep = lookup_tid(t, tid); | 1698 | ep = lookup_tid(t, tid); |
1711 | BUG_ON(!ep); | 1699 | BUG_ON(!ep); |
1712 | 1700 | ||
1713 | if (ep->com.qp) { | 1701 | if (ep && ep->com.qp) { |
1714 | printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, | 1702 | printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, |
1715 | ep->com.qp->wq.sq.qid); | 1703 | ep->com.qp->wq.sq.qid); |
1716 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | 1704 | attrs.next_state = C4IW_QP_STATE_TERMINATE; |
1717 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 1705 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
1718 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | 1706 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
1719 | } else | 1707 | } else |
1720 | printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid); | 1708 | printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); |
1721 | 1709 | ||
1722 | return 0; | 1710 | return 0; |
1723 | } | 1711 | } |
@@ -2296,14 +2284,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2296 | ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); | 2284 | ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); |
2297 | wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; | 2285 | wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; |
2298 | PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); | 2286 | PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); |
2299 | if (wr_waitp) { | 2287 | if (wr_waitp) |
2300 | if (ret) | 2288 | c4iw_wake_up(wr_waitp, ret ? -ret : 0); |
2301 | wr_waitp->ret = -ret; | ||
2302 | else | ||
2303 | wr_waitp->ret = 0; | ||
2304 | wr_waitp->done = 1; | ||
2305 | wake_up(&wr_waitp->wait); | ||
2306 | } | ||
2307 | kfree_skb(skb); | 2289 | kfree_skb(skb); |
2308 | break; | 2290 | break; |
2309 | case 2: | 2291 | case 2: |
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index e29172c2afcb..40a13cc633a3 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver"); | |||
44 | MODULE_LICENSE("Dual BSD/GPL"); | 44 | MODULE_LICENSE("Dual BSD/GPL"); |
45 | MODULE_VERSION(DRV_VERSION); | 45 | MODULE_VERSION(DRV_VERSION); |
46 | 46 | ||
47 | static LIST_HEAD(dev_list); | 47 | static LIST_HEAD(uld_ctx_list); |
48 | static DEFINE_MUTEX(dev_mutex); | 48 | static DEFINE_MUTEX(dev_mutex); |
49 | 49 | ||
50 | static struct dentry *c4iw_debugfs_root; | 50 | static struct dentry *c4iw_debugfs_root; |
@@ -370,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) | |||
370 | c4iw_destroy_resource(&rdev->resource); | 370 | c4iw_destroy_resource(&rdev->resource); |
371 | } | 371 | } |
372 | 372 | ||
373 | static void c4iw_remove(struct c4iw_dev *dev) | 373 | struct uld_ctx { |
374 | struct list_head entry; | ||
375 | struct cxgb4_lld_info lldi; | ||
376 | struct c4iw_dev *dev; | ||
377 | }; | ||
378 | |||
379 | static void c4iw_remove(struct uld_ctx *ctx) | ||
374 | { | 380 | { |
375 | PDBG("%s c4iw_dev %p\n", __func__, dev); | 381 | PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); |
376 | list_del(&dev->entry); | 382 | c4iw_unregister_device(ctx->dev); |
377 | if (dev->registered) | 383 | c4iw_rdev_close(&ctx->dev->rdev); |
378 | c4iw_unregister_device(dev); | 384 | idr_destroy(&ctx->dev->cqidr); |
379 | c4iw_rdev_close(&dev->rdev); | 385 | idr_destroy(&ctx->dev->qpidr); |
380 | idr_destroy(&dev->cqidr); | 386 | idr_destroy(&ctx->dev->mmidr); |
381 | idr_destroy(&dev->qpidr); | 387 | iounmap(ctx->dev->rdev.oc_mw_kva); |
382 | idr_destroy(&dev->mmidr); | 388 | ib_dealloc_device(&ctx->dev->ibdev); |
383 | iounmap(dev->rdev.oc_mw_kva); | 389 | ctx->dev = NULL; |
384 | ib_dealloc_device(&dev->ibdev); | ||
385 | } | 390 | } |
386 | 391 | ||
387 | static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | 392 | static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) |
@@ -392,7 +397,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
392 | devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); | 397 | devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); |
393 | if (!devp) { | 398 | if (!devp) { |
394 | printk(KERN_ERR MOD "Cannot allocate ib device\n"); | 399 | printk(KERN_ERR MOD "Cannot allocate ib device\n"); |
395 | return NULL; | 400 | return ERR_PTR(-ENOMEM); |
396 | } | 401 | } |
397 | devp->rdev.lldi = *infop; | 402 | devp->rdev.lldi = *infop; |
398 | 403 | ||
@@ -402,27 +407,23 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
402 | devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, | 407 | devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, |
403 | devp->rdev.lldi.vr->ocq.size); | 408 | devp->rdev.lldi.vr->ocq.size); |
404 | 409 | ||
405 | printk(KERN_INFO MOD "ocq memory: " | 410 | PDBG(KERN_INFO MOD "ocq memory: " |
406 | "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", | 411 | "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", |
407 | devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, | 412 | devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, |
408 | devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); | 413 | devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); |
409 | 414 | ||
410 | mutex_lock(&dev_mutex); | ||
411 | |||
412 | ret = c4iw_rdev_open(&devp->rdev); | 415 | ret = c4iw_rdev_open(&devp->rdev); |
413 | if (ret) { | 416 | if (ret) { |
414 | mutex_unlock(&dev_mutex); | 417 | mutex_unlock(&dev_mutex); |
415 | printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); | 418 | printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); |
416 | ib_dealloc_device(&devp->ibdev); | 419 | ib_dealloc_device(&devp->ibdev); |
417 | return NULL; | 420 | return ERR_PTR(ret); |
418 | } | 421 | } |
419 | 422 | ||
420 | idr_init(&devp->cqidr); | 423 | idr_init(&devp->cqidr); |
421 | idr_init(&devp->qpidr); | 424 | idr_init(&devp->qpidr); |
422 | idr_init(&devp->mmidr); | 425 | idr_init(&devp->mmidr); |
423 | spin_lock_init(&devp->lock); | 426 | spin_lock_init(&devp->lock); |
424 | list_add_tail(&devp->entry, &dev_list); | ||
425 | mutex_unlock(&dev_mutex); | ||
426 | 427 | ||
427 | if (c4iw_debugfs_root) { | 428 | if (c4iw_debugfs_root) { |
428 | devp->debugfs_root = debugfs_create_dir( | 429 | devp->debugfs_root = debugfs_create_dir( |
@@ -435,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
435 | 436 | ||
436 | static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) | 437 | static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) |
437 | { | 438 | { |
438 | struct c4iw_dev *dev; | 439 | struct uld_ctx *ctx; |
439 | static int vers_printed; | 440 | static int vers_printed; |
440 | int i; | 441 | int i; |
441 | 442 | ||
@@ -443,25 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) | |||
443 | printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", | 444 | printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", |
444 | DRV_VERSION); | 445 | DRV_VERSION); |
445 | 446 | ||
446 | dev = c4iw_alloc(infop); | 447 | ctx = kzalloc(sizeof *ctx, GFP_KERNEL); |
447 | if (!dev) | 448 | if (!ctx) { |
449 | ctx = ERR_PTR(-ENOMEM); | ||
448 | goto out; | 450 | goto out; |
451 | } | ||
452 | ctx->lldi = *infop; | ||
449 | 453 | ||
450 | PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", | 454 | PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", |
451 | __func__, pci_name(dev->rdev.lldi.pdev), | 455 | __func__, pci_name(ctx->lldi.pdev), |
452 | dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq, | 456 | ctx->lldi.nchan, ctx->lldi.nrxq, |
453 | dev->rdev.lldi.ntxq, dev->rdev.lldi.nports); | 457 | ctx->lldi.ntxq, ctx->lldi.nports); |
458 | |||
459 | mutex_lock(&dev_mutex); | ||
460 | list_add_tail(&ctx->entry, &uld_ctx_list); | ||
461 | mutex_unlock(&dev_mutex); | ||
454 | 462 | ||
455 | for (i = 0; i < dev->rdev.lldi.nrxq; i++) | 463 | for (i = 0; i < ctx->lldi.nrxq; i++) |
456 | PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]); | 464 | PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]); |
457 | out: | 465 | out: |
458 | return dev; | 466 | return ctx; |
459 | } | 467 | } |
460 | 468 | ||
461 | static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, | 469 | static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, |
462 | const struct pkt_gl *gl) | 470 | const struct pkt_gl *gl) |
463 | { | 471 | { |
464 | struct c4iw_dev *dev = handle; | 472 | struct uld_ctx *ctx = handle; |
473 | struct c4iw_dev *dev = ctx->dev; | ||
465 | struct sk_buff *skb; | 474 | struct sk_buff *skb; |
466 | const struct cpl_act_establish *rpl; | 475 | const struct cpl_act_establish *rpl; |
467 | unsigned int opcode; | 476 | unsigned int opcode; |
@@ -503,47 +512,49 @@ nomem: | |||
503 | 512 | ||
504 | static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) | 513 | static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) |
505 | { | 514 | { |
506 | struct c4iw_dev *dev = handle; | 515 | struct uld_ctx *ctx = handle; |
507 | 516 | ||
508 | PDBG("%s new_state %u\n", __func__, new_state); | 517 | PDBG("%s new_state %u\n", __func__, new_state); |
509 | switch (new_state) { | 518 | switch (new_state) { |
510 | case CXGB4_STATE_UP: | 519 | case CXGB4_STATE_UP: |
511 | printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev)); | 520 | printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); |
512 | if (!dev->registered) { | 521 | if (!ctx->dev) { |
513 | int ret; | 522 | int ret = 0; |
514 | ret = c4iw_register_device(dev); | 523 | |
515 | if (ret) | 524 | ctx->dev = c4iw_alloc(&ctx->lldi); |
525 | if (!IS_ERR(ctx->dev)) | ||
526 | ret = c4iw_register_device(ctx->dev); | ||
527 | if (IS_ERR(ctx->dev) || ret) | ||
516 | printk(KERN_ERR MOD | 528 | printk(KERN_ERR MOD |
517 | "%s: RDMA registration failed: %d\n", | 529 | "%s: RDMA registration failed: %d\n", |
518 | pci_name(dev->rdev.lldi.pdev), ret); | 530 | pci_name(ctx->lldi.pdev), ret); |
519 | } | 531 | } |
520 | break; | 532 | break; |
521 | case CXGB4_STATE_DOWN: | 533 | case CXGB4_STATE_DOWN: |
522 | printk(KERN_INFO MOD "%s: Down\n", | 534 | printk(KERN_INFO MOD "%s: Down\n", |
523 | pci_name(dev->rdev.lldi.pdev)); | 535 | pci_name(ctx->lldi.pdev)); |
524 | if (dev->registered) | 536 | if (ctx->dev) |
525 | c4iw_unregister_device(dev); | 537 | c4iw_remove(ctx); |
526 | break; | 538 | break; |
527 | case CXGB4_STATE_START_RECOVERY: | 539 | case CXGB4_STATE_START_RECOVERY: |
528 | printk(KERN_INFO MOD "%s: Fatal Error\n", | 540 | printk(KERN_INFO MOD "%s: Fatal Error\n", |
529 | pci_name(dev->rdev.lldi.pdev)); | 541 | pci_name(ctx->lldi.pdev)); |
530 | dev->rdev.flags |= T4_FATAL_ERROR; | 542 | if (ctx->dev) { |
531 | if (dev->registered) { | ||
532 | struct ib_event event; | 543 | struct ib_event event; |
533 | 544 | ||
545 | ctx->dev->rdev.flags |= T4_FATAL_ERROR; | ||
534 | memset(&event, 0, sizeof event); | 546 | memset(&event, 0, sizeof event); |
535 | event.event = IB_EVENT_DEVICE_FATAL; | 547 | event.event = IB_EVENT_DEVICE_FATAL; |
536 | event.device = &dev->ibdev; | 548 | event.device = &ctx->dev->ibdev; |
537 | ib_dispatch_event(&event); | 549 | ib_dispatch_event(&event); |
538 | c4iw_unregister_device(dev); | 550 | c4iw_remove(ctx); |
539 | } | 551 | } |
540 | break; | 552 | break; |
541 | case CXGB4_STATE_DETACH: | 553 | case CXGB4_STATE_DETACH: |
542 | printk(KERN_INFO MOD "%s: Detach\n", | 554 | printk(KERN_INFO MOD "%s: Detach\n", |
543 | pci_name(dev->rdev.lldi.pdev)); | 555 | pci_name(ctx->lldi.pdev)); |
544 | mutex_lock(&dev_mutex); | 556 | if (ctx->dev) |
545 | c4iw_remove(dev); | 557 | c4iw_remove(ctx); |
546 | mutex_unlock(&dev_mutex); | ||
547 | break; | 558 | break; |
548 | } | 559 | } |
549 | return 0; | 560 | return 0; |
@@ -576,11 +587,13 @@ static int __init c4iw_init_module(void) | |||
576 | 587 | ||
577 | static void __exit c4iw_exit_module(void) | 588 | static void __exit c4iw_exit_module(void) |
578 | { | 589 | { |
579 | struct c4iw_dev *dev, *tmp; | 590 | struct uld_ctx *ctx, *tmp; |
580 | 591 | ||
581 | mutex_lock(&dev_mutex); | 592 | mutex_lock(&dev_mutex); |
582 | list_for_each_entry_safe(dev, tmp, &dev_list, entry) { | 593 | list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) { |
583 | c4iw_remove(dev); | 594 | if (ctx->dev) |
595 | c4iw_remove(ctx); | ||
596 | kfree(ctx); | ||
584 | } | 597 | } |
585 | mutex_unlock(&dev_mutex); | 598 | mutex_unlock(&dev_mutex); |
586 | cxgb4_unregister_uld(CXGB4_ULD_RDMA); | 599 | cxgb4_unregister_uld(CXGB4_ULD_RDMA); |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 9f6166f59268..35d2a5dd9bb4 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -131,42 +131,58 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) | |||
131 | 131 | ||
132 | #define C4IW_WR_TO (10*HZ) | 132 | #define C4IW_WR_TO (10*HZ) |
133 | 133 | ||
134 | enum { | ||
135 | REPLY_READY = 0, | ||
136 | }; | ||
137 | |||
134 | struct c4iw_wr_wait { | 138 | struct c4iw_wr_wait { |
135 | wait_queue_head_t wait; | 139 | wait_queue_head_t wait; |
136 | int done; | 140 | unsigned long status; |
137 | int ret; | 141 | int ret; |
138 | }; | 142 | }; |
139 | 143 | ||
140 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) | 144 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) |
141 | { | 145 | { |
142 | wr_waitp->ret = 0; | 146 | wr_waitp->ret = 0; |
143 | wr_waitp->done = 0; | 147 | wr_waitp->status = 0; |
144 | init_waitqueue_head(&wr_waitp->wait); | 148 | init_waitqueue_head(&wr_waitp->wait); |
145 | } | 149 | } |
146 | 150 | ||
151 | static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) | ||
152 | { | ||
153 | wr_waitp->ret = ret; | ||
154 | set_bit(REPLY_READY, &wr_waitp->status); | ||
155 | wake_up(&wr_waitp->wait); | ||
156 | } | ||
157 | |||
147 | static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, | 158 | static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, |
148 | struct c4iw_wr_wait *wr_waitp, | 159 | struct c4iw_wr_wait *wr_waitp, |
149 | u32 hwtid, u32 qpid, | 160 | u32 hwtid, u32 qpid, |
150 | const char *func) | 161 | const char *func) |
151 | { | 162 | { |
152 | unsigned to = C4IW_WR_TO; | 163 | unsigned to = C4IW_WR_TO; |
153 | do { | 164 | int ret; |
154 | 165 | ||
155 | wait_event_timeout(wr_waitp->wait, wr_waitp->done, to); | 166 | do { |
156 | if (!wr_waitp->done) { | 167 | ret = wait_event_timeout(wr_waitp->wait, |
168 | test_and_clear_bit(REPLY_READY, &wr_waitp->status), to); | ||
169 | if (!ret) { | ||
157 | printk(KERN_ERR MOD "%s - Device %s not responding - " | 170 | printk(KERN_ERR MOD "%s - Device %s not responding - " |
158 | "tid %u qpid %u\n", func, | 171 | "tid %u qpid %u\n", func, |
159 | pci_name(rdev->lldi.pdev), hwtid, qpid); | 172 | pci_name(rdev->lldi.pdev), hwtid, qpid); |
173 | if (c4iw_fatal_error(rdev)) { | ||
174 | wr_waitp->ret = -EIO; | ||
175 | break; | ||
176 | } | ||
160 | to = to << 2; | 177 | to = to << 2; |
161 | } | 178 | } |
162 | } while (!wr_waitp->done); | 179 | } while (!ret); |
163 | if (wr_waitp->ret) | 180 | if (wr_waitp->ret) |
164 | printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n", | 181 | PDBG("%s: FW reply %d tid %u qpid %u\n", |
165 | pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); | 182 | pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); |
166 | return wr_waitp->ret; | 183 | return wr_waitp->ret; |
167 | } | 184 | } |
168 | 185 | ||
169 | |||
170 | struct c4iw_dev { | 186 | struct c4iw_dev { |
171 | struct ib_device ibdev; | 187 | struct ib_device ibdev; |
172 | struct c4iw_rdev rdev; | 188 | struct c4iw_rdev rdev; |
@@ -175,9 +191,7 @@ struct c4iw_dev { | |||
175 | struct idr qpidr; | 191 | struct idr qpidr; |
176 | struct idr mmidr; | 192 | struct idr mmidr; |
177 | spinlock_t lock; | 193 | spinlock_t lock; |
178 | struct list_head entry; | ||
179 | struct dentry *debugfs_root; | 194 | struct dentry *debugfs_root; |
180 | u8 registered; | ||
181 | }; | 195 | }; |
182 | 196 | ||
183 | static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) | 197 | static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) |
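The iw_cxgb4.h hunk above replaces the racy done flag with an atomic REPLY_READY bit and funnels every wakeup through c4iw_wake_up(). A condensed sketch of the pairing with simplified, illustrative names (not the driver's own structure):

    struct reply_wait {
            wait_queue_head_t wait;
            unsigned long     status;   /* bit 0: reply ready */
            int               ret;
    };

    static void reply_wake_up(struct reply_wait *w, int ret)
    {
            w->ret = ret;
            set_bit(0, &w->status);     /* publish the result before waking */
            wake_up(&w->wait);
    }

    static int reply_wait_for(struct reply_wait *w, unsigned long timeout)
    {
            /* test_and_clear_bit() consumes the wakeup, so a stale bit left
             * by an earlier, timed-out request cannot satisfy a new wait. */
            if (!wait_event_timeout(w->wait,
                                    test_and_clear_bit(0, &w->status), timeout))
                    return -ETIMEDOUT;
            return w->ret;
    }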
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index f66dd8bf5128..5b9e4220ca08 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c | |||
@@ -516,7 +516,6 @@ int c4iw_register_device(struct c4iw_dev *dev) | |||
516 | if (ret) | 516 | if (ret) |
517 | goto bail2; | 517 | goto bail2; |
518 | } | 518 | } |
519 | dev->registered = 1; | ||
520 | return 0; | 519 | return 0; |
521 | bail2: | 520 | bail2: |
522 | ib_unregister_device(&dev->ibdev); | 521 | ib_unregister_device(&dev->ibdev); |
@@ -535,6 +534,5 @@ void c4iw_unregister_device(struct c4iw_dev *dev) | |||
535 | c4iw_class_attributes[i]); | 534 | c4iw_class_attributes[i]); |
536 | ib_unregister_device(&dev->ibdev); | 535 | ib_unregister_device(&dev->ibdev); |
537 | kfree(dev->ibdev.iwcm); | 536 | kfree(dev->ibdev.iwcm); |
538 | dev->registered = 0; | ||
539 | return; | 537 | return; |
540 | } | 538 | } |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 70a5a3c646da..3b773b05a898 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -214,7 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
214 | V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ | 214 | V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ |
215 | V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ | 215 | V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ |
216 | V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ | 216 | V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ |
217 | t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 | | 217 | (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) | |
218 | V_FW_RI_RES_WR_IQID(scq->cqid)); | 218 | V_FW_RI_RES_WR_IQID(scq->cqid)); |
219 | res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( | 219 | res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( |
220 | V_FW_RI_RES_WR_DCAEN(0) | | 220 | V_FW_RI_RES_WR_DCAEN(0) | |
@@ -1210,7 +1210,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1210 | if (ret) { | 1210 | if (ret) { |
1211 | if (internal) | 1211 | if (internal) |
1212 | c4iw_get_ep(&qhp->ep->com); | 1212 | c4iw_get_ep(&qhp->ep->com); |
1213 | disconnect = abort = 1; | ||
1214 | goto err; | 1213 | goto err; |
1215 | } | 1214 | } |
1216 | break; | 1215 | break; |
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 58c0e417bc30..be24ac726114 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -398,7 +398,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
398 | struct ipath_devdata *dd; | 398 | struct ipath_devdata *dd; |
399 | unsigned long long addr; | 399 | unsigned long long addr; |
400 | u32 bar0 = 0, bar1 = 0; | 400 | u32 bar0 = 0, bar1 = 0; |
401 | u8 rev; | ||
402 | 401 | ||
403 | dd = ipath_alloc_devdata(pdev); | 402 | dd = ipath_alloc_devdata(pdev); |
404 | if (IS_ERR(dd)) { | 403 | if (IS_ERR(dd)) { |
@@ -540,13 +539,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
540 | goto bail_regions; | 539 | goto bail_regions; |
541 | } | 540 | } |
542 | 541 | ||
543 | ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); | 542 | dd->ipath_pcirev = pdev->revision; |
544 | if (ret) { | ||
545 | ipath_dev_err(dd, "Failed to read PCI revision ID unit " | ||
546 | "%u: err %d\n", dd->ipath_unit, -ret); | ||
547 | goto bail_regions; /* shouldn't ever happen */ | ||
548 | } | ||
549 | dd->ipath_pcirev = rev; | ||
550 | 543 | ||
551 | #if defined(__powerpc__) | 544 | #if defined(__powerpc__) |
552 | /* There isn't a generic way to specify writethrough mappings */ | 545 | /* There isn't a generic way to specify writethrough mappings */ |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 33c7eedaba6c..e74cdf9ef471 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -2563,7 +2563,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2563 | u16 last_ae; | 2563 | u16 last_ae; |
2564 | u8 original_hw_tcp_state; | 2564 | u8 original_hw_tcp_state; |
2565 | u8 original_ibqp_state; | 2565 | u8 original_ibqp_state; |
2566 | enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK; | 2566 | int disconn_status = 0; |
2567 | int issue_disconn = 0; | 2567 | int issue_disconn = 0; |
2568 | int issue_close = 0; | 2568 | int issue_close = 0; |
2569 | int issue_flush = 0; | 2569 | int issue_flush = 0; |
@@ -2605,7 +2605,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2605 | (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { | 2605 | (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { |
2606 | issue_disconn = 1; | 2606 | issue_disconn = 1; |
2607 | if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) | 2607 | if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) |
2608 | disconn_status = IW_CM_EVENT_STATUS_RESET; | 2608 | disconn_status = -ECONNRESET; |
2609 | } | 2609 | } |
2610 | 2610 | ||
2611 | if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || | 2611 | if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || |
@@ -2666,7 +2666,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2666 | cm_id->provider_data = nesqp; | 2666 | cm_id->provider_data = nesqp; |
2667 | /* Send up the close complete event */ | 2667 | /* Send up the close complete event */ |
2668 | cm_event.event = IW_CM_EVENT_CLOSE; | 2668 | cm_event.event = IW_CM_EVENT_CLOSE; |
2669 | cm_event.status = IW_CM_EVENT_STATUS_OK; | 2669 | cm_event.status = 0; |
2670 | cm_event.provider_data = cm_id->provider_data; | 2670 | cm_event.provider_data = cm_id->provider_data; |
2671 | cm_event.local_addr = cm_id->local_addr; | 2671 | cm_event.local_addr = cm_id->local_addr; |
2672 | cm_event.remote_addr = cm_id->remote_addr; | 2672 | cm_event.remote_addr = cm_id->remote_addr; |
@@ -2966,7 +2966,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2966 | nes_add_ref(&nesqp->ibqp); | 2966 | nes_add_ref(&nesqp->ibqp); |
2967 | 2967 | ||
2968 | cm_event.event = IW_CM_EVENT_ESTABLISHED; | 2968 | cm_event.event = IW_CM_EVENT_ESTABLISHED; |
2969 | cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; | 2969 | cm_event.status = 0; |
2970 | cm_event.provider_data = (void *)nesqp; | 2970 | cm_event.provider_data = (void *)nesqp; |
2971 | cm_event.local_addr = cm_id->local_addr; | 2971 | cm_event.local_addr = cm_id->local_addr; |
2972 | cm_event.remote_addr = cm_id->remote_addr; | 2972 | cm_event.remote_addr = cm_id->remote_addr; |
@@ -3377,7 +3377,7 @@ static void cm_event_connected(struct nes_cm_event *event) | |||
3377 | 3377 | ||
3378 | /* notify OF layer we successfully created the requested connection */ | 3378 | /* notify OF layer we successfully created the requested connection */ |
3379 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; | 3379 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; |
3380 | cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; | 3380 | cm_event.status = 0; |
3381 | cm_event.provider_data = cm_id->provider_data; | 3381 | cm_event.provider_data = cm_id->provider_data; |
3382 | cm_event.local_addr.sin_family = AF_INET; | 3382 | cm_event.local_addr.sin_family = AF_INET; |
3383 | cm_event.local_addr.sin_port = cm_id->local_addr.sin_port; | 3383 | cm_event.local_addr.sin_port = cm_id->local_addr.sin_port; |
@@ -3484,7 +3484,7 @@ static void cm_event_reset(struct nes_cm_event *event) | |||
3484 | nesqp->cm_id = NULL; | 3484 | nesqp->cm_id = NULL; |
3485 | /* cm_id->provider_data = NULL; */ | 3485 | /* cm_id->provider_data = NULL; */ |
3486 | cm_event.event = IW_CM_EVENT_DISCONNECT; | 3486 | cm_event.event = IW_CM_EVENT_DISCONNECT; |
3487 | cm_event.status = IW_CM_EVENT_STATUS_RESET; | 3487 | cm_event.status = -ECONNRESET; |
3488 | cm_event.provider_data = cm_id->provider_data; | 3488 | cm_event.provider_data = cm_id->provider_data; |
3489 | cm_event.local_addr = cm_id->local_addr; | 3489 | cm_event.local_addr = cm_id->local_addr; |
3490 | cm_event.remote_addr = cm_id->remote_addr; | 3490 | cm_event.remote_addr = cm_id->remote_addr; |
@@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event) | |||
3495 | ret = cm_id->event_handler(cm_id, &cm_event); | 3495 | ret = cm_id->event_handler(cm_id, &cm_event); |
3496 | atomic_inc(&cm_closes); | 3496 | atomic_inc(&cm_closes); |
3497 | cm_event.event = IW_CM_EVENT_CLOSE; | 3497 | cm_event.event = IW_CM_EVENT_CLOSE; |
3498 | cm_event.status = IW_CM_EVENT_STATUS_OK; | 3498 | cm_event.status = 0; |
3499 | cm_event.provider_data = cm_id->provider_data; | 3499 | cm_event.provider_data = cm_id->provider_data; |
3500 | cm_event.local_addr = cm_id->local_addr; | 3500 | cm_event.local_addr = cm_id->local_addr; |
3501 | cm_event.remote_addr = cm_id->remote_addr; | 3501 | cm_event.remote_addr = cm_id->remote_addr; |
@@ -3534,7 +3534,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) | |||
3534 | cm_node, cm_id, jiffies); | 3534 | cm_node, cm_id, jiffies); |
3535 | 3535 | ||
3536 | cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; | 3536 | cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; |
3537 | cm_event.status = IW_CM_EVENT_STATUS_OK; | 3537 | cm_event.status = 0; |
3538 | cm_event.provider_data = (void *)cm_node; | 3538 | cm_event.provider_data = (void *)cm_node; |
3539 | 3539 | ||
3540 | cm_event.local_addr.sin_family = AF_INET; | 3540 | cm_event.local_addr.sin_family = AF_INET; |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 26d8018c0a7c..95ca93ceedac 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -1484,7 +1484,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) | |||
1484 | (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { | 1484 | (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { |
1485 | cm_id = nesqp->cm_id; | 1485 | cm_id = nesqp->cm_id; |
1486 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; | 1486 | cm_event.event = IW_CM_EVENT_CONNECT_REPLY; |
1487 | cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT; | 1487 | cm_event.status = -ETIMEDOUT; |
1488 | cm_event.local_addr = cm_id->local_addr; | 1488 | cm_event.local_addr = cm_id->local_addr; |
1489 | cm_event.remote_addr = cm_id->remote_addr; | 1489 | cm_event.remote_addr = cm_id->remote_addr; |
1490 | cm_event.private_data = NULL; | 1490 | cm_event.private_data = NULL; |
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 6bab3eaea70f..9f53e68a096a 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -7534,7 +7534,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) | |||
7534 | ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); | 7534 | ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); |
7535 | tstart = get_jiffies_64(); | 7535 | tstart = get_jiffies_64(); |
7536 | while (chan_done && | 7536 | while (chan_done && |
7537 | !time_after64(tstart, tstart + msecs_to_jiffies(500))) { | 7537 | !time_after64(get_jiffies_64(), |
7538 | tstart + msecs_to_jiffies(500))) { | ||
7538 | msleep(20); | 7539 | msleep(20); |
7539 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | 7540 | for (chan = 0; chan < SERDES_CHANS; ++chan) { |
7540 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), | 7541 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), |
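The qib_iba7322.c fix above matters because the original condition compared tstart against tstart + 500 ms, which can never be true, so the timeout never expired and the loop was bounded only by the hardware flag. The corrected bounded-poll idiom, sketched with a hypothetical poll_hardware() completion check:

    u64 tstart = get_jiffies_64();
    bool done = false;

    while (!done &&
           !time_after64(get_jiffies_64(), tstart + msecs_to_jiffies(500))) {
            msleep(20);
            done = poll_hardware();     /* hypothetical completion check */
    }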
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 48b6674cbc49..891cc2ff5f00 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c | |||
@@ -526,11 +526,8 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) | |||
526 | */ | 526 | */ |
527 | devid = parent->device; | 527 | devid = parent->device; |
528 | if (devid >= 0x25e2 && devid <= 0x25fa) { | 528 | if (devid >= 0x25e2 && devid <= 0x25fa) { |
529 | u8 rev; | ||
530 | |||
531 | /* 5000 P/V/X/Z */ | 529 | /* 5000 P/V/X/Z */ |
532 | pci_read_config_byte(parent, PCI_REVISION_ID, &rev); | 530 | if (parent->revision <= 0xb2) |
533 | if (rev <= 0xb2) | ||
534 | bits = 1U << 10; | 531 | bits = 1U << 10; |
535 | else | 532 | else |
536 | bits = 7U << 10; | 533 | bits = 7U << 10; |
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c index 1839194ea987..10bcd4ae5402 100644 --- a/drivers/input/keyboard/atakbd.c +++ b/drivers/input/keyboard/atakbd.c | |||
@@ -223,8 +223,9 @@ static int __init atakbd_init(void) | |||
223 | return -ENODEV; | 223 | return -ENODEV; |
224 | 224 | ||
225 | // need to init core driver if not already done so | 225 | // need to init core driver if not already done so |
226 | if (atari_keyb_init()) | 226 | error = atari_keyb_init(); |
227 | return -ENODEV; | 227 | if (error) |
228 | return error; | ||
228 | 229 | ||
229 | atakbd_dev = input_allocate_device(); | 230 | atakbd_dev = input_allocate_device(); |
230 | if (!atakbd_dev) | 231 | if (!atakbd_dev) |
diff --git a/drivers/input/mouse/atarimouse.c b/drivers/input/mouse/atarimouse.c index adf45b3040e9..5c4a692bf73a 100644 --- a/drivers/input/mouse/atarimouse.c +++ b/drivers/input/mouse/atarimouse.c | |||
@@ -77,15 +77,15 @@ static void atamouse_interrupt(char *buf) | |||
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | /* only relative events get here */ | 79 | /* only relative events get here */ |
80 | dx = buf[1]; | 80 | dx = buf[1]; |
81 | dy = -buf[2]; | 81 | dy = buf[2]; |
82 | 82 | ||
83 | input_report_rel(atamouse_dev, REL_X, dx); | 83 | input_report_rel(atamouse_dev, REL_X, dx); |
84 | input_report_rel(atamouse_dev, REL_Y, dy); | 84 | input_report_rel(atamouse_dev, REL_Y, dy); |
85 | 85 | ||
86 | input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x1); | 86 | input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x4); |
87 | input_report_key(atamouse_dev, BTN_MIDDLE, buttons & 0x2); | 87 | input_report_key(atamouse_dev, BTN_MIDDLE, buttons & 0x2); |
88 | input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x4); | 88 | input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x1); |
89 | 89 | ||
90 | input_sync(atamouse_dev); | 90 | input_sync(atamouse_dev); |
91 | 91 | ||
@@ -108,7 +108,7 @@ static int atamouse_open(struct input_dev *dev) | |||
108 | static void atamouse_close(struct input_dev *dev) | 108 | static void atamouse_close(struct input_dev *dev) |
109 | { | 109 | { |
110 | ikbd_mouse_disable(); | 110 | ikbd_mouse_disable(); |
111 | atari_mouse_interrupt_hook = NULL; | 111 | atari_input_mouse_interrupt_hook = NULL; |
112 | } | 112 | } |
113 | 113 | ||
114 | static int __init atamouse_init(void) | 114 | static int __init atamouse_init(void) |
@@ -118,8 +118,9 @@ static int __init atamouse_init(void) | |||
118 | if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) | 118 | if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) |
119 | return -ENODEV; | 119 | return -ENODEV; |
120 | 120 | ||
121 | if (!atari_keyb_init()) | 121 | error = atari_keyb_init(); |
122 | return -ENODEV; | 122 | if (error) |
123 | return error; | ||
123 | 124 | ||
124 | atamouse_dev = input_allocate_device(); | 125 | atamouse_dev = input_allocate_device(); |
125 | if (!atamouse_dev) | 126 | if (!atamouse_dev) |
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index c24946f51256..1de1c19dad30 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -281,17 +281,24 @@ struct ser_req { | |||
281 | u8 command; | 281 | u8 command; |
282 | u8 ref_off; | 282 | u8 ref_off; |
283 | u16 scratch; | 283 | u16 scratch; |
284 | __be16 sample; | ||
285 | struct spi_message msg; | 284 | struct spi_message msg; |
286 | struct spi_transfer xfer[6]; | 285 | struct spi_transfer xfer[6]; |
286 | /* | ||
287 | * DMA (thus cache coherency maintenance) requires the | ||
288 | * transfer buffers to live in their own cache lines. | ||
289 | */ | ||
290 | __be16 sample ____cacheline_aligned; | ||
287 | }; | 291 | }; |
288 | 292 | ||
289 | struct ads7845_ser_req { | 293 | struct ads7845_ser_req { |
290 | u8 command[3]; | 294 | u8 command[3]; |
291 | u8 pwrdown[3]; | ||
292 | u8 sample[3]; | ||
293 | struct spi_message msg; | 295 | struct spi_message msg; |
294 | struct spi_transfer xfer[2]; | 296 | struct spi_transfer xfer[2]; |
297 | /* | ||
298 | * DMA (thus cache coherency maintenance) requires the | ||
299 | * transfer buffers to live in their own cache lines. | ||
300 | */ | ||
301 | u8 sample[3] ____cacheline_aligned; | ||
295 | }; | 302 | }; |
296 | 303 | ||
297 | static int ads7846_read12_ser(struct device *dev, unsigned command) | 304 | static int ads7846_read12_ser(struct device *dev, unsigned command) |
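The ads7846 hunk above moves the SPI receive buffers to the end of their request structures and marks them ____cacheline_aligned, so cache maintenance for the DMA transfer cannot clobber, or be clobbered by, CPU writes to neighbouring fields. The layout pattern, reduced to a sketch with invented field names:

    struct sample_req {
            u8                  command;    /* CPU-only bookkeeping */
            struct spi_message  msg;
            struct spi_transfer xfer[2];
            /* DMA target: starts on its own cache line, nothing follows it */
            __be16              sample ____cacheline_aligned;
    };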
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c index e7089a1f6cb6..b37e6186d0fa 100644 --- a/drivers/leds/leds-lm3530.c +++ b/drivers/leds/leds-lm3530.c | |||
@@ -349,6 +349,7 @@ static const struct i2c_device_id lm3530_id[] = { | |||
349 | {LM3530_NAME, 0}, | 349 | {LM3530_NAME, 0}, |
350 | {} | 350 | {} |
351 | }; | 351 | }; |
352 | MODULE_DEVICE_TABLE(i2c, lm3530_id); | ||
352 | 353 | ||
353 | static struct i2c_driver lm3530_i2c_driver = { | 354 | static struct i2c_driver lm3530_i2c_driver = { |
354 | .probe = lm3530_probe, | 355 | .probe = lm3530_probe, |
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig index 0aaa0597a622..34ae49dc557c 100644 --- a/drivers/lguest/Kconfig +++ b/drivers/lguest/Kconfig | |||
@@ -5,8 +5,10 @@ config LGUEST | |||
5 | ---help--- | 5 | ---help--- |
6 | This is a very simple module which allows you to run | 6 | This is a very simple module which allows you to run |
7 | multiple instances of the same Linux kernel, using the | 7 | multiple instances of the same Linux kernel, using the |
8 | "lguest" command found in the Documentation/lguest directory. | 8 | "lguest" command found in the Documentation/virtual/lguest |
9 | directory. | ||
10 | |||
9 | Note that "lguest" is pronounced to rhyme with "fell quest", | 11 | Note that "lguest" is pronounced to rhyme with "fell quest", |
10 | not "rustyvisor". See Documentation/lguest/lguest.txt. | 12 | not "rustyvisor". See Documentation/virtual/lguest/lguest.txt. |
11 | 13 | ||
12 | If unsure, say N. If curious, say M. If masochistic, say Y. | 14 | If unsure, say N. If curious, say M. If masochistic, say Y. |
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile index 7d463c26124f..8ac947c7e7c7 100644 --- a/drivers/lguest/Makefile +++ b/drivers/lguest/Makefile | |||
@@ -18,7 +18,7 @@ Mastery: PREFIX=M | |||
18 | Beer: | 18 | Beer: |
19 | @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}" | 19 | @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}" |
20 | Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery: | 20 | Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery: |
21 | @sh ../../Documentation/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` | 21 | @sh ../../Documentation/virtual/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` |
22 | Puppy: | 22 | Puppy: |
23 | @clear | 23 | @clear |
24 | @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n" | 24 | @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n" |
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c index c820e2f53527..3f442003623d 100644 --- a/drivers/media/video/cx88/cx88-input.c +++ b/drivers/media/video/cx88/cx88-input.c | |||
@@ -524,7 +524,7 @@ void cx88_ir_irq(struct cx88_core *core) | |||
524 | for (todo = 32; todo > 0; todo -= bits) { | 524 | for (todo = 32; todo > 0; todo -= bits) { |
525 | ev.pulse = samples & 0x80000000 ? false : true; | 525 | ev.pulse = samples & 0x80000000 ? false : true; |
526 | bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples)); | 526 | bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples)); |
527 | ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate); | 527 | ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate; |
528 | ir_raw_event_store_with_filter(ir->dev, &ev); | 528 | ir_raw_event_store_with_filter(ir->dev, &ev); |
529 | samples <<= bits; | 529 | samples <<= bits; |
530 | } | 530 | } |
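The cx88-input change above reads as an overflow guard rather than a cosmetic reordering: with bits up to 32, bits * NSEC_PER_SEC can reach roughly 3.2 * 10^10, which does not fit in the 32-bit arithmetic used on 32-bit builds, whereas bits * (NSEC_PER_SEC / 1000) peaks at 3.2 * 10^7. Nothing is lost by dividing first, since NSEC_PER_SEC is an exact multiple of 1000.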
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 3973f9a94753..ddb4c091dedc 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c | |||
@@ -136,11 +136,50 @@ unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl, | |||
136 | } | 136 | } |
137 | EXPORT_SYMBOL(soc_camera_apply_sensor_flags); | 137 | EXPORT_SYMBOL(soc_camera_apply_sensor_flags); |
138 | 138 | ||
139 | #define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ | ||
140 | ((x) >> 24) & 0xff | ||
141 | |||
142 | static int soc_camera_try_fmt(struct soc_camera_device *icd, | ||
143 | struct v4l2_format *f) | ||
144 | { | ||
145 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); | ||
146 | struct v4l2_pix_format *pix = &f->fmt.pix; | ||
147 | int ret; | ||
148 | |||
149 | dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n", | ||
150 | pixfmtstr(pix->pixelformat), pix->width, pix->height); | ||
151 | |||
152 | pix->bytesperline = 0; | ||
153 | pix->sizeimage = 0; | ||
154 | |||
155 | ret = ici->ops->try_fmt(icd, f); | ||
156 | if (ret < 0) | ||
157 | return ret; | ||
158 | |||
159 | if (!pix->sizeimage) { | ||
160 | if (!pix->bytesperline) { | ||
161 | const struct soc_camera_format_xlate *xlate; | ||
162 | |||
163 | xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); | ||
164 | if (!xlate) | ||
165 | return -EINVAL; | ||
166 | |||
167 | ret = soc_mbus_bytes_per_line(pix->width, | ||
168 | xlate->host_fmt); | ||
169 | if (ret > 0) | ||
170 | pix->bytesperline = ret; | ||
171 | } | ||
172 | if (pix->bytesperline) | ||
173 | pix->sizeimage = pix->bytesperline * pix->height; | ||
174 | } | ||
175 | |||
176 | return 0; | ||
177 | } | ||
178 | |||
139 | static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, | 179 | static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, |
140 | struct v4l2_format *f) | 180 | struct v4l2_format *f) |
141 | { | 181 | { |
142 | struct soc_camera_device *icd = file->private_data; | 182 | struct soc_camera_device *icd = file->private_data; |
143 | struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); | ||
144 | 183 | ||
145 | WARN_ON(priv != file->private_data); | 184 | WARN_ON(priv != file->private_data); |
146 | 185 | ||
@@ -149,7 +188,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, | |||
149 | return -EINVAL; | 188 | return -EINVAL; |
150 | 189 | ||
151 | /* limit format to hardware capabilities */ | 190 | /* limit format to hardware capabilities */ |
152 | return ici->ops->try_fmt(icd, f); | 191 | return soc_camera_try_fmt(icd, f); |
153 | } | 192 | } |
154 | 193 | ||
155 | static int soc_camera_enum_input(struct file *file, void *priv, | 194 | static int soc_camera_enum_input(struct file *file, void *priv, |
@@ -362,9 +401,6 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd) | |||
362 | icd->user_formats = NULL; | 401 | icd->user_formats = NULL; |
363 | } | 402 | } |
364 | 403 | ||
365 | #define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ | ||
366 | ((x) >> 24) & 0xff | ||
367 | |||
368 | /* Called with .vb_lock held, or from the first open(2), see comment there */ | 404 | /* Called with .vb_lock held, or from the first open(2), see comment there */ |
369 | static int soc_camera_set_fmt(struct soc_camera_device *icd, | 405 | static int soc_camera_set_fmt(struct soc_camera_device *icd, |
370 | struct v4l2_format *f) | 406 | struct v4l2_format *f) |
@@ -377,7 +413,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd, | |||
377 | pixfmtstr(pix->pixelformat), pix->width, pix->height); | 413 | pixfmtstr(pix->pixelformat), pix->width, pix->height); |
378 | 414 | ||
379 | /* We always call try_fmt() before set_fmt() or set_crop() */ | 415 | /* We always call try_fmt() before set_fmt() or set_crop() */ |
380 | ret = ici->ops->try_fmt(icd, f); | 416 | ret = soc_camera_try_fmt(icd, f); |
381 | if (ret < 0) | 417 | if (ret < 0) |
382 | return ret; | 418 | return ret; |
383 | 419 | ||
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index 5aeaf876ba9b..4aae501f02d0 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c | |||
@@ -155,8 +155,10 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, | |||
155 | sd->v4l2_dev = v4l2_dev; | 155 | sd->v4l2_dev = v4l2_dev; |
156 | if (sd->internal_ops && sd->internal_ops->registered) { | 156 | if (sd->internal_ops && sd->internal_ops->registered) { |
157 | err = sd->internal_ops->registered(sd); | 157 | err = sd->internal_ops->registered(sd); |
158 | if (err) | 158 | if (err) { |
159 | module_put(sd->owner); | ||
159 | return err; | 160 | return err; |
161 | } | ||
160 | } | 162 | } |
161 | 163 | ||
162 | /* This just returns 0 if either of the two args is NULL */ | 164 | /* This just returns 0 if either of the two args is NULL */ |
@@ -164,6 +166,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, | |||
164 | if (err) { | 166 | if (err) { |
165 | if (sd->internal_ops && sd->internal_ops->unregistered) | 167 | if (sd->internal_ops && sd->internal_ops->unregistered) |
166 | sd->internal_ops->unregistered(sd); | 168 | sd->internal_ops->unregistered(sd); |
169 | module_put(sd->owner); | ||
167 | return err; | 170 | return err; |
168 | } | 171 | } |
169 | 172 | ||
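The v4l2-device.c hunk above balances the module reference taken earlier in v4l2_device_register_subdev(): once the subdevice has grabbed its owner, every failure exit must drop that reference again. The shape of the pattern, with a hypothetical registration step:

    if (!try_module_get(sd->owner))
            return -ENODEV;

    err = do_register(sd);              /* hypothetical registration step */
    if (err) {
            module_put(sd->owner);      /* undo the reference on any failure */
            return err;
    }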
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c index 0b8064490676..812729ebf09e 100644 --- a/drivers/media/video/v4l2-subdev.c +++ b/drivers/media/video/v4l2-subdev.c | |||
@@ -155,25 +155,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg) | |||
155 | 155 | ||
156 | switch (cmd) { | 156 | switch (cmd) { |
157 | case VIDIOC_QUERYCTRL: | 157 | case VIDIOC_QUERYCTRL: |
158 | return v4l2_subdev_queryctrl(sd, arg); | 158 | return v4l2_queryctrl(sd->ctrl_handler, arg); |
159 | 159 | ||
160 | case VIDIOC_QUERYMENU: | 160 | case VIDIOC_QUERYMENU: |
161 | return v4l2_subdev_querymenu(sd, arg); | 161 | return v4l2_querymenu(sd->ctrl_handler, arg); |
162 | 162 | ||
163 | case VIDIOC_G_CTRL: | 163 | case VIDIOC_G_CTRL: |
164 | return v4l2_subdev_g_ctrl(sd, arg); | 164 | return v4l2_g_ctrl(sd->ctrl_handler, arg); |
165 | 165 | ||
166 | case VIDIOC_S_CTRL: | 166 | case VIDIOC_S_CTRL: |
167 | return v4l2_subdev_s_ctrl(sd, arg); | 167 | return v4l2_s_ctrl(sd->ctrl_handler, arg); |
168 | 168 | ||
169 | case VIDIOC_G_EXT_CTRLS: | 169 | case VIDIOC_G_EXT_CTRLS: |
170 | return v4l2_subdev_g_ext_ctrls(sd, arg); | 170 | return v4l2_g_ext_ctrls(sd->ctrl_handler, arg); |
171 | 171 | ||
172 | case VIDIOC_S_EXT_CTRLS: | 172 | case VIDIOC_S_EXT_CTRLS: |
173 | return v4l2_subdev_s_ext_ctrls(sd, arg); | 173 | return v4l2_s_ext_ctrls(sd->ctrl_handler, arg); |
174 | 174 | ||
175 | case VIDIOC_TRY_EXT_CTRLS: | 175 | case VIDIOC_TRY_EXT_CTRLS: |
176 | return v4l2_subdev_try_ext_ctrls(sd, arg); | 176 | return v4l2_try_ext_ctrls(sd->ctrl_handler, arg); |
177 | 177 | ||
178 | case VIDIOC_DQEVENT: | 178 | case VIDIOC_DQEVENT: |
179 | if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) | 179 | if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) |
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 643ad52e3ca2..4796bbf0ae4e 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -1000,7 +1000,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void) | |||
1000 | gd->major = I2O_MAJOR; | 1000 | gd->major = I2O_MAJOR; |
1001 | gd->queue = queue; | 1001 | gd->queue = queue; |
1002 | gd->fops = &i2o_block_fops; | 1002 | gd->fops = &i2o_block_fops; |
1003 | gd->events = DISK_EVENT_MEDIA_CHANGE; | ||
1004 | gd->private_data = dev; | 1003 | gd->private_data = dev; |
1005 | 1004 | ||
1006 | dev->gd = gd; | 1005 | dev->gd = gd; |
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index d4a851c6b5bf..0b4d5b23bec9 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
@@ -144,7 +144,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
144 | int iter, i; | 144 | int iter, i; |
145 | unsigned long flags; | 145 | unsigned long flags; |
146 | 146 | ||
147 | data->chip->irq_ack(irq_data); | 147 | data->chip->irq_ack(data); |
148 | 148 | ||
149 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { | 149 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { |
150 | u32 status; | 150 | u32 status; |
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 2e165117457b..3ab9ffa00aad 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c | |||
@@ -717,14 +717,14 @@ static int usbhs_enable(struct device *dev) | |||
717 | gpio_request(pdata->ehci_data->reset_gpio_port[0], | 717 | gpio_request(pdata->ehci_data->reset_gpio_port[0], |
718 | "USB1 PHY reset"); | 718 | "USB1 PHY reset"); |
719 | gpio_direction_output | 719 | gpio_direction_output |
720 | (pdata->ehci_data->reset_gpio_port[0], 1); | 720 | (pdata->ehci_data->reset_gpio_port[0], 0); |
721 | } | 721 | } |
722 | 722 | ||
723 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) { | 723 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) { |
724 | gpio_request(pdata->ehci_data->reset_gpio_port[1], | 724 | gpio_request(pdata->ehci_data->reset_gpio_port[1], |
725 | "USB2 PHY reset"); | 725 | "USB2 PHY reset"); |
726 | gpio_direction_output | 726 | gpio_direction_output |
727 | (pdata->ehci_data->reset_gpio_port[1], 1); | 727 | (pdata->ehci_data->reset_gpio_port[1], 0); |
728 | } | 728 | } |
729 | 729 | ||
730 | /* Hold the PHY in RESET for enough time till DIR is high */ | 730 | /* Hold the PHY in RESET for enough time till DIR is high */ |
@@ -904,11 +904,11 @@ static int usbhs_enable(struct device *dev) | |||
904 | 904 | ||
905 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) | 905 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) |
906 | gpio_set_value | 906 | gpio_set_value |
907 | (pdata->ehci_data->reset_gpio_port[0], 0); | 907 | (pdata->ehci_data->reset_gpio_port[0], 1); |
908 | 908 | ||
909 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) | 909 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) |
910 | gpio_set_value | 910 | gpio_set_value |
911 | (pdata->ehci_data->reset_gpio_port[1], 0); | 911 | (pdata->ehci_data->reset_gpio_port[1], 1); |
912 | } | 912 | } |
913 | 913 | ||
914 | end_count: | 914 | end_count: |
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 16422de0823a..2c0d4d16491a 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c | |||
@@ -447,12 +447,13 @@ static int __init load_twl4030_script(struct twl4030_script *tscript, | |||
447 | if (err) | 447 | if (err) |
448 | goto out; | 448 | goto out; |
449 | } | 449 | } |
450 | if (tscript->flags & TWL4030_SLEEP_SCRIPT) | 450 | if (tscript->flags & TWL4030_SLEEP_SCRIPT) { |
451 | if (order) | 451 | if (order) |
452 | pr_warning("TWL4030: Bad order of scripts (sleep "\ | 452 | pr_warning("TWL4030: Bad order of scripts (sleep "\ |
453 | "script before wakeup) Leads to boot"\ | 453 | "script before wakeup) Leads to boot"\ |
454 | "failure on some boards\n"); | 454 | "failure on some boards\n"); |
455 | err = twl4030_config_sleep_sequence(address); | 455 | err = twl4030_config_sleep_sequence(address); |
456 | } | ||
456 | out: | 457 | out: |
457 | return err; | 458 | return err; |
458 | } | 459 | } |
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 2b200c1cfbba..461e6a17fb90 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c | |||
@@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) | |||
94 | spin_unlock_irqrestore(&host->clk_lock, flags); | 94 | spin_unlock_irqrestore(&host->clk_lock, flags); |
95 | return; | 95 | return; |
96 | } | 96 | } |
97 | mmc_claim_host(host); | 97 | mutex_lock(&host->clk_gate_mutex); |
98 | spin_lock_irqsave(&host->clk_lock, flags); | 98 | spin_lock_irqsave(&host->clk_lock, flags); |
99 | if (!host->clk_requests) { | 99 | if (!host->clk_requests) { |
100 | spin_unlock_irqrestore(&host->clk_lock, flags); | 100 | spin_unlock_irqrestore(&host->clk_lock, flags); |
@@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) | |||
104 | pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); | 104 | pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); |
105 | } | 105 | } |
106 | spin_unlock_irqrestore(&host->clk_lock, flags); | 106 | spin_unlock_irqrestore(&host->clk_lock, flags); |
107 | mmc_release_host(host); | 107 | mutex_unlock(&host->clk_gate_mutex); |
108 | } | 108 | } |
109 | 109 | ||
110 | /* | 110 | /* |
@@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host) | |||
130 | { | 130 | { |
131 | unsigned long flags; | 131 | unsigned long flags; |
132 | 132 | ||
133 | mmc_claim_host(host); | 133 | mutex_lock(&host->clk_gate_mutex); |
134 | spin_lock_irqsave(&host->clk_lock, flags); | 134 | spin_lock_irqsave(&host->clk_lock, flags); |
135 | if (host->clk_gated) { | 135 | if (host->clk_gated) { |
136 | spin_unlock_irqrestore(&host->clk_lock, flags); | 136 | spin_unlock_irqrestore(&host->clk_lock, flags); |
@@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host) | |||
140 | } | 140 | } |
141 | host->clk_requests++; | 141 | host->clk_requests++; |
142 | spin_unlock_irqrestore(&host->clk_lock, flags); | 142 | spin_unlock_irqrestore(&host->clk_lock, flags); |
143 | mmc_release_host(host); | 143 | mutex_unlock(&host->clk_gate_mutex); |
144 | } | 144 | } |
145 | 145 | ||
146 | /** | 146 | /** |
@@ -215,6 +215,7 @@ static inline void mmc_host_clk_init(struct mmc_host *host) | |||
215 | host->clk_gated = false; | 215 | host->clk_gated = false; |
216 | INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); | 216 | INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); |
217 | spin_lock_init(&host->clk_lock); | 217 | spin_lock_init(&host->clk_lock); |
218 | mutex_init(&host->clk_gate_mutex); | ||
218 | } | 219 | } |
219 | 220 | ||
220 | /** | 221 | /** |
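
The mmc/core/host.c hunks swap the mmc_claim_host()/mmc_release_host() pair in the clock-gating paths for a dedicated clk_gate_mutex, initialised in mmc_host_clk_init(), so this internal bookkeeping presumably no longer needs to claim the whole host; the clk_lock spinlock still guards only the counters and flags. A rough userspace analogue of that split (short checks under the inner lock, gate/ungate serialised by an outer mutex), with invented names and pthread mutexes standing in for both kernel locks:

    #include <pthread.h>
    #include <stdio.h>

    /* gate_mutex plays the role of clk_gate_mutex; state_lock stands in
     * for the clk_lock spinlock that protects the counters. */
    static pthread_mutex_t gate_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static int clk_requests;
    static int clk_gated;

    static void clk_gate_delayed(void)
    {
        pthread_mutex_lock(&gate_mutex);    /* serialises gating, may block */
        pthread_mutex_lock(&state_lock);    /* short critical section only */
        if (clk_requests == 0 && !clk_gated) {
            clk_gated = 1;
            pthread_mutex_unlock(&state_lock);
            printf("clock gated\n");        /* slow hardware work goes here */
        } else {
            pthread_mutex_unlock(&state_lock);
        }
        pthread_mutex_unlock(&gate_mutex);
    }

    static void clk_ungate(void)
    {
        pthread_mutex_lock(&gate_mutex);
        pthread_mutex_lock(&state_lock);
        if (clk_gated) {
            clk_gated = 0;
            pthread_mutex_unlock(&state_lock);
            printf("clock ungated\n");
            pthread_mutex_lock(&state_lock);
        }
        clk_requests++;
        pthread_mutex_unlock(&state_lock);
        pthread_mutex_unlock(&gate_mutex);
    }

    int main(void)
    {
        clk_gate_delayed();    /* gates: no outstanding requests */
        clk_ungate();          /* ungates and records a request */
        return 0;
    }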
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c index f9b611fc773e..60e4186a4345 100644 --- a/drivers/mmc/host/sdhci-of-core.c +++ b/drivers/mmc/host/sdhci-of-core.c | |||
@@ -124,8 +124,10 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np) | |||
124 | #endif | 124 | #endif |
125 | } | 125 | } |
126 | 126 | ||
127 | static const struct of_device_id sdhci_of_match[]; | ||
127 | static int __devinit sdhci_of_probe(struct platform_device *ofdev) | 128 | static int __devinit sdhci_of_probe(struct platform_device *ofdev) |
128 | { | 129 | { |
130 | const struct of_device_id *match; | ||
129 | struct device_node *np = ofdev->dev.of_node; | 131 | struct device_node *np = ofdev->dev.of_node; |
130 | struct sdhci_of_data *sdhci_of_data; | 132 | struct sdhci_of_data *sdhci_of_data; |
131 | struct sdhci_host *host; | 133 | struct sdhci_host *host; |
@@ -134,9 +136,10 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev) | |||
134 | int size; | 136 | int size; |
135 | int ret; | 137 | int ret; |
136 | 138 | ||
137 | if (!ofdev->dev.of_match) | 139 | match = of_match_device(sdhci_of_match, &ofdev->dev); |
140 | if (!match) | ||
138 | return -EINVAL; | 141 | return -EINVAL; |
139 | sdhci_of_data = ofdev->dev.of_match->data; | 142 | sdhci_of_data = match->data; |
140 | 143 | ||
141 | if (!of_device_is_available(np)) | 144 | if (!of_device_is_available(np)) |
142 | return -ENODEV; | 145 | return -ENODEV; |
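
This sdhci-of-core change, and the matching physmap_of, mpc5xxx_can, fs_enet and mii-fec hunks further down, replace direct use of ofdev->dev.of_match with a fresh of_match_device() lookup against the driver's forward-declared match table, bailing out with -EINVAL when nothing matches. of_match_device() walks the table and returns the matching of_device_id (or NULL); a stripped-down userspace mock of that table lookup, with invented compatible strings and driver data:

    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for struct of_device_id: compatible string + data. */
    struct mock_of_device_id {
        const char *compatible;
        const void *data;
    };

    static const char esdhc_data[] = "esdhc ops";
    static const char sdhci_data[] = "plain sdhci ops";

    /* Invented table, shaped like sdhci_of_match but not taken from it. */
    static const struct mock_of_device_id mock_match[] = {
        { .compatible = "fsl,esdhc",     .data = esdhc_data },
        { .compatible = "generic,sdhci", .data = sdhci_data },
        { NULL, NULL }    /* sentinel */
    };

    /* Rough analogue of of_match_device(): matching entry or NULL. */
    static const struct mock_of_device_id *
    mock_match_device(const struct mock_of_device_id *table, const char *compatible)
    {
        for (; table->compatible; table++)
            if (strcmp(table->compatible, compatible) == 0)
                return table;
        return NULL;
    }

    int main(void)
    {
        const struct mock_of_device_id *match =
            mock_match_device(mock_match, "fsl,esdhc");

        if (!match)
            return 1;    /* probe would return -EINVAL here */
        printf("driver data: %s\n", (const char *)match->data);
        return 0;
    }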
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 44b1f46458ca..5069111c81cc 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -260,6 +260,13 @@ config MTD_BCM963XX | |||
260 | Support for parsing CFE image tag and creating MTD partitions on | 260 | Support for parsing CFE image tag and creating MTD partitions on |
261 | Broadcom BCM63xx boards. | 261 | Broadcom BCM63xx boards. |
262 | 262 | ||
263 | config MTD_LANTIQ | ||
264 | tristate "Lantiq SoC NOR support" | ||
265 | depends on LANTIQ | ||
266 | select MTD_PARTITIONS | ||
267 | help | ||
268 | Support for NOR flash attached to the Lantiq SoC's External Bus Unit. | ||
269 | |||
263 | config MTD_DILNETPC | 270 | config MTD_DILNETPC |
264 | tristate "CFI Flash device mapped on DIL/Net PC" | 271 | tristate "CFI Flash device mapped on DIL/Net PC" |
265 | depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN | 272 | depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN |
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 08533bd5cba7..6adf4c9b9057 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile | |||
@@ -60,3 +60,4 @@ obj-$(CONFIG_MTD_VMU) += vmu-flash.o | |||
60 | obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o | 60 | obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o |
61 | obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o | 61 | obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o |
62 | obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o | 62 | obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o |
63 | obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o | ||
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c new file mode 100644 index 000000000000..a90cabd7b84d --- /dev/null +++ b/drivers/mtd/maps/lantiq-flash.c | |||
@@ -0,0 +1,251 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE | ||
7 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/mtd/mtd.h> | ||
17 | #include <linux/mtd/map.h> | ||
18 | #include <linux/mtd/partitions.h> | ||
19 | #include <linux/mtd/cfi.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/mtd/physmap.h> | ||
22 | |||
23 | #include <lantiq_soc.h> | ||
24 | #include <lantiq_platform.h> | ||
25 | |||
26 | /* | ||
27 | * The NOR flash is connected to the same external bus unit (EBU) as PCI. | ||
28 | * To make PCI work we need to enable the endianness swapping for the address | ||
29 | * written to the EBU. This endianness swapping works for PCI correctly but | ||
30 | * fails for attached NOR devices. To workaround this we need to use a complex | ||
31 | * map. The workaround involves swapping all addresses whilst probing the chip. | ||
32 | * Once probing is complete we stop swapping the addresses but swizzle the | ||
33 | * unlock addresses to ensure that access to the NOR device works correctly. | ||
34 | */ | ||
35 | |||
36 | enum { | ||
37 | LTQ_NOR_PROBING, | ||
38 | LTQ_NOR_NORMAL | ||
39 | }; | ||
40 | |||
41 | struct ltq_mtd { | ||
42 | struct resource *res; | ||
43 | struct mtd_info *mtd; | ||
44 | struct map_info *map; | ||
45 | }; | ||
46 | |||
47 | static char ltq_map_name[] = "ltq_nor"; | ||
48 | |||
49 | static map_word | ||
50 | ltq_read16(struct map_info *map, unsigned long adr) | ||
51 | { | ||
52 | unsigned long flags; | ||
53 | map_word temp; | ||
54 | |||
55 | if (map->map_priv_1 == LTQ_NOR_PROBING) | ||
56 | adr ^= 2; | ||
57 | spin_lock_irqsave(&ebu_lock, flags); | ||
58 | temp.x[0] = *(u16 *)(map->virt + adr); | ||
59 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
60 | return temp; | ||
61 | } | ||
62 | |||
63 | static void | ||
64 | ltq_write16(struct map_info *map, map_word d, unsigned long adr) | ||
65 | { | ||
66 | unsigned long flags; | ||
67 | |||
68 | if (map->map_priv_1 == LTQ_NOR_PROBING) | ||
69 | adr ^= 2; | ||
70 | spin_lock_irqsave(&ebu_lock, flags); | ||
71 | *(u16 *)(map->virt + adr) = d.x[0]; | ||
72 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
73 | } | ||
74 | |||
75 | /* | ||
76 | * The following 2 functions copy data between iomem and a cached memory | ||
77 | * section. As memcpy() makes use of pre-fetching we cannot use it here. | ||
78 | * The normal alternative of using memcpy_{to,from}io also makes use of | ||
79 | * memcpy() on MIPS so it is not applicable either. We are therefore stuck | ||
80 | * with having to use our own loop. | ||
81 | */ | ||
82 | static void | ||
83 | ltq_copy_from(struct map_info *map, void *to, | ||
84 | unsigned long from, ssize_t len) | ||
85 | { | ||
86 | unsigned char *f = (unsigned char *)map->virt + from; | ||
87 | unsigned char *t = (unsigned char *)to; | ||
88 | unsigned long flags; | ||
89 | |||
90 | spin_lock_irqsave(&ebu_lock, flags); | ||
91 | while (len--) | ||
92 | *t++ = *f++; | ||
93 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
94 | } | ||
95 | |||
96 | static void | ||
97 | ltq_copy_to(struct map_info *map, unsigned long to, | ||
98 | const void *from, ssize_t len) | ||
99 | { | ||
100 | unsigned char *f = (unsigned char *)from; | ||
101 | unsigned char *t = (unsigned char *)map->virt + to; | ||
102 | unsigned long flags; | ||
103 | |||
104 | spin_lock_irqsave(&ebu_lock, flags); | ||
105 | while (len--) | ||
106 | *t++ = *f++; | ||
107 | spin_unlock_irqrestore(&ebu_lock, flags); | ||
108 | } | ||
109 | |||
110 | static const char * const part_probe_types[] = { "cmdlinepart", NULL }; | ||
111 | |||
112 | static int __init | ||
113 | ltq_mtd_probe(struct platform_device *pdev) | ||
114 | { | ||
115 | struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev); | ||
116 | struct ltq_mtd *ltq_mtd; | ||
117 | struct mtd_partition *parts; | ||
118 | struct resource *res; | ||
119 | int nr_parts = 0; | ||
120 | struct cfi_private *cfi; | ||
121 | int err; | ||
122 | |||
123 | ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL); | ||
124 | platform_set_drvdata(pdev, ltq_mtd); | ||
125 | |||
126 | ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
127 | if (!ltq_mtd->res) { | ||
128 | dev_err(&pdev->dev, "failed to get memory resource"); | ||
129 | err = -ENOENT; | ||
130 | goto err_out; | ||
131 | } | ||
132 | |||
133 | res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start, | ||
134 | resource_size(ltq_mtd->res), dev_name(&pdev->dev)); | ||
135 | if (!res) { | ||
136 | dev_err(&pdev->dev, "failed to request mem resource"); | ||
137 | err = -EBUSY; | ||
138 | goto err_out; | ||
139 | } | ||
140 | |||
141 | ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL); | ||
142 | ltq_mtd->map->phys = res->start; | ||
143 | ltq_mtd->map->size = resource_size(res); | ||
144 | ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev, | ||
145 | ltq_mtd->map->phys, ltq_mtd->map->size); | ||
146 | if (!ltq_mtd->map->virt) { | ||
147 | dev_err(&pdev->dev, "failed to ioremap!\n"); | ||
148 | err = -ENOMEM; | ||
149 | goto err_free; | ||
150 | } | ||
151 | |||
152 | ltq_mtd->map->name = ltq_map_name; | ||
153 | ltq_mtd->map->bankwidth = 2; | ||
154 | ltq_mtd->map->read = ltq_read16; | ||
155 | ltq_mtd->map->write = ltq_write16; | ||
156 | ltq_mtd->map->copy_from = ltq_copy_from; | ||
157 | ltq_mtd->map->copy_to = ltq_copy_to; | ||
158 | |||
159 | ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING; | ||
160 | ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map); | ||
161 | ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL; | ||
162 | |||
163 | if (!ltq_mtd->mtd) { | ||
164 | dev_err(&pdev->dev, "probing failed\n"); | ||
165 | err = -ENXIO; | ||
166 | goto err_unmap; | ||
167 | } | ||
168 | |||
169 | ltq_mtd->mtd->owner = THIS_MODULE; | ||
170 | |||
171 | cfi = ltq_mtd->map->fldrv_priv; | ||
172 | cfi->addr_unlock1 ^= 1; | ||
173 | cfi->addr_unlock2 ^= 1; | ||
174 | |||
175 | nr_parts = parse_mtd_partitions(ltq_mtd->mtd, | ||
176 | part_probe_types, &parts, 0); | ||
177 | if (nr_parts > 0) { | ||
178 | dev_info(&pdev->dev, | ||
179 | "using %d partitions from cmdline", nr_parts); | ||
180 | } else { | ||
181 | nr_parts = ltq_mtd_data->nr_parts; | ||
182 | parts = ltq_mtd_data->parts; | ||
183 | } | ||
184 | |||
185 | err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts); | ||
186 | if (err) { | ||
187 | dev_err(&pdev->dev, "failed to add partitions\n"); | ||
188 | goto err_destroy; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | |||
193 | err_destroy: | ||
194 | map_destroy(ltq_mtd->mtd); | ||
195 | err_unmap: | ||
196 | iounmap(ltq_mtd->map->virt); | ||
197 | err_free: | ||
198 | kfree(ltq_mtd->map); | ||
199 | err_out: | ||
200 | kfree(ltq_mtd); | ||
201 | return err; | ||
202 | } | ||
203 | |||
204 | static int __devexit | ||
205 | ltq_mtd_remove(struct platform_device *pdev) | ||
206 | { | ||
207 | struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev); | ||
208 | |||
209 | if (ltq_mtd) { | ||
210 | if (ltq_mtd->mtd) { | ||
211 | del_mtd_partitions(ltq_mtd->mtd); | ||
212 | map_destroy(ltq_mtd->mtd); | ||
213 | } | ||
214 | if (ltq_mtd->map->virt) | ||
215 | iounmap(ltq_mtd->map->virt); | ||
216 | kfree(ltq_mtd->map); | ||
217 | kfree(ltq_mtd); | ||
218 | } | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | static struct platform_driver ltq_mtd_driver = { | ||
223 | .remove = __devexit_p(ltq_mtd_remove), | ||
224 | .driver = { | ||
225 | .name = "ltq_nor", | ||
226 | .owner = THIS_MODULE, | ||
227 | }, | ||
228 | }; | ||
229 | |||
230 | static int __init | ||
231 | init_ltq_mtd(void) | ||
232 | { | ||
233 | int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe); | ||
234 | |||
235 | if (ret) | ||
236 | pr_err("ltq_nor: error registering platform driver"); | ||
237 | return ret; | ||
238 | } | ||
239 | |||
240 | static void __exit | ||
241 | exit_ltq_mtd(void) | ||
242 | { | ||
243 | platform_driver_unregister(&ltq_mtd_driver); | ||
244 | } | ||
245 | |||
246 | module_init(init_ltq_mtd); | ||
247 | module_exit(exit_ltq_mtd); | ||
248 | |||
249 | MODULE_LICENSE("GPL"); | ||
250 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); | ||
251 | MODULE_DESCRIPTION("Lantiq SoC NOR"); | ||
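
The comment at the top of lantiq-flash.c above explains the EBU workaround: while probing (map_priv_1 == LTQ_NOR_PROBING) the 16-bit read/write hooks XOR the address with 2, and once probing is done the CFI unlock addresses are XORed with 1 instead. A small standalone program showing what the adr ^= 2 swizzle does to the two halfword lanes of each 32-bit word (the buffer contents are made up):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        /* Pretend this is a small window of the NOR mapping. */
        uint8_t window[8];
        const uint16_t fill[4] = { 0x1111, 0x2222, 0x3333, 0x4444 };
        unsigned long adr;
        uint16_t v;

        memcpy(window, fill, sizeof(fill));

        for (adr = 0; adr < sizeof(window); adr += 2) {
            /* "Probing" access: exchange the halfwords of each 32-bit word. */
            unsigned long swizzled = adr ^ 2;

            memcpy(&v, window + swizzled, sizeof(v));
            printf("adr %lu -> swizzled %lu -> 0x%04x\n",
                   adr, swizzled, (unsigned)v);
        }
        return 0;
    }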
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index bd483f0c57e1..c1d33464aee8 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c | |||
@@ -214,11 +214,13 @@ static void __devinit of_free_probes(const char **probes) | |||
214 | } | 214 | } |
215 | #endif | 215 | #endif |
216 | 216 | ||
217 | static struct of_device_id of_flash_match[]; | ||
217 | static int __devinit of_flash_probe(struct platform_device *dev) | 218 | static int __devinit of_flash_probe(struct platform_device *dev) |
218 | { | 219 | { |
219 | #ifdef CONFIG_MTD_PARTITIONS | 220 | #ifdef CONFIG_MTD_PARTITIONS |
220 | const char **part_probe_types; | 221 | const char **part_probe_types; |
221 | #endif | 222 | #endif |
223 | const struct of_device_id *match; | ||
222 | struct device_node *dp = dev->dev.of_node; | 224 | struct device_node *dp = dev->dev.of_node; |
223 | struct resource res; | 225 | struct resource res; |
224 | struct of_flash *info; | 226 | struct of_flash *info; |
@@ -232,9 +234,10 @@ static int __devinit of_flash_probe(struct platform_device *dev) | |||
232 | struct mtd_info **mtd_list = NULL; | 234 | struct mtd_info **mtd_list = NULL; |
233 | resource_size_t res_size; | 235 | resource_size_t res_size; |
234 | 236 | ||
235 | if (!dev->dev.of_match) | 237 | match = of_match_device(of_flash_match, &dev->dev); |
238 | if (!match) | ||
236 | return -EINVAL; | 239 | return -EINVAL; |
237 | probe_type = dev->dev.of_match->data; | 240 | probe_type = match->data; |
238 | 241 | ||
239 | reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); | 242 | reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); |
240 | 243 | ||
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 3ffe05db4923..5d513b54a7d7 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/gpio.h> | ||
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
@@ -470,7 +471,7 @@ static int __init au1xxx_nand_init(void) | |||
470 | 471 | ||
471 | #ifdef CONFIG_MIPS_PB1550 | 472 | #ifdef CONFIG_MIPS_PB1550 |
472 | /* set gpio206 high */ | 473 | /* set gpio206 high */ |
473 | au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR); | 474 | gpio_direction_input(206); |
474 | 475 | ||
475 | boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1); | 476 | boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1); |
476 | 477 | ||
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index dc280bc8eba2..19f04a34783a 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -2017,6 +2017,13 @@ config FTMAC100 | |||
2017 | from Faraday. It is used on Faraday A320, Andes AG101 and some | 2017 | from Faraday. It is used on Faraday A320, Andes AG101 and some |
2018 | other ARM/NDS32 SoC's. | 2018 | other ARM/NDS32 SoC's. |
2019 | 2019 | ||
2020 | config LANTIQ_ETOP | ||
2021 | tristate "Lantiq SoC ETOP driver" | ||
2022 | depends on SOC_TYPE_XWAY | ||
2023 | help | ||
2024 | Support for the MII0 inside the Lantiq SoC. | ||
2025 | |||
2026 | |||
2020 | source "drivers/net/fs_enet/Kconfig" | 2027 | source "drivers/net/fs_enet/Kconfig" |
2021 | 2028 | ||
2022 | source "drivers/net/octeon/Kconfig" | 2029 | source "drivers/net/octeon/Kconfig" |
@@ -2536,7 +2543,7 @@ config S6GMAC | |||
2536 | source "drivers/net/stmmac/Kconfig" | 2543 | source "drivers/net/stmmac/Kconfig" |
2537 | 2544 | ||
2538 | config PCH_GBE | 2545 | config PCH_GBE |
2539 | tristate "PCH Gigabit Ethernet" | 2546 | tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" |
2540 | depends on PCI | 2547 | depends on PCI |
2541 | select MII | 2548 | select MII |
2542 | ---help--- | 2549 | ---help--- |
@@ -2548,6 +2555,12 @@ config PCH_GBE | |||
2548 | to Gigabit Ethernet. | 2555 | to Gigabit Ethernet. |
2549 | This driver enables Gigabit Ethernet function. | 2556 | This driver enables Gigabit Ethernet function. |
2550 | 2557 | ||
2558 | This driver can also be used for the OKI SEMICONDUCTOR ML7223 | ||
2559 | IOH (Input/Output Hub). | ||
2560 | The ML7223 IOH is intended for MP (Media Phone) use. | ||
2561 | The ML7223 is a companion chip for the Intel Atom E6xx series. | ||
2562 | The ML7223 is fully compatible with the Intel EG20T PCH. | ||
2563 | |||
2551 | endif # NETDEV_1000 | 2564 | endif # NETDEV_1000 |
2552 | 2565 | ||
2553 | # | 2566 | # |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 01b604ad155e..209fbb70619b 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -144,7 +144,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o | |||
144 | obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o | 144 | obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o |
145 | obj-$(CONFIG_B44) += b44.o | 145 | obj-$(CONFIG_B44) += b44.o |
146 | obj-$(CONFIG_FORCEDETH) += forcedeth.o | 146 | obj-$(CONFIG_FORCEDETH) += forcedeth.o |
147 | obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o | 147 | obj-$(CONFIG_NE_H8300) += ne-h8300.o |
148 | obj-$(CONFIG_AX88796) += ax88796.o | 148 | obj-$(CONFIG_AX88796) += ax88796.o |
149 | obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o | 149 | obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o |
150 | obj-$(CONFIG_FTMAC100) += ftmac100.o | 150 | obj-$(CONFIG_FTMAC100) += ftmac100.o |
@@ -219,7 +219,7 @@ obj-$(CONFIG_SC92031) += sc92031.o | |||
219 | obj-$(CONFIG_LP486E) += lp486e.o | 219 | obj-$(CONFIG_LP486E) += lp486e.o |
220 | 220 | ||
221 | obj-$(CONFIG_ETH16I) += eth16i.o | 221 | obj-$(CONFIG_ETH16I) += eth16i.o |
222 | obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o | 222 | obj-$(CONFIG_ZORRO8390) += zorro8390.o |
223 | obj-$(CONFIG_HPLANCE) += hplance.o 7990.o | 223 | obj-$(CONFIG_HPLANCE) += hplance.o 7990.o |
224 | obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o | 224 | obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o |
225 | obj-$(CONFIG_EQUALIZER) += eql.o | 225 | obj-$(CONFIG_EQUALIZER) += eql.o |
@@ -231,7 +231,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o | |||
231 | obj-$(CONFIG_DECLANCE) += declance.o | 231 | obj-$(CONFIG_DECLANCE) += declance.o |
232 | obj-$(CONFIG_ATARILANCE) += atarilance.o | 232 | obj-$(CONFIG_ATARILANCE) += atarilance.o |
233 | obj-$(CONFIG_A2065) += a2065.o | 233 | obj-$(CONFIG_A2065) += a2065.o |
234 | obj-$(CONFIG_HYDRA) += hydra.o 8390.o | 234 | obj-$(CONFIG_HYDRA) += hydra.o |
235 | obj-$(CONFIG_ARIADNE) += ariadne.o | 235 | obj-$(CONFIG_ARIADNE) += ariadne.o |
236 | obj-$(CONFIG_CS89x0) += cs89x0.o | 236 | obj-$(CONFIG_CS89x0) += cs89x0.o |
237 | obj-$(CONFIG_MACSONIC) += macsonic.o | 237 | obj-$(CONFIG_MACSONIC) += macsonic.o |
@@ -259,6 +259,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/ | |||
259 | obj-$(CONFIG_ENC28J60) += enc28j60.o | 259 | obj-$(CONFIG_ENC28J60) += enc28j60.o |
260 | obj-$(CONFIG_ETHOC) += ethoc.o | 260 | obj-$(CONFIG_ETHOC) += ethoc.o |
261 | obj-$(CONFIG_GRETH) += greth.o | 261 | obj-$(CONFIG_GRETH) += greth.o |
262 | obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o | ||
262 | 263 | ||
263 | obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o | 264 | obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o |
264 | 265 | ||
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index 4af235d41fda..fbfb5b47c506 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c | |||
@@ -527,7 +527,7 @@ static void __init etherh_banner(void) | |||
527 | * Read the ethernet address string from the on board rom. | 527 | * Read the ethernet address string from the on board rom. |
528 | * This is an ascii string... | 528 | * This is an ascii string... |
529 | */ | 529 | */ |
530 | static int __init etherh_addr(char *addr, struct expansion_card *ec) | 530 | static int __devinit etherh_addr(char *addr, struct expansion_card *ec) |
531 | { | 531 | { |
532 | struct in_chunk_dir cd; | 532 | struct in_chunk_dir cd; |
533 | char *s; | 533 | char *s; |
@@ -655,7 +655,7 @@ static const struct net_device_ops etherh_netdev_ops = { | |||
655 | static u32 etherh_regoffsets[16]; | 655 | static u32 etherh_regoffsets[16]; |
656 | static u32 etherm_regoffsets[16]; | 656 | static u32 etherm_regoffsets[16]; |
657 | 657 | ||
658 | static int __init | 658 | static int __devinit |
659 | etherh_probe(struct expansion_card *ec, const struct ecard_id *id) | 659 | etherh_probe(struct expansion_card *ec, const struct ecard_id *id) |
660 | { | 660 | { |
661 | const struct etherh_data *data = id->data; | 661 | const struct etherh_data *data = id->data; |
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index ce0091eb06f5..1264d781b554 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c | |||
@@ -554,7 +554,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, | |||
554 | memaddr == (unsigned short *)0xffe00000) { | 554 | memaddr == (unsigned short *)0xffe00000) { |
555 | /* PAMs card and Riebl on ST use level 5 autovector */ | 555 | /* PAMs card and Riebl on ST use level 5 autovector */ |
556 | if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, | 556 | if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, |
557 | "PAM/Riebl-ST Ethernet", dev)) { | 557 | "PAM,Riebl-ST Ethernet", dev)) { |
558 | printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); | 558 | printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); |
559 | return 0; | 559 | return 0; |
560 | } | 560 | } |
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 66823eded7a3..2353eca32593 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h | |||
@@ -213,7 +213,7 @@ struct be_rx_stats { | |||
213 | 213 | ||
214 | struct be_rx_compl_info { | 214 | struct be_rx_compl_info { |
215 | u32 rss_hash; | 215 | u32 rss_hash; |
216 | u16 vid; | 216 | u16 vlan_tag; |
217 | u16 pkt_size; | 217 | u16 pkt_size; |
218 | u16 rxq_idx; | 218 | u16 rxq_idx; |
219 | u16 mac_id; | 219 | u16 mac_id; |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 1e2d825bb94a..9dc9394fd4ca 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
@@ -132,7 +132,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, | |||
132 | struct be_async_event_grp5_pvid_state *evt) | 132 | struct be_async_event_grp5_pvid_state *evt) |
133 | { | 133 | { |
134 | if (evt->enabled) | 134 | if (evt->enabled) |
135 | adapter->pvid = evt->tag; | 135 | adapter->pvid = le16_to_cpu(evt->tag); |
136 | else | 136 | else |
137 | adapter->pvid = 0; | 137 | adapter->pvid = 0; |
138 | } | 138 | } |
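
The be_cmds.c fix converts the PVID carried in the little-endian async event with le16_to_cpu() before storing it, rather than using the raw value. Outside the kernel, the usual endian-neutral way to read such a wire field is to assemble it from bytes; a tiny sketch (the two-byte event field is invented for the example):

    #include <stdio.h>
    #include <stdint.h>

    /* Read a little-endian 16-bit field from a byte buffer, on any host. */
    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        /* Pretend these two bytes came from a little-endian hardware event. */
        const uint8_t event_tag[2] = { 0x64, 0x00 };    /* VLAN 100 on the wire */

        printf("pvid = %u\n", (unsigned)get_le16(event_tag));    /* 100 everywhere */
        return 0;
    }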
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 02a0443d1821..9187fb4e08f1 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -1018,7 +1018,8 @@ static void be_rx_compl_process(struct be_adapter *adapter, | |||
1018 | kfree_skb(skb); | 1018 | kfree_skb(skb); |
1019 | return; | 1019 | return; |
1020 | } | 1020 | } |
1021 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid); | 1021 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, |
1022 | rxcp->vlan_tag); | ||
1022 | } else { | 1023 | } else { |
1023 | netif_receive_skb(skb); | 1024 | netif_receive_skb(skb); |
1024 | } | 1025 | } |
@@ -1076,7 +1077,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, | |||
1076 | if (likely(!rxcp->vlanf)) | 1077 | if (likely(!rxcp->vlanf)) |
1077 | napi_gro_frags(&eq_obj->napi); | 1078 | napi_gro_frags(&eq_obj->napi); |
1078 | else | 1079 | else |
1079 | vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid); | 1080 | vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, |
1081 | rxcp->vlan_tag); | ||
1080 | } | 1082 | } |
1081 | 1083 | ||
1082 | static void be_parse_rx_compl_v1(struct be_adapter *adapter, | 1084 | static void be_parse_rx_compl_v1(struct be_adapter *adapter, |
@@ -1102,7 +1104,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter, | |||
1102 | rxcp->pkt_type = | 1104 | rxcp->pkt_type = |
1103 | AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); | 1105 | AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); |
1104 | rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); | 1106 | rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); |
1105 | rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl); | 1107 | rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, |
1108 | compl); | ||
1106 | } | 1109 | } |
1107 | 1110 | ||
1108 | static void be_parse_rx_compl_v0(struct be_adapter *adapter, | 1111 | static void be_parse_rx_compl_v0(struct be_adapter *adapter, |
@@ -1128,7 +1131,8 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter, | |||
1128 | rxcp->pkt_type = | 1131 | rxcp->pkt_type = |
1129 | AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); | 1132 | AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); |
1130 | rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); | 1133 | rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); |
1131 | rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl); | 1134 | rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, |
1135 | compl); | ||
1132 | } | 1136 | } |
1133 | 1137 | ||
1134 | static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) | 1138 | static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) |
@@ -1155,9 +1159,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) | |||
1155 | rxcp->vlanf = 0; | 1159 | rxcp->vlanf = 0; |
1156 | 1160 | ||
1157 | if (!lancer_chip(adapter)) | 1161 | if (!lancer_chip(adapter)) |
1158 | rxcp->vid = swab16(rxcp->vid); | 1162 | rxcp->vlan_tag = swab16(rxcp->vlan_tag); |
1159 | 1163 | ||
1160 | if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid]) | 1164 | if (((adapter->pvid & VLAN_VID_MASK) == |
1165 | (rxcp->vlan_tag & VLAN_VID_MASK)) && | ||
1166 | !adapter->vlan_tag[rxcp->vlan_tag]) | ||
1161 | rxcp->vlanf = 0; | 1167 | rxcp->vlanf = 0; |
1162 | 1168 | ||
1163 | /* As the compl has been parsed, reset it; we won't touch it again */ | 1169 | /* As the compl has been parsed, reset it; we won't touch it again */ |
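
The last be_main.c hunk compares only the low 12 VLAN-ID bits of the PVID and the received tag (VLAN_VID_MASK), so priority/CFI bits in either value can no longer defeat the "same as port VLAN" check. A standalone illustration of the masking, with made-up tag values:

    #include <stdio.h>
    #include <stdint.h>

    #define VID_MASK 0x0fff    /* low 12 bits of a VLAN TCI hold the VLAN ID */

    int main(void)
    {
        uint16_t pvid   = 0x2064;    /* priority 1, VLAN ID 100 */
        uint16_t rx_tag = 0x0064;    /* no priority bits, VLAN ID 100 */

        /* Raw comparison misses the match because of the priority bits. */
        printf("raw:    %s\n", pvid == rx_tag ? "match" : "no match");

        /* Comparing only the VLAN ID behaves as intended. */
        printf("masked: %s\n",
               (pvid & VID_MASK) == (rx_tag & VID_MASK) ? "match" : "no match");
        return 0;
    }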
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index b28baff70864..01b8a6af275b 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | typedef struct mac_addr { | 40 | typedef struct mac_addr { |
41 | u8 mac_addr_value[ETH_ALEN]; | 41 | u8 mac_addr_value[ETH_ALEN]; |
42 | } mac_addr_t; | 42 | } __packed mac_addr_t; |
43 | 43 | ||
44 | enum { | 44 | enum { |
45 | BOND_AD_STABLE = 0, | 45 | BOND_AD_STABLE = 0, |
@@ -134,12 +134,12 @@ typedef struct lacpdu { | |||
134 | u8 tlv_type_terminator; // = terminator | 134 | u8 tlv_type_terminator; // = terminator |
135 | u8 terminator_length; // = 0 | 135 | u8 terminator_length; // = 0 |
136 | u8 reserved_50[50]; // = 0 | 136 | u8 reserved_50[50]; // = 0 |
137 | } lacpdu_t; | 137 | } __packed lacpdu_t; |
138 | 138 | ||
139 | typedef struct lacpdu_header { | 139 | typedef struct lacpdu_header { |
140 | struct ethhdr hdr; | 140 | struct ethhdr hdr; |
141 | struct lacpdu lacpdu; | 141 | struct lacpdu lacpdu; |
142 | } lacpdu_header_t; | 142 | } __packed lacpdu_header_t; |
143 | 143 | ||
144 | // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) | 144 | // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) |
145 | typedef struct bond_marker { | 145 | typedef struct bond_marker { |
@@ -155,12 +155,12 @@ typedef struct bond_marker { | |||
155 | u8 tlv_type_terminator; // = 0x00 | 155 | u8 tlv_type_terminator; // = 0x00 |
156 | u8 terminator_length; // = 0x00 | 156 | u8 terminator_length; // = 0x00 |
157 | u8 reserved_90[90]; // = 0 | 157 | u8 reserved_90[90]; // = 0 |
158 | } bond_marker_t; | 158 | } __packed bond_marker_t; |
159 | 159 | ||
160 | typedef struct bond_marker_header { | 160 | typedef struct bond_marker_header { |
161 | struct ethhdr hdr; | 161 | struct ethhdr hdr; |
162 | struct bond_marker marker; | 162 | struct bond_marker marker; |
163 | } bond_marker_header_t; | 163 | } __packed bond_marker_header_t; |
164 | 164 | ||
165 | #pragma pack() | 165 | #pragma pack() |
166 | 166 | ||
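
bond_3ad.h marks the LACPDU and marker structures __packed so the compiler cannot insert padding between fields of these on-wire formats. A standalone comparison with a hypothetical wire header (not the bonding structs) shows what padding would otherwise do to the layout:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical wire header: 1 byte of type followed by a 16-bit length. */
    struct wire_hdr_unpacked {
        uint8_t  type;
        uint16_t length;    /* compiler may insert a padding byte before this */
    };

    struct wire_hdr_packed {
        uint8_t  type;
        uint16_t length;
    } __attribute__((packed));    /* the kernel's __packed maps to this GCC attribute */

    int main(void)
    {
        printf("unpacked: %zu bytes\n", sizeof(struct wire_hdr_unpacked)); /* typically 4 */
        printf("packed:   %zu bytes\n", sizeof(struct wire_hdr_packed));   /* 3 */
        return 0;
    }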
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index bd1d811c204f..5fedc3375562 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c | |||
@@ -247,8 +247,10 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, | |||
247 | } | 247 | } |
248 | #endif /* CONFIG_PPC_MPC512x */ | 248 | #endif /* CONFIG_PPC_MPC512x */ |
249 | 249 | ||
250 | static struct of_device_id mpc5xxx_can_table[]; | ||
250 | static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) | 251 | static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) |
251 | { | 252 | { |
253 | const struct of_device_id *match; | ||
252 | struct mpc5xxx_can_data *data; | 254 | struct mpc5xxx_can_data *data; |
253 | struct device_node *np = ofdev->dev.of_node; | 255 | struct device_node *np = ofdev->dev.of_node; |
254 | struct net_device *dev; | 256 | struct net_device *dev; |
@@ -258,9 +260,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) | |||
258 | int irq, mscan_clksrc = 0; | 260 | int irq, mscan_clksrc = 0; |
259 | int err = -ENOMEM; | 261 | int err = -ENOMEM; |
260 | 262 | ||
261 | if (!ofdev->dev.of_match) | 263 | match = of_match_device(mpc5xxx_can_table, &ofdev->dev); |
264 | if (!match) | ||
262 | return -EINVAL; | 265 | return -EINVAL; |
263 | data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data; | 266 | data = match->data; |
264 | 267 | ||
265 | base = of_iomap(np, 0); | 268 | base = of_iomap(np, 0); |
266 | if (!base) { | 269 | if (!base) { |
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index a358ea9445a2..f501bba1fc6f 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c | |||
@@ -346,10 +346,10 @@ static void sja1000_rx(struct net_device *dev) | |||
346 | | (priv->read_reg(priv, REG_ID2) >> 5); | 346 | | (priv->read_reg(priv, REG_ID2) >> 5); |
347 | } | 347 | } |
348 | 348 | ||
349 | cf->can_dlc = get_can_dlc(fi & 0x0F); | ||
349 | if (fi & FI_RTR) { | 350 | if (fi & FI_RTR) { |
350 | id |= CAN_RTR_FLAG; | 351 | id |= CAN_RTR_FLAG; |
351 | } else { | 352 | } else { |
352 | cf->can_dlc = get_can_dlc(fi & 0x0F); | ||
353 | for (i = 0; i < cf->can_dlc; i++) | 353 | for (i = 0; i < cf->can_dlc; i++) |
354 | cf->data[i] = priv->read_reg(priv, dreg++); | 354 | cf->data[i] = priv->read_reg(priv, dreg++); |
355 | } | 355 | } |
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index b423965a78d1..1b49df6b2470 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c | |||
@@ -583,7 +583,9 @@ static int slcan_open(struct tty_struct *tty) | |||
583 | /* Done. We have linked the TTY line to a channel. */ | 583 | /* Done. We have linked the TTY line to a channel. */ |
584 | rtnl_unlock(); | 584 | rtnl_unlock(); |
585 | tty->receive_room = 65536; /* We don't flow control */ | 585 | tty->receive_room = 65536; /* We don't flow control */ |
586 | return sl->dev->base_addr; | 586 | |
587 | /* TTY layer expects 0 on success */ | ||
588 | return 0; | ||
587 | 589 | ||
588 | err_free_chan: | 590 | err_free_chan: |
589 | sl->tty = NULL; | 591 | sl->tty = NULL; |
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index 3e2e734fecb7..f3bbdcef338c 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c | |||
@@ -55,15 +55,20 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
55 | cmd->duplex = -1; | 55 | cmd->duplex = -1; |
56 | } | 56 | } |
57 | 57 | ||
58 | cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | 58 | if (cmd->speed == SPEED_10000) { |
59 | | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half | 59 | cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); |
60 | | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half | 60 | cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); |
61 | | SUPPORTED_Autoneg | SUPPORTED_FIBRE); | 61 | cmd->port = PORT_FIBRE; |
62 | 62 | } else { | |
63 | cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg | 63 | cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full |
64 | | ADVERTISED_FIBRE); | 64 | | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full |
65 | | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg | ||
66 | | SUPPORTED_TP); | ||
67 | cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ||
68 | | ADVERTISED_TP); | ||
69 | cmd->port = PORT_TP; | ||
70 | } | ||
65 | 71 | ||
66 | cmd->port = PORT_FIBRE; | ||
67 | cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; | 72 | cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; |
68 | 73 | ||
69 | return 0; | 74 | return 0; |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 53c0f04b1b23..cf79cf759e13 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -2688,9 +2688,6 @@ static int ehea_open(struct net_device *dev) | |||
2688 | netif_start_queue(dev); | 2688 | netif_start_queue(dev); |
2689 | } | 2689 | } |
2690 | 2690 | ||
2691 | init_waitqueue_head(&port->swqe_avail_wq); | ||
2692 | init_waitqueue_head(&port->restart_wq); | ||
2693 | |||
2694 | mutex_unlock(&port->port_lock); | 2691 | mutex_unlock(&port->port_lock); |
2695 | 2692 | ||
2696 | return ret; | 2693 | return ret; |
@@ -3276,6 +3273,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3276 | 3273 | ||
3277 | INIT_WORK(&port->reset_task, ehea_reset_port); | 3274 | INIT_WORK(&port->reset_task, ehea_reset_port); |
3278 | 3275 | ||
3276 | init_waitqueue_head(&port->swqe_avail_wq); | ||
3277 | init_waitqueue_head(&port->restart_wq); | ||
3278 | |||
3279 | ret = register_netdev(dev); | 3279 | ret = register_netdev(dev); |
3280 | if (ret) { | 3280 | if (ret) { |
3281 | pr_err("register_netdev failed. ret=%d\n", ret); | 3281 | pr_err("register_netdev failed. ret=%d\n", ret); |
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 24cb953900dd..5131e61c358c 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -998,8 +998,10 @@ static const struct net_device_ops fs_enet_netdev_ops = { | |||
998 | #endif | 998 | #endif |
999 | }; | 999 | }; |
1000 | 1000 | ||
1001 | static struct of_device_id fs_enet_match[]; | ||
1001 | static int __devinit fs_enet_probe(struct platform_device *ofdev) | 1002 | static int __devinit fs_enet_probe(struct platform_device *ofdev) |
1002 | { | 1003 | { |
1004 | const struct of_device_id *match; | ||
1003 | struct net_device *ndev; | 1005 | struct net_device *ndev; |
1004 | struct fs_enet_private *fep; | 1006 | struct fs_enet_private *fep; |
1005 | struct fs_platform_info *fpi; | 1007 | struct fs_platform_info *fpi; |
@@ -1007,14 +1009,15 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) | |||
1007 | const u8 *mac_addr; | 1009 | const u8 *mac_addr; |
1008 | int privsize, len, ret = -ENODEV; | 1010 | int privsize, len, ret = -ENODEV; |
1009 | 1011 | ||
1010 | if (!ofdev->dev.of_match) | 1012 | match = of_match_device(fs_enet_match, &ofdev->dev); |
1013 | if (!match) | ||
1011 | return -EINVAL; | 1014 | return -EINVAL; |
1012 | 1015 | ||
1013 | fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); | 1016 | fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); |
1014 | if (!fpi) | 1017 | if (!fpi) |
1015 | return -ENOMEM; | 1018 | return -ENOMEM; |
1016 | 1019 | ||
1017 | if (!IS_FEC(ofdev->dev.of_match)) { | 1020 | if (!IS_FEC(match)) { |
1018 | data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); | 1021 | data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); |
1019 | if (!data || len != 4) | 1022 | if (!data || len != 4) |
1020 | goto out_free_fpi; | 1023 | goto out_free_fpi; |
@@ -1049,7 +1052,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) | |||
1049 | fep->dev = &ofdev->dev; | 1052 | fep->dev = &ofdev->dev; |
1050 | fep->ndev = ndev; | 1053 | fep->ndev = ndev; |
1051 | fep->fpi = fpi; | 1054 | fep->fpi = fpi; |
1052 | fep->ops = ofdev->dev.of_match->data; | 1055 | fep->ops = match->data; |
1053 | 1056 | ||
1054 | ret = fep->ops->setup_data(ndev); | 1057 | ret = fep->ops->setup_data(ndev); |
1055 | if (ret) | 1058 | if (ret) |
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index 7e840d373ab3..6a2e150e75bb 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c | |||
@@ -101,17 +101,20 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus) | |||
101 | return 0; | 101 | return 0; |
102 | } | 102 | } |
103 | 103 | ||
104 | static struct of_device_id fs_enet_mdio_fec_match[]; | ||
104 | static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) | 105 | static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) |
105 | { | 106 | { |
107 | const struct of_device_id *match; | ||
106 | struct resource res; | 108 | struct resource res; |
107 | struct mii_bus *new_bus; | 109 | struct mii_bus *new_bus; |
108 | struct fec_info *fec; | 110 | struct fec_info *fec; |
109 | int (*get_bus_freq)(struct device_node *); | 111 | int (*get_bus_freq)(struct device_node *); |
110 | int ret = -ENOMEM, clock, speed; | 112 | int ret = -ENOMEM, clock, speed; |
111 | 113 | ||
112 | if (!ofdev->dev.of_match) | 114 | match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); |
115 | if (!match) | ||
113 | return -EINVAL; | 116 | return -EINVAL; |
114 | get_bus_freq = ofdev->dev.of_match->data; | 117 | get_bus_freq = match->data; |
115 | 118 | ||
116 | new_bus = mdiobus_alloc(); | 119 | new_bus = mdiobus_alloc(); |
117 | if (!new_bus) | 120 | if (!new_bus) |
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index c5ef62ceb840..1cd481c04202 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c | |||
@@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = { | |||
98 | .ndo_open = hydra_open, | 98 | .ndo_open = hydra_open, |
99 | .ndo_stop = hydra_close, | 99 | .ndo_stop = hydra_close, |
100 | 100 | ||
101 | .ndo_start_xmit = ei_start_xmit, | 101 | .ndo_start_xmit = __ei_start_xmit, |
102 | .ndo_tx_timeout = ei_tx_timeout, | 102 | .ndo_tx_timeout = __ei_tx_timeout, |
103 | .ndo_get_stats = ei_get_stats, | 103 | .ndo_get_stats = __ei_get_stats, |
104 | .ndo_set_multicast_list = ei_set_multicast_list, | 104 | .ndo_set_multicast_list = __ei_set_multicast_list, |
105 | .ndo_validate_addr = eth_validate_addr, | 105 | .ndo_validate_addr = eth_validate_addr, |
106 | .ndo_set_mac_address = eth_mac_addr, | 106 | .ndo_set_mac_address = eth_mac_addr, |
107 | .ndo_change_mtu = eth_change_mtu, | 107 | .ndo_change_mtu = eth_change_mtu, |
108 | #ifdef CONFIG_NET_POLL_CONTROLLER | 108 | #ifdef CONFIG_NET_POLL_CONTROLLER |
109 | .ndo_poll_controller = ei_poll, | 109 | .ndo_poll_controller = __ei_poll, |
110 | #endif | 110 | #endif |
111 | }; | 111 | }; |
112 | 112 | ||
@@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z) | |||
125 | 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, | 125 | 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, |
126 | }; | 126 | }; |
127 | 127 | ||
128 | dev = alloc_ei_netdev(); | 128 | dev = ____alloc_ei_netdev(0); |
129 | if (!dev) | 129 | if (!dev) |
130 | return -ENOMEM; | 130 | return -ENOMEM; |
131 | 131 | ||
diff --git a/drivers/net/lantiq_etop.c b/drivers/net/lantiq_etop.c new file mode 100644 index 000000000000..45f252b7da30 --- /dev/null +++ b/drivers/net/lantiq_etop.c | |||
@@ -0,0 +1,805 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. | ||
14 | * | ||
15 | * Copyright (C) 2011 John Crispin <blogic@openwrt.org> | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/in.h> | ||
25 | #include <linux/netdevice.h> | ||
26 | #include <linux/etherdevice.h> | ||
27 | #include <linux/phy.h> | ||
28 | #include <linux/ip.h> | ||
29 | #include <linux/tcp.h> | ||
30 | #include <linux/skbuff.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/ethtool.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/io.h> | ||
37 | |||
38 | #include <asm/checksum.h> | ||
39 | |||
40 | #include <lantiq_soc.h> | ||
41 | #include <xway_dma.h> | ||
42 | #include <lantiq_platform.h> | ||
43 | |||
44 | #define LTQ_ETOP_MDIO 0x11804 | ||
45 | #define MDIO_REQUEST 0x80000000 | ||
46 | #define MDIO_READ 0x40000000 | ||
47 | #define MDIO_ADDR_MASK 0x1f | ||
48 | #define MDIO_ADDR_OFFSET 0x15 | ||
49 | #define MDIO_REG_MASK 0x1f | ||
50 | #define MDIO_REG_OFFSET 0x10 | ||
51 | #define MDIO_VAL_MASK 0xffff | ||
52 | |||
53 | #define PPE32_CGEN 0x800 | ||
54 | #define LQ_PPE32_ENET_MAC_CFG 0x1840 | ||
55 | |||
56 | #define LTQ_ETOP_ENETS0 0x11850 | ||
57 | #define LTQ_ETOP_MAC_DA0 0x1186C | ||
58 | #define LTQ_ETOP_MAC_DA1 0x11870 | ||
59 | #define LTQ_ETOP_CFG 0x16020 | ||
60 | #define LTQ_ETOP_IGPLEN 0x16080 | ||
61 | |||
62 | #define MAX_DMA_CHAN 0x8 | ||
63 | #define MAX_DMA_CRC_LEN 0x4 | ||
64 | #define MAX_DMA_DATA_LEN 0x600 | ||
65 | |||
66 | #define ETOP_FTCU BIT(28) | ||
67 | #define ETOP_MII_MASK 0xf | ||
68 | #define ETOP_MII_NORMAL 0xd | ||
69 | #define ETOP_MII_REVERSE 0xe | ||
70 | #define ETOP_PLEN_UNDER 0x40 | ||
71 | #define ETOP_CGEN 0x800 | ||
72 | |||
73 | /* use 2 static channels for TX/RX */ | ||
74 | #define LTQ_ETOP_TX_CHANNEL 1 | ||
75 | #define LTQ_ETOP_RX_CHANNEL 6 | ||
76 | #define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL) | ||
77 | #define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL) | ||
78 | |||
79 | #define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x)) | ||
80 | #define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y)) | ||
81 | #define ltq_etop_w32_mask(x, y, z) \ | ||
82 | ltq_w32_mask(x, y, ltq_etop_membase + (z)) | ||
83 | |||
84 | #define DRV_VERSION "1.0" | ||
85 | |||
86 | static void __iomem *ltq_etop_membase; | ||
87 | |||
88 | struct ltq_etop_chan { | ||
89 | int idx; | ||
90 | int tx_free; | ||
91 | struct net_device *netdev; | ||
92 | struct napi_struct napi; | ||
93 | struct ltq_dma_channel dma; | ||
94 | struct sk_buff *skb[LTQ_DESC_NUM]; | ||
95 | }; | ||
96 | |||
97 | struct ltq_etop_priv { | ||
98 | struct net_device *netdev; | ||
99 | struct ltq_eth_data *pldata; | ||
100 | struct resource *res; | ||
101 | |||
102 | struct mii_bus *mii_bus; | ||
103 | struct phy_device *phydev; | ||
104 | |||
105 | struct ltq_etop_chan ch[MAX_DMA_CHAN]; | ||
106 | int tx_free[MAX_DMA_CHAN >> 1]; | ||
107 | |||
108 | spinlock_t lock; | ||
109 | }; | ||
110 | |||
111 | static int | ||
112 | ltq_etop_alloc_skb(struct ltq_etop_chan *ch) | ||
113 | { | ||
114 | ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN); | ||
115 | if (!ch->skb[ch->dma.desc]) | ||
116 | return -ENOMEM; | ||
117 | ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL, | ||
118 | ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN, | ||
119 | DMA_FROM_DEVICE); | ||
120 | ch->dma.desc_base[ch->dma.desc].addr = | ||
121 | CPHYSADDR(ch->skb[ch->dma.desc]->data); | ||
122 | ch->dma.desc_base[ch->dma.desc].ctl = | ||
123 | LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | | ||
124 | MAX_DMA_DATA_LEN; | ||
125 | skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN); | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static void | ||
130 | ltq_etop_hw_receive(struct ltq_etop_chan *ch) | ||
131 | { | ||
132 | struct ltq_etop_priv *priv = netdev_priv(ch->netdev); | ||
133 | struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; | ||
134 | struct sk_buff *skb = ch->skb[ch->dma.desc]; | ||
135 | int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN; | ||
136 | unsigned long flags; | ||
137 | |||
138 | spin_lock_irqsave(&priv->lock, flags); | ||
139 | if (ltq_etop_alloc_skb(ch)) { | ||
140 | netdev_err(ch->netdev, | ||
141 | "failed to allocate new rx buffer, stopping DMA\n"); | ||
142 | ltq_dma_close(&ch->dma); | ||
143 | } | ||
144 | ch->dma.desc++; | ||
145 | ch->dma.desc %= LTQ_DESC_NUM; | ||
146 | spin_unlock_irqrestore(&priv->lock, flags); | ||
147 | |||
148 | skb_put(skb, len); | ||
149 | skb->dev = ch->netdev; | ||
150 | skb->protocol = eth_type_trans(skb, ch->netdev); | ||
151 | netif_receive_skb(skb); | ||
152 | } | ||
153 | |||
154 | static int | ||
155 | ltq_etop_poll_rx(struct napi_struct *napi, int budget) | ||
156 | { | ||
157 | struct ltq_etop_chan *ch = container_of(napi, | ||
158 | struct ltq_etop_chan, napi); | ||
159 | int rx = 0; | ||
160 | int complete = 0; | ||
161 | |||
162 | while ((rx < budget) && !complete) { | ||
163 | struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; | ||
164 | |||
165 | if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { | ||
166 | ltq_etop_hw_receive(ch); | ||
167 | rx++; | ||
168 | } else { | ||
169 | complete = 1; | ||
170 | } | ||
171 | } | ||
172 | if (complete || !rx) { | ||
173 | napi_complete(&ch->napi); | ||
174 | ltq_dma_ack_irq(&ch->dma); | ||
175 | } | ||
176 | return rx; | ||
177 | } | ||
178 | |||
179 | static int | ||
180 | ltq_etop_poll_tx(struct napi_struct *napi, int budget) | ||
181 | { | ||
182 | struct ltq_etop_chan *ch = | ||
183 | container_of(napi, struct ltq_etop_chan, napi); | ||
184 | struct ltq_etop_priv *priv = netdev_priv(ch->netdev); | ||
185 | struct netdev_queue *txq = | ||
186 | netdev_get_tx_queue(ch->netdev, ch->idx >> 1); | ||
187 | unsigned long flags; | ||
188 | |||
189 | spin_lock_irqsave(&priv->lock, flags); | ||
190 | while ((ch->dma.desc_base[ch->tx_free].ctl & | ||
191 | (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { | ||
192 | dev_kfree_skb_any(ch->skb[ch->tx_free]); | ||
193 | ch->skb[ch->tx_free] = NULL; | ||
194 | memset(&ch->dma.desc_base[ch->tx_free], 0, | ||
195 | sizeof(struct ltq_dma_desc)); | ||
196 | ch->tx_free++; | ||
197 | ch->tx_free %= LTQ_DESC_NUM; | ||
198 | } | ||
199 | spin_unlock_irqrestore(&priv->lock, flags); | ||
200 | |||
201 | if (netif_tx_queue_stopped(txq)) | ||
202 | netif_tx_start_queue(txq); | ||
203 | napi_complete(&ch->napi); | ||
204 | ltq_dma_ack_irq(&ch->dma); | ||
205 | return 1; | ||
206 | } | ||
207 | |||
208 | static irqreturn_t | ||
209 | ltq_etop_dma_irq(int irq, void *_priv) | ||
210 | { | ||
211 | struct ltq_etop_priv *priv = _priv; | ||
212 | int ch = irq - LTQ_DMA_CH0_INT; | ||
213 | |||
214 | napi_schedule(&priv->ch[ch].napi); | ||
215 | return IRQ_HANDLED; | ||
216 | } | ||
217 | |||
218 | static void | ||
219 | ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) | ||
220 | { | ||
221 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
222 | |||
223 | ltq_dma_free(&ch->dma); | ||
224 | if (ch->dma.irq) | ||
225 | free_irq(ch->dma.irq, priv); | ||
226 | if (IS_RX(ch->idx)) { | ||
227 | int desc; | ||
228 | for (desc = 0; desc < LTQ_DESC_NUM; desc++) | ||
229 | dev_kfree_skb_any(ch->skb[desc]); | ||
230 | } | ||
231 | } | ||
232 | |||
233 | static void | ||
234 | ltq_etop_hw_exit(struct net_device *dev) | ||
235 | { | ||
236 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
237 | int i; | ||
238 | |||
239 | ltq_pmu_disable(PMU_PPE); | ||
240 | for (i = 0; i < MAX_DMA_CHAN; i++) | ||
241 | if (IS_TX(i) || IS_RX(i)) | ||
242 | ltq_etop_free_channel(dev, &priv->ch[i]); | ||
243 | } | ||
244 | |||
245 | static int | ||
246 | ltq_etop_hw_init(struct net_device *dev) | ||
247 | { | ||
248 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
249 | int i; | ||
250 | |||
251 | ltq_pmu_enable(PMU_PPE); | ||
252 | |||
253 | switch (priv->pldata->mii_mode) { | ||
254 | case PHY_INTERFACE_MODE_RMII: | ||
255 | ltq_etop_w32_mask(ETOP_MII_MASK, | ||
256 | ETOP_MII_REVERSE, LTQ_ETOP_CFG); | ||
257 | break; | ||
258 | |||
259 | case PHY_INTERFACE_MODE_MII: | ||
260 | ltq_etop_w32_mask(ETOP_MII_MASK, | ||
261 | ETOP_MII_NORMAL, LTQ_ETOP_CFG); | ||
262 | break; | ||
263 | |||
264 | default: | ||
265 | netdev_err(dev, "unknown mii mode %d\n", | ||
266 | priv->pldata->mii_mode); | ||
267 | return -ENOTSUPP; | ||
268 | } | ||
269 | |||
270 | /* enable crc generation */ | ||
271 | ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG); | ||
272 | |||
273 | ltq_dma_init_port(DMA_PORT_ETOP); | ||
274 | |||
275 | for (i = 0; i < MAX_DMA_CHAN; i++) { | ||
276 | int irq = LTQ_DMA_CH0_INT + i; | ||
277 | struct ltq_etop_chan *ch = &priv->ch[i]; | ||
278 | |||
279 | ch->idx = ch->dma.nr = i; | ||
280 | |||
281 | if (IS_TX(i)) { | ||
282 | ltq_dma_alloc_tx(&ch->dma); | ||
283 | request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, | ||
284 | "etop_tx", priv); | ||
285 | } else if (IS_RX(i)) { | ||
286 | ltq_dma_alloc_rx(&ch->dma); | ||
287 | for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; | ||
288 | ch->dma.desc++) | ||
289 | if (ltq_etop_alloc_skb(ch)) | ||
290 | return -ENOMEM; | ||
291 | ch->dma.desc = 0; | ||
292 | request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, | ||
293 | "etop_rx", priv); | ||
294 | } | ||
295 | ch->dma.irq = irq; | ||
296 | } | ||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | static void | ||
301 | ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
302 | { | ||
303 | strcpy(info->driver, "Lantiq ETOP"); | ||
304 | strcpy(info->bus_info, "internal"); | ||
305 | strcpy(info->version, DRV_VERSION); | ||
306 | } | ||
307 | |||
308 | static int | ||
309 | ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
310 | { | ||
311 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
312 | |||
313 | return phy_ethtool_gset(priv->phydev, cmd); | ||
314 | } | ||
315 | |||
316 | static int | ||
317 | ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
318 | { | ||
319 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
320 | |||
321 | return phy_ethtool_sset(priv->phydev, cmd); | ||
322 | } | ||
323 | |||
324 | static int | ||
325 | ltq_etop_nway_reset(struct net_device *dev) | ||
326 | { | ||
327 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
328 | |||
329 | return phy_start_aneg(priv->phydev); | ||
330 | } | ||
331 | |||
332 | static const struct ethtool_ops ltq_etop_ethtool_ops = { | ||
333 | .get_drvinfo = ltq_etop_get_drvinfo, | ||
334 | .get_settings = ltq_etop_get_settings, | ||
335 | .set_settings = ltq_etop_set_settings, | ||
336 | .nway_reset = ltq_etop_nway_reset, | ||
337 | }; | ||
338 | |||
339 | static int | ||
340 | ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data) | ||
341 | { | ||
342 | u32 val = MDIO_REQUEST | | ||
343 | ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | | ||
344 | ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) | | ||
345 | phy_data; | ||
346 | |||
347 | while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) | ||
348 | ; | ||
349 | ltq_etop_w32(val, LTQ_ETOP_MDIO); | ||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | static int | ||
354 | ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg) | ||
355 | { | ||
356 | u32 val = MDIO_REQUEST | MDIO_READ | | ||
357 | ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | | ||
358 | ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET); | ||
359 | |||
360 | while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) | ||
361 | ; | ||
362 | ltq_etop_w32(val, LTQ_ETOP_MDIO); | ||
363 | while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) | ||
364 | ; | ||
365 | val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK; | ||
366 | return val; | ||
367 | } | ||
368 | |||
369 | static void | ||
370 | ltq_etop_mdio_link(struct net_device *dev) | ||
371 | { | ||
372 | /* nothing to do */ | ||
373 | } | ||
374 | |||
375 | static int | ||
376 | ltq_etop_mdio_probe(struct net_device *dev) | ||
377 | { | ||
378 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
379 | struct phy_device *phydev = NULL; | ||
380 | int phy_addr; | ||
381 | |||
382 | for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { | ||
383 | if (priv->mii_bus->phy_map[phy_addr]) { | ||
384 | phydev = priv->mii_bus->phy_map[phy_addr]; | ||
385 | break; | ||
386 | } | ||
387 | } | ||
388 | |||
389 | if (!phydev) { | ||
390 | netdev_err(dev, "no PHY found\n"); | ||
391 | return -ENODEV; | ||
392 | } | ||
393 | |||
394 | phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link, | ||
395 | 0, priv->pldata->mii_mode); | ||
396 | |||
397 | if (IS_ERR(phydev)) { | ||
398 | netdev_err(dev, "Could not attach to PHY\n"); | ||
399 | return PTR_ERR(phydev); | ||
400 | } | ||
401 | |||
402 | phydev->supported &= (SUPPORTED_10baseT_Half | ||
403 | | SUPPORTED_10baseT_Full | ||
404 | | SUPPORTED_100baseT_Half | ||
405 | | SUPPORTED_100baseT_Full | ||
406 | | SUPPORTED_Autoneg | ||
407 | | SUPPORTED_MII | ||
408 | | SUPPORTED_TP); | ||
409 | |||
410 | phydev->advertising = phydev->supported; | ||
411 | priv->phydev = phydev; | ||
412 | pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n", | ||
413 | dev->name, phydev->drv->name, | ||
414 | dev_name(&phydev->dev), phydev->irq); | ||
415 | |||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | static int | ||
420 | ltq_etop_mdio_init(struct net_device *dev) | ||
421 | { | ||
422 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
423 | int i; | ||
424 | int err; | ||
425 | |||
426 | priv->mii_bus = mdiobus_alloc(); | ||
427 | if (!priv->mii_bus) { | ||
428 | netdev_err(dev, "failed to allocate mii bus\n"); | ||
429 | err = -ENOMEM; | ||
430 | goto err_out; | ||
431 | } | ||
432 | |||
433 | priv->mii_bus->priv = dev; | ||
434 | priv->mii_bus->read = ltq_etop_mdio_rd; | ||
435 | priv->mii_bus->write = ltq_etop_mdio_wr; | ||
436 | priv->mii_bus->name = "ltq_mii"; | ||
437 | snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0); | ||
438 | priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | ||
439 | if (!priv->mii_bus->irq) { | ||
440 | err = -ENOMEM; | ||
441 | goto err_out_free_mdiobus; | ||
442 | } | ||
443 | |||
444 | for (i = 0; i < PHY_MAX_ADDR; ++i) | ||
445 | priv->mii_bus->irq[i] = PHY_POLL; | ||
446 | |||
447 | if (mdiobus_register(priv->mii_bus)) { | ||
448 | err = -ENXIO; | ||
449 | goto err_out_free_mdio_irq; | ||
450 | } | ||
451 | |||
452 | if (ltq_etop_mdio_probe(dev)) { | ||
453 | err = -ENXIO; | ||
454 | goto err_out_unregister_bus; | ||
455 | } | ||
456 | return 0; | ||
457 | |||
458 | err_out_unregister_bus: | ||
459 | mdiobus_unregister(priv->mii_bus); | ||
460 | err_out_free_mdio_irq: | ||
461 | kfree(priv->mii_bus->irq); | ||
462 | err_out_free_mdiobus: | ||
463 | mdiobus_free(priv->mii_bus); | ||
464 | err_out: | ||
465 | return err; | ||
466 | } | ||
467 | |||
468 | static void | ||
469 | ltq_etop_mdio_cleanup(struct net_device *dev) | ||
470 | { | ||
471 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
472 | |||
473 | phy_disconnect(priv->phydev); | ||
474 | mdiobus_unregister(priv->mii_bus); | ||
475 | kfree(priv->mii_bus->irq); | ||
476 | mdiobus_free(priv->mii_bus); | ||
477 | } | ||
478 | |||
479 | static int | ||
480 | ltq_etop_open(struct net_device *dev) | ||
481 | { | ||
482 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
483 | int i; | ||
484 | |||
485 | for (i = 0; i < MAX_DMA_CHAN; i++) { | ||
486 | struct ltq_etop_chan *ch = &priv->ch[i]; | ||
487 | |||
488 | if (!IS_TX(i) && (!IS_RX(i))) | ||
489 | continue; | ||
490 | ltq_dma_open(&ch->dma); | ||
491 | napi_enable(&ch->napi); | ||
492 | } | ||
493 | phy_start(priv->phydev); | ||
494 | netif_tx_start_all_queues(dev); | ||
495 | return 0; | ||
496 | } | ||
497 | |||
498 | static int | ||
499 | ltq_etop_stop(struct net_device *dev) | ||
500 | { | ||
501 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
502 | int i; | ||
503 | |||
504 | netif_tx_stop_all_queues(dev); | ||
505 | phy_stop(priv->phydev); | ||
506 | for (i = 0; i < MAX_DMA_CHAN; i++) { | ||
507 | struct ltq_etop_chan *ch = &priv->ch[i]; | ||
508 | |||
509 | if (!IS_RX(i) && !IS_TX(i)) | ||
510 | continue; | ||
511 | napi_disable(&ch->napi); | ||
512 | ltq_dma_close(&ch->dma); | ||
513 | } | ||
514 | return 0; | ||
515 | } | ||
516 | |||
517 | static int | ||
518 | ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) | ||
519 | { | ||
520 | int queue = skb_get_queue_mapping(skb); | ||
521 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue); | ||
522 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
523 | struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1]; | ||
524 | struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; | ||
525 | int len; | ||
526 | unsigned long flags; | ||
527 | u32 byte_offset; | ||
528 | |||
529 | len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; | ||
530 | |||
531 | if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { | ||
532 | dev_kfree_skb_any(skb); | ||
533 | netdev_err(dev, "tx ring full\n"); | ||
534 | netif_tx_stop_queue(txq); | ||
535 | return NETDEV_TX_BUSY; | ||
536 | } | ||
537 | |||
538 | /* dma needs to start on a 16 byte aligned address */ | ||
539 | byte_offset = CPHYSADDR(skb->data) % 16; | ||
540 | ch->skb[ch->dma.desc] = skb; | ||
541 | |||
542 | dev->trans_start = jiffies; | ||
543 | |||
544 | spin_lock_irqsave(&priv->lock, flags); | ||
545 | desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len, | ||
546 | DMA_TO_DEVICE)) - byte_offset; | ||
547 | wmb(); | ||
548 | desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | | ||
549 | LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK); | ||
550 | ch->dma.desc++; | ||
551 | ch->dma.desc %= LTQ_DESC_NUM; | ||
552 | spin_unlock_irqrestore(&priv->lock, flags); | ||
553 | |||
554 | if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN) | ||
555 | netif_tx_stop_queue(txq); | ||
556 | |||
557 | return NETDEV_TX_OK; | ||
558 | } | ||
559 | |||
560 | static int | ||
561 | ltq_etop_change_mtu(struct net_device *dev, int new_mtu) | ||
562 | { | ||
563 | int ret = eth_change_mtu(dev, new_mtu); | ||
564 | |||
565 | if (!ret) { | ||
566 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
567 | unsigned long flags; | ||
568 | |||
569 | spin_lock_irqsave(&priv->lock, flags); | ||
570 | ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, | ||
571 | LTQ_ETOP_IGPLEN); | ||
572 | spin_unlock_irqrestore(&priv->lock, flags); | ||
573 | } | ||
574 | return ret; | ||
575 | } | ||
576 | |||
577 | static int | ||
578 | ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
579 | { | ||
580 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
581 | |||
582 | /* TODO: mii-tool reports "No MII transceiver present!." ?! */ | ||
583 | return phy_mii_ioctl(priv->phydev, rq, cmd); | ||
584 | } | ||
585 | |||
586 | static int | ||
587 | ltq_etop_set_mac_address(struct net_device *dev, void *p) | ||
588 | { | ||
589 | int ret = eth_mac_addr(dev, p); | ||
590 | |||
591 | if (!ret) { | ||
592 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
593 | unsigned long flags; | ||
594 | |||
595 | /* store the mac for the unicast filter */ | ||
596 | spin_lock_irqsave(&priv->lock, flags); | ||
597 | ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0); | ||
598 | ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16, | ||
599 | LTQ_ETOP_MAC_DA1); | ||
600 | spin_unlock_irqrestore(&priv->lock, flags); | ||
601 | } | ||
602 | return ret; | ||
603 | } | ||
604 | |||
605 | static void | ||
606 | ltq_etop_set_multicast_list(struct net_device *dev) | ||
607 | { | ||
608 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
609 | unsigned long flags; | ||
610 | |||
611 | /* ensure that the unicast filter is not enabled in promiscuous mode */ | ||
612 | spin_lock_irqsave(&priv->lock, flags); | ||
613 | if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) | ||
614 | ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0); | ||
615 | else | ||
616 | ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0); | ||
617 | spin_unlock_irqrestore(&priv->lock, flags); | ||
618 | } | ||
619 | |||
620 | static u16 | ||
621 | ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
622 | { | ||
623 | /* we are currently only using the first queue */ | ||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | static int | ||
628 | ltq_etop_init(struct net_device *dev) | ||
629 | { | ||
630 | struct ltq_etop_priv *priv = netdev_priv(dev); | ||
631 | struct sockaddr mac; | ||
632 | int err; | ||
633 | |||
634 | ether_setup(dev); | ||
635 | dev->watchdog_timeo = 10 * HZ; | ||
636 | err = ltq_etop_hw_init(dev); | ||
637 | if (err) | ||
638 | goto err_hw; | ||
639 | ltq_etop_change_mtu(dev, 1500); | ||
640 | |||
641 | memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr)); | ||
642 | if (!is_valid_ether_addr(mac.sa_data)) { | ||
643 | pr_warn("etop: invalid MAC, using random\n"); | ||
644 | random_ether_addr(mac.sa_data); | ||
645 | } | ||
646 | |||
647 | err = ltq_etop_set_mac_address(dev, &mac); | ||
648 | if (err) | ||
649 | goto err_netdev; | ||
650 | ltq_etop_set_multicast_list(dev); | ||
651 | err = ltq_etop_mdio_init(dev); | ||
652 | if (err) | ||
653 | goto err_netdev; | ||
654 | return 0; | ||
655 | |||
656 | err_netdev: | ||
657 | unregister_netdev(dev); | ||
658 | free_netdev(dev); | ||
659 | err_hw: | ||
660 | ltq_etop_hw_exit(dev); | ||
661 | return err; | ||
662 | } | ||
663 | |||
664 | static void | ||
665 | ltq_etop_tx_timeout(struct net_device *dev) | ||
666 | { | ||
667 | int err; | ||
668 | |||
669 | ltq_etop_hw_exit(dev); | ||
670 | err = ltq_etop_hw_init(dev); | ||
671 | if (err) | ||
672 | goto err_hw; | ||
673 | dev->trans_start = jiffies; | ||
674 | netif_wake_queue(dev); | ||
675 | return; | ||
676 | |||
677 | err_hw: | ||
678 | ltq_etop_hw_exit(dev); | ||
679 | netdev_err(dev, "failed to restart etop after TX timeout\n"); | ||
680 | } | ||
681 | |||
682 | static const struct net_device_ops ltq_eth_netdev_ops = { | ||
683 | .ndo_open = ltq_etop_open, | ||
684 | .ndo_stop = ltq_etop_stop, | ||
685 | .ndo_start_xmit = ltq_etop_tx, | ||
686 | .ndo_change_mtu = ltq_etop_change_mtu, | ||
687 | .ndo_do_ioctl = ltq_etop_ioctl, | ||
688 | .ndo_set_mac_address = ltq_etop_set_mac_address, | ||
689 | .ndo_validate_addr = eth_validate_addr, | ||
690 | .ndo_set_multicast_list = ltq_etop_set_multicast_list, | ||
691 | .ndo_select_queue = ltq_etop_select_queue, | ||
692 | .ndo_init = ltq_etop_init, | ||
693 | .ndo_tx_timeout = ltq_etop_tx_timeout, | ||
694 | }; | ||
695 | |||
696 | static int __init | ||
697 | ltq_etop_probe(struct platform_device *pdev) | ||
698 | { | ||
699 | struct net_device *dev; | ||
700 | struct ltq_etop_priv *priv; | ||
701 | struct resource *res; | ||
702 | int err; | ||
703 | int i; | ||
704 | |||
705 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
706 | if (!res) { | ||
707 | dev_err(&pdev->dev, "failed to get etop resource\n"); | ||
708 | err = -ENOENT; | ||
709 | goto err_out; | ||
710 | } | ||
711 | |||
712 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
713 | resource_size(res), dev_name(&pdev->dev)); | ||
714 | if (!res) { | ||
715 | dev_err(&pdev->dev, "failed to request etop resource\n"); | ||
716 | err = -EBUSY; | ||
717 | goto err_out; | ||
718 | } | ||
719 | |||
720 | ltq_etop_membase = devm_ioremap_nocache(&pdev->dev, | ||
721 | res->start, resource_size(res)); | ||
722 | if (!ltq_etop_membase) { | ||
723 | dev_err(&pdev->dev, "failed to remap etop engine %d\n", | ||
724 | pdev->id); | ||
725 | err = -ENOMEM; | ||
726 | goto err_out; | ||
727 | } | ||
728 | |||
729 | dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4); | ||
730 | strcpy(dev->name, "eth%d"); | ||
731 | dev->netdev_ops = &ltq_eth_netdev_ops; | ||
732 | dev->ethtool_ops = &ltq_etop_ethtool_ops; | ||
733 | priv = netdev_priv(dev); | ||
734 | priv->res = res; | ||
735 | priv->pldata = dev_get_platdata(&pdev->dev); | ||
736 | priv->netdev = dev; | ||
737 | spin_lock_init(&priv->lock); | ||
738 | |||
739 | for (i = 0; i < MAX_DMA_CHAN; i++) { | ||
740 | if (IS_TX(i)) | ||
741 | netif_napi_add(dev, &priv->ch[i].napi, | ||
742 | ltq_etop_poll_tx, 8); | ||
743 | else if (IS_RX(i)) | ||
744 | netif_napi_add(dev, &priv->ch[i].napi, | ||
745 | ltq_etop_poll_rx, 32); | ||
746 | priv->ch[i].netdev = dev; | ||
747 | } | ||
748 | |||
749 | err = register_netdev(dev); | ||
750 | if (err) | ||
751 | goto err_free; | ||
752 | |||
753 | platform_set_drvdata(pdev, dev); | ||
754 | return 0; | ||
755 | |||
756 | err_free: | ||
757 | kfree(dev); | ||
758 | err_out: | ||
759 | return err; | ||
760 | } | ||
761 | |||
762 | static int __devexit | ||
763 | ltq_etop_remove(struct platform_device *pdev) | ||
764 | { | ||
765 | struct net_device *dev = platform_get_drvdata(pdev); | ||
766 | |||
767 | if (dev) { | ||
768 | netif_tx_stop_all_queues(dev); | ||
769 | ltq_etop_hw_exit(dev); | ||
770 | ltq_etop_mdio_cleanup(dev); | ||
771 | unregister_netdev(dev); | ||
772 | } | ||
773 | return 0; | ||
774 | } | ||
775 | |||
776 | static struct platform_driver ltq_mii_driver = { | ||
777 | .remove = __devexit_p(ltq_etop_remove), | ||
778 | .driver = { | ||
779 | .name = "ltq_etop", | ||
780 | .owner = THIS_MODULE, | ||
781 | }, | ||
782 | }; | ||
783 | |||
784 | int __init | ||
785 | init_ltq_etop(void) | ||
786 | { | ||
787 | int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe); | ||
788 | |||
789 | if (ret) | ||
790 | pr_err("ltq_etop: Error registering platform driver!"); | ||
791 | return ret; | ||
792 | } | ||
793 | |||
794 | static void __exit | ||
795 | exit_ltq_etop(void) | ||
796 | { | ||
797 | platform_driver_unregister(&ltq_mii_driver); | ||
798 | } | ||
799 | |||
800 | module_init(init_ltq_etop); | ||
801 | module_exit(exit_ltq_etop); | ||
802 | |||
803 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); | ||
804 | MODULE_DESCRIPTION("Lantiq SoC ETOP"); | ||
805 | MODULE_LICENSE("GPL"); | ||
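The ltq_etop MDIO path above follows the usual allocate, configure, register ladder with goto-based unwinding, so every failure point releases exactly what was set up before it. Below is a minimal sketch of that pattern against the 2.6.39-era mdiobus API; the example_* names and the read/write callbacks passed in are hypothetical, not the driver's actual helpers.

#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

/* Sketch of the alloc/register/unwind ladder used by ltq_etop_mdio_init() */
static int example_mdio_init(struct net_device *dev,
			     int (*rd)(struct mii_bus *, int, int),
			     int (*wr)(struct mii_bus *, int, int, u16),
			     struct mii_bus **out)
{
	struct mii_bus *bus;
	int i, err;

	bus = mdiobus_alloc();
	if (!bus)
		return -ENOMEM;

	bus->priv = dev;
	bus->read = rd;
	bus->write = wr;
	bus->name = "example_mii";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", 0);

	bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		bus->irq[i] = PHY_POLL;		/* no PHY interrupt line wired up */

	err = mdiobus_register(bus);		/* scans the bus through rd() */
	if (err)
		goto err_free_irq;

	*out = bus;
	return 0;

err_free_irq:
	kfree(bus->irq);
err_free_bus:
	mdiobus_free(bus);
	return err;
}

Teardown mirrors the ladder in reverse, as ltq_etop_mdio_cleanup() shows: phy_disconnect(), mdiobus_unregister(), kfree() of the irq table, mdiobus_free().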
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index 30be8c634ebd..7298a34bc795 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c | |||
@@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev) | |||
167 | #ifndef MODULE | 167 | #ifndef MODULE |
168 | struct net_device * __init ne_probe(int unit) | 168 | struct net_device * __init ne_probe(int unit) |
169 | { | 169 | { |
170 | struct net_device *dev = alloc_ei_netdev(); | 170 | struct net_device *dev = ____alloc_ei_netdev(0); |
171 | int err; | 171 | int err; |
172 | 172 | ||
173 | if (!dev) | 173 | if (!dev) |
@@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = { | |||
197 | .ndo_open = ne_open, | 197 | .ndo_open = ne_open, |
198 | .ndo_stop = ne_close, | 198 | .ndo_stop = ne_close, |
199 | 199 | ||
200 | .ndo_start_xmit = ei_start_xmit, | 200 | .ndo_start_xmit = __ei_start_xmit, |
201 | .ndo_tx_timeout = ei_tx_timeout, | 201 | .ndo_tx_timeout = __ei_tx_timeout, |
202 | .ndo_get_stats = ei_get_stats, | 202 | .ndo_get_stats = __ei_get_stats, |
203 | .ndo_set_multicast_list = ei_set_multicast_list, | 203 | .ndo_set_multicast_list = __ei_set_multicast_list, |
204 | .ndo_validate_addr = eth_validate_addr, | 204 | .ndo_validate_addr = eth_validate_addr, |
205 | .ndo_set_mac_address = eth_mac_addr, | 205 | .ndo_set_mac_address = eth_mac_addr, |
206 | .ndo_change_mtu = eth_change_mtu, | 206 | .ndo_change_mtu = eth_change_mtu, |
207 | #ifdef CONFIG_NET_POLL_CONTROLLER | 207 | #ifdef CONFIG_NET_POLL_CONTROLLER |
208 | .ndo_poll_controller = ei_poll, | 208 | .ndo_poll_controller = __ei_poll, |
209 | #endif | 209 | #endif |
210 | }; | 210 | }; |
211 | 211 | ||
@@ -637,7 +637,7 @@ int init_module(void) | |||
637 | int err; | 637 | int err; |
638 | 638 | ||
639 | for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { | 639 | for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { |
640 | struct net_device *dev = alloc_ei_netdev(); | 640 | struct net_device *dev = ____alloc_ei_netdev(0); |
641 | if (!dev) | 641 | if (!dev) |
642 | break; | 642 | break; |
643 | if (io[this_dev]) { | 643 | if (io[this_dev]) { |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 2ef2f9cdefa6..56d049a472da 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -34,6 +34,10 @@ const char pch_driver_version[] = DRV_VERSION; | |||
34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 | 34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 |
35 | #define PCH_GBE_PCI_BAR 1 | 35 | #define PCH_GBE_PCI_BAR 1 |
36 | 36 | ||
37 | /* Macros for ML7223 */ | ||
38 | #define PCI_VENDOR_ID_ROHM 0x10db | ||
39 | #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 | ||
40 | |||
37 | #define PCH_GBE_TX_WEIGHT 64 | 41 | #define PCH_GBE_TX_WEIGHT 64 |
38 | #define PCH_GBE_RX_WEIGHT 64 | 42 | #define PCH_GBE_RX_WEIGHT 64 |
39 | #define PCH_GBE_RX_BUFFER_WRITE 16 | 43 | #define PCH_GBE_RX_BUFFER_WRITE 16 |
@@ -43,8 +47,7 @@ const char pch_driver_version[] = DRV_VERSION; | |||
43 | 47 | ||
44 | #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ | 48 | #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ |
45 | PCH_GBE_CHIP_TYPE_INTERNAL | \ | 49 | PCH_GBE_CHIP_TYPE_INTERNAL | \ |
46 | PCH_GBE_RGMII_MODE_RGMII | \ | 50 | PCH_GBE_RGMII_MODE_RGMII \ |
47 | PCH_GBE_CRS_SEL \ | ||
48 | ) | 51 | ) |
49 | 52 | ||
50 | /* Ethertype field values */ | 53 | /* Ethertype field values */ |
@@ -1494,12 +1497,11 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1494 | /* Write meta data of skb */ | 1497 | /* Write meta data of skb */ |
1495 | skb_put(skb, length); | 1498 | skb_put(skb, length); |
1496 | skb->protocol = eth_type_trans(skb, netdev); | 1499 | skb->protocol = eth_type_trans(skb, netdev); |
1497 | if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) == | 1500 | if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) |
1498 | PCH_GBE_RXD_ACC_STAT_TCPIPOK) { | ||
1499 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1500 | } else { | ||
1501 | skb->ip_summed = CHECKSUM_NONE; | 1501 | skb->ip_summed = CHECKSUM_NONE; |
1502 | } | 1502 | else |
1503 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1504 | |||
1503 | napi_gro_receive(&adapter->napi, skb); | 1505 | napi_gro_receive(&adapter->napi, skb); |
1504 | (*work_done)++; | 1506 | (*work_done)++; |
1505 | pr_debug("Receive skb->ip_summed: %d length: %d\n", | 1507 | pr_debug("Receive skb->ip_summed: %d length: %d\n", |
@@ -2420,6 +2422,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { | |||
2420 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), | 2422 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), |
2421 | .class_mask = (0xFFFF00) | 2423 | .class_mask = (0xFFFF00) |
2422 | }, | 2424 | }, |
2425 | {.vendor = PCI_VENDOR_ID_ROHM, | ||
2426 | .device = PCI_DEVICE_ID_ROHM_ML7223_GBE, | ||
2427 | .subvendor = PCI_ANY_ID, | ||
2428 | .subdevice = PCI_ANY_ID, | ||
2429 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), | ||
2430 | .class_mask = (0xFFFF00) | ||
2431 | }, | ||
2423 | /* required last entry */ | 2432 | /* required last entry */ |
2424 | {0} | 2433 | {0} |
2425 | }; | 2434 | }; |
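The pch_gbe hunk above adds a class-filtered entry for the ROHM ML7223 GbE to the PCI ID table. A hedged sketch of such an entry in isolation follows; the vendor/device values are the ones quoted in the patch, while the table name is hypothetical and the surrounding driver table is omitted.

#include <linux/module.h>
#include <linux/pci.h>

/* Sketch only: one class-filtered PCI ID entry plus the terminator */
static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ .vendor     = 0x10db,				/* PCI_VENDOR_ID_ROHM */
	  .device     = 0x8013,				/* ML7223 GbE */
	  .subvendor  = PCI_ANY_ID,
	  .subdevice  = PCI_ANY_ID,
	  .class      = PCI_CLASS_NETWORK_ETHERNET << 8,
	  .class_mask = 0xFFFF00 },			/* match class/subclass, ignore prog-if */
	{ 0 }						/* required last entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);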
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index d98479030ef2..3dd45ed61f0a 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c | |||
@@ -50,6 +50,20 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | |||
50 | return &nic_data->mcdi; | 50 | return &nic_data->mcdi; |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline void | ||
54 | efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) | ||
55 | { | ||
56 | struct siena_nic_data *nic_data = efx->nic_data; | ||
57 | value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); | ||
58 | } | ||
59 | |||
60 | static inline void | ||
61 | efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) | ||
62 | { | ||
63 | struct siena_nic_data *nic_data = efx->nic_data; | ||
64 | __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); | ||
65 | } | ||
66 | |||
53 | void efx_mcdi_init(struct efx_nic *efx) | 67 | void efx_mcdi_init(struct efx_nic *efx) |
54 | { | 68 | { |
55 | struct efx_mcdi_iface *mcdi; | 69 | struct efx_mcdi_iface *mcdi; |
@@ -70,8 +84,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | |||
70 | const u8 *inbuf, size_t inlen) | 84 | const u8 *inbuf, size_t inlen) |
71 | { | 85 | { |
72 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 86 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
73 | unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | 87 | unsigned pdu = MCDI_PDU(efx); |
74 | unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); | 88 | unsigned doorbell = MCDI_DOORBELL(efx); |
75 | unsigned int i; | 89 | unsigned int i; |
76 | efx_dword_t hdr; | 90 | efx_dword_t hdr; |
77 | u32 xflags, seqno; | 91 | u32 xflags, seqno; |
@@ -92,30 +106,28 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | |||
92 | MCDI_HEADER_SEQ, seqno, | 106 | MCDI_HEADER_SEQ, seqno, |
93 | MCDI_HEADER_XFLAGS, xflags); | 107 | MCDI_HEADER_XFLAGS, xflags); |
94 | 108 | ||
95 | efx_writed(efx, &hdr, pdu); | 109 | efx_mcdi_writed(efx, &hdr, pdu); |
96 | 110 | ||
97 | for (i = 0; i < inlen; i += 4) { | 111 | for (i = 0; i < inlen; i += 4) |
98 | _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); | 112 | efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), |
99 | /* use wmb() within loop to inhibit write combining */ | 113 | pdu + 4 + i); |
100 | wmb(); | ||
101 | } | ||
102 | 114 | ||
103 | /* ring the doorbell with a distinctive value */ | 115 | /* ring the doorbell with a distinctive value */ |
104 | _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); | 116 | EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); |
105 | wmb(); | 117 | efx_mcdi_writed(efx, &hdr, doorbell); |
106 | } | 118 | } |
107 | 119 | ||
108 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) | 120 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) |
109 | { | 121 | { |
110 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
111 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | 123 | unsigned int pdu = MCDI_PDU(efx); |
112 | int i; | 124 | int i; |
113 | 125 | ||
114 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | 126 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); |
115 | BUG_ON(outlen & 3 || outlen >= 0x100); | 127 | BUG_ON(outlen & 3 || outlen >= 0x100); |
116 | 128 | ||
117 | for (i = 0; i < outlen; i += 4) | 129 | for (i = 0; i < outlen; i += 4) |
118 | *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); | 130 | efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); |
119 | } | 131 | } |
120 | 132 | ||
121 | static int efx_mcdi_poll(struct efx_nic *efx) | 133 | static int efx_mcdi_poll(struct efx_nic *efx) |
@@ -123,7 +135,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
123 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 135 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
124 | unsigned int time, finish; | 136 | unsigned int time, finish; |
125 | unsigned int respseq, respcmd, error; | 137 | unsigned int respseq, respcmd, error; |
126 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | 138 | unsigned int pdu = MCDI_PDU(efx); |
127 | unsigned int rc, spins; | 139 | unsigned int rc, spins; |
128 | efx_dword_t reg; | 140 | efx_dword_t reg; |
129 | 141 | ||
@@ -149,8 +161,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
149 | 161 | ||
150 | time = get_seconds(); | 162 | time = get_seconds(); |
151 | 163 | ||
152 | rmb(); | 164 | efx_mcdi_readd(efx, ®, pdu); |
153 | efx_readd(efx, &reg, pdu); | ||
154 | 165 | ||
155 | /* All 1's indicates that shared memory is in reset (and is | 166 | /* All 1's indicates that shared memory is in reset (and is |
156 | * not a valid header). Wait for it to come out reset before | 167 | * not a valid header). Wait for it to come out reset before |
@@ -177,7 +188,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
177 | respseq, mcdi->seqno); | 188 | respseq, mcdi->seqno); |
178 | rc = EIO; | 189 | rc = EIO; |
179 | } else if (error) { | 190 | } else if (error) { |
180 | efx_readd(efx, &reg, pdu + 4); | 191 | efx_mcdi_readd(efx, &reg, pdu + 4); |
181 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { | 192 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { |
182 | #define TRANSLATE_ERROR(name) \ | 193 | #define TRANSLATE_ERROR(name) \ |
183 | case MC_CMD_ERR_ ## name: \ | 194 | case MC_CMD_ERR_ ## name: \ |
@@ -211,21 +222,21 @@ out: | |||
211 | /* Test and clear MC-rebooted flag for this port/function */ | 222 | /* Test and clear MC-rebooted flag for this port/function */ |
212 | int efx_mcdi_poll_reboot(struct efx_nic *efx) | 223 | int efx_mcdi_poll_reboot(struct efx_nic *efx) |
213 | { | 224 | { |
214 | unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); | 225 | unsigned int addr = MCDI_REBOOT_FLAG(efx); |
215 | efx_dword_t reg; | 226 | efx_dword_t reg; |
216 | uint32_t value; | 227 | uint32_t value; |
217 | 228 | ||
218 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 229 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
219 | return false; | 230 | return false; |
220 | 231 | ||
221 | efx_readd(efx, &reg, addr); | 232 | efx_mcdi_readd(efx, &reg, addr); |
222 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); | 233 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); |
223 | 234 | ||
224 | if (value == 0) | 235 | if (value == 0) |
225 | return 0; | 236 | return 0; |
226 | 237 | ||
227 | EFX_ZERO_DWORD(reg); | 238 | EFX_ZERO_DWORD(reg); |
228 | efx_writed(efx, &reg, addr); | 239 | efx_mcdi_writed(efx, &reg, addr); |
229 | 240 | ||
230 | if (value == MC_STATUS_DWORD_ASSERT) | 241 | if (value == MC_STATUS_DWORD_ASSERT) |
231 | return -EINTR; | 242 | return -EINTR; |
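The MCDI rework above stops addressing the MC_TREG_SMEM window through the regular register accessors and instead goes through a separately mapped region using __raw_readl()/__raw_writel(). A minimal sketch of that accessor style, assuming the mapping was obtained with ioremap_nocache() as in the siena probe change further down; the example_* names are hypothetical.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical wrappers around a separately ioremap()'d MMIO window,
 * in the spirit of efx_mcdi_readd()/efx_mcdi_writed() above. */
struct example_nic {
	void __iomem *smem;			/* from ioremap_nocache() */
};

static inline u32 example_smem_read(struct example_nic *nic, unsigned int reg)
{
	return __raw_readl(nic->smem + reg);	/* raw: no byte swap, no barrier */
}

static inline void example_smem_write(struct example_nic *nic, u32 value,
				      unsigned int reg)
{
	__raw_writel(value, nic->smem + reg);
}

The raw variants perform no byte swapping and imply no barriers, so any ordering the protocol requires stays with the caller.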
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index 10f1cb79c147..9b29a8d7c449 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c | |||
@@ -1937,6 +1937,13 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) | |||
1937 | 1937 | ||
1938 | size = min_t(size_t, table->step, 16); | 1938 | size = min_t(size_t, table->step, 16); |
1939 | 1939 | ||
1940 | if (table->offset >= efx->type->mem_map_size) { | ||
1941 | /* No longer mapped; return dummy data */ | ||
1942 | memcpy(buf, "\xde\xc0\xad\xde", 4); | ||
1943 | buf += table->rows * size; | ||
1944 | continue; | ||
1945 | } | ||
1946 | |||
1940 | for (i = 0; i < table->rows; i++) { | 1947 | for (i = 0; i < table->rows; i++) { |
1941 | switch (table->step) { | 1948 | switch (table->step) { |
1942 | case 4: /* 32-bit register or SRAM */ | 1949 | case 4: /* 32-bit register or SRAM */ |
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index a42db6e35be3..d91701abd331 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h | |||
@@ -143,10 +143,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) | |||
143 | /** | 143 | /** |
144 | * struct siena_nic_data - Siena NIC state | 144 | * struct siena_nic_data - Siena NIC state |
145 | * @mcdi: Management-Controller-to-Driver Interface | 145 | * @mcdi: Management-Controller-to-Driver Interface |
146 | * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. | ||
146 | * @wol_filter_id: Wake-on-LAN packet filter id | 147 | * @wol_filter_id: Wake-on-LAN packet filter id |
147 | */ | 148 | */ |
148 | struct siena_nic_data { | 149 | struct siena_nic_data { |
149 | struct efx_mcdi_iface mcdi; | 150 | struct efx_mcdi_iface mcdi; |
151 | void __iomem *mcdi_smem; | ||
150 | int wol_filter_id; | 152 | int wol_filter_id; |
151 | }; | 153 | }; |
152 | 154 | ||
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index e4dd8986b1fe..837869b71db9 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c | |||
@@ -220,12 +220,26 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
220 | efx_reado(efx, &reg, FR_AZ_CS_DEBUG); | 220 | efx_reado(efx, &reg, FR_AZ_CS_DEBUG); |
221 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; | 221 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; |
222 | 222 | ||
223 | /* Initialise MCDI */ | ||
224 | nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + | ||
225 | FR_CZ_MC_TREG_SMEM, | ||
226 | FR_CZ_MC_TREG_SMEM_STEP * | ||
227 | FR_CZ_MC_TREG_SMEM_ROWS); | ||
228 | if (!nic_data->mcdi_smem) { | ||
229 | netif_err(efx, probe, efx->net_dev, | ||
230 | "could not map MCDI at %llx+%x\n", | ||
231 | (unsigned long long)efx->membase_phys + | ||
232 | FR_CZ_MC_TREG_SMEM, | ||
233 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); | ||
234 | rc = -ENOMEM; | ||
235 | goto fail1; | ||
236 | } | ||
223 | efx_mcdi_init(efx); | 237 | efx_mcdi_init(efx); |
224 | 238 | ||
225 | /* Recover from a failed assertion before probing */ | 239 | /* Recover from a failed assertion before probing */ |
226 | rc = efx_mcdi_handle_assertion(efx); | 240 | rc = efx_mcdi_handle_assertion(efx); |
227 | if (rc) | 241 | if (rc) |
228 | goto fail1; | 242 | goto fail2; |
229 | 243 | ||
230 | /* Let the BMC know that the driver is now in charge of link and | 244 | /* Let the BMC know that the driver is now in charge of link and |
231 | * filter settings. We must do this before we reset the NIC */ | 245 | * filter settings. We must do this before we reset the NIC */ |
@@ -280,6 +294,7 @@ fail4: | |||
280 | fail3: | 294 | fail3: |
281 | efx_mcdi_drv_attach(efx, false, NULL); | 295 | efx_mcdi_drv_attach(efx, false, NULL); |
282 | fail2: | 296 | fail2: |
297 | iounmap(nic_data->mcdi_smem); | ||
283 | fail1: | 298 | fail1: |
284 | kfree(efx->nic_data); | 299 | kfree(efx->nic_data); |
285 | return rc; | 300 | return rc; |
@@ -359,6 +374,8 @@ static int siena_init_nic(struct efx_nic *efx) | |||
359 | 374 | ||
360 | static void siena_remove_nic(struct efx_nic *efx) | 375 | static void siena_remove_nic(struct efx_nic *efx) |
361 | { | 376 | { |
377 | struct siena_nic_data *nic_data = efx->nic_data; | ||
378 | |||
362 | efx_nic_free_buffer(efx, &efx->irq_status); | 379 | efx_nic_free_buffer(efx, &efx->irq_status); |
363 | 380 | ||
364 | siena_reset_hw(efx, RESET_TYPE_ALL); | 381 | siena_reset_hw(efx, RESET_TYPE_ALL); |
@@ -368,7 +385,8 @@ static void siena_remove_nic(struct efx_nic *efx) | |||
368 | efx_mcdi_drv_attach(efx, false, NULL); | 385 | efx_mcdi_drv_attach(efx, false, NULL); |
369 | 386 | ||
370 | /* Tear down the private nic state */ | 387 | /* Tear down the private nic state */ |
371 | kfree(efx->nic_data); | 388 | iounmap(nic_data->mcdi_smem); |
389 | kfree(nic_data); | ||
372 | efx->nic_data = NULL; | 390 | efx->nic_data = NULL; |
373 | } | 391 | } |
374 | 392 | ||
@@ -606,8 +624,7 @@ struct efx_nic_type siena_a0_nic_type = { | |||
606 | .default_mac_ops = &efx_mcdi_mac_operations, | 624 | .default_mac_ops = &efx_mcdi_mac_operations, |
607 | 625 | ||
608 | .revision = EFX_REV_SIENA_A0, | 626 | .revision = EFX_REV_SIENA_A0, |
609 | .mem_map_size = (FR_CZ_MC_TREG_SMEM + | 627 | .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ |
610 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), | ||
611 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, | 628 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, |
612 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, | 629 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, |
613 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, | 630 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, |
diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 86cbb9ea2f26..8ec1a9a0bb9a 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c | |||
@@ -853,7 +853,9 @@ static int slip_open(struct tty_struct *tty) | |||
853 | /* Done. We have linked the TTY line to a channel. */ | 853 | /* Done. We have linked the TTY line to a channel. */ |
854 | rtnl_unlock(); | 854 | rtnl_unlock(); |
855 | tty->receive_room = 65536; /* We don't flow control */ | 855 | tty->receive_room = 65536; /* We don't flow control */ |
856 | return sl->dev->base_addr; | 856 | |
857 | /* TTY layer expects 0 on success */ | ||
858 | return 0; | ||
857 | 859 | ||
858 | err_free_bufs: | 860 | err_free_bufs: |
859 | sl_free_bufs(sl); | 861 | sl_free_bufs(sl); |
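The slip change reflects the fact that a tty line-discipline open() hook must report success as 0; returning the device's base address only happened to work when that address was zero. A short sketch of the convention, with a hypothetical setup helper:

#include <linux/tty.h>

static int example_setup(struct tty_struct *tty);	/* hypothetical channel setup */

/* Sketch of an ldisc open() hook: the tty core treats any non-zero
 * return value as failure, so success must be exactly 0. */
static int example_ldisc_open(struct tty_struct *tty)
{
	int err;

	err = example_setup(tty);
	if (err)
		return err;			/* negative errno on failure */

	tty->receive_room = 65536;		/* we don't flow control */
	return 0;				/* not a driver-specific value such as base_addr */
}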
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index eb4f59fb01e9..bff2f7999ff0 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c | |||
@@ -3237,15 +3237,18 @@ static void happy_meal_pci_exit(void) | |||
3237 | #endif | 3237 | #endif |
3238 | 3238 | ||
3239 | #ifdef CONFIG_SBUS | 3239 | #ifdef CONFIG_SBUS |
3240 | static const struct of_device_id hme_sbus_match[]; | ||
3240 | static int __devinit hme_sbus_probe(struct platform_device *op) | 3241 | static int __devinit hme_sbus_probe(struct platform_device *op) |
3241 | { | 3242 | { |
3243 | const struct of_device_id *match; | ||
3242 | struct device_node *dp = op->dev.of_node; | 3244 | struct device_node *dp = op->dev.of_node; |
3243 | const char *model = of_get_property(dp, "model", NULL); | 3245 | const char *model = of_get_property(dp, "model", NULL); |
3244 | int is_qfe; | 3246 | int is_qfe; |
3245 | 3247 | ||
3246 | if (!op->dev.of_match) | 3248 | match = of_match_device(hme_sbus_match, &op->dev); |
3249 | if (!match) | ||
3247 | return -EINVAL; | 3250 | return -EINVAL; |
3248 | is_qfe = (op->dev.of_match->data != NULL); | 3251 | is_qfe = (match->data != NULL); |
3249 | 3252 | ||
3250 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) | 3253 | if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) |
3251 | is_qfe = 1; | 3254 | is_qfe = 1; |
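The sunhme hunk replaces the removed op->dev.of_match pointer with an explicit of_match_device() lookup against the driver's match table. A short sketch of that lookup with hypothetical names; the forward declaration mirrors the one the patch adds.

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id example_of_match[];	/* defined with the driver struct */

static int example_setup(struct platform_device *op, const void *data);	/* hypothetical */

static int __devinit example_probe(struct platform_device *op)
{
	const struct of_device_id *match;

	match = of_match_device(example_of_match, &op->dev);
	if (!match)
		return -EINVAL;			/* no compatible entry matched */

	/* match->data carries the per-compatible private data that the
	 * old op->dev.of_match->data access used to provide */
	return example_setup(op, match->data);
}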
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index a301479ecc60..c924ea2bce07 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -567,7 +567,7 @@ static const struct usb_device_id products [] = { | |||
567 | { | 567 | { |
568 | USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, | 568 | USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, |
569 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | 569 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), |
570 | .driver_info = 0, | 570 | .driver_info = (unsigned long)&wwan_info, |
571 | }, | 571 | }, |
572 | 572 | ||
573 | /* | 573 | /* |
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 7d42f9a2c068..81126ff85e05 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c | |||
@@ -65,6 +65,7 @@ | |||
65 | #define IPHETH_USBINTF_PROTO 1 | 65 | #define IPHETH_USBINTF_PROTO 1 |
66 | 66 | ||
67 | #define IPHETH_BUF_SIZE 1516 | 67 | #define IPHETH_BUF_SIZE 1516 |
68 | #define IPHETH_IP_ALIGN 2 /* padding at front of URB */ | ||
68 | #define IPHETH_TX_TIMEOUT (5 * HZ) | 69 | #define IPHETH_TX_TIMEOUT (5 * HZ) |
69 | 70 | ||
70 | #define IPHETH_INTFNUM 2 | 71 | #define IPHETH_INTFNUM 2 |
@@ -202,18 +203,21 @@ static void ipheth_rcvbulk_callback(struct urb *urb) | |||
202 | return; | 203 | return; |
203 | } | 204 | } |
204 | 205 | ||
205 | len = urb->actual_length; | 206 | if (urb->actual_length <= IPHETH_IP_ALIGN) { |
206 | buf = urb->transfer_buffer; | 207 | dev->net->stats.rx_length_errors++; |
208 | return; | ||
209 | } | ||
210 | len = urb->actual_length - IPHETH_IP_ALIGN; | ||
211 | buf = urb->transfer_buffer + IPHETH_IP_ALIGN; | ||
207 | 212 | ||
208 | skb = dev_alloc_skb(NET_IP_ALIGN + len); | 213 | skb = dev_alloc_skb(len); |
209 | if (!skb) { | 214 | if (!skb) { |
210 | err("%s: dev_alloc_skb: -ENOMEM", __func__); | 215 | err("%s: dev_alloc_skb: -ENOMEM", __func__); |
211 | dev->net->stats.rx_dropped++; | 216 | dev->net->stats.rx_dropped++; |
212 | return; | 217 | return; |
213 | } | 218 | } |
214 | 219 | ||
215 | skb_reserve(skb, NET_IP_ALIGN); | 220 | memcpy(skb_put(skb, len), buf, len); |
216 | memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN); | ||
217 | skb->dev = dev->net; | 221 | skb->dev = dev->net; |
218 | skb->protocol = eth_type_trans(skb, dev->net); | 222 | skb->protocol = eth_type_trans(skb, dev->net); |
219 | 223 | ||
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 009bba3d753e..9ab439d144ed 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -645,6 +645,7 @@ int usbnet_stop (struct net_device *net) | |||
645 | struct driver_info *info = dev->driver_info; | 645 | struct driver_info *info = dev->driver_info; |
646 | int retval; | 646 | int retval; |
647 | 647 | ||
648 | clear_bit(EVENT_DEV_OPEN, &dev->flags); | ||
648 | netif_stop_queue (net); | 649 | netif_stop_queue (net); |
649 | 650 | ||
650 | netif_info(dev, ifdown, dev->net, | 651 | netif_info(dev, ifdown, dev->net, |
@@ -1524,9 +1525,12 @@ int usbnet_resume (struct usb_interface *intf) | |||
1524 | smp_mb(); | 1525 | smp_mb(); |
1525 | clear_bit(EVENT_DEV_ASLEEP, &dev->flags); | 1526 | clear_bit(EVENT_DEV_ASLEEP, &dev->flags); |
1526 | spin_unlock_irq(&dev->txq.lock); | 1527 | spin_unlock_irq(&dev->txq.lock); |
1527 | if (!(dev->txq.qlen >= TX_QLEN(dev))) | 1528 | |
1528 | netif_start_queue(dev->net); | 1529 | if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { |
1529 | tasklet_schedule (&dev->bh); | 1530 | if (!(dev->txq.qlen >= TX_QLEN(dev))) |
1531 | netif_start_queue(dev->net); | ||
1532 | tasklet_schedule (&dev->bh); | ||
1533 | } | ||
1530 | } | 1534 | } |
1531 | return 0; | 1535 | return 0; |
1532 | } | 1536 | } |
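The usbnet hunks record whether the interface is administratively open in a flag bit, and resume only restarts the TX queue and the bottom half when that bit is set. A minimal sketch of the guard, with hypothetical structure and flag names:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

#define EXAMPLE_DEV_OPEN	0		/* bit number in flags */

struct example_dev {
	unsigned long flags;
	struct net_device *net;
	struct tasklet_struct bh;
};

static int example_stop(struct example_dev *dev)
{
	clear_bit(EXAMPLE_DEV_OPEN, &dev->flags);	/* mark closed first */
	netif_stop_queue(dev->net);
	return 0;
}

static int example_resume(struct example_dev *dev)
{
	if (test_bit(EXAMPLE_DEV_OPEN, &dev->flags)) {
		netif_start_queue(dev->net);	/* only if userspace had it up */
		tasklet_schedule(&dev->bh);
	}
	return 0;
}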
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 0d47c3a05307..c16ed961153a 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -178,6 +178,7 @@ static void | |||
178 | vmxnet3_process_events(struct vmxnet3_adapter *adapter) | 178 | vmxnet3_process_events(struct vmxnet3_adapter *adapter) |
179 | { | 179 | { |
180 | int i; | 180 | int i; |
181 | unsigned long flags; | ||
181 | u32 events = le32_to_cpu(adapter->shared->ecr); | 182 | u32 events = le32_to_cpu(adapter->shared->ecr); |
182 | if (!events) | 183 | if (!events) |
183 | return; | 184 | return; |
@@ -190,10 +191,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) | |||
190 | 191 | ||
191 | /* Check if there is an error on xmit/recv queues */ | 192 | /* Check if there is an error on xmit/recv queues */ |
192 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { | 193 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { |
193 | spin_lock(&adapter->cmd_lock); | 194 | spin_lock_irqsave(&adapter->cmd_lock, flags); |
194 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 195 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
195 | VMXNET3_CMD_GET_QUEUE_STATUS); | 196 | VMXNET3_CMD_GET_QUEUE_STATUS); |
196 | spin_unlock(&adapter->cmd_lock); | 197 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); |
197 | 198 | ||
198 | for (i = 0; i < adapter->num_tx_queues; i++) | 199 | for (i = 0; i < adapter->num_tx_queues; i++) |
199 | if (adapter->tqd_start[i].status.stopped) | 200 | if (adapter->tqd_start[i].status.stopped) |
@@ -2733,13 +2734,14 @@ static void | |||
2733 | vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) | 2734 | vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) |
2734 | { | 2735 | { |
2735 | u32 cfg; | 2736 | u32 cfg; |
2737 | unsigned long flags; | ||
2736 | 2738 | ||
2737 | /* intr settings */ | 2739 | /* intr settings */ |
2738 | spin_lock(&adapter->cmd_lock); | 2740 | spin_lock_irqsave(&adapter->cmd_lock, flags); |
2739 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2741 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2740 | VMXNET3_CMD_GET_CONF_INTR); | 2742 | VMXNET3_CMD_GET_CONF_INTR); |
2741 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 2743 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
2742 | spin_unlock(&adapter->cmd_lock); | 2744 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); |
2743 | adapter->intr.type = cfg & 0x3; | 2745 | adapter->intr.type = cfg & 0x3; |
2744 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; | 2746 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; |
2745 | 2747 | ||
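The vmxnet3 hunks convert cmd_lock from spin_lock() to spin_lock_irqsave(); the same lock is also taken from interrupt context, so holding it with interrupts enabled risks a deadlock if the interrupt fires on the same CPU. A small sketch of the pattern around a command register, with a hypothetical lock and register:

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_cmd_lock);

/* Sketch: issue a command and read back its status while the lock is
 * held with local interrupts disabled. */
static u32 example_issue_cmd(void __iomem *cmd_reg, u32 cmd)
{
	unsigned long flags;
	u32 ret;

	spin_lock_irqsave(&example_cmd_lock, flags);
	writel(cmd, cmd_reg);
	ret = readl(cmd_reg);
	spin_unlock_irqrestore(&example_cmd_lock, flags);

	return ret;
}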
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 51f2ef142a5b..976467253d20 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c | |||
@@ -311,6 +311,9 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) | |||
311 | /* toggle the LRO feature*/ | 311 | /* toggle the LRO feature*/ |
312 | netdev->features ^= NETIF_F_LRO; | 312 | netdev->features ^= NETIF_F_LRO; |
313 | 313 | ||
314 | /* Update private LRO flag */ | ||
315 | adapter->lro = lro_requested; | ||
316 | |||
314 | /* update hardware LRO capability accordingly */ | 317 | /* update hardware LRO capability accordingly */ |
315 | if (lro_requested) | 318 | if (lro_requested) |
316 | adapter->shared->devRead.misc.uptFeatures |= | 319 | adapter->shared->devRead.misc.uptFeatures |= |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 17d04ff8d678..1482fa650833 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -2141,6 +2141,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) | |||
2141 | static void ath9k_flush(struct ieee80211_hw *hw, bool drop) | 2141 | static void ath9k_flush(struct ieee80211_hw *hw, bool drop) |
2142 | { | 2142 | { |
2143 | struct ath_softc *sc = hw->priv; | 2143 | struct ath_softc *sc = hw->priv; |
2144 | struct ath_hw *ah = sc->sc_ah; | ||
2145 | struct ath_common *common = ath9k_hw_common(ah); | ||
2144 | int timeout = 200; /* ms */ | 2146 | int timeout = 200; /* ms */ |
2145 | int i, j; | 2147 | int i, j; |
2146 | 2148 | ||
@@ -2149,6 +2151,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) | |||
2149 | 2151 | ||
2150 | cancel_delayed_work_sync(&sc->tx_complete_work); | 2152 | cancel_delayed_work_sync(&sc->tx_complete_work); |
2151 | 2153 | ||
2154 | if (sc->sc_flags & SC_OP_INVALID) { | ||
2155 | ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); | ||
2156 | mutex_unlock(&sc->mutex); | ||
2157 | return; | ||
2158 | } | ||
2159 | |||
2152 | if (drop) | 2160 | if (drop) |
2153 | timeout = 1; | 2161 | timeout = 1; |
2154 | 2162 | ||
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index c1511b14b239..42db0fc8b921 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c | |||
@@ -2155,6 +2155,13 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed) | |||
2155 | goto set_ch_out; | 2155 | goto set_ch_out; |
2156 | } | 2156 | } |
2157 | 2157 | ||
2158 | if (priv->iw_mode == NL80211_IFTYPE_ADHOC && | ||
2159 | !iwl_legacy_is_channel_ibss(ch_info)) { | ||
2160 | IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n"); | ||
2161 | ret = -EINVAL; | ||
2162 | goto set_ch_out; | ||
2163 | } | ||
2164 | |||
2158 | spin_lock_irqsave(&priv->lock, flags); | 2165 | spin_lock_irqsave(&priv->lock, flags); |
2159 | 2166 | ||
2160 | for_each_context(priv, ctx) { | 2167 | for_each_context(priv, ctx) { |
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h index 9ee849d669f3..f43ac1eb9014 100644 --- a/drivers/net/wireless/iwlegacy/iwl-dev.h +++ b/drivers/net/wireless/iwlegacy/iwl-dev.h | |||
@@ -1411,6 +1411,12 @@ iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch) | |||
1411 | return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; | 1411 | return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; |
1412 | } | 1412 | } |
1413 | 1413 | ||
1414 | static inline int | ||
1415 | iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch) | ||
1416 | { | ||
1417 | return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0; | ||
1418 | } | ||
1419 | |||
1414 | static inline void | 1420 | static inline void |
1415 | __iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) | 1421 | __iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) |
1416 | { | 1422 | { |
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 7e8a658b7670..f3ac62431a30 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c | |||
@@ -1339,8 +1339,8 @@ int lbs_execute_next_command(struct lbs_private *priv) | |||
1339 | cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { | 1339 | cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { |
1340 | lbs_deb_host( | 1340 | lbs_deb_host( |
1341 | "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); | 1341 | "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); |
1342 | list_del(&cmdnode->list); | ||
1343 | spin_lock_irqsave(&priv->driver_lock, flags); | 1342 | spin_lock_irqsave(&priv->driver_lock, flags); |
1343 | list_del(&cmdnode->list); | ||
1344 | lbs_complete_command(priv, cmdnode, 0); | 1344 | lbs_complete_command(priv, cmdnode, 0); |
1345 | spin_unlock_irqrestore(&priv->driver_lock, flags); | 1345 | spin_unlock_irqrestore(&priv->driver_lock, flags); |
1346 | 1346 | ||
@@ -1352,8 +1352,8 @@ int lbs_execute_next_command(struct lbs_private *priv) | |||
1352 | (priv->psstate == PS_STATE_PRE_SLEEP)) { | 1352 | (priv->psstate == PS_STATE_PRE_SLEEP)) { |
1353 | lbs_deb_host( | 1353 | lbs_deb_host( |
1354 | "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); | 1354 | "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); |
1355 | list_del(&cmdnode->list); | ||
1356 | spin_lock_irqsave(&priv->driver_lock, flags); | 1355 | spin_lock_irqsave(&priv->driver_lock, flags); |
1356 | list_del(&cmdnode->list); | ||
1357 | lbs_complete_command(priv, cmdnode, 0); | 1357 | lbs_complete_command(priv, cmdnode, 0); |
1358 | spin_unlock_irqrestore(&priv->driver_lock, flags); | 1358 | spin_unlock_irqrestore(&priv->driver_lock, flags); |
1359 | priv->needtowakeup = 1; | 1359 | priv->needtowakeup = 1; |
@@ -1366,7 +1366,9 @@ int lbs_execute_next_command(struct lbs_private *priv) | |||
1366 | "EXEC_NEXT_CMD: sending EXIT_PS\n"); | 1366 | "EXEC_NEXT_CMD: sending EXIT_PS\n"); |
1367 | } | 1367 | } |
1368 | } | 1368 | } |
1369 | spin_lock_irqsave(&priv->driver_lock, flags); | ||
1369 | list_del(&cmdnode->list); | 1370 | list_del(&cmdnode->list); |
1371 | spin_unlock_irqrestore(&priv->driver_lock, flags); | ||
1370 | lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", | 1372 | lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", |
1371 | le16_to_cpu(cmd->command)); | 1373 | le16_to_cpu(cmd->command)); |
1372 | lbs_submit_command(priv, cmdnode); | 1374 | lbs_submit_command(priv, cmdnode); |
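The libertas hunks move list_del() inside the driver_lock critical section; unlinking a node from a list that another context may be walking has to happen under the lock that protects the list. A minimal sketch with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_cmd {
	struct list_head list;
	/* ... command payload ... */
};

/* Sketch: detach a queued command only while holding the queue lock,
 * so concurrent walkers never observe a half-unlinked node. */
static void example_dequeue(struct example_cmd *cmd, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(lock, flags);
}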
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index b78a38d9172a..8c7c522a056a 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c | |||
@@ -126,7 +126,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, | |||
126 | 126 | ||
127 | board = z->resource.start; | 127 | board = z->resource.start; |
128 | ioaddr = board+cards[i].offset; | 128 | ioaddr = board+cards[i].offset; |
129 | dev = alloc_ei_netdev(); | 129 | dev = ____alloc_ei_netdev(0); |
130 | if (!dev) | 130 | if (!dev) |
131 | return -ENOMEM; | 131 | return -ENOMEM; |
132 | if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { | 132 | if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { |
@@ -146,15 +146,15 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, | |||
146 | static const struct net_device_ops zorro8390_netdev_ops = { | 146 | static const struct net_device_ops zorro8390_netdev_ops = { |
147 | .ndo_open = zorro8390_open, | 147 | .ndo_open = zorro8390_open, |
148 | .ndo_stop = zorro8390_close, | 148 | .ndo_stop = zorro8390_close, |
149 | .ndo_start_xmit = ei_start_xmit, | 149 | .ndo_start_xmit = __ei_start_xmit, |
150 | .ndo_tx_timeout = ei_tx_timeout, | 150 | .ndo_tx_timeout = __ei_tx_timeout, |
151 | .ndo_get_stats = ei_get_stats, | 151 | .ndo_get_stats = __ei_get_stats, |
152 | .ndo_set_multicast_list = ei_set_multicast_list, | 152 | .ndo_set_multicast_list = __ei_set_multicast_list, |
153 | .ndo_validate_addr = eth_validate_addr, | 153 | .ndo_validate_addr = eth_validate_addr, |
154 | .ndo_set_mac_address = eth_mac_addr, | 154 | .ndo_set_mac_address = eth_mac_addr, |
155 | .ndo_change_mtu = eth_change_mtu, | 155 | .ndo_change_mtu = eth_change_mtu, |
156 | #ifdef CONFIG_NET_POLL_CONTROLLER | 156 | #ifdef CONFIG_NET_POLL_CONTROLLER |
157 | .ndo_poll_controller = ei_poll, | 157 | .ndo_poll_controller = __ei_poll, |
158 | #endif | 158 | #endif |
159 | }; | 159 | }; |
160 | 160 | ||
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index d552d2c77844..6af6b628175b 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/syscore_ops.h> | 39 | #include <linux/syscore_ops.h> |
40 | #include <linux/tboot.h> | 40 | #include <linux/tboot.h> |
41 | #include <linux/dmi.h> | 41 | #include <linux/dmi.h> |
42 | #include <linux/pci-ats.h> | ||
42 | #include <asm/cacheflush.h> | 43 | #include <asm/cacheflush.h> |
43 | #include <asm/iommu.h> | 44 | #include <asm/iommu.h> |
44 | #include "pci.h" | 45 | #include "pci.h" |
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 553d8ee55c1c..42fae4776515 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/pci-ats.h> | ||
16 | #include "pci.h" | 17 | #include "pci.h" |
17 | 18 | ||
18 | #define VIRTFN_ID_LEN 16 | 19 | #define VIRTFN_ID_LEN 16 |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index a6ec200fe5ee..4020025f854e 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -250,15 +250,6 @@ struct pci_sriov { | |||
250 | u8 __iomem *mstate; /* VF Migration State Array */ | 250 | u8 __iomem *mstate; /* VF Migration State Array */ |
251 | }; | 251 | }; |
252 | 252 | ||
253 | /* Address Translation Service */ | ||
254 | struct pci_ats { | ||
255 | int pos; /* capability position */ | ||
256 | int stu; /* Smallest Translation Unit */ | ||
257 | int qdep; /* Invalidate Queue Depth */ | ||
258 | int ref_cnt; /* Physical Function reference count */ | ||
259 | unsigned int is_enabled:1; /* Enable bit is set */ | ||
260 | }; | ||
261 | |||
262 | #ifdef CONFIG_PCI_IOV | 253 | #ifdef CONFIG_PCI_IOV |
263 | extern int pci_iov_init(struct pci_dev *dev); | 254 | extern int pci_iov_init(struct pci_dev *dev); |
264 | extern void pci_iov_release(struct pci_dev *dev); | 255 | extern void pci_iov_release(struct pci_dev *dev); |
@@ -269,19 +260,6 @@ extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, | |||
269 | extern void pci_restore_iov_state(struct pci_dev *dev); | 260 | extern void pci_restore_iov_state(struct pci_dev *dev); |
270 | extern int pci_iov_bus_range(struct pci_bus *bus); | 261 | extern int pci_iov_bus_range(struct pci_bus *bus); |
271 | 262 | ||
272 | extern int pci_enable_ats(struct pci_dev *dev, int ps); | ||
273 | extern void pci_disable_ats(struct pci_dev *dev); | ||
274 | extern int pci_ats_queue_depth(struct pci_dev *dev); | ||
275 | /** | ||
276 | * pci_ats_enabled - query the ATS status | ||
277 | * @dev: the PCI device | ||
278 | * | ||
279 | * Returns 1 if ATS capability is enabled, or 0 if not. | ||
280 | */ | ||
281 | static inline int pci_ats_enabled(struct pci_dev *dev) | ||
282 | { | ||
283 | return dev->ats && dev->ats->is_enabled; | ||
284 | } | ||
285 | #else | 263 | #else |
286 | static inline int pci_iov_init(struct pci_dev *dev) | 264 | static inline int pci_iov_init(struct pci_dev *dev) |
287 | { | 265 | { |
@@ -304,21 +282,6 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) | |||
304 | return 0; | 282 | return 0; |
305 | } | 283 | } |
306 | 284 | ||
307 | static inline int pci_enable_ats(struct pci_dev *dev, int ps) | ||
308 | { | ||
309 | return -ENODEV; | ||
310 | } | ||
311 | static inline void pci_disable_ats(struct pci_dev *dev) | ||
312 | { | ||
313 | } | ||
314 | static inline int pci_ats_queue_depth(struct pci_dev *dev) | ||
315 | { | ||
316 | return -ENODEV; | ||
317 | } | ||
318 | static inline int pci_ats_enabled(struct pci_dev *dev) | ||
319 | { | ||
320 | return 0; | ||
321 | } | ||
322 | #endif /* CONFIG_PCI_IOV */ | 285 | #endif /* CONFIG_PCI_IOV */ |
323 | 286 | ||
324 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, | 287 | static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index ebf51ad1b714..a806cb321d2e 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -579,7 +579,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
579 | } | 579 | } |
580 | size0 = calculate_iosize(size, min_size, size1, | 580 | size0 = calculate_iosize(size, min_size, size1, |
581 | resource_size(b_res), 4096); | 581 | resource_size(b_res), 4096); |
582 | size1 = !add_size? size0: | 582 | size1 = (!add_head || (add_head && !add_size)) ? size0 : |
583 | calculate_iosize(size, min_size+add_size, size1, | 583 | calculate_iosize(size, min_size+add_size, size1, |
584 | resource_size(b_res), 4096); | 584 | resource_size(b_res), 4096); |
585 | if (!size0 && !size1) { | 585 | if (!size0 && !size1) { |
@@ -677,7 +677,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
677 | align += aligns[order]; | 677 | align += aligns[order]; |
678 | } | 678 | } |
679 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); | 679 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); |
680 | size1 = !add_size ? size : | 680 | size1 = (!add_head || (add_head && !add_size)) ? size0 : |
681 | calculate_memsize(size, min_size+add_size, 0, | 681 | calculate_memsize(size, min_size+add_size, 0, |
682 | resource_size(b_res), min_align); | 682 | resource_size(b_res), min_align); |
683 | if (!size0 && !size1) { | 683 | if (!size0 && !size1) { |
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index ac2701b22e71..043ee3136e40 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c | |||
@@ -95,6 +95,9 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | |||
95 | else | 95 | else |
96 | table++; | 96 | table++; |
97 | 97 | ||
98 | if (route_port == RIO_INVALID_ROUTE) | ||
99 | route_port = IDT_DEFAULT_ROUTE; | ||
100 | |||
98 | rio_mport_write_config_32(mport, destid, hopcount, | 101 | rio_mport_write_config_32(mport, destid, hopcount, |
99 | LOCAL_RTE_CONF_DESTID_SEL, table); | 102 | LOCAL_RTE_CONF_DESTID_SEL, table); |
100 | 103 | ||
@@ -411,6 +414,12 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) | |||
411 | rdev->rswitch->em_handle = idtg2_em_handler; | 414 | rdev->rswitch->em_handle = idtg2_em_handler; |
412 | rdev->rswitch->sw_sysfs = idtg2_sysfs; | 415 | rdev->rswitch->sw_sysfs = idtg2_sysfs; |
413 | 416 | ||
417 | if (do_enum) { | ||
418 | /* Ensure that default routing is disabled on startup */ | ||
419 | rio_write_config_32(rdev, | ||
420 | RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); | ||
421 | } | ||
422 | |||
414 | return 0; | 423 | return 0; |
415 | } | 424 | } |
416 | 425 | ||
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c index 3a971077e7bf..d06ee2d44b44 100644 --- a/drivers/rapidio/switches/idtcps.c +++ b/drivers/rapidio/switches/idtcps.c | |||
@@ -26,6 +26,9 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, | |||
26 | { | 26 | { |
27 | u32 result; | 27 | u32 result; |
28 | 28 | ||
29 | if (route_port == RIO_INVALID_ROUTE) | ||
30 | route_port = CPS_DEFAULT_ROUTE; | ||
31 | |||
29 | if (table == RIO_GLOBAL_TABLE) { | 32 | if (table == RIO_GLOBAL_TABLE) { |
30 | rio_mport_write_config_32(mport, destid, hopcount, | 33 | rio_mport_write_config_32(mport, destid, hopcount, |
31 | RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); | 34 | RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); |
@@ -130,6 +133,9 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) | |||
130 | /* set TVAL = ~50us */ | 133 | /* set TVAL = ~50us */ |
131 | rio_write_config_32(rdev, | 134 | rio_write_config_32(rdev, |
132 | rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); | 135 | rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); |
136 | /* Ensure that default routing is disabled on startup */ | ||
137 | rio_write_config_32(rdev, | ||
138 | RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); | ||
133 | } | 139 | } |
134 | 140 | ||
135 | return 0; | 141 | return 0; |
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c index 1a62934bfebc..db8b8028988d 100644 --- a/drivers/rapidio/switches/tsi57x.c +++ b/drivers/rapidio/switches/tsi57x.c | |||
@@ -303,6 +303,12 @@ static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum) | |||
303 | rdev->rswitch->em_init = tsi57x_em_init; | 303 | rdev->rswitch->em_init = tsi57x_em_init; |
304 | rdev->rswitch->em_handle = tsi57x_em_handler; | 304 | rdev->rswitch->em_handle = tsi57x_em_handler; |
305 | 305 | ||
306 | if (do_enum) { | ||
307 | /* Ensure that default routing is disabled on startup */ | ||
308 | rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, | ||
309 | RIO_INVALID_ROUTE); | ||
310 | } | ||
311 | |||
306 | return 0; | 312 | return 0; |
307 | } | 313 | } |
308 | 314 | ||
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c index 8d46838dff8a..755e1fe914af 100644 --- a/drivers/rtc/rtc-davinci.c +++ b/drivers/rtc/rtc-davinci.c | |||
@@ -524,6 +524,8 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
524 | goto fail2; | 524 | goto fail2; |
525 | } | 525 | } |
526 | 526 | ||
527 | platform_set_drvdata(pdev, davinci_rtc); | ||
528 | |||
527 | davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, | 529 | davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, |
528 | &davinci_rtc_ops, THIS_MODULE); | 530 | &davinci_rtc_ops, THIS_MODULE); |
529 | if (IS_ERR(davinci_rtc->rtc)) { | 531 | if (IS_ERR(davinci_rtc->rtc)) { |
@@ -553,8 +555,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
553 | 555 | ||
554 | rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); | 556 | rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); |
555 | 557 | ||
556 | platform_set_drvdata(pdev, davinci_rtc); | ||
557 | |||
558 | device_init_wakeup(&pdev->dev, 0); | 558 | device_init_wakeup(&pdev->dev, 0); |
559 | 559 | ||
560 | return 0; | 560 | return 0; |
@@ -562,6 +562,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) | |||
562 | fail4: | 562 | fail4: |
563 | rtc_device_unregister(davinci_rtc->rtc); | 563 | rtc_device_unregister(davinci_rtc->rtc); |
564 | fail3: | 564 | fail3: |
565 | platform_set_drvdata(pdev, NULL); | ||
565 | iounmap(davinci_rtc->base); | 566 | iounmap(davinci_rtc->base); |
566 | fail2: | 567 | fail2: |
567 | release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size); | 568 | release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size); |
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c index 60ce69600828..47e681df31e2 100644 --- a/drivers/rtc/rtc-ds1286.c +++ b/drivers/rtc/rtc-ds1286.c | |||
@@ -355,6 +355,7 @@ static int __devinit ds1286_probe(struct platform_device *pdev) | |||
355 | goto out; | 355 | goto out; |
356 | } | 356 | } |
357 | spin_lock_init(&priv->lock); | 357 | spin_lock_init(&priv->lock); |
358 | platform_set_drvdata(pdev, priv); | ||
358 | rtc = rtc_device_register("ds1286", &pdev->dev, | 359 | rtc = rtc_device_register("ds1286", &pdev->dev, |
359 | &ds1286_ops, THIS_MODULE); | 360 | &ds1286_ops, THIS_MODULE); |
360 | if (IS_ERR(rtc)) { | 361 | if (IS_ERR(rtc)) { |
@@ -362,7 +363,6 @@ static int __devinit ds1286_probe(struct platform_device *pdev) | |||
362 | goto out; | 363 | goto out; |
363 | } | 364 | } |
364 | priv->rtc = rtc; | 365 | priv->rtc = rtc; |
365 | platform_set_drvdata(pdev, priv); | ||
366 | return 0; | 366 | return 0; |
367 | 367 | ||
368 | out: | 368 | out: |
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c index 11ae64dcbf3c..335551d333b2 100644 --- a/drivers/rtc/rtc-ep93xx.c +++ b/drivers/rtc/rtc-ep93xx.c | |||
@@ -151,6 +151,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) | |||
151 | return -ENXIO; | 151 | return -ENXIO; |
152 | 152 | ||
153 | pdev->dev.platform_data = ep93xx_rtc; | 153 | pdev->dev.platform_data = ep93xx_rtc; |
154 | platform_set_drvdata(pdev, rtc); | ||
154 | 155 | ||
155 | rtc = rtc_device_register(pdev->name, | 156 | rtc = rtc_device_register(pdev->name, |
156 | &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); | 157 | &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); |
@@ -159,8 +160,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) | |||
159 | goto exit; | 160 | goto exit; |
160 | } | 161 | } |
161 | 162 | ||
162 | platform_set_drvdata(pdev, rtc); | ||
163 | |||
164 | err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); | 163 | err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); |
165 | if (err) | 164 | if (err) |
166 | goto fail; | 165 | goto fail; |
@@ -168,9 +167,9 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) | |||
168 | return 0; | 167 | return 0; |
169 | 168 | ||
170 | fail: | 169 | fail: |
171 | platform_set_drvdata(pdev, NULL); | ||
172 | rtc_device_unregister(rtc); | 170 | rtc_device_unregister(rtc); |
173 | exit: | 171 | exit: |
172 | platform_set_drvdata(pdev, NULL); | ||
174 | pdev->dev.platform_data = NULL; | 173 | pdev->dev.platform_data = NULL; |
175 | return err; | 174 | return err; |
176 | } | 175 | } |
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 69fe664a2228..eda128fc1d38 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c | |||
@@ -783,6 +783,9 @@ static int m41t80_probe(struct i2c_client *client, | |||
783 | goto exit; | 783 | goto exit; |
784 | } | 784 | } |
785 | 785 | ||
786 | clientdata->features = id->driver_data; | ||
787 | i2c_set_clientdata(client, clientdata); | ||
788 | |||
786 | rtc = rtc_device_register(client->name, &client->dev, | 789 | rtc = rtc_device_register(client->name, &client->dev, |
787 | &m41t80_rtc_ops, THIS_MODULE); | 790 | &m41t80_rtc_ops, THIS_MODULE); |
788 | if (IS_ERR(rtc)) { | 791 | if (IS_ERR(rtc)) { |
@@ -792,8 +795,6 @@ static int m41t80_probe(struct i2c_client *client, | |||
792 | } | 795 | } |
793 | 796 | ||
794 | clientdata->rtc = rtc; | 797 | clientdata->rtc = rtc; |
795 | clientdata->features = id->driver_data; | ||
796 | i2c_set_clientdata(client, clientdata); | ||
797 | 798 | ||
798 | /* Make sure HT (Halt Update) bit is cleared */ | 799 | /* Make sure HT (Halt Update) bit is cleared */ |
799 | rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); | 800 | rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); |
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c index 20494b5edc3c..3bc046f427e0 100644 --- a/drivers/rtc/rtc-max8925.c +++ b/drivers/rtc/rtc-max8925.c | |||
@@ -258,6 +258,8 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) | |||
258 | } | 258 | } |
259 | 259 | ||
260 | dev_set_drvdata(&pdev->dev, info); | 260 | dev_set_drvdata(&pdev->dev, info); |
261 | /* XXX - isn't this redundant? */ | ||
262 | platform_set_drvdata(pdev, info); | ||
261 | 263 | ||
262 | info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, | 264 | info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, |
263 | &max8925_rtc_ops, THIS_MODULE); | 265 | &max8925_rtc_ops, THIS_MODULE); |
@@ -267,10 +269,9 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) | |||
267 | goto out_rtc; | 269 | goto out_rtc; |
268 | } | 270 | } |
269 | 271 | ||
270 | platform_set_drvdata(pdev, info); | ||
271 | |||
272 | return 0; | 272 | return 0; |
273 | out_rtc: | 273 | out_rtc: |
274 | platform_set_drvdata(pdev, NULL); | ||
274 | free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); | 275 | free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); |
275 | out_irq: | 276 | out_irq: |
276 | kfree(info); | 277 | kfree(info); |
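On the XXX question above: in this kernel platform_set_drvdata() is a thin wrapper around dev_set_drvdata() on &pdev->dev, so the two calls store the same pointer. Paraphrased from include/linux/platform_device.h (the exact form may differ by version):

/* platform_set_drvdata() simply forwards to dev_set_drvdata() */
#define platform_get_drvdata(_dev)	 dev_get_drvdata(&(_dev)->dev)
#define platform_set_drvdata(_dev, data) dev_set_drvdata(&(_dev)->dev, (data))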
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c index 3f7bc6b9fefa..2e48aa604273 100644 --- a/drivers/rtc/rtc-max8998.c +++ b/drivers/rtc/rtc-max8998.c | |||
@@ -265,6 +265,8 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) | |||
265 | info->rtc = max8998->rtc; | 265 | info->rtc = max8998->rtc; |
266 | info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; | 266 | info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; |
267 | 267 | ||
268 | platform_set_drvdata(pdev, info); | ||
269 | |||
268 | info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, | 270 | info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, |
269 | &max8998_rtc_ops, THIS_MODULE); | 271 | &max8998_rtc_ops, THIS_MODULE); |
270 | 272 | ||
@@ -274,8 +276,6 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) | |||
274 | goto out_rtc; | 276 | goto out_rtc; |
275 | } | 277 | } |
276 | 278 | ||
277 | platform_set_drvdata(pdev, info); | ||
278 | |||
279 | ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, | 279 | ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, |
280 | "rtc-alarm0", info); | 280 | "rtc-alarm0", info); |
281 | 281 | ||
@@ -293,6 +293,7 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) | |||
293 | return 0; | 293 | return 0; |
294 | 294 | ||
295 | out_rtc: | 295 | out_rtc: |
296 | platform_set_drvdata(pdev, NULL); | ||
296 | kfree(info); | 297 | kfree(info); |
297 | return ret; | 298 | return ret; |
298 | } | 299 | } |
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index c5ac03793e79..a1a278bc340d 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c | |||
@@ -349,11 +349,15 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev) | |||
349 | if (ret) | 349 | if (ret) |
350 | goto err_alarm_irq_request; | 350 | goto err_alarm_irq_request; |
351 | 351 | ||
352 | mc13xxx_unlock(mc13xxx); | ||
353 | |||
352 | priv->rtc = rtc_device_register(pdev->name, | 354 | priv->rtc = rtc_device_register(pdev->name, |
353 | &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); | 355 | &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); |
354 | if (IS_ERR(priv->rtc)) { | 356 | if (IS_ERR(priv->rtc)) { |
355 | ret = PTR_ERR(priv->rtc); | 357 | ret = PTR_ERR(priv->rtc); |
356 | 358 | ||
359 | mc13xxx_lock(mc13xxx); | ||
360 | |||
357 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); | 361 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); |
358 | err_alarm_irq_request: | 362 | err_alarm_irq_request: |
359 | 363 | ||
@@ -365,12 +369,12 @@ err_reset_irq_status: | |||
365 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); | 369 | mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); |
366 | err_reset_irq_request: | 370 | err_reset_irq_request: |
367 | 371 | ||
372 | mc13xxx_unlock(mc13xxx); | ||
373 | |||
368 | platform_set_drvdata(pdev, NULL); | 374 | platform_set_drvdata(pdev, NULL); |
369 | kfree(priv); | 375 | kfree(priv); |
370 | } | 376 | } |
371 | 377 | ||
372 | mc13xxx_unlock(mc13xxx); | ||
373 | |||
374 | return ret; | 378 | return ret; |
375 | } | 379 | } |
376 | 380 | ||
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c index 67820626e18f..fcb113c11122 100644 --- a/drivers/rtc/rtc-msm6242.c +++ b/drivers/rtc/rtc-msm6242.c | |||
@@ -214,6 +214,7 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) | |||
214 | error = -ENOMEM; | 214 | error = -ENOMEM; |
215 | goto out_free_priv; | 215 | goto out_free_priv; |
216 | } | 216 | } |
217 | platform_set_drvdata(dev, priv); | ||
217 | 218 | ||
218 | rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, | 219 | rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, |
219 | THIS_MODULE); | 220 | THIS_MODULE); |
@@ -223,10 +224,10 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) | |||
223 | } | 224 | } |
224 | 225 | ||
225 | priv->rtc = rtc; | 226 | priv->rtc = rtc; |
226 | platform_set_drvdata(dev, priv); | ||
227 | return 0; | 227 | return 0; |
228 | 228 | ||
229 | out_unmap: | 229 | out_unmap: |
230 | platform_set_drvdata(dev, NULL); | ||
230 | iounmap(priv->regs); | 231 | iounmap(priv->regs); |
231 | out_free_priv: | 232 | out_free_priv: |
232 | kfree(priv); | 233 | kfree(priv); |
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 826ab64a8fa9..d814417bee8c 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c | |||
@@ -418,14 +418,6 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) | |||
418 | goto exit_put_clk; | 418 | goto exit_put_clk; |
419 | } | 419 | } |
420 | 420 | ||
421 | rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, | ||
422 | THIS_MODULE); | ||
423 | if (IS_ERR(rtc)) { | ||
424 | ret = PTR_ERR(rtc); | ||
425 | goto exit_put_clk; | ||
426 | } | ||
427 | |||
428 | pdata->rtc = rtc; | ||
429 | platform_set_drvdata(pdev, pdata); | 421 | platform_set_drvdata(pdev, pdata); |
430 | 422 | ||
431 | /* Configure and enable the RTC */ | 423 | /* Configure and enable the RTC */ |
@@ -438,8 +430,19 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) | |||
438 | pdata->irq = -1; | 430 | pdata->irq = -1; |
439 | } | 431 | } |
440 | 432 | ||
433 | rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, | ||
434 | THIS_MODULE); | ||
435 | if (IS_ERR(rtc)) { | ||
436 | ret = PTR_ERR(rtc); | ||
437 | goto exit_clr_drvdata; | ||
438 | } | ||
439 | |||
440 | pdata->rtc = rtc; | ||
441 | |||
441 | return 0; | 442 | return 0; |
442 | 443 | ||
444 | exit_clr_drvdata: | ||
445 | platform_set_drvdata(pdev, NULL); | ||
443 | exit_put_clk: | 446 | exit_put_clk: |
444 | clk_disable(pdata->clk); | 447 | clk_disable(pdata->clk); |
445 | clk_put(pdata->clk); | 448 | clk_put(pdata->clk); |
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c index a633abc42896..cd4f198cc2ef 100644 --- a/drivers/rtc/rtc-pcap.c +++ b/drivers/rtc/rtc-pcap.c | |||
@@ -151,6 +151,8 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) | |||
151 | 151 | ||
152 | pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); | 152 | pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); |
153 | 153 | ||
154 | platform_set_drvdata(pdev, pcap_rtc); | ||
155 | |||
154 | pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, | 156 | pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, |
155 | &pcap_rtc_ops, THIS_MODULE); | 157 | &pcap_rtc_ops, THIS_MODULE); |
156 | if (IS_ERR(pcap_rtc->rtc)) { | 158 | if (IS_ERR(pcap_rtc->rtc)) { |
@@ -158,7 +160,6 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) | |||
158 | goto fail_rtc; | 160 | goto fail_rtc; |
159 | } | 161 | } |
160 | 162 | ||
161 | platform_set_drvdata(pdev, pcap_rtc); | ||
162 | 163 | ||
163 | timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); | 164 | timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); |
164 | alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); | 165 | alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); |
@@ -177,6 +178,7 @@ fail_alarm: | |||
177 | fail_timer: | 178 | fail_timer: |
178 | rtc_device_unregister(pcap_rtc->rtc); | 179 | rtc_device_unregister(pcap_rtc->rtc); |
179 | fail_rtc: | 180 | fail_rtc: |
181 | platform_set_drvdata(pdev, NULL); | ||
180 | kfree(pcap_rtc); | 182 | kfree(pcap_rtc); |
181 | return err; | 183 | return err; |
182 | } | 184 | } |
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c index 694da39b6dd2..359da6d020b9 100644 --- a/drivers/rtc/rtc-rp5c01.c +++ b/drivers/rtc/rtc-rp5c01.c | |||
@@ -249,15 +249,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) | |||
249 | 249 | ||
250 | spin_lock_init(&priv->lock); | 250 | spin_lock_init(&priv->lock); |
251 | 251 | ||
252 | platform_set_drvdata(dev, priv); | ||
253 | |||
252 | rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, | 254 | rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, |
253 | THIS_MODULE); | 255 | THIS_MODULE); |
254 | if (IS_ERR(rtc)) { | 256 | if (IS_ERR(rtc)) { |
255 | error = PTR_ERR(rtc); | 257 | error = PTR_ERR(rtc); |
256 | goto out_unmap; | 258 | goto out_unmap; |
257 | } | 259 | } |
258 | |||
259 | priv->rtc = rtc; | 260 | priv->rtc = rtc; |
260 | platform_set_drvdata(dev, priv); | ||
261 | 261 | ||
262 | error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); | 262 | error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); |
263 | if (error) | 263 | if (error) |
@@ -268,6 +268,7 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) | |||
268 | out_unregister: | 268 | out_unregister: |
269 | rtc_device_unregister(rtc); | 269 | rtc_device_unregister(rtc); |
270 | out_unmap: | 270 | out_unmap: |
271 | platform_set_drvdata(dev, NULL); | ||
271 | iounmap(priv->regs); | 272 | iounmap(priv->regs); |
272 | out_free_priv: | 273 | out_free_priv: |
273 | kfree(priv); | 274 | kfree(priv); |
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index b3466c491cd3..16512ecae31a 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -46,6 +46,7 @@ static struct clk *rtc_clk; | |||
46 | static void __iomem *s3c_rtc_base; | 46 | static void __iomem *s3c_rtc_base; |
47 | static int s3c_rtc_alarmno = NO_IRQ; | 47 | static int s3c_rtc_alarmno = NO_IRQ; |
48 | static int s3c_rtc_tickno = NO_IRQ; | 48 | static int s3c_rtc_tickno = NO_IRQ; |
49 | static bool wake_en; | ||
49 | static enum s3c_cpu_type s3c_rtc_cpu_type; | 50 | static enum s3c_cpu_type s3c_rtc_cpu_type; |
50 | 51 | ||
51 | static DEFINE_SPINLOCK(s3c_rtc_pie_lock); | 52 | static DEFINE_SPINLOCK(s3c_rtc_pie_lock); |
@@ -562,8 +563,12 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) | |||
562 | } | 563 | } |
563 | s3c_rtc_enable(pdev, 0); | 564 | s3c_rtc_enable(pdev, 0); |
564 | 565 | ||
565 | if (device_may_wakeup(&pdev->dev)) | 566 | if (device_may_wakeup(&pdev->dev) && !wake_en) { |
566 | enable_irq_wake(s3c_rtc_alarmno); | 567 | if (enable_irq_wake(s3c_rtc_alarmno) == 0) |
568 | wake_en = true; | ||
569 | else | ||
570 | dev_err(&pdev->dev, "enable_irq_wake failed\n"); | ||
571 | } | ||
567 | 572 | ||
568 | return 0; | 573 | return 0; |
569 | } | 574 | } |
@@ -579,8 +584,10 @@ static int s3c_rtc_resume(struct platform_device *pdev) | |||
579 | writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); | 584 | writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); |
580 | } | 585 | } |
581 | 586 | ||
582 | if (device_may_wakeup(&pdev->dev)) | 587 | if (device_may_wakeup(&pdev->dev) && wake_en) { |
583 | disable_irq_wake(s3c_rtc_alarmno); | 588 | disable_irq_wake(s3c_rtc_alarmno); |
589 | wake_en = false; | ||
590 | } | ||
584 | 591 | ||
585 | return 0; | 592 | return 0; |
586 | } | 593 | } |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 475e603fc584..86b6f1cc1b10 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1742,11 +1742,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) | |||
1742 | static inline int _dasd_term_running_cqr(struct dasd_device *device) | 1742 | static inline int _dasd_term_running_cqr(struct dasd_device *device) |
1743 | { | 1743 | { |
1744 | struct dasd_ccw_req *cqr; | 1744 | struct dasd_ccw_req *cqr; |
1745 | int rc; | ||
1745 | 1746 | ||
1746 | if (list_empty(&device->ccw_queue)) | 1747 | if (list_empty(&device->ccw_queue)) |
1747 | return 0; | 1748 | return 0; |
1748 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); | 1749 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); |
1749 | return device->discipline->term_IO(cqr); | 1750 | rc = device->discipline->term_IO(cqr); |
1751 | if (!rc) | ||
1752 | /* | ||
1753 | * CQR terminated because a more important request is pending. | ||
1754 | * Undo decreasing of retry counter because this is | ||
1755 | * not an error case. | ||
1756 | */ | ||
1757 | cqr->retries++; | ||
1758 | return rc; | ||
1750 | } | 1759 | } |
1751 | 1760 | ||
1752 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) | 1761 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) |
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 4b60ede07f0e..be55fb2b1b1c 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -518,6 +518,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned) | |||
518 | return; | 518 | return; |
519 | new_incr->rn = rn; | 519 | new_incr->rn = rn; |
520 | new_incr->standby = standby; | 520 | new_incr->standby = standby; |
521 | if (!standby) | ||
522 | new_incr->usecount = 1; | ||
521 | last_rn = 0; | 523 | last_rn = 0; |
522 | prev = &sclp_mem_list; | 524 | prev = &sclp_mem_list; |
523 | list_for_each_entry(incr, &sclp_mem_list, list) { | 525 | list_for_each_entry(incr, &sclp_mem_list, list) { |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 83cea9a55e2f..1b3924c2fffd 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
@@ -236,7 +236,6 @@ tapeblock_setup_device(struct tape_device * device) | |||
236 | disk->major = tapeblock_major; | 236 | disk->major = tapeblock_major; |
237 | disk->first_minor = device->first_minor; | 237 | disk->first_minor = device->first_minor; |
238 | disk->fops = &tapeblock_fops; | 238 | disk->fops = &tapeblock_fops; |
239 | disk->events = DISK_EVENT_MEDIA_CHANGE; | ||
240 | disk->private_data = tape_get_device(device); | 239 | disk->private_data = tape_get_device(device); |
241 | disk->queue = blkdat->request_queue; | 240 | disk->queue = blkdat->request_queue; |
242 | set_capacity(disk, 0); | 241 | set_capacity(disk, 0); |
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index e2d45c91b8e8..9689d41c7888 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -1292,8 +1292,10 @@ static struct scsi_host_template qpti_template = { | |||
1292 | .use_clustering = ENABLE_CLUSTERING, | 1292 | .use_clustering = ENABLE_CLUSTERING, |
1293 | }; | 1293 | }; |
1294 | 1294 | ||
1295 | static const struct of_device_id qpti_match[]; | ||
1295 | static int __devinit qpti_sbus_probe(struct platform_device *op) | 1296 | static int __devinit qpti_sbus_probe(struct platform_device *op) |
1296 | { | 1297 | { |
1298 | const struct of_device_id *match; | ||
1297 | struct scsi_host_template *tpnt; | 1299 | struct scsi_host_template *tpnt; |
1298 | struct device_node *dp = op->dev.of_node; | 1300 | struct device_node *dp = op->dev.of_node; |
1299 | struct Scsi_Host *host; | 1301 | struct Scsi_Host *host; |
@@ -1301,9 +1303,10 @@ static int __devinit qpti_sbus_probe(struct platform_device *op) | |||
1301 | static int nqptis; | 1303 | static int nqptis; |
1302 | const char *fcode; | 1304 | const char *fcode; |
1303 | 1305 | ||
1304 | if (!op->dev.of_match) | 1306 | match = of_match_device(qpti_match, &op->dev); |
1307 | if (!match) | ||
1305 | return -EINVAL; | 1308 | return -EINVAL; |
1306 | tpnt = op->dev.of_match->data; | 1309 | tpnt = match->data; |
1307 | 1310 | ||
1308 | /* Sometimes Antares cards come up not completely | 1311 | /* Sometimes Antares cards come up not completely |
1309 | * setup, and we get a report of a zero IRQ. | 1312 | * setup, and we get a report of a zero IRQ. |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0bac91e72370..ec1803a48723 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -74,8 +74,6 @@ struct kmem_cache *scsi_sdb_cache; | |||
74 | */ | 74 | */ |
75 | #define SCSI_QUEUE_DELAY 3 | 75 | #define SCSI_QUEUE_DELAY 3 |
76 | 76 | ||
77 | static void scsi_run_queue(struct request_queue *q); | ||
78 | |||
79 | /* | 77 | /* |
80 | * Function: scsi_unprep_request() | 78 | * Function: scsi_unprep_request() |
81 | * | 79 | * |
@@ -161,7 +159,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) | |||
161 | blk_requeue_request(q, cmd->request); | 159 | blk_requeue_request(q, cmd->request); |
162 | spin_unlock_irqrestore(q->queue_lock, flags); | 160 | spin_unlock_irqrestore(q->queue_lock, flags); |
163 | 161 | ||
164 | scsi_run_queue(q); | 162 | kblockd_schedule_work(q, &device->requeue_work); |
165 | 163 | ||
166 | return 0; | 164 | return 0; |
167 | } | 165 | } |
@@ -438,7 +436,11 @@ static void scsi_run_queue(struct request_queue *q) | |||
438 | continue; | 436 | continue; |
439 | } | 437 | } |
440 | 438 | ||
441 | blk_run_queue_async(sdev->request_queue); | 439 | spin_unlock(shost->host_lock); |
440 | spin_lock(sdev->request_queue->queue_lock); | ||
441 | __blk_run_queue(sdev->request_queue); | ||
442 | spin_unlock(sdev->request_queue->queue_lock); | ||
443 | spin_lock(shost->host_lock); | ||
442 | } | 444 | } |
443 | /* put any unprocessed entries back */ | 445 | /* put any unprocessed entries back */ |
444 | list_splice(&starved_list, &shost->starved_list); | 446 | list_splice(&starved_list, &shost->starved_list); |
@@ -447,6 +449,16 @@ static void scsi_run_queue(struct request_queue *q) | |||
447 | blk_run_queue(q); | 449 | blk_run_queue(q); |
448 | } | 450 | } |
449 | 451 | ||
452 | void scsi_requeue_run_queue(struct work_struct *work) | ||
453 | { | ||
454 | struct scsi_device *sdev; | ||
455 | struct request_queue *q; | ||
456 | |||
457 | sdev = container_of(work, struct scsi_device, requeue_work); | ||
458 | q = sdev->request_queue; | ||
459 | scsi_run_queue(q); | ||
460 | } | ||
461 | |||
450 | /* | 462 | /* |
451 | * Function: scsi_requeue_command() | 463 | * Function: scsi_requeue_command() |
452 | * | 464 | * |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 087821fac8fe..58584dc0724a 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -242,6 +242,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | |||
242 | int display_failure_msg = 1, ret; | 242 | int display_failure_msg = 1, ret; |
243 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 243 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
244 | extern void scsi_evt_thread(struct work_struct *work); | 244 | extern void scsi_evt_thread(struct work_struct *work); |
245 | extern void scsi_requeue_run_queue(struct work_struct *work); | ||
245 | 246 | ||
246 | sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, | 247 | sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, |
247 | GFP_ATOMIC); | 248 | GFP_ATOMIC); |
@@ -264,6 +265,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | |||
264 | INIT_LIST_HEAD(&sdev->event_list); | 265 | INIT_LIST_HEAD(&sdev->event_list); |
265 | spin_lock_init(&sdev->list_lock); | 266 | spin_lock_init(&sdev->list_lock); |
266 | INIT_WORK(&sdev->event_work, scsi_evt_thread); | 267 | INIT_WORK(&sdev->event_work, scsi_evt_thread); |
268 | INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); | ||
267 | 269 | ||
268 | sdev->sdev_gendev.parent = get_device(&starget->dev); | 270 | sdev->sdev_gendev.parent = get_device(&starget->dev); |
269 | sdev->sdev_target = starget; | 271 | sdev->sdev_target = starget; |
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 6f34963b3c64..7ad48585c5e6 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c | |||
@@ -662,7 +662,6 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, | |||
662 | static int ssb_pci_sprom_get(struct ssb_bus *bus, | 662 | static int ssb_pci_sprom_get(struct ssb_bus *bus, |
663 | struct ssb_sprom *sprom) | 663 | struct ssb_sprom *sprom) |
664 | { | 664 | { |
665 | const struct ssb_sprom *fallback; | ||
666 | int err; | 665 | int err; |
667 | u16 *buf; | 666 | u16 *buf; |
668 | 667 | ||
@@ -707,10 +706,17 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, | |||
707 | if (err) { | 706 | if (err) { |
708 | /* All CRC attempts failed. | 707 | /* All CRC attempts failed. |
709 | * Maybe there is no SPROM on the device? | 708 | * Maybe there is no SPROM on the device? |
710 | * If we have a fallback, use that. */ | 709 | * Now we ask the arch code if there is some sprom |
711 | fallback = ssb_get_fallback_sprom(); | 710 | * available for this device in some other storage */ |
712 | if (fallback) { | 711 | err = ssb_fill_sprom_with_fallback(bus, sprom); |
713 | memcpy(sprom, fallback, sizeof(*sprom)); | 712 | if (err) { |
713 | ssb_printk(KERN_WARNING PFX "WARNING: Using" | ||
714 | " fallback SPROM failed (err %d)\n", | ||
715 | err); | ||
716 | } else { | ||
717 | ssb_dprintk(KERN_DEBUG PFX "Using SPROM" | ||
718 | " revision %d provided by" | ||
719 | " platform.\n", sprom->revision); | ||
714 | err = 0; | 720 | err = 0; |
715 | goto out_free; | 721 | goto out_free; |
716 | } | 722 | } |
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c index 5f34d7a3e3a5..45ff0e3a3828 100644 --- a/drivers/ssb/sprom.c +++ b/drivers/ssb/sprom.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | 18 | ||
19 | 19 | ||
20 | static const struct ssb_sprom *fallback_sprom; | 20 | static int(*get_fallback_sprom)(struct ssb_bus *dev, struct ssb_sprom *out); |
21 | 21 | ||
22 | 22 | ||
23 | static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, | 23 | static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, |
@@ -145,36 +145,43 @@ out: | |||
145 | } | 145 | } |
146 | 146 | ||
147 | /** | 147 | /** |
148 | * ssb_arch_set_fallback_sprom - Set a fallback SPROM for use if no SPROM is found. | 148 | * ssb_arch_register_fallback_sprom - Registers a method providing a |
149 | * fallback SPROM if no SPROM is found. | ||
149 | * | 150 | * |
150 | * @sprom: The SPROM data structure to register. | 151 | * @sprom_callback: The callback function. |
151 | * | 152 | * |
152 | * With this function the architecture implementation may register a fallback | 153 | * With this function the architecture implementation may register a |
153 | * SPROM data structure. The fallback is only used for PCI based SSB devices, | 154 | * callback handler which fills the SPROM data structure. The fallback is |
154 | * where no valid SPROM can be found in the shadow registers. | 155 | * only used for PCI based SSB devices, where no valid SPROM can be found |
156 | * in the shadow registers. | ||
155 | * | 157 | * |
156 | * This function is useful for weird architectures that have a half-assed SSB device | 158 | * This function is useful for weird architectures that have a half-assed |
157 | * hardwired to their PCI bus. | 159 | * SSB device hardwired to their PCI bus. |
158 | * | 160 | * |
159 | * Note that it does only work with PCI attached SSB devices. PCMCIA devices currently | 161 | * Note that it does only work with PCI attached SSB devices. PCMCIA |
160 | * don't use this fallback. | 162 | * devices currently don't use this fallback. |
161 | * Architectures must provide the SPROM for native SSB devices anyway, | 163 | * Architectures must provide the SPROM for native SSB devices anyway, so |
162 | * so the fallback also isn't used for native devices. | 164 | * the fallback also isn't used for native devices. |
163 | * | 165 | * |
164 | * This function is available for architecture code, only. So it is not exported. | 166 | * This function is available for architecture code, only. So it is not |
167 | * exported. | ||
165 | */ | 168 | */ |
166 | int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom) | 169 | int ssb_arch_register_fallback_sprom(int (*sprom_callback)(struct ssb_bus *bus, |
170 | struct ssb_sprom *out)) | ||
167 | { | 171 | { |
168 | if (fallback_sprom) | 172 | if (get_fallback_sprom) |
169 | return -EEXIST; | 173 | return -EEXIST; |
170 | fallback_sprom = sprom; | 174 | get_fallback_sprom = sprom_callback; |
171 | 175 | ||
172 | return 0; | 176 | return 0; |
173 | } | 177 | } |
174 | 178 | ||
175 | const struct ssb_sprom *ssb_get_fallback_sprom(void) | 179 | int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out) |
176 | { | 180 | { |
177 | return fallback_sprom; | 181 | if (!get_fallback_sprom) |
182 | return -ENOENT; | ||
183 | |||
184 | return get_fallback_sprom(bus, out); | ||
178 | } | 185 | } |
179 | 186 | ||
180 | /* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ | 187 | /* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ |
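A sketch of how architecture or board code might use the callback interface added above; only ssb_arch_register_fallback_sprom() and the callback signature come from this patch, while the board function, the SSB_BUSTYPE_PCI check and the SPROM values are illustrative assumptions (real code would read them from NVRAM or flash):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ssb/ssb.h>

/* Hypothetical board-support code. */
static int board_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
{
	/* Only PCI-attached SSB buses take the fallback path. */
	if (bus->bustype != SSB_BUSTYPE_PCI)
		return -ENOENT;

	memset(out, 0, sizeof(*out));
	out->revision = 2;	/* placeholder; fill from NVRAM/flash in real code */
	return 0;
}

static int __init board_sprom_register(void)
{
	return ssb_arch_register_fallback_sprom(board_get_fallback_sprom);
}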
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index 0331139a726f..77653014db0b 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h | |||
@@ -171,7 +171,8 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus, | |||
171 | const char *buf, size_t count, | 171 | const char *buf, size_t count, |
172 | int (*sprom_check_crc)(const u16 *sprom, size_t size), | 172 | int (*sprom_check_crc)(const u16 *sprom, size_t size), |
173 | int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)); | 173 | int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)); |
174 | extern const struct ssb_sprom *ssb_get_fallback_sprom(void); | 174 | extern int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, |
175 | struct ssb_sprom *out); | ||
175 | 176 | ||
176 | 177 | ||
177 | /* core.c */ | 178 | /* core.c */ |
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 80484af781e1..b1f0f83b870d 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig | |||
@@ -1391,6 +1391,14 @@ config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE | |||
1391 | help | 1391 | help |
1392 | Support for Console on the NWP serial ports. | 1392 | Support for Console on the NWP serial ports. |
1393 | 1393 | ||
1394 | config SERIAL_LANTIQ | ||
1395 | bool "Lantiq serial driver" | ||
1396 | depends on LANTIQ | ||
1397 | select SERIAL_CORE | ||
1398 | select SERIAL_CORE_CONSOLE | ||
1399 | help | ||
1400 | Support for console and UART on Lantiq SoCs. | ||
1401 | |||
1394 | config SERIAL_QE | 1402 | config SERIAL_QE |
1395 | tristate "Freescale QUICC Engine serial port support" | 1403 | tristate "Freescale QUICC Engine serial port support" |
1396 | depends on QUICC_ENGINE | 1404 | depends on QUICC_ENGINE |
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index fee0690ef8e3..35276043d9d1 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile | |||
@@ -94,3 +94,4 @@ obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o | |||
94 | obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o | 94 | obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o |
95 | obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o | 95 | obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o |
96 | obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o | 96 | obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o |
97 | obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o | ||
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c new file mode 100644 index 000000000000..58cf279ed879 --- /dev/null +++ b/drivers/tty/serial/lantiq.c | |||
@@ -0,0 +1,756 @@ | |||
1 | /* | ||
2 | * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License version 2 as published | ||
6 | * by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | * | ||
17 | * Copyright (C) 2004 Infineon IFAP DC COM CPE | ||
18 | * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org> | ||
19 | * Copyright (C) 2007 John Crispin <blogic@openwrt.org> | ||
20 | * Copyright (C) 2010 Thomas Langer, <thomas.langer@lantiq.com> | ||
21 | */ | ||
22 | |||
23 | #include <linux/slab.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/ioport.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/console.h> | ||
28 | #include <linux/sysrq.h> | ||
29 | #include <linux/device.h> | ||
30 | #include <linux/tty.h> | ||
31 | #include <linux/tty_flip.h> | ||
32 | #include <linux/serial_core.h> | ||
33 | #include <linux/serial.h> | ||
34 | #include <linux/platform_device.h> | ||
35 | #include <linux/io.h> | ||
36 | #include <linux/clk.h> | ||
37 | |||
38 | #include <lantiq_soc.h> | ||
39 | |||
40 | #define PORT_LTQ_ASC 111 | ||
41 | #define MAXPORTS 2 | ||
42 | #define UART_DUMMY_UER_RX 1 | ||
43 | #define DRVNAME "ltq_asc" | ||
44 | #ifdef __BIG_ENDIAN | ||
45 | #define LTQ_ASC_TBUF (0x0020 + 3) | ||
46 | #define LTQ_ASC_RBUF (0x0024 + 3) | ||
47 | #else | ||
48 | #define LTQ_ASC_TBUF 0x0020 | ||
49 | #define LTQ_ASC_RBUF 0x0024 | ||
50 | #endif | ||
51 | #define LTQ_ASC_FSTAT 0x0048 | ||
52 | #define LTQ_ASC_WHBSTATE 0x0018 | ||
53 | #define LTQ_ASC_STATE 0x0014 | ||
54 | #define LTQ_ASC_IRNCR 0x00F8 | ||
55 | #define LTQ_ASC_CLC 0x0000 | ||
56 | #define LTQ_ASC_ID 0x0008 | ||
57 | #define LTQ_ASC_PISEL 0x0004 | ||
58 | #define LTQ_ASC_TXFCON 0x0044 | ||
59 | #define LTQ_ASC_RXFCON 0x0040 | ||
60 | #define LTQ_ASC_CON 0x0010 | ||
61 | #define LTQ_ASC_BG 0x0050 | ||
62 | #define LTQ_ASC_IRNREN 0x00F4 | ||
63 | |||
64 | #define ASC_IRNREN_TX 0x1 | ||
65 | #define ASC_IRNREN_RX 0x2 | ||
66 | #define ASC_IRNREN_ERR 0x4 | ||
67 | #define ASC_IRNREN_TX_BUF 0x8 | ||
68 | #define ASC_IRNCR_TIR 0x1 | ||
69 | #define ASC_IRNCR_RIR 0x2 | ||
70 | #define ASC_IRNCR_EIR 0x4 | ||
71 | |||
72 | #define ASCOPT_CSIZE 0x3 | ||
73 | #define TXFIFO_FL 1 | ||
74 | #define RXFIFO_FL 1 | ||
75 | #define ASCCLC_DISS 0x2 | ||
76 | #define ASCCLC_RMCMASK 0x0000FF00 | ||
77 | #define ASCCLC_RMCOFFSET 8 | ||
78 | #define ASCCON_M_8ASYNC 0x0 | ||
79 | #define ASCCON_M_7ASYNC 0x2 | ||
80 | #define ASCCON_ODD 0x00000020 | ||
81 | #define ASCCON_STP 0x00000080 | ||
82 | #define ASCCON_BRS 0x00000100 | ||
83 | #define ASCCON_FDE 0x00000200 | ||
84 | #define ASCCON_R 0x00008000 | ||
85 | #define ASCCON_FEN 0x00020000 | ||
86 | #define ASCCON_ROEN 0x00080000 | ||
87 | #define ASCCON_TOEN 0x00100000 | ||
88 | #define ASCSTATE_PE 0x00010000 | ||
89 | #define ASCSTATE_FE 0x00020000 | ||
90 | #define ASCSTATE_ROE 0x00080000 | ||
91 | #define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE) | ||
92 | #define ASCWHBSTATE_CLRREN 0x00000001 | ||
93 | #define ASCWHBSTATE_SETREN 0x00000002 | ||
94 | #define ASCWHBSTATE_CLRPE 0x00000004 | ||
95 | #define ASCWHBSTATE_CLRFE 0x00000008 | ||
96 | #define ASCWHBSTATE_CLRROE 0x00000020 | ||
97 | #define ASCTXFCON_TXFEN 0x0001 | ||
98 | #define ASCTXFCON_TXFFLU 0x0002 | ||
99 | #define ASCTXFCON_TXFITLMASK 0x3F00 | ||
100 | #define ASCTXFCON_TXFITLOFF 8 | ||
101 | #define ASCRXFCON_RXFEN 0x0001 | ||
102 | #define ASCRXFCON_RXFFLU 0x0002 | ||
103 | #define ASCRXFCON_RXFITLMASK 0x3F00 | ||
104 | #define ASCRXFCON_RXFITLOFF 8 | ||
105 | #define ASCFSTAT_RXFFLMASK 0x003F | ||
106 | #define ASCFSTAT_TXFFLMASK 0x3F00 | ||
107 | #define ASCFSTAT_TXFREEMASK 0x3F000000 | ||
108 | #define ASCFSTAT_TXFREEOFF 24 | ||
109 | |||
110 | static void lqasc_tx_chars(struct uart_port *port); | ||
111 | static struct ltq_uart_port *lqasc_port[MAXPORTS]; | ||
112 | static struct uart_driver lqasc_reg; | ||
113 | static DEFINE_SPINLOCK(ltq_asc_lock); | ||
114 | |||
115 | struct ltq_uart_port { | ||
116 | struct uart_port port; | ||
117 | struct clk *clk; | ||
118 | unsigned int tx_irq; | ||
119 | unsigned int rx_irq; | ||
120 | unsigned int err_irq; | ||
121 | }; | ||
122 | |||
123 | static inline struct | ||
124 | ltq_uart_port *to_ltq_uart_port(struct uart_port *port) | ||
125 | { | ||
126 | return container_of(port, struct ltq_uart_port, port); | ||
127 | } | ||
128 | |||
129 | static void | ||
130 | lqasc_stop_tx(struct uart_port *port) | ||
131 | { | ||
132 | return; | ||
133 | } | ||
134 | |||
135 | static void | ||
136 | lqasc_start_tx(struct uart_port *port) | ||
137 | { | ||
138 | unsigned long flags; | ||
139 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
140 | lqasc_tx_chars(port); | ||
141 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
142 | return; | ||
143 | } | ||
144 | |||
145 | static void | ||
146 | lqasc_stop_rx(struct uart_port *port) | ||
147 | { | ||
148 | ltq_w32(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); | ||
149 | } | ||
150 | |||
151 | static void | ||
152 | lqasc_enable_ms(struct uart_port *port) | ||
153 | { | ||
154 | } | ||
155 | |||
156 | static int | ||
157 | lqasc_rx_chars(struct uart_port *port) | ||
158 | { | ||
159 | struct tty_struct *tty = tty_port_tty_get(&port->state->port); | ||
160 | unsigned int ch = 0, rsr = 0, fifocnt; | ||
161 | |||
162 | if (!tty) { | ||
163 | dev_dbg(port->dev, "%s:tty is busy now", __func__); | ||
164 | return -EBUSY; | ||
165 | } | ||
166 | fifocnt = | ||
167 | ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; | ||
168 | while (fifocnt--) { | ||
169 | u8 flag = TTY_NORMAL; | ||
170 | ch = ltq_r8(port->membase + LTQ_ASC_RBUF); | ||
171 | rsr = (ltq_r32(port->membase + LTQ_ASC_STATE) | ||
172 | & ASCSTATE_ANY) | UART_DUMMY_UER_RX; | ||
173 | tty_flip_buffer_push(tty); | ||
174 | port->icount.rx++; | ||
175 | |||
176 | /* | ||
177 | * Note that the error handling code is | ||
178 | * out of the main execution path | ||
179 | */ | ||
180 | if (rsr & ASCSTATE_ANY) { | ||
181 | if (rsr & ASCSTATE_PE) { | ||
182 | port->icount.parity++; | ||
183 | ltq_w32_mask(0, ASCWHBSTATE_CLRPE, | ||
184 | port->membase + LTQ_ASC_WHBSTATE); | ||
185 | } else if (rsr & ASCSTATE_FE) { | ||
186 | port->icount.frame++; | ||
187 | ltq_w32_mask(0, ASCWHBSTATE_CLRFE, | ||
188 | port->membase + LTQ_ASC_WHBSTATE); | ||
189 | } | ||
190 | if (rsr & ASCSTATE_ROE) { | ||
191 | port->icount.overrun++; | ||
192 | ltq_w32_mask(0, ASCWHBSTATE_CLRROE, | ||
193 | port->membase + LTQ_ASC_WHBSTATE); | ||
194 | } | ||
195 | |||
196 | rsr &= port->read_status_mask; | ||
197 | |||
198 | if (rsr & ASCSTATE_PE) | ||
199 | flag = TTY_PARITY; | ||
200 | else if (rsr & ASCSTATE_FE) | ||
201 | flag = TTY_FRAME; | ||
202 | } | ||
203 | |||
204 | if ((rsr & port->ignore_status_mask) == 0) | ||
205 | tty_insert_flip_char(tty, ch, flag); | ||
206 | |||
207 | if (rsr & ASCSTATE_ROE) | ||
208 | /* | ||
209 | * Overrun is special, since it's reported | ||
210 | * immediately, and doesn't affect the current | ||
211 | * character | ||
212 | */ | ||
213 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); | ||
214 | } | ||
215 | if (ch != 0) | ||
216 | tty_flip_buffer_push(tty); | ||
217 | tty_kref_put(tty); | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static void | ||
222 | lqasc_tx_chars(struct uart_port *port) | ||
223 | { | ||
224 | struct circ_buf *xmit = &port->state->xmit; | ||
225 | if (uart_tx_stopped(port)) { | ||
226 | lqasc_stop_tx(port); | ||
227 | return; | ||
228 | } | ||
229 | |||
230 | while (((ltq_r32(port->membase + LTQ_ASC_FSTAT) & | ||
231 | ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { | ||
232 | if (port->x_char) { | ||
233 | ltq_w8(port->x_char, port->membase + LTQ_ASC_TBUF); | ||
234 | port->icount.tx++; | ||
235 | port->x_char = 0; | ||
236 | continue; | ||
237 | } | ||
238 | |||
239 | if (uart_circ_empty(xmit)) | ||
240 | break; | ||
241 | |||
242 | ltq_w8(port->state->xmit.buf[port->state->xmit.tail], | ||
243 | port->membase + LTQ_ASC_TBUF); | ||
244 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | ||
245 | port->icount.tx++; | ||
246 | } | ||
247 | |||
248 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | ||
249 | uart_write_wakeup(port); | ||
250 | } | ||
251 | |||
252 | static irqreturn_t | ||
253 | lqasc_tx_int(int irq, void *_port) | ||
254 | { | ||
255 | unsigned long flags; | ||
256 | struct uart_port *port = (struct uart_port *)_port; | ||
257 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
258 | ltq_w32(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); | ||
259 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
260 | lqasc_start_tx(port); | ||
261 | return IRQ_HANDLED; | ||
262 | } | ||
263 | |||
264 | static irqreturn_t | ||
265 | lqasc_err_int(int irq, void *_port) | ||
266 | { | ||
267 | unsigned long flags; | ||
268 | struct uart_port *port = (struct uart_port *)_port; | ||
269 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
270 | /* clear any pending interrupts */ | ||
271 | ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | | ||
272 | ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); | ||
273 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
274 | return IRQ_HANDLED; | ||
275 | } | ||
276 | |||
277 | static irqreturn_t | ||
278 | lqasc_rx_int(int irq, void *_port) | ||
279 | { | ||
280 | unsigned long flags; | ||
281 | struct uart_port *port = (struct uart_port *)_port; | ||
282 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
283 | ltq_w32(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); | ||
284 | lqasc_rx_chars(port); | ||
285 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
286 | return IRQ_HANDLED; | ||
287 | } | ||
288 | |||
289 | static unsigned int | ||
290 | lqasc_tx_empty(struct uart_port *port) | ||
291 | { | ||
292 | int status; | ||
293 | status = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; | ||
294 | return status ? 0 : TIOCSER_TEMT; | ||
295 | } | ||
296 | |||
297 | static unsigned int | ||
298 | lqasc_get_mctrl(struct uart_port *port) | ||
299 | { | ||
300 | return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR; | ||
301 | } | ||
302 | |||
303 | static void | ||
304 | lqasc_set_mctrl(struct uart_port *port, u_int mctrl) | ||
305 | { | ||
306 | } | ||
307 | |||
308 | static void | ||
309 | lqasc_break_ctl(struct uart_port *port, int break_state) | ||
310 | { | ||
311 | } | ||
312 | |||
313 | static int | ||
314 | lqasc_startup(struct uart_port *port) | ||
315 | { | ||
316 | struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); | ||
317 | int retval; | ||
318 | |||
319 | port->uartclk = clk_get_rate(ltq_port->clk); | ||
320 | |||
321 | ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), | ||
322 | port->membase + LTQ_ASC_CLC); | ||
323 | |||
324 | ltq_w32(0, port->membase + LTQ_ASC_PISEL); | ||
325 | ltq_w32( | ||
326 | ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | | ||
327 | ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, | ||
328 | port->membase + LTQ_ASC_TXFCON); | ||
329 | ltq_w32( | ||
330 | ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) | ||
331 | | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, | ||
332 | port->membase + LTQ_ASC_RXFCON); | ||
333 | /* make sure other settings are written to hardware before | ||
334 | * setting enable bits | ||
335 | */ | ||
336 | wmb(); | ||
337 | ltq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN | | ||
338 | ASCCON_ROEN, port->membase + LTQ_ASC_CON); | ||
339 | |||
340 | retval = request_irq(ltq_port->tx_irq, lqasc_tx_int, | ||
341 | IRQF_DISABLED, "asc_tx", port); | ||
342 | if (retval) { | ||
343 | pr_err("failed to request lqasc_tx_int\n"); | ||
344 | return retval; | ||
345 | } | ||
346 | |||
347 | retval = request_irq(ltq_port->rx_irq, lqasc_rx_int, | ||
348 | IRQF_DISABLED, "asc_rx", port); | ||
349 | if (retval) { | ||
350 | pr_err("failed to request lqasc_rx_int\n"); | ||
351 | goto err1; | ||
352 | } | ||
353 | |||
354 | retval = request_irq(ltq_port->err_irq, lqasc_err_int, | ||
355 | IRQF_DISABLED, "asc_err", port); | ||
356 | if (retval) { | ||
357 | pr_err("failed to request lqasc_err_int\n"); | ||
358 | goto err2; | ||
359 | } | ||
360 | |||
361 | ltq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, | ||
362 | port->membase + LTQ_ASC_IRNREN); | ||
363 | return 0; | ||
364 | |||
365 | err2: | ||
366 | free_irq(ltq_port->rx_irq, port); | ||
367 | err1: | ||
368 | free_irq(ltq_port->tx_irq, port); | ||
369 | return retval; | ||
370 | } | ||
371 | |||
372 | static void | ||
373 | lqasc_shutdown(struct uart_port *port) | ||
374 | { | ||
375 | struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); | ||
376 | free_irq(ltq_port->tx_irq, port); | ||
377 | free_irq(ltq_port->rx_irq, port); | ||
378 | free_irq(ltq_port->err_irq, port); | ||
379 | |||
380 | ltq_w32(0, port->membase + LTQ_ASC_CON); | ||
381 | ltq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, | ||
382 | port->membase + LTQ_ASC_RXFCON); | ||
383 | ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, | ||
384 | port->membase + LTQ_ASC_TXFCON); | ||
385 | } | ||
386 | |||
387 | static void | ||
388 | lqasc_set_termios(struct uart_port *port, | ||
389 | struct ktermios *new, struct ktermios *old) | ||
390 | { | ||
391 | unsigned int cflag; | ||
392 | unsigned int iflag; | ||
393 | unsigned int divisor; | ||
394 | unsigned int baud; | ||
395 | unsigned int con = 0; | ||
396 | unsigned long flags; | ||
397 | |||
398 | cflag = new->c_cflag; | ||
399 | iflag = new->c_iflag; | ||
400 | |||
401 | switch (cflag & CSIZE) { | ||
402 | case CS7: | ||
403 | con = ASCCON_M_7ASYNC; | ||
404 | break; | ||
405 | |||
406 | case CS5: | ||
407 | case CS6: | ||
408 | default: | ||
409 | new->c_cflag &= ~ CSIZE; | ||
410 | new->c_cflag |= CS8; | ||
411 | con = ASCCON_M_8ASYNC; | ||
412 | break; | ||
413 | } | ||
414 | |||
415 | cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ | ||
416 | |||
417 | if (cflag & CSTOPB) | ||
418 | con |= ASCCON_STP; | ||
419 | |||
420 | if (cflag & PARENB) { | ||
421 | if (!(cflag & PARODD)) | ||
422 | con &= ~ASCCON_ODD; | ||
423 | else | ||
424 | con |= ASCCON_ODD; | ||
425 | } | ||
426 | |||
427 | port->read_status_mask = ASCSTATE_ROE; | ||
428 | if (iflag & INPCK) | ||
429 | port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE; | ||
430 | |||
431 | port->ignore_status_mask = 0; | ||
432 | if (iflag & IGNPAR) | ||
433 | port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE; | ||
434 | |||
435 | if (iflag & IGNBRK) { | ||
436 | /* | ||
437 | * If we're ignoring parity and break indicators, | ||
438 | * ignore overruns too (for real raw support). | ||
439 | */ | ||
440 | if (iflag & IGNPAR) | ||
441 | port->ignore_status_mask |= ASCSTATE_ROE; | ||
442 | } | ||
443 | |||
444 | if ((cflag & CREAD) == 0) | ||
445 | port->ignore_status_mask |= UART_DUMMY_UER_RX; | ||
446 | |||
447 | /* set error signals - framing, parity and overrun, enable receiver */ | ||
448 | con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN; | ||
449 | |||
450 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
451 | |||
452 | /* set up CON */ | ||
453 | ltq_w32_mask(0, con, port->membase + LTQ_ASC_CON); | ||
454 | |||
455 | /* Set baud rate - take a divider of 2 into account */ | ||
456 | baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); | ||
457 | divisor = uart_get_divisor(port, baud); | ||
458 | divisor = divisor / 2 - 1; | ||
459 | |||
460 | /* disable the baudrate generator */ | ||
461 | ltq_w32_mask(ASCCON_R, 0, port->membase + LTQ_ASC_CON); | ||
462 | |||
463 | /* make sure the fractional divider is off */ | ||
464 | ltq_w32_mask(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON); | ||
465 | |||
466 | /* set up to use divisor of 2 */ | ||
467 | ltq_w32_mask(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); | ||
468 | |||
469 | /* now we can write the new baudrate into the register */ | ||
470 | ltq_w32(divisor, port->membase + LTQ_ASC_BG); | ||
471 | |||
472 | /* turn the baudrate generator back on */ | ||
473 | ltq_w32_mask(0, ASCCON_R, port->membase + LTQ_ASC_CON); | ||
474 | |||
475 | /* enable rx */ | ||
476 | ltq_w32(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); | ||
477 | |||
478 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
479 | |||
480 | /* Don't rewrite B0 */ | ||
481 | if (tty_termios_baud_rate(new)) | ||
482 | tty_termios_encode_baud_rate(new, baud, baud); | ||
483 | } | ||
484 | |||
485 | static const char* | ||
486 | lqasc_type(struct uart_port *port) | ||
487 | { | ||
488 | if (port->type == PORT_LTQ_ASC) | ||
489 | return DRVNAME; | ||
490 | else | ||
491 | return NULL; | ||
492 | } | ||
493 | |||
494 | static void | ||
495 | lqasc_release_port(struct uart_port *port) | ||
496 | { | ||
497 | if (port->flags & UPF_IOREMAP) { | ||
498 | iounmap(port->membase); | ||
499 | port->membase = NULL; | ||
500 | } | ||
501 | } | ||
502 | |||
503 | static int | ||
504 | lqasc_request_port(struct uart_port *port) | ||
505 | { | ||
506 | struct platform_device *pdev = to_platform_device(port->dev); | ||
507 | struct resource *res; | ||
508 | int size; | ||
509 | |||
510 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
511 | if (!res) { | ||
512 | dev_err(&pdev->dev, "cannot obtain I/O memory region"); | ||
513 | return -ENODEV; | ||
514 | } | ||
515 | size = resource_size(res); | ||
516 | |||
517 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
518 | size, dev_name(&pdev->dev)); | ||
519 | if (!res) { | ||
520 | dev_err(&pdev->dev, "cannot request I/O memory region"); | ||
521 | return -EBUSY; | ||
522 | } | ||
523 | |||
524 | if (port->flags & UPF_IOREMAP) { | ||
525 | port->membase = devm_ioremap_nocache(&pdev->dev, | ||
526 | port->mapbase, size); | ||
527 | if (port->membase == NULL) | ||
528 | return -ENOMEM; | ||
529 | } | ||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static void | ||
534 | lqasc_config_port(struct uart_port *port, int flags) | ||
535 | { | ||
536 | if (flags & UART_CONFIG_TYPE) { | ||
537 | port->type = PORT_LTQ_ASC; | ||
538 | lqasc_request_port(port); | ||
539 | } | ||
540 | } | ||
541 | |||
542 | static int | ||
543 | lqasc_verify_port(struct uart_port *port, | ||
544 | struct serial_struct *ser) | ||
545 | { | ||
546 | int ret = 0; | ||
547 | if (ser->type != PORT_UNKNOWN && ser->type != PORT_LTQ_ASC) | ||
548 | ret = -EINVAL; | ||
549 | if (ser->irq < 0 || ser->irq >= NR_IRQS) | ||
550 | ret = -EINVAL; | ||
551 | if (ser->baud_base < 9600) | ||
552 | ret = -EINVAL; | ||
553 | return ret; | ||
554 | } | ||
555 | |||
556 | static struct uart_ops lqasc_pops = { | ||
557 | .tx_empty = lqasc_tx_empty, | ||
558 | .set_mctrl = lqasc_set_mctrl, | ||
559 | .get_mctrl = lqasc_get_mctrl, | ||
560 | .stop_tx = lqasc_stop_tx, | ||
561 | .start_tx = lqasc_start_tx, | ||
562 | .stop_rx = lqasc_stop_rx, | ||
563 | .enable_ms = lqasc_enable_ms, | ||
564 | .break_ctl = lqasc_break_ctl, | ||
565 | .startup = lqasc_startup, | ||
566 | .shutdown = lqasc_shutdown, | ||
567 | .set_termios = lqasc_set_termios, | ||
568 | .type = lqasc_type, | ||
569 | .release_port = lqasc_release_port, | ||
570 | .request_port = lqasc_request_port, | ||
571 | .config_port = lqasc_config_port, | ||
572 | .verify_port = lqasc_verify_port, | ||
573 | }; | ||
574 | |||
575 | static void | ||
576 | lqasc_console_putchar(struct uart_port *port, int ch) | ||
577 | { | ||
578 | int fifofree; | ||
579 | |||
580 | if (!port->membase) | ||
581 | return; | ||
582 | |||
583 | do { | ||
584 | fifofree = (ltq_r32(port->membase + LTQ_ASC_FSTAT) | ||
585 | & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; | ||
586 | } while (fifofree == 0); | ||
587 | ltq_w8(ch, port->membase + LTQ_ASC_TBUF); | ||
588 | } | ||
589 | |||
590 | |||
591 | static void | ||
592 | lqasc_console_write(struct console *co, const char *s, u_int count) | ||
593 | { | ||
594 | struct ltq_uart_port *ltq_port; | ||
595 | struct uart_port *port; | ||
596 | unsigned long flags; | ||
597 | |||
598 | if (co->index >= MAXPORTS) | ||
599 | return; | ||
600 | |||
601 | ltq_port = lqasc_port[co->index]; | ||
602 | if (!ltq_port) | ||
603 | return; | ||
604 | |||
605 | port = &ltq_port->port; | ||
606 | |||
607 | spin_lock_irqsave(&ltq_asc_lock, flags); | ||
608 | uart_console_write(port, s, count, lqasc_console_putchar); | ||
609 | spin_unlock_irqrestore(&ltq_asc_lock, flags); | ||
610 | } | ||
611 | |||
612 | static int __init | ||
613 | lqasc_console_setup(struct console *co, char *options) | ||
614 | { | ||
615 | struct ltq_uart_port *ltq_port; | ||
616 | struct uart_port *port; | ||
617 | int baud = 115200; | ||
618 | int bits = 8; | ||
619 | int parity = 'n'; | ||
620 | int flow = 'n'; | ||
621 | |||
622 | if (co->index >= MAXPORTS) | ||
623 | return -ENODEV; | ||
624 | |||
625 | ltq_port = lqasc_port[co->index]; | ||
626 | if (!ltq_port) | ||
627 | return -ENODEV; | ||
628 | |||
629 | port = &ltq_port->port; | ||
630 | |||
631 | port->uartclk = clk_get_rate(ltq_port->clk); | ||
632 | |||
633 | if (options) | ||
634 | uart_parse_options(options, &baud, &parity, &bits, &flow); | ||
635 | return uart_set_options(port, co, baud, parity, bits, flow); | ||
636 | } | ||
637 | |||
638 | static struct console lqasc_console = { | ||
639 | .name = "ttyLTQ", | ||
640 | .write = lqasc_console_write, | ||
641 | .device = uart_console_device, | ||
642 | .setup = lqasc_console_setup, | ||
643 | .flags = CON_PRINTBUFFER, | ||
644 | .index = -1, | ||
645 | .data = &lqasc_reg, | ||
646 | }; | ||
647 | |||
648 | static int __init | ||
649 | lqasc_console_init(void) | ||
650 | { | ||
651 | register_console(&lqasc_console); | ||
652 | return 0; | ||
653 | } | ||
654 | console_initcall(lqasc_console_init); | ||
655 | |||
656 | static struct uart_driver lqasc_reg = { | ||
657 | .owner = THIS_MODULE, | ||
658 | .driver_name = DRVNAME, | ||
659 | .dev_name = "ttyLTQ", | ||
660 | .major = 0, | ||
661 | .minor = 0, | ||
662 | .nr = MAXPORTS, | ||
663 | .cons = &lqasc_console, | ||
664 | }; | ||
665 | |||
666 | static int __init | ||
667 | lqasc_probe(struct platform_device *pdev) | ||
668 | { | ||
669 | struct ltq_uart_port *ltq_port; | ||
670 | struct uart_port *port; | ||
671 | struct resource *mmres, *irqres; | ||
672 | int tx_irq, rx_irq, err_irq; | ||
673 | struct clk *clk; | ||
674 | int ret; | ||
675 | |||
676 | mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
677 | irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
678 | if (!mmres || !irqres) | ||
679 | return -ENODEV; | ||
680 | |||
681 | if (pdev->id >= MAXPORTS) | ||
682 | return -EBUSY; | ||
683 | |||
684 | if (lqasc_port[pdev->id] != NULL) | ||
685 | return -EBUSY; | ||
686 | |||
687 | clk = clk_get(&pdev->dev, "fpi"); | ||
688 | if (IS_ERR(clk)) { | ||
689 | pr_err("failed to get fpi clk\n"); | ||
690 | return -ENOENT; | ||
691 | } | ||
692 | |||
693 | tx_irq = platform_get_irq_byname(pdev, "tx"); | ||
694 | rx_irq = platform_get_irq_byname(pdev, "rx"); | ||
695 | err_irq = platform_get_irq_byname(pdev, "err"); | ||
696 | if ((tx_irq < 0) || (rx_irq < 0) || (err_irq < 0)) | ||
697 | return -ENODEV; | ||
698 | |||
699 | ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL); | ||
700 | if (!ltq_port) | ||
701 | return -ENOMEM; | ||
702 | |||
703 | port = &ltq_port->port; | ||
704 | |||
705 | port->iotype = SERIAL_IO_MEM; | ||
706 | port->flags = ASYNC_BOOT_AUTOCONF | UPF_IOREMAP; | ||
707 | port->ops = &lqasc_pops; | ||
708 | port->fifosize = 16; | ||
709 | port->type = PORT_LTQ_ASC; | ||
710 | port->line = pdev->id; | ||
711 | port->dev = &pdev->dev; | ||
712 | |||
713 | port->irq = tx_irq; /* unused, just to be backward-compatible */ | ||
714 | port->mapbase = mmres->start; | ||
715 | |||
716 | ltq_port->clk = clk; | ||
717 | |||
718 | ltq_port->tx_irq = tx_irq; | ||
719 | ltq_port->rx_irq = rx_irq; | ||
720 | ltq_port->err_irq = err_irq; | ||
721 | |||
722 | lqasc_port[pdev->id] = ltq_port; | ||
723 | platform_set_drvdata(pdev, ltq_port); | ||
724 | |||
725 | ret = uart_add_one_port(&lqasc_reg, port); | ||
726 | |||
727 | return ret; | ||
728 | } | ||
729 | |||
730 | static struct platform_driver lqasc_driver = { | ||
731 | .driver = { | ||
732 | .name = DRVNAME, | ||
733 | .owner = THIS_MODULE, | ||
734 | }, | ||
735 | }; | ||
736 | |||
737 | int __init | ||
738 | init_lqasc(void) | ||
739 | { | ||
740 | int ret; | ||
741 | |||
742 | ret = uart_register_driver(&lqasc_reg); | ||
743 | if (ret != 0) | ||
744 | return ret; | ||
745 | |||
746 | ret = platform_driver_probe(&lqasc_driver, lqasc_probe); | ||
747 | if (ret != 0) | ||
748 | uart_unregister_driver(&lqasc_reg); | ||
749 | |||
750 | return ret; | ||
751 | } | ||
752 | |||
753 | module_init(init_lqasc); | ||
754 | |||
755 | MODULE_DESCRIPTION("Lantiq serial port driver"); | ||
756 | MODULE_LICENSE("GPL"); | ||
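The probe above expects one memory resource, three IRQ resources named "tx", "rx" and "err", a platform device id below MAXPORTS, and an "fpi" clock resolvable through clk_get(). A sketch of a matching SoC-side registration; the base address and IRQ numbers are placeholders, not taken from any real Lantiq board file:

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource ltq_asc1_resources[] = {
	{
		.start	= 0x1e100c00,	/* placeholder base address */
		.end	= 0x1e100fff,
		.flags	= IORESOURCE_MEM,
	},
	{ .name = "tx",  .start = 104, .end = 104, .flags = IORESOURCE_IRQ },
	{ .name = "rx",  .start = 105, .end = 105, .flags = IORESOURCE_IRQ },
	{ .name = "err", .start = 106, .end = 106, .flags = IORESOURCE_IRQ },
};

static struct platform_device ltq_asc1_device = {
	.name		= "ltq_asc",	/* must match DRVNAME in lantiq.c */
	.id		= 1,		/* must stay below MAXPORTS */
	.resource	= ltq_asc1_resources,
	.num_resources	= ARRAY_SIZE(ltq_asc1_resources),
};

/* Board init would then call platform_device_register(&ltq_asc1_device)
 * and ensure the SoC clock code provides the "fpi" clock for this device. */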
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 0e8eec516df4..c911b2419abb 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c | |||
@@ -80,14 +80,17 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev, | |||
80 | /* | 80 | /* |
81 | * Try to register a serial port | 81 | * Try to register a serial port |
82 | */ | 82 | */ |
83 | static struct of_device_id of_platform_serial_table[]; | ||
83 | static int __devinit of_platform_serial_probe(struct platform_device *ofdev) | 84 | static int __devinit of_platform_serial_probe(struct platform_device *ofdev) |
84 | { | 85 | { |
86 | const struct of_device_id *match; | ||
85 | struct of_serial_info *info; | 87 | struct of_serial_info *info; |
86 | struct uart_port port; | 88 | struct uart_port port; |
87 | int port_type; | 89 | int port_type; |
88 | int ret; | 90 | int ret; |
89 | 91 | ||
90 | if (!ofdev->dev.of_match) | 92 | match = of_match_device(of_platform_serial_table, &ofdev->dev); |
93 | if (!match) | ||
91 | return -EINVAL; | 94 | return -EINVAL; |
92 | 95 | ||
93 | if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) | 96 | if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) |
@@ -97,7 +100,7 @@ static int __devinit of_platform_serial_probe(struct platform_device *ofdev) | |||
97 | if (info == NULL) | 100 | if (info == NULL) |
98 | return -ENOMEM; | 101 | return -ENOMEM; |
99 | 102 | ||
100 | port_type = (unsigned long)ofdev->dev.of_match->data; | 103 | port_type = (unsigned long)match->data; |
101 | ret = of_platform_serial_setup(ofdev, port_type, &port); | 104 | ret = of_platform_serial_setup(ofdev, port_type, &port); |
102 | if (ret) | 105 | if (ret) |
103 | goto out; | 106 | goto out; |
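The of_serial.c, fsl_qe_udc.c and mpc8xxx_wdt.c hunks in this series all apply the same conversion: instead of dereferencing the removed ofdev->dev.of_match pointer, the probe calls of_match_device() against a forward-declared match table and takes its per-device data from match->data. A condensed sketch of the pattern, with placeholder driver and compatible names:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id foo_match[];	/* table is defined below the probe */

static int foo_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	unsigned long variant;

	match = of_match_device(foo_match, &ofdev->dev);
	if (!match)
		return -EINVAL;

	variant = (unsigned long)match->data;	/* per-compatible driver data */
	/* ... use 'variant' to configure the device ... */
	return 0;
}

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo-v1", .data = (void *)1 },
	{ .compatible = "vendor,foo-v2", .data = (void *)2 },
	{ /* sentinel */ },
};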
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 36613b37c504..3a68e09309f7 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c | |||
@@ -2539,15 +2539,18 @@ static void qe_udc_release(struct device *dev) | |||
2539 | } | 2539 | } |
2540 | 2540 | ||
2541 | /* Driver probe functions */ | 2541 | /* Driver probe functions */ |
2542 | static const struct of_device_id qe_udc_match[]; | ||
2542 | static int __devinit qe_udc_probe(struct platform_device *ofdev) | 2543 | static int __devinit qe_udc_probe(struct platform_device *ofdev) |
2543 | { | 2544 | { |
2545 | const struct of_device_id *match; | ||
2544 | struct device_node *np = ofdev->dev.of_node; | 2546 | struct device_node *np = ofdev->dev.of_node; |
2545 | struct qe_ep *ep; | 2547 | struct qe_ep *ep; |
2546 | unsigned int ret = 0; | 2548 | unsigned int ret = 0; |
2547 | unsigned int i; | 2549 | unsigned int i; |
2548 | const void *prop; | 2550 | const void *prop; |
2549 | 2551 | ||
2550 | if (!ofdev->dev.of_match) | 2552 | match = of_match_device(qe_udc_match, &ofdev->dev); |
2553 | if (!match) | ||
2551 | return -EINVAL; | 2554 | return -EINVAL; |
2552 | 2555 | ||
2553 | prop = of_get_property(np, "mode", NULL); | 2556 | prop = of_get_property(np, "mode", NULL); |
@@ -2561,7 +2564,7 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev) | |||
2561 | return -ENOMEM; | 2564 | return -ENOMEM; |
2562 | } | 2565 | } |
2563 | 2566 | ||
2564 | udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data; | 2567 | udc_controller->soc_type = (unsigned long)match->data; |
2565 | udc_controller->usb_regs = of_iomap(np, 0); | 2568 | udc_controller->usb_regs = of_iomap(np, 0); |
2566 | if (!udc_controller->usb_regs) { | 2569 | if (!udc_controller->usb_regs) { |
2567 | ret = -ENOMEM; | 2570 | ret = -ENOMEM; |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 2ab291241635..7aa4eea930f1 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Author: Michael S. Tsirkin <mst@redhat.com> | 4 | * Author: Michael S. Tsirkin <mst@redhat.com> |
5 | * | 5 | * |
6 | * Inspiration, some code, and most witty comments come from | 6 | * Inspiration, some code, and most witty comments come from |
7 | * Documentation/lguest/lguest.c, by Rusty Russell | 7 | * Documentation/virtual/lguest/lguest.c, by Rusty Russell |
8 | * | 8 | * |
9 | * This work is licensed under the terms of the GNU GPL, version 2. | 9 | * This work is licensed under the terms of the GNU GPL, version 2. |
10 | * | 10 | * |
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c index 82acb8dc4aa1..6183a57eb69d 100644 --- a/drivers/video/acornfb.c +++ b/drivers/video/acornfb.c | |||
@@ -66,7 +66,7 @@ | |||
66 | * have. Allow 1% either way on the nominal for TVs. | 66 | * have. Allow 1% either way on the nominal for TVs. |
67 | */ | 67 | */ |
68 | #define NR_MONTYPES 6 | 68 | #define NR_MONTYPES 6 |
69 | static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = { | 69 | static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = { |
70 | { /* TV */ | 70 | { /* TV */ |
71 | .hfmin = 15469, | 71 | .hfmin = 15469, |
72 | .hfmax = 15781, | 72 | .hfmax = 15781, |
@@ -873,7 +873,7 @@ static struct fb_ops acornfb_ops = { | |||
873 | /* | 873 | /* |
874 | * Everything after here is initialisation!!! | 874 | * Everything after here is initialisation!!! |
875 | */ | 875 | */ |
876 | static struct fb_videomode modedb[] __initdata = { | 876 | static struct fb_videomode modedb[] __devinitdata = { |
877 | { /* 320x256 @ 50Hz */ | 877 | { /* 320x256 @ 50Hz */ |
878 | NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, | 878 | NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, |
879 | FB_SYNC_COMP_HIGH_ACT, | 879 | FB_SYNC_COMP_HIGH_ACT, |
@@ -925,8 +925,7 @@ static struct fb_videomode modedb[] __initdata = { | |||
925 | } | 925 | } |
926 | }; | 926 | }; |
927 | 927 | ||
928 | static struct fb_videomode __initdata | 928 | static struct fb_videomode acornfb_default_mode __devinitdata = { |
929 | acornfb_default_mode = { | ||
930 | .name = NULL, | 929 | .name = NULL, |
931 | .refresh = 60, | 930 | .refresh = 60, |
932 | .xres = 640, | 931 | .xres = 640, |
@@ -942,7 +941,7 @@ acornfb_default_mode = { | |||
942 | .vmode = FB_VMODE_NONINTERLACED | 941 | .vmode = FB_VMODE_NONINTERLACED |
943 | }; | 942 | }; |
944 | 943 | ||
945 | static void __init acornfb_init_fbinfo(void) | 944 | static void __devinit acornfb_init_fbinfo(void) |
946 | { | 945 | { |
947 | static int first = 1; | 946 | static int first = 1; |
948 | 947 | ||
@@ -1018,8 +1017,7 @@ static void __init acornfb_init_fbinfo(void) | |||
1018 | * size can optionally be followed by 'M' or 'K' for | 1017 | * size can optionally be followed by 'M' or 'K' for |
1019 | * MB or KB respectively. | 1018 | * MB or KB respectively. |
1020 | */ | 1019 | */ |
1021 | static void __init | 1020 | static void __devinit acornfb_parse_mon(char *opt) |
1022 | acornfb_parse_mon(char *opt) | ||
1023 | { | 1021 | { |
1024 | char *p = opt; | 1022 | char *p = opt; |
1025 | 1023 | ||
@@ -1066,8 +1064,7 @@ bad: | |||
1066 | current_par.montype = -1; | 1064 | current_par.montype = -1; |
1067 | } | 1065 | } |
1068 | 1066 | ||
1069 | static void __init | 1067 | static void __devinit acornfb_parse_montype(char *opt) |
1070 | acornfb_parse_montype(char *opt) | ||
1071 | { | 1068 | { |
1072 | current_par.montype = -2; | 1069 | current_par.montype = -2; |
1073 | 1070 | ||
@@ -1108,8 +1105,7 @@ acornfb_parse_montype(char *opt) | |||
1108 | } | 1105 | } |
1109 | } | 1106 | } |
1110 | 1107 | ||
1111 | static void __init | 1108 | static void __devinit acornfb_parse_dram(char *opt) |
1112 | acornfb_parse_dram(char *opt) | ||
1113 | { | 1109 | { |
1114 | unsigned int size; | 1110 | unsigned int size; |
1115 | 1111 | ||
@@ -1134,15 +1130,14 @@ acornfb_parse_dram(char *opt) | |||
1134 | static struct options { | 1130 | static struct options { |
1135 | char *name; | 1131 | char *name; |
1136 | void (*parse)(char *opt); | 1132 | void (*parse)(char *opt); |
1137 | } opt_table[] __initdata = { | 1133 | } opt_table[] __devinitdata = { |
1138 | { "mon", acornfb_parse_mon }, | 1134 | { "mon", acornfb_parse_mon }, |
1139 | { "montype", acornfb_parse_montype }, | 1135 | { "montype", acornfb_parse_montype }, |
1140 | { "dram", acornfb_parse_dram }, | 1136 | { "dram", acornfb_parse_dram }, |
1141 | { NULL, NULL } | 1137 | { NULL, NULL } |
1142 | }; | 1138 | }; |
1143 | 1139 | ||
1144 | int __init | 1140 | static int __devinit acornfb_setup(char *options) |
1145 | acornfb_setup(char *options) | ||
1146 | { | 1141 | { |
1147 | struct options *optp; | 1142 | struct options *optp; |
1148 | char *opt; | 1143 | char *opt; |
@@ -1179,8 +1174,7 @@ acornfb_setup(char *options) | |||
1179 | * Detect type of monitor connected | 1174 | * Detect type of monitor connected |
1180 | * For now, we just assume SVGA | 1175 | * For now, we just assume SVGA |
1181 | */ | 1176 | */ |
1182 | static int __init | 1177 | static int __devinit acornfb_detect_monitortype(void) |
1183 | acornfb_detect_monitortype(void) | ||
1184 | { | 1178 | { |
1185 | return 4; | 1179 | return 4; |
1186 | } | 1180 | } |
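The acornfb changes above retag helpers and tables from __init/__initdata to __devinit/__devinitdata because they are now reached from the driver's probe path rather than only from boot-time setup; anything referenced after init memory has been discarded must not live in the .init sections. A minimal sketch of that rule, with illustrative names only:

#include <linux/init.h>
#include <linux/platform_device.h>

/* Data used only from a __devinit probe should be __devinitdata, not
 * __initdata: probing can happen after boot, once .init memory is freed.
 */
static int foo_default_timings[] __devinitdata = { 25175, 31500 };

static int __devinit foo_probe(struct platform_device *pdev)
{
	/* safe: the function and the table share the same section lifetime */
	return foo_default_timings[0] ? 0 : -ENODEV;
}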
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c index 5b2b5ef4edba..64e41f5448c4 100644 --- a/drivers/video/atafb.c +++ b/drivers/video/atafb.c | |||
@@ -3117,7 +3117,7 @@ int __init atafb_init(void) | |||
3117 | atafb_ops.fb_setcolreg = &falcon_setcolreg; | 3117 | atafb_ops.fb_setcolreg = &falcon_setcolreg; |
3118 | error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, | 3118 | error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, |
3119 | IRQ_TYPE_PRIO, | 3119 | IRQ_TYPE_PRIO, |
3120 | "framebuffer/modeswitch", | 3120 | "framebuffer:modeswitch", |
3121 | falcon_vbl_switcher); | 3121 | falcon_vbl_switcher); |
3122 | if (error) | 3122 | if (error) |
3123 | return error; | 3123 | return error; |
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index e0c2284924b6..5aac00eb1830 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
@@ -42,9 +42,34 @@ | |||
42 | 42 | ||
43 | #define FBPIXMAPSIZE (1024 * 8) | 43 | #define FBPIXMAPSIZE (1024 * 8) |
44 | 44 | ||
45 | static DEFINE_MUTEX(registration_lock); | ||
45 | struct fb_info *registered_fb[FB_MAX] __read_mostly; | 46 | struct fb_info *registered_fb[FB_MAX] __read_mostly; |
46 | int num_registered_fb __read_mostly; | 47 | int num_registered_fb __read_mostly; |
47 | 48 | ||
49 | static struct fb_info *get_fb_info(unsigned int idx) | ||
50 | { | ||
51 | struct fb_info *fb_info; | ||
52 | |||
53 | if (idx >= FB_MAX) | ||
54 | return ERR_PTR(-ENODEV); | ||
55 | |||
56 | mutex_lock(®istration_lock); | ||
57 | fb_info = registered_fb[idx]; | ||
58 | if (fb_info) | ||
59 | atomic_inc(&fb_info->count); | ||
60 | mutex_unlock(®istration_lock); | ||
61 | |||
62 | return fb_info; | ||
63 | } | ||
64 | |||
65 | static void put_fb_info(struct fb_info *fb_info) | ||
66 | { | ||
67 | if (!atomic_dec_and_test(&fb_info->count)) | ||
68 | return; | ||
69 | if (fb_info->fbops->fb_destroy) | ||
70 | fb_info->fbops->fb_destroy(fb_info); | ||
71 | } | ||
72 | |||
48 | int lock_fb_info(struct fb_info *info) | 73 | int lock_fb_info(struct fb_info *info) |
49 | { | 74 | { |
50 | mutex_lock(&info->lock); | 75 | mutex_lock(&info->lock); |
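The new get_fb_info()/put_fb_info() pair gives each struct fb_info a reference count so a framebuffer can be unregistered while /dev/fbN is still open: the registry lookup takes a reference under registration_lock, and the final put invokes the driver's fb_destroy hook. The same idiom reduced to its essentials, with a generic placeholder object rather than the fbdev API:

#include <linux/atomic.h>
#include <linux/mutex.h>

struct obj {
	atomic_t count;
	void (*destroy)(struct obj *obj);
};

static DEFINE_MUTEX(table_lock);
static struct obj *table[16];

static struct obj *obj_get(unsigned int idx)
{
	struct obj *o;

	mutex_lock(&table_lock);
	o = table[idx];
	if (o)
		atomic_inc(&o->count);	/* pin the object while we hold a pointer */
	mutex_unlock(&table_lock);
	return o;
}

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->count))
		o->destroy(o);		/* last user tears the object down */
}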
@@ -647,6 +672,7 @@ int fb_show_logo(struct fb_info *info, int rotate) { return 0; } | |||
647 | 672 | ||
648 | static void *fb_seq_start(struct seq_file *m, loff_t *pos) | 673 | static void *fb_seq_start(struct seq_file *m, loff_t *pos) |
649 | { | 674 | { |
675 | mutex_lock(®istration_lock); | ||
650 | return (*pos < FB_MAX) ? pos : NULL; | 676 | return (*pos < FB_MAX) ? pos : NULL; |
651 | } | 677 | } |
652 | 678 | ||
@@ -658,6 +684,7 @@ static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos) | |||
658 | 684 | ||
659 | static void fb_seq_stop(struct seq_file *m, void *v) | 685 | static void fb_seq_stop(struct seq_file *m, void *v) |
660 | { | 686 | { |
687 | mutex_unlock(®istration_lock); | ||
661 | } | 688 | } |
662 | 689 | ||
663 | static int fb_seq_show(struct seq_file *m, void *v) | 690 | static int fb_seq_show(struct seq_file *m, void *v) |
@@ -690,13 +717,30 @@ static const struct file_operations fb_proc_fops = { | |||
690 | .release = seq_release, | 717 | .release = seq_release, |
691 | }; | 718 | }; |
692 | 719 | ||
693 | static ssize_t | 720 | /* |
694 | fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | 721 | * We hold a reference to the fb_info in file->private_data, |
722 | * but if the current registered fb has changed, we don't | ||
723 | * actually want to use it. | ||
724 | * | ||
725 | * So look up the fb_info using the inode minor number, | ||
726 | * and just verify it against the reference we have. | ||
727 | */ | ||
728 | static struct fb_info *file_fb_info(struct file *file) | ||
695 | { | 729 | { |
696 | unsigned long p = *ppos; | ||
697 | struct inode *inode = file->f_path.dentry->d_inode; | 730 | struct inode *inode = file->f_path.dentry->d_inode; |
698 | int fbidx = iminor(inode); | 731 | int fbidx = iminor(inode); |
699 | struct fb_info *info = registered_fb[fbidx]; | 732 | struct fb_info *info = registered_fb[fbidx]; |
733 | |||
734 | if (info != file->private_data) | ||
735 | info = NULL; | ||
736 | return info; | ||
737 | } | ||
738 | |||
739 | static ssize_t | ||
740 | fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | ||
741 | { | ||
742 | unsigned long p = *ppos; | ||
743 | struct fb_info *info = file_fb_info(file); | ||
700 | u8 *buffer, *dst; | 744 | u8 *buffer, *dst; |
701 | u8 __iomem *src; | 745 | u8 __iomem *src; |
702 | int c, cnt = 0, err = 0; | 746 | int c, cnt = 0, err = 0; |
@@ -761,9 +805,7 @@ static ssize_t | |||
761 | fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) | 805 | fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) |
762 | { | 806 | { |
763 | unsigned long p = *ppos; | 807 | unsigned long p = *ppos; |
764 | struct inode *inode = file->f_path.dentry->d_inode; | 808 | struct fb_info *info = file_fb_info(file); |
765 | int fbidx = iminor(inode); | ||
766 | struct fb_info *info = registered_fb[fbidx]; | ||
767 | u8 *buffer, *src; | 809 | u8 *buffer, *src; |
768 | u8 __iomem *dst; | 810 | u8 __iomem *dst; |
769 | int c, cnt = 0, err = 0; | 811 | int c, cnt = 0, err = 0; |
@@ -1141,10 +1183,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, | |||
1141 | 1183 | ||
1142 | static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 1184 | static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
1143 | { | 1185 | { |
1144 | struct inode *inode = file->f_path.dentry->d_inode; | 1186 | struct fb_info *info = file_fb_info(file); |
1145 | int fbidx = iminor(inode); | ||
1146 | struct fb_info *info = registered_fb[fbidx]; | ||
1147 | 1187 | ||
1188 | if (!info) | ||
1189 | return -ENODEV; | ||
1148 | return do_fb_ioctl(info, cmd, arg); | 1190 | return do_fb_ioctl(info, cmd, arg); |
1149 | } | 1191 | } |
1150 | 1192 | ||
@@ -1265,12 +1307,13 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd, | |||
1265 | static long fb_compat_ioctl(struct file *file, unsigned int cmd, | 1307 | static long fb_compat_ioctl(struct file *file, unsigned int cmd, |
1266 | unsigned long arg) | 1308 | unsigned long arg) |
1267 | { | 1309 | { |
1268 | struct inode *inode = file->f_path.dentry->d_inode; | 1310 | struct fb_info *info = file_fb_info(file); |
1269 | int fbidx = iminor(inode); | 1311 | struct fb_ops *fb; |
1270 | struct fb_info *info = registered_fb[fbidx]; | ||
1271 | struct fb_ops *fb = info->fbops; | ||
1272 | long ret = -ENOIOCTLCMD; | 1312 | long ret = -ENOIOCTLCMD; |
1273 | 1313 | ||
1314 | if (!info) | ||
1315 | return -ENODEV; | ||
1316 | fb = info->fbops; | ||
1274 | switch(cmd) { | 1317 | switch(cmd) { |
1275 | case FBIOGET_VSCREENINFO: | 1318 | case FBIOGET_VSCREENINFO: |
1276 | case FBIOPUT_VSCREENINFO: | 1319 | case FBIOPUT_VSCREENINFO: |
@@ -1303,16 +1346,18 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd, | |||
1303 | static int | 1346 | static int |
1304 | fb_mmap(struct file *file, struct vm_area_struct * vma) | 1347 | fb_mmap(struct file *file, struct vm_area_struct * vma) |
1305 | { | 1348 | { |
1306 | int fbidx = iminor(file->f_path.dentry->d_inode); | 1349 | struct fb_info *info = file_fb_info(file); |
1307 | struct fb_info *info = registered_fb[fbidx]; | 1350 | struct fb_ops *fb; |
1308 | struct fb_ops *fb = info->fbops; | ||
1309 | unsigned long off; | 1351 | unsigned long off; |
1310 | unsigned long start; | 1352 | unsigned long start; |
1311 | u32 len; | 1353 | u32 len; |
1312 | 1354 | ||
1355 | if (!info) | ||
1356 | return -ENODEV; | ||
1313 | if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) | 1357 | if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) |
1314 | return -EINVAL; | 1358 | return -EINVAL; |
1315 | off = vma->vm_pgoff << PAGE_SHIFT; | 1359 | off = vma->vm_pgoff << PAGE_SHIFT; |
1360 | fb = info->fbops; | ||
1316 | if (!fb) | 1361 | if (!fb) |
1317 | return -ENODEV; | 1362 | return -ENODEV; |
1318 | mutex_lock(&info->mm_lock); | 1363 | mutex_lock(&info->mm_lock); |
@@ -1361,14 +1406,16 @@ __releases(&info->lock) | |||
1361 | struct fb_info *info; | 1406 | struct fb_info *info; |
1362 | int res = 0; | 1407 | int res = 0; |
1363 | 1408 | ||
1364 | if (fbidx >= FB_MAX) | 1409 | info = get_fb_info(fbidx); |
1365 | return -ENODEV; | 1410 | if (!info) { |
1366 | info = registered_fb[fbidx]; | ||
1367 | if (!info) | ||
1368 | request_module("fb%d", fbidx); | 1411 | request_module("fb%d", fbidx); |
1369 | info = registered_fb[fbidx]; | 1412 | info = get_fb_info(fbidx); |
1370 | if (!info) | 1413 | if (!info) |
1371 | return -ENODEV; | 1414 | return -ENODEV; |
1415 | } | ||
1416 | if (IS_ERR(info)) | ||
1417 | return PTR_ERR(info); | ||
1418 | |||
1372 | mutex_lock(&info->lock); | 1419 | mutex_lock(&info->lock); |
1373 | if (!try_module_get(info->fbops->owner)) { | 1420 | if (!try_module_get(info->fbops->owner)) { |
1374 | res = -ENODEV; | 1421 | res = -ENODEV; |
@@ -1386,6 +1433,8 @@ __releases(&info->lock) | |||
1386 | #endif | 1433 | #endif |
1387 | out: | 1434 | out: |
1388 | mutex_unlock(&info->lock); | 1435 | mutex_unlock(&info->lock); |
1436 | if (res) | ||
1437 | put_fb_info(info); | ||
1389 | return res; | 1438 | return res; |
1390 | } | 1439 | } |
1391 | 1440 | ||
@@ -1401,6 +1450,7 @@ __releases(&info->lock) | |||
1401 | info->fbops->fb_release(info,1); | 1450 | info->fbops->fb_release(info,1); |
1402 | module_put(info->fbops->owner); | 1451 | module_put(info->fbops->owner); |
1403 | mutex_unlock(&info->lock); | 1452 | mutex_unlock(&info->lock); |
1453 | put_fb_info(info); | ||
1404 | return 0; | 1454 | return 0; |
1405 | } | 1455 | } |
1406 | 1456 | ||
@@ -1487,8 +1537,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena, | |||
1487 | return false; | 1537 | return false; |
1488 | } | 1538 | } |
1489 | 1539 | ||
1540 | static int do_unregister_framebuffer(struct fb_info *fb_info); | ||
1541 | |||
1490 | #define VGA_FB_PHYS 0xA0000 | 1542 | #define VGA_FB_PHYS 0xA0000 |
1491 | void remove_conflicting_framebuffers(struct apertures_struct *a, | 1543 | static void do_remove_conflicting_framebuffers(struct apertures_struct *a, |
1492 | const char *name, bool primary) | 1544 | const char *name, bool primary) |
1493 | { | 1545 | { |
1494 | int i; | 1546 | int i; |
@@ -1510,43 +1562,32 @@ void remove_conflicting_framebuffers(struct apertures_struct *a, | |||
1510 | printk(KERN_INFO "fb: conflicting fb hw usage " | 1562 | printk(KERN_INFO "fb: conflicting fb hw usage " |
1511 | "%s vs %s - removing generic driver\n", | 1563 | "%s vs %s - removing generic driver\n", |
1512 | name, registered_fb[i]->fix.id); | 1564 | name, registered_fb[i]->fix.id); |
1513 | unregister_framebuffer(registered_fb[i]); | 1565 | do_unregister_framebuffer(registered_fb[i]); |
1514 | } | 1566 | } |
1515 | } | 1567 | } |
1516 | } | 1568 | } |
1517 | EXPORT_SYMBOL(remove_conflicting_framebuffers); | ||
1518 | 1569 | ||
1519 | /** | 1570 | static int do_register_framebuffer(struct fb_info *fb_info) |
1520 | * register_framebuffer - registers a frame buffer device | ||
1521 | * @fb_info: frame buffer info structure | ||
1522 | * | ||
1523 | * Registers a frame buffer device @fb_info. | ||
1524 | * | ||
1525 | * Returns negative errno on error, or zero for success. | ||
1526 | * | ||
1527 | */ | ||
1528 | |||
1529 | int | ||
1530 | register_framebuffer(struct fb_info *fb_info) | ||
1531 | { | 1571 | { |
1532 | int i; | 1572 | int i; |
1533 | struct fb_event event; | 1573 | struct fb_event event; |
1534 | struct fb_videomode mode; | 1574 | struct fb_videomode mode; |
1535 | 1575 | ||
1536 | if (num_registered_fb == FB_MAX) | ||
1537 | return -ENXIO; | ||
1538 | |||
1539 | if (fb_check_foreignness(fb_info)) | 1576 | if (fb_check_foreignness(fb_info)) |
1540 | return -ENOSYS; | 1577 | return -ENOSYS; |
1541 | 1578 | ||
1542 | remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, | 1579 | do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, |
1543 | fb_is_primary_device(fb_info)); | 1580 | fb_is_primary_device(fb_info)); |
1544 | 1581 | ||
1582 | if (num_registered_fb == FB_MAX) | ||
1583 | return -ENXIO; | ||
1584 | |||
1545 | num_registered_fb++; | 1585 | num_registered_fb++; |
1546 | for (i = 0 ; i < FB_MAX; i++) | 1586 | for (i = 0 ; i < FB_MAX; i++) |
1547 | if (!registered_fb[i]) | 1587 | if (!registered_fb[i]) |
1548 | break; | 1588 | break; |
1549 | fb_info->node = i; | 1589 | fb_info->node = i; |
1590 | atomic_set(&fb_info->count, 1); | ||
1550 | mutex_init(&fb_info->lock); | 1591 | mutex_init(&fb_info->lock); |
1551 | mutex_init(&fb_info->mm_lock); | 1592 | mutex_init(&fb_info->mm_lock); |
1552 | 1593 | ||
@@ -1592,36 +1633,14 @@ register_framebuffer(struct fb_info *fb_info) | |||
1592 | return 0; | 1633 | return 0; |
1593 | } | 1634 | } |
1594 | 1635 | ||
1595 | 1636 | static int do_unregister_framebuffer(struct fb_info *fb_info) | |
1596 | /** | ||
1597 | * unregister_framebuffer - releases a frame buffer device | ||
1598 | * @fb_info: frame buffer info structure | ||
1599 | * | ||
1600 | * Unregisters a frame buffer device @fb_info. | ||
1601 | * | ||
1602 | * Returns negative errno on error, or zero for success. | ||
1603 | * | ||
1604 | * This function will also notify the framebuffer console | ||
1605 | * to release the driver. | ||
1606 | * | ||
1607 | * This is meant to be called within a driver's module_exit() | ||
1608 | * function. If this is called outside module_exit(), ensure | ||
1609 | * that the driver implements fb_open() and fb_release() to | ||
1610 | * check that no processes are using the device. | ||
1611 | */ | ||
1612 | |||
1613 | int | ||
1614 | unregister_framebuffer(struct fb_info *fb_info) | ||
1615 | { | 1637 | { |
1616 | struct fb_event event; | 1638 | struct fb_event event; |
1617 | int i, ret = 0; | 1639 | int i, ret = 0; |
1618 | 1640 | ||
1619 | i = fb_info->node; | 1641 | i = fb_info->node; |
1620 | if (!registered_fb[i]) { | 1642 | if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) |
1621 | ret = -EINVAL; | 1643 | return -EINVAL; |
1622 | goto done; | ||
1623 | } | ||
1624 | |||
1625 | 1644 | ||
1626 | if (!lock_fb_info(fb_info)) | 1645 | if (!lock_fb_info(fb_info)) |
1627 | return -ENODEV; | 1646 | return -ENODEV; |
@@ -1629,16 +1648,14 @@ unregister_framebuffer(struct fb_info *fb_info) | |||
1629 | ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); | 1648 | ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); |
1630 | unlock_fb_info(fb_info); | 1649 | unlock_fb_info(fb_info); |
1631 | 1650 | ||
1632 | if (ret) { | 1651 | if (ret) |
1633 | ret = -EINVAL; | 1652 | return -EINVAL; |
1634 | goto done; | ||
1635 | } | ||
1636 | 1653 | ||
1637 | if (fb_info->pixmap.addr && | 1654 | if (fb_info->pixmap.addr && |
1638 | (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) | 1655 | (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) |
1639 | kfree(fb_info->pixmap.addr); | 1656 | kfree(fb_info->pixmap.addr); |
1640 | fb_destroy_modelist(&fb_info->modelist); | 1657 | fb_destroy_modelist(&fb_info->modelist); |
1641 | registered_fb[i]=NULL; | 1658 | registered_fb[i] = NULL; |
1642 | num_registered_fb--; | 1659 | num_registered_fb--; |
1643 | fb_cleanup_device(fb_info); | 1660 | fb_cleanup_device(fb_info); |
1644 | device_destroy(fb_class, MKDEV(FB_MAJOR, i)); | 1661 | device_destroy(fb_class, MKDEV(FB_MAJOR, i)); |
@@ -1646,9 +1663,65 @@ unregister_framebuffer(struct fb_info *fb_info) | |||
1646 | fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); | 1663 | fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); |
1647 | 1664 | ||
1648 | /* this may free fb info */ | 1665 | /* this may free fb info */ |
1649 | if (fb_info->fbops->fb_destroy) | 1666 | put_fb_info(fb_info); |
1650 | fb_info->fbops->fb_destroy(fb_info); | 1667 | return 0; |
1651 | done: | 1668 | } |
1669 | |||
1670 | void remove_conflicting_framebuffers(struct apertures_struct *a, | ||
1671 | const char *name, bool primary) | ||
1672 | { | ||
1673 | mutex_lock(®istration_lock); | ||
1674 | do_remove_conflicting_framebuffers(a, name, primary); | ||
1675 | mutex_unlock(®istration_lock); | ||
1676 | } | ||
1677 | EXPORT_SYMBOL(remove_conflicting_framebuffers); | ||
1678 | |||
1679 | /** | ||
1680 | * register_framebuffer - registers a frame buffer device | ||
1681 | * @fb_info: frame buffer info structure | ||
1682 | * | ||
1683 | * Registers a frame buffer device @fb_info. | ||
1684 | * | ||
1685 | * Returns negative errno on error, or zero for success. | ||
1686 | * | ||
1687 | */ | ||
1688 | int | ||
1689 | register_framebuffer(struct fb_info *fb_info) | ||
1690 | { | ||
1691 | int ret; | ||
1692 | |||
1693 | mutex_lock(®istration_lock); | ||
1694 | ret = do_register_framebuffer(fb_info); | ||
1695 | mutex_unlock(®istration_lock); | ||
1696 | |||
1697 | return ret; | ||
1698 | } | ||
1699 | |||
1700 | /** | ||
1701 | * unregister_framebuffer - releases a frame buffer device | ||
1702 | * @fb_info: frame buffer info structure | ||
1703 | * | ||
1704 | * Unregisters a frame buffer device @fb_info. | ||
1705 | * | ||
1706 | * Returns negative errno on error, or zero for success. | ||
1707 | * | ||
1708 | * This function will also notify the framebuffer console | ||
1709 | * to release the driver. | ||
1710 | * | ||
1711 | * This is meant to be called within a driver's module_exit() | ||
1712 | * function. If this is called outside module_exit(), ensure | ||
1713 | * that the driver implements fb_open() and fb_release() to | ||
1714 | * check that no processes are using the device. | ||
1715 | */ | ||
1716 | int | ||
1717 | unregister_framebuffer(struct fb_info *fb_info) | ||
1718 | { | ||
1719 | int ret; | ||
1720 | |||
1721 | mutex_lock(®istration_lock); | ||
1722 | ret = do_unregister_framebuffer(fb_info); | ||
1723 | mutex_unlock(®istration_lock); | ||
1724 | |||
1652 | return ret; | 1725 | return ret; |
1653 | } | 1726 | } |
1654 | 1727 | ||
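register_framebuffer() and unregister_framebuffer() are now thin wrappers that take registration_lock and call do_register_framebuffer()/do_unregister_framebuffer(), so that remove_conflicting_framebuffers() can unregister a generic driver from inside registration without re-acquiring the mutex. The wrapper shape in isolation, with placeholder foo_* names:

#include <linux/mutex.h>

struct foo;				/* placeholder object */

static DEFINE_MUTEX(registration_lock);

/* Callers of the do_* helpers must already hold registration_lock. */
static int do_register_foo(struct foo *f)   { return 0; }
static int do_unregister_foo(struct foo *f) { return 0; }

int register_foo(struct foo *f)
{
	int ret;

	mutex_lock(&registration_lock);
	ret = do_register_foo(f);	/* may call do_unregister_foo() directly */
	mutex_unlock(&registration_lock);
	return ret;
}

int unregister_foo(struct foo *f)
{
	int ret;

	mutex_lock(&registration_lock);
	ret = do_unregister_foo(f);
	mutex_unlock(&registration_lock);
	return ret;
}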
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 1b0f98bc51b5..022f9eb0b7bf 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -990,6 +990,12 @@ config BCM63XX_WDT | |||
990 | To compile this driver as a loadable module, choose M here. | 990 | To compile this driver as a loadable module, choose M here. |
991 | The module will be called bcm63xx_wdt. | 991 | The module will be called bcm63xx_wdt. |
992 | 992 | ||
993 | config LANTIQ_WDT | ||
994 | tristate "Lantiq SoC watchdog" | ||
995 | depends on LANTIQ | ||
996 | help | ||
997 | Hardware driver for the Lantiq SoC Watchdog Timer. | ||
998 | |||
993 | # PARISC Architecture | 999 | # PARISC Architecture |
994 | 1000 | ||
995 | # POWERPC Architecture | 1001 | # POWERPC Architecture |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 3f8608b922a7..ed26f7094e47 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -123,6 +123,7 @@ obj-$(CONFIG_AR7_WDT) += ar7_wdt.o | |||
123 | obj-$(CONFIG_TXX9_WDT) += txx9wdt.o | 123 | obj-$(CONFIG_TXX9_WDT) += txx9wdt.o |
124 | obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o | 124 | obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o |
125 | octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o | 125 | octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o |
126 | obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o | ||
126 | 127 | ||
127 | # PARISC Architecture | 128 | # PARISC Architecture |
128 | 129 | ||
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c new file mode 100644 index 000000000000..7d82adac1cb2 --- /dev/null +++ b/drivers/watchdog/lantiq_wdt.c | |||
@@ -0,0 +1,261 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License version 2 as published | ||
4 | * by the Free Software Foundation. | ||
5 | * | ||
6 | * Copyright (C) 2010 John Crispin <blogic@openwrt.org> | ||
7 | * Based on EP93xx wdt driver | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/miscdevice.h> | ||
13 | #include <linux/watchdog.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/clk.h> | ||
17 | #include <linux/io.h> | ||
18 | |||
19 | #include <lantiq.h> | ||
20 | |||
21 | /* Section 3.4 of the datasheet | ||
22 | * The password sequence protects the WDT control register from unintended | ||
23 | * write actions, which might cause malfunction of the WDT. | ||
24 | * | ||
25 | * essentially the following two magic passwords need to be written to allow | ||
26 | * IO access to the WDT core | ||
27 | */ | ||
28 | #define LTQ_WDT_PW1 0x00BE0000 | ||
29 | #define LTQ_WDT_PW2 0x00DC0000 | ||
30 | |||
31 | #define LTQ_WDT_CR 0x0 /* watchdog control register */ | ||
32 | #define LTQ_WDT_SR 0x8 /* watchdog status register */ | ||
33 | |||
34 | #define LTQ_WDT_SR_EN (0x1 << 31) /* enable bit */ | ||
35 | #define LTQ_WDT_SR_PWD (0x3 << 26) /* turn on power */ | ||
36 | #define LTQ_WDT_SR_CLKDIV (0x3 << 24) /* turn on clock and set */ | ||
37 | /* divider to 0x40000 */ | ||
38 | #define LTQ_WDT_DIVIDER 0x40000 | ||
39 | #define LTQ_MAX_TIMEOUT ((1 << 16) - 1) /* the reload field is 16 bit */ | ||
40 | |||
41 | static int nowayout = WATCHDOG_NOWAYOUT; | ||
42 | |||
43 | static void __iomem *ltq_wdt_membase; | ||
44 | static unsigned long ltq_io_region_clk_rate; | ||
45 | |||
46 | static unsigned long ltq_wdt_bootstatus; | ||
47 | static unsigned long ltq_wdt_in_use; | ||
48 | static int ltq_wdt_timeout = 30; | ||
49 | static int ltq_wdt_ok_to_close; | ||
50 | |||
51 | static void | ||
52 | ltq_wdt_enable(void) | ||
53 | { | ||
54 | ltq_wdt_timeout = ltq_wdt_timeout * | ||
55 | (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000; | ||
56 | if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT) | ||
57 | ltq_wdt_timeout = LTQ_MAX_TIMEOUT; | ||
58 | |||
59 | /* write the first password magic */ | ||
60 | ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); | ||
61 | /* write the second magic plus the configuration and new timeout */ | ||
62 | ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV | | ||
63 | LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR); | ||
64 | } | ||
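ltq_wdt_enable() converts the timeout from seconds to watchdog ticks: reload = seconds * (io_clock / 0x40000), plus a small 0x1000 safety margin, clamped to the 16-bit reload field. The numbers work out roughly as follows for a hypothetical 133 MHz IO clock; the patch itself does not state the rate, which comes from clk_get_rate() in the probe.

/* Worked example with an assumed 133 MHz IO clock (assumption, not from
 * the patch); mirrors the arithmetic in ltq_wdt_enable() above.
 */
static unsigned long wdt_reload_for(unsigned long seconds)
{
	unsigned long clk_rate = 133000000;
	unsigned long ticks_per_sec = clk_rate / 0x40000;	/* 133000000 / 262144 = 507 */
	unsigned long reload = seconds * ticks_per_sec + 0x1000;

	/* wdt_reload_for(30) == 30 * 507 + 4096 = 19306, well under the
	 * 16-bit limit of 65535; roughly 121 s is the largest usable timeout.
	 */
	return reload > 0xffff ? 0xffff : reload;
}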
65 | |||
66 | static void | ||
67 | ltq_wdt_disable(void) | ||
68 | { | ||
69 | /* write the first password magic */ | ||
70 | ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); | ||
71 | /* write the second password magic with no config | ||
72 | * this turns the watchdog off | ||
73 | */ | ||
74 | ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR); | ||
75 | } | ||
76 | |||
77 | static ssize_t | ||
78 | ltq_wdt_write(struct file *file, const char __user *data, | ||
79 | size_t len, loff_t *ppos) | ||
80 | { | ||
81 | if (len) { | ||
82 | if (!nowayout) { | ||
83 | size_t i; | ||
84 | |||
85 | ltq_wdt_ok_to_close = 0; | ||
86 | for (i = 0; i != len; i++) { | ||
87 | char c; | ||
88 | |||
89 | if (get_user(c, data + i)) | ||
90 | return -EFAULT; | ||
91 | if (c == 'V') | ||
92 | ltq_wdt_ok_to_close = 1; | ||
93 | else | ||
94 | ltq_wdt_ok_to_close = 0; | ||
95 | } | ||
96 | } | ||
97 | ltq_wdt_enable(); | ||
98 | } | ||
99 | |||
100 | return len; | ||
101 | } | ||
102 | |||
103 | static struct watchdog_info ident = { | ||
104 | .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | | ||
105 | WDIOF_CARDRESET, | ||
106 | .identity = "ltq_wdt", | ||
107 | }; | ||
108 | |||
109 | static long | ||
110 | ltq_wdt_ioctl(struct file *file, | ||
111 | unsigned int cmd, unsigned long arg) | ||
112 | { | ||
113 | int ret = -ENOTTY; | ||
114 | |||
115 | switch (cmd) { | ||
116 | case WDIOC_GETSUPPORT: | ||
117 | ret = copy_to_user((struct watchdog_info __user *)arg, &ident, | ||
118 | sizeof(ident)) ? -EFAULT : 0; | ||
119 | break; | ||
120 | |||
121 | case WDIOC_GETBOOTSTATUS: | ||
122 | ret = put_user(ltq_wdt_bootstatus, (int __user *)arg); | ||
123 | break; | ||
124 | |||
125 | case WDIOC_GETSTATUS: | ||
126 | ret = put_user(0, (int __user *)arg); | ||
127 | break; | ||
128 | |||
129 | case WDIOC_SETTIMEOUT: | ||
130 | ret = get_user(ltq_wdt_timeout, (int __user *)arg); | ||
131 | if (!ret) | ||
132 | ltq_wdt_enable(); | ||
133 | /* intentional drop through */ | ||
134 | case WDIOC_GETTIMEOUT: | ||
135 | ret = put_user(ltq_wdt_timeout, (int __user *)arg); | ||
136 | break; | ||
137 | |||
138 | case WDIOC_KEEPALIVE: | ||
139 | ltq_wdt_enable(); | ||
140 | ret = 0; | ||
141 | break; | ||
142 | } | ||
143 | return ret; | ||
144 | } | ||
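From user space the device behaves like any /dev/watchdog implementing WDIOC_SETTIMEOUT, WDIOC_KEEPALIVE and the magic close: writing 'V' before close() lets ltq_wdt_release() disable the timer instead of logging an unexpected close. A sketch of a user-space client, with error handling trimmed:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;
	int fd = open("/dev/watchdog", O_WRONLY);	/* opening arms the watchdog */

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* re-arms with the new timeout */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* ping */
	write(fd, "V", 1);			/* magic close: allow a clean disable */
	close(fd);				/* watchdog is stopped on release */
	return 0;
}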
145 | |||
146 | static int | ||
147 | ltq_wdt_open(struct inode *inode, struct file *file) | ||
148 | { | ||
149 | if (test_and_set_bit(0, &ltq_wdt_in_use)) | ||
150 | return -EBUSY; | ||
151 | ltq_wdt_in_use = 1; | ||
152 | ltq_wdt_enable(); | ||
153 | |||
154 | return nonseekable_open(inode, file); | ||
155 | } | ||
156 | |||
157 | static int | ||
158 | ltq_wdt_release(struct inode *inode, struct file *file) | ||
159 | { | ||
160 | if (ltq_wdt_ok_to_close) | ||
161 | ltq_wdt_disable(); | ||
162 | else | ||
163 | pr_err("ltq_wdt: watchdog closed without warning\n"); | ||
164 | ltq_wdt_ok_to_close = 0; | ||
165 | clear_bit(0, &ltq_wdt_in_use); | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | static const struct file_operations ltq_wdt_fops = { | ||
171 | .owner = THIS_MODULE, | ||
172 | .write = ltq_wdt_write, | ||
173 | .unlocked_ioctl = ltq_wdt_ioctl, | ||
174 | .open = ltq_wdt_open, | ||
175 | .release = ltq_wdt_release, | ||
176 | .llseek = no_llseek, | ||
177 | }; | ||
178 | |||
179 | static struct miscdevice ltq_wdt_miscdev = { | ||
180 | .minor = WATCHDOG_MINOR, | ||
181 | .name = "watchdog", | ||
182 | .fops = &ltq_wdt_fops, | ||
183 | }; | ||
184 | |||
185 | static int __init | ||
186 | ltq_wdt_probe(struct platform_device *pdev) | ||
187 | { | ||
188 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
189 | struct clk *clk; | ||
190 | |||
191 | if (!res) { | ||
192 | dev_err(&pdev->dev, "cannot obtain I/O memory region"); | ||
193 | return -ENOENT; | ||
194 | } | ||
195 | res = devm_request_mem_region(&pdev->dev, res->start, | ||
196 | resource_size(res), dev_name(&pdev->dev)); | ||
197 | if (!res) { | ||
198 | dev_err(&pdev->dev, "cannot request I/O memory region"); | ||
199 | return -EBUSY; | ||
200 | } | ||
201 | ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start, | ||
202 | resource_size(res)); | ||
203 | if (!ltq_wdt_membase) { | ||
204 | dev_err(&pdev->dev, "cannot remap I/O memory region\n"); | ||
205 | return -ENOMEM; | ||
206 | } | ||
207 | |||
208 | /* we do not need to enable the clock as it is always running */ | ||
209 | clk = clk_get(&pdev->dev, "io"); | ||
210 | WARN_ON(!clk); | ||
211 | ltq_io_region_clk_rate = clk_get_rate(clk); | ||
212 | clk_put(clk); | ||
213 | |||
214 | if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST) | ||
215 | ltq_wdt_bootstatus = WDIOF_CARDRESET; | ||
216 | |||
217 | return misc_register(&ltq_wdt_miscdev); | ||
218 | } | ||
219 | |||
220 | static int __devexit | ||
221 | ltq_wdt_remove(struct platform_device *pdev) | ||
222 | { | ||
223 | misc_deregister(&ltq_wdt_miscdev); | ||
224 | |||
225 | if (ltq_wdt_membase) | ||
226 | iounmap(ltq_wdt_membase); | ||
227 | |||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | |||
232 | static struct platform_driver ltq_wdt_driver = { | ||
233 | .remove = __devexit_p(ltq_wdt_remove), | ||
234 | .driver = { | ||
235 | .name = "ltq_wdt", | ||
236 | .owner = THIS_MODULE, | ||
237 | }, | ||
238 | }; | ||
239 | |||
240 | static int __init | ||
241 | init_ltq_wdt(void) | ||
242 | { | ||
243 | return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe); | ||
244 | } | ||
245 | |||
246 | static void __exit | ||
247 | exit_ltq_wdt(void) | ||
248 | { | ||
249 | return platform_driver_unregister(&ltq_wdt_driver); | ||
250 | } | ||
251 | |||
252 | module_init(init_ltq_wdt); | ||
253 | module_exit(exit_ltq_wdt); | ||
254 | |||
255 | module_param(nowayout, int, 0); | ||
256 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); | ||
257 | |||
258 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); | ||
259 | MODULE_DESCRIPTION("Lantiq SoC Watchdog"); | ||
260 | MODULE_LICENSE("GPL"); | ||
261 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index 528bceb220fd..eed5436ffb51 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c | |||
@@ -185,17 +185,20 @@ static struct miscdevice mpc8xxx_wdt_miscdev = { | |||
185 | .fops = &mpc8xxx_wdt_fops, | 185 | .fops = &mpc8xxx_wdt_fops, |
186 | }; | 186 | }; |
187 | 187 | ||
188 | static const struct of_device_id mpc8xxx_wdt_match[]; | ||
188 | static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) | 189 | static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) |
189 | { | 190 | { |
190 | int ret; | 191 | int ret; |
192 | const struct of_device_id *match; | ||
191 | struct device_node *np = ofdev->dev.of_node; | 193 | struct device_node *np = ofdev->dev.of_node; |
192 | struct mpc8xxx_wdt_type *wdt_type; | 194 | struct mpc8xxx_wdt_type *wdt_type; |
193 | u32 freq = fsl_get_sys_freq(); | 195 | u32 freq = fsl_get_sys_freq(); |
194 | bool enabled; | 196 | bool enabled; |
195 | 197 | ||
196 | if (!ofdev->dev.of_match) | 198 | match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev); |
199 | if (!match) | ||
197 | return -EINVAL; | 200 | return -EINVAL; |
198 | wdt_type = ofdev->dev.of_match->data; | 201 | wdt_type = match->data; |
199 | 202 | ||
200 | if (!freq || freq == -1) | 203 | if (!freq || freq == -1) |
201 | return -EINVAL; | 204 | return -EINVAL; |
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c index 5ec5ac1f7878..1479dc4d6129 100644 --- a/drivers/watchdog/mtx-1_wdt.c +++ b/drivers/watchdog/mtx-1_wdt.c | |||
@@ -66,6 +66,7 @@ static struct { | |||
66 | int default_ticks; | 66 | int default_ticks; |
67 | unsigned long inuse; | 67 | unsigned long inuse; |
68 | unsigned gpio; | 68 | unsigned gpio; |
69 | int gstate; | ||
69 | } mtx1_wdt_device; | 70 | } mtx1_wdt_device; |
70 | 71 | ||
71 | static void mtx1_wdt_trigger(unsigned long unused) | 72 | static void mtx1_wdt_trigger(unsigned long unused) |
@@ -75,13 +76,13 @@ static void mtx1_wdt_trigger(unsigned long unused) | |||
75 | spin_lock(&mtx1_wdt_device.lock); | 76 | spin_lock(&mtx1_wdt_device.lock); |
76 | if (mtx1_wdt_device.running) | 77 | if (mtx1_wdt_device.running) |
77 | ticks--; | 78 | ticks--; |
78 | /* | 79 | |
79 | * toggle GPIO2_15 | 80 | /* toggle wdt gpio */ |
80 | */ | 81 | mtx1_wdt_device.gstate = ~mtx1_wdt_device.gstate; |
81 | tmp = au_readl(GPIO2_DIR); | 82 | if (mtx1_wdt_device.gstate) |
82 | tmp = (tmp & ~(1 << mtx1_wdt_device.gpio)) | | 83 | gpio_direction_output(mtx1_wdt_device.gpio, 1); |
83 | ((~tmp) & (1 << mtx1_wdt_device.gpio)); | 84 | else |
84 | au_writel(tmp, GPIO2_DIR); | 85 | gpio_direction_input(mtx1_wdt_device.gpio); |
85 | 86 | ||
86 | if (mtx1_wdt_device.queue && ticks) | 87 | if (mtx1_wdt_device.queue && ticks) |
87 | mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); | 88 | mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); |
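The mtx-1 change stops poking the Au1000 GPIO2_DIR register directly and instead toggles the heartbeat line through gpiolib, alternating between driving the pin high and releasing it as an input; the new gstate field remembers which half of the cycle the timer is in. Reduced to the toggle itself, with the gpio number and timer setup assumed to exist as in the driver above:

#include <linux/gpio.h>

/* Heartbeat toggle sketch using gpiolib; 'gpio' and '*gstate' are assumed
 * to be initialised elsewhere, as mtx1_wdt_start() does in the driver.
 */
static void heartbeat_toggle(unsigned gpio, int *gstate)
{
	*gstate = !*gstate;
	if (*gstate)
		gpio_direction_output(gpio, 1);	/* drive the line high */
	else
		gpio_direction_input(gpio);	/* release it; external circuitry sets the level */
}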
@@ -103,7 +104,8 @@ static void mtx1_wdt_start(void) | |||
103 | spin_lock_irqsave(&mtx1_wdt_device.lock, flags); | 104 | spin_lock_irqsave(&mtx1_wdt_device.lock, flags); |
104 | if (!mtx1_wdt_device.queue) { | 105 | if (!mtx1_wdt_device.queue) { |
105 | mtx1_wdt_device.queue = 1; | 106 | mtx1_wdt_device.queue = 1; |
106 | gpio_set_value(mtx1_wdt_device.gpio, 1); | 107 | mtx1_wdt_device.gstate = 1; |
108 | gpio_direction_output(mtx1_wdt_device.gpio, 1); | ||
107 | mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); | 109 | mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); |
108 | } | 110 | } |
109 | mtx1_wdt_device.running++; | 111 | mtx1_wdt_device.running++; |
@@ -117,7 +119,8 @@ static int mtx1_wdt_stop(void) | |||
117 | spin_lock_irqsave(&mtx1_wdt_device.lock, flags); | 119 | spin_lock_irqsave(&mtx1_wdt_device.lock, flags); |
118 | if (mtx1_wdt_device.queue) { | 120 | if (mtx1_wdt_device.queue) { |
119 | mtx1_wdt_device.queue = 0; | 121 | mtx1_wdt_device.queue = 0; |
120 | gpio_set_value(mtx1_wdt_device.gpio, 0); | 122 | mtx1_wdt_device.gstate = 0; |
123 | gpio_direction_output(mtx1_wdt_device.gpio, 0); | ||
121 | } | 124 | } |
122 | ticks = mtx1_wdt_device.default_ticks; | 125 | ticks = mtx1_wdt_device.default_ticks; |
123 | spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags); | 126 | spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags); |
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index f420f1ff7f13..4781f806701d 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -4,21 +4,21 @@ obj-y += xenbus/ | |||
4 | nostackp := $(call cc-option, -fno-stack-protector) | 4 | nostackp := $(call cc-option, -fno-stack-protector) |
5 | CFLAGS_features.o := $(nostackp) | 5 | CFLAGS_features.o := $(nostackp) |
6 | 6 | ||
7 | obj-$(CONFIG_BLOCK) += biomerge.o | 7 | obj-$(CONFIG_BLOCK) += biomerge.o |
8 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o | 8 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o |
9 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o | 9 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o |
10 | obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o | 10 | obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o |
11 | obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o | 11 | obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o |
12 | obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o | 12 | obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o |
13 | obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o | 13 | obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o |
14 | obj-$(CONFIG_XENFS) += xenfs/ | 14 | obj-$(CONFIG_XENFS) += xenfs/ |
15 | obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o | 15 | obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o |
16 | obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o | 16 | obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o |
17 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o | 17 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o |
18 | obj-$(CONFIG_XEN_DOM0) += pci.o | 18 | obj-$(CONFIG_XEN_DOM0) += pci.o |
19 | 19 | ||
20 | xen-evtchn-y := evtchn.o | 20 | xen-evtchn-y := evtchn.o |
21 | xen-gntdev-y := gntdev.o | 21 | xen-gntdev-y := gntdev.o |
22 | xen-gntalloc-y := gntalloc.o | 22 | xen-gntalloc-y := gntalloc.o |
23 | 23 | ||
24 | xen-platform-pci-y := platform-pci.o | 24 | xen-platform-pci-y := platform-pci.o |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 043af8ad6b60..f54290baa3db 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -114,7 +114,6 @@ static void __balloon_append(struct page *page) | |||
114 | if (PageHighMem(page)) { | 114 | if (PageHighMem(page)) { |
115 | list_add_tail(&page->lru, &ballooned_pages); | 115 | list_add_tail(&page->lru, &ballooned_pages); |
116 | balloon_stats.balloon_high++; | 116 | balloon_stats.balloon_high++; |
117 | dec_totalhigh_pages(); | ||
118 | } else { | 117 | } else { |
119 | list_add(&page->lru, &ballooned_pages); | 118 | list_add(&page->lru, &ballooned_pages); |
120 | balloon_stats.balloon_low++; | 119 | balloon_stats.balloon_low++; |
@@ -124,6 +123,8 @@ static void __balloon_append(struct page *page) | |||
124 | static void balloon_append(struct page *page) | 123 | static void balloon_append(struct page *page) |
125 | { | 124 | { |
126 | __balloon_append(page); | 125 | __balloon_append(page); |
126 | if (PageHighMem(page)) | ||
127 | dec_totalhigh_pages(); | ||
127 | totalram_pages--; | 128 | totalram_pages--; |
128 | } | 129 | } |
129 | 130 | ||
@@ -193,7 +194,7 @@ static enum bp_state update_schedule(enum bp_state state) | |||
193 | return BP_EAGAIN; | 194 | return BP_EAGAIN; |
194 | } | 195 | } |
195 | 196 | ||
196 | static unsigned long current_target(void) | 197 | static long current_credit(void) |
197 | { | 198 | { |
198 | unsigned long target = balloon_stats.target_pages; | 199 | unsigned long target = balloon_stats.target_pages; |
199 | 200 | ||
@@ -202,7 +203,7 @@ static unsigned long current_target(void) | |||
202 | balloon_stats.balloon_low + | 203 | balloon_stats.balloon_low + |
203 | balloon_stats.balloon_high); | 204 | balloon_stats.balloon_high); |
204 | 205 | ||
205 | return target; | 206 | return target - balloon_stats.current_pages; |
206 | } | 207 | } |
207 | 208 | ||
208 | static enum bp_state increase_reservation(unsigned long nr_pages) | 209 | static enum bp_state increase_reservation(unsigned long nr_pages) |
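current_credit() folds the old "compute target, then subtract" pattern into one signed value: a positive credit means the balloon should populate pages for the domain (increase_reservation), a negative credit means it should inflate (decrease_reservation), and zero means there is nothing to do, which is why callers can now simply test if (current_credit()). A toy illustration of the sign convention, with invented numbers:

/* Sign-convention sketch; the real values come from balloon_stats. */
static long credit(unsigned long target_pages, unsigned long current_pages)
{
	return (long)target_pages - (long)current_pages;
}

/* credit(1000, 800)  ==  200  -> populate 200 more pages for the guest
 * credit(1000, 1200) == -200  -> balloon 200 pages back to the hypervisor
 * credit(1000, 1000) ==    0  -> balanced, no work scheduled
 */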
@@ -246,7 +247,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages) | |||
246 | set_phys_to_machine(pfn, frame_list[i]); | 247 | set_phys_to_machine(pfn, frame_list[i]); |
247 | 248 | ||
248 | /* Link back into the page tables if not highmem. */ | 249 | /* Link back into the page tables if not highmem. */ |
249 | if (!xen_hvm_domain() && pfn < max_low_pfn) { | 250 | if (xen_pv_domain() && !PageHighMem(page)) { |
250 | int ret; | 251 | int ret; |
251 | ret = HYPERVISOR_update_va_mapping( | 252 | ret = HYPERVISOR_update_va_mapping( |
252 | (unsigned long)__va(pfn << PAGE_SHIFT), | 253 | (unsigned long)__va(pfn << PAGE_SHIFT), |
@@ -293,7 +294,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
293 | 294 | ||
294 | scrub_page(page); | 295 | scrub_page(page); |
295 | 296 | ||
296 | if (!xen_hvm_domain() && !PageHighMem(page)) { | 297 | if (xen_pv_domain() && !PageHighMem(page)) { |
297 | ret = HYPERVISOR_update_va_mapping( | 298 | ret = HYPERVISOR_update_va_mapping( |
298 | (unsigned long)__va(pfn << PAGE_SHIFT), | 299 | (unsigned long)__va(pfn << PAGE_SHIFT), |
299 | __pte_ma(0), 0); | 300 | __pte_ma(0), 0); |
@@ -337,7 +338,7 @@ static void balloon_process(struct work_struct *work) | |||
337 | mutex_lock(&balloon_mutex); | 338 | mutex_lock(&balloon_mutex); |
338 | 339 | ||
339 | do { | 340 | do { |
340 | credit = current_target() - balloon_stats.current_pages; | 341 | credit = current_credit(); |
341 | 342 | ||
342 | if (credit > 0) | 343 | if (credit > 0) |
343 | state = increase_reservation(credit); | 344 | state = increase_reservation(credit); |
@@ -420,7 +421,7 @@ void free_xenballooned_pages(int nr_pages, struct page** pages) | |||
420 | } | 421 | } |
421 | 422 | ||
422 | /* The balloon may be too large now. Shrink it if needed. */ | 423 | /* The balloon may be too large now. Shrink it if needed. */ |
423 | if (current_target() != balloon_stats.current_pages) | 424 | if (current_credit()) |
424 | schedule_delayed_work(&balloon_worker, 0); | 425 | schedule_delayed_work(&balloon_worker, 0); |
425 | 426 | ||
426 | mutex_unlock(&balloon_mutex); | 427 | mutex_unlock(&balloon_mutex); |
@@ -429,7 +430,7 @@ EXPORT_SYMBOL(free_xenballooned_pages); | |||
429 | 430 | ||
430 | static int __init balloon_init(void) | 431 | static int __init balloon_init(void) |
431 | { | 432 | { |
432 | unsigned long pfn, nr_pages, extra_pfn_end; | 433 | unsigned long pfn, extra_pfn_end; |
433 | struct page *page; | 434 | struct page *page; |
434 | 435 | ||
435 | if (!xen_domain()) | 436 | if (!xen_domain()) |
@@ -437,11 +438,7 @@ static int __init balloon_init(void) | |||
437 | 438 | ||
438 | pr_info("xen/balloon: Initialising balloon driver.\n"); | 439 | pr_info("xen/balloon: Initialising balloon driver.\n"); |
439 | 440 | ||
440 | if (xen_pv_domain()) | 441 | balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn; |
441 | nr_pages = xen_start_info->nr_pages; | ||
442 | else | ||
443 | nr_pages = max_pfn; | ||
444 | balloon_stats.current_pages = min(nr_pages, max_pfn); | ||
445 | balloon_stats.target_pages = balloon_stats.current_pages; | 442 | balloon_stats.target_pages = balloon_stats.current_pages; |
446 | balloon_stats.balloon_low = 0; | 443 | balloon_stats.balloon_low = 0; |
447 | balloon_stats.balloon_high = 0; | 444 | balloon_stats.balloon_high = 0; |
@@ -466,7 +463,7 @@ static int __init balloon_init(void) | |||
466 | pfn < extra_pfn_end; | 463 | pfn < extra_pfn_end; |
467 | pfn++) { | 464 | pfn++) { |
468 | page = pfn_to_page(pfn); | 465 | page = pfn_to_page(pfn); |
469 | /* totalram_pages doesn't include the boot-time | 466 | /* totalram_pages and totalhigh_pages do not include the boot-time |
470 | balloon extension, so don't subtract from it. */ | 467 | balloon extension, so don't subtract from it. */ |
471 | __balloon_append(page); | 468 | __balloon_append(page); |
472 | } | 469 | } |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 33167b43ac7e..3ff822b48145 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -101,6 +101,7 @@ struct irq_info | |||
101 | unsigned short gsi; | 101 | unsigned short gsi; |
102 | unsigned char vector; | 102 | unsigned char vector; |
103 | unsigned char flags; | 103 | unsigned char flags; |
104 | uint16_t domid; | ||
104 | } pirq; | 105 | } pirq; |
105 | } u; | 106 | } u; |
106 | }; | 107 | }; |
@@ -118,6 +119,8 @@ static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG], | |||
118 | static struct irq_chip xen_dynamic_chip; | 119 | static struct irq_chip xen_dynamic_chip; |
119 | static struct irq_chip xen_percpu_chip; | 120 | static struct irq_chip xen_percpu_chip; |
120 | static struct irq_chip xen_pirq_chip; | 121 | static struct irq_chip xen_pirq_chip; |
122 | static void enable_dynirq(struct irq_data *data); | ||
123 | static void disable_dynirq(struct irq_data *data); | ||
121 | 124 | ||
122 | /* Get info for IRQ */ | 125 | /* Get info for IRQ */ |
123 | static struct irq_info *info_for_irq(unsigned irq) | 126 | static struct irq_info *info_for_irq(unsigned irq) |
@@ -184,6 +187,7 @@ static void xen_irq_info_pirq_init(unsigned irq, | |||
184 | unsigned short pirq, | 187 | unsigned short pirq, |
185 | unsigned short gsi, | 188 | unsigned short gsi, |
186 | unsigned short vector, | 189 | unsigned short vector, |
190 | uint16_t domid, | ||
187 | unsigned char flags) | 191 | unsigned char flags) |
188 | { | 192 | { |
189 | struct irq_info *info = info_for_irq(irq); | 193 | struct irq_info *info = info_for_irq(irq); |
@@ -193,6 +197,7 @@ static void xen_irq_info_pirq_init(unsigned irq, | |||
193 | info->u.pirq.pirq = pirq; | 197 | info->u.pirq.pirq = pirq; |
194 | info->u.pirq.gsi = gsi; | 198 | info->u.pirq.gsi = gsi; |
195 | info->u.pirq.vector = vector; | 199 | info->u.pirq.vector = vector; |
200 | info->u.pirq.domid = domid; | ||
196 | info->u.pirq.flags = flags; | 201 | info->u.pirq.flags = flags; |
197 | } | 202 | } |
198 | 203 | ||
@@ -473,16 +478,6 @@ static void xen_free_irq(unsigned irq) | |||
473 | irq_free_desc(irq); | 478 | irq_free_desc(irq); |
474 | } | 479 | } |
475 | 480 | ||
476 | static void pirq_unmask_notify(int irq) | ||
477 | { | ||
478 | struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) }; | ||
479 | |||
480 | if (unlikely(pirq_needs_eoi(irq))) { | ||
481 | int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); | ||
482 | WARN_ON(rc); | ||
483 | } | ||
484 | } | ||
485 | |||
486 | static void pirq_query_unmask(int irq) | 481 | static void pirq_query_unmask(int irq) |
487 | { | 482 | { |
488 | struct physdev_irq_status_query irq_status; | 483 | struct physdev_irq_status_query irq_status; |
@@ -506,6 +501,29 @@ static bool probing_irq(int irq) | |||
506 | return desc && desc->action == NULL; | 501 | return desc && desc->action == NULL; |
507 | } | 502 | } |
508 | 503 | ||
504 | static void eoi_pirq(struct irq_data *data) | ||
505 | { | ||
506 | int evtchn = evtchn_from_irq(data->irq); | ||
507 | struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; | ||
508 | int rc = 0; | ||
509 | |||
510 | irq_move_irq(data); | ||
511 | |||
512 | if (VALID_EVTCHN(evtchn)) | ||
513 | clear_evtchn(evtchn); | ||
514 | |||
515 | if (pirq_needs_eoi(data->irq)) { | ||
516 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); | ||
517 | WARN_ON(rc); | ||
518 | } | ||
519 | } | ||
520 | |||
521 | static void mask_ack_pirq(struct irq_data *data) | ||
522 | { | ||
523 | disable_dynirq(data); | ||
524 | eoi_pirq(data); | ||
525 | } | ||
526 | |||
509 | static unsigned int __startup_pirq(unsigned int irq) | 527 | static unsigned int __startup_pirq(unsigned int irq) |
510 | { | 528 | { |
511 | struct evtchn_bind_pirq bind_pirq; | 529 | struct evtchn_bind_pirq bind_pirq; |
@@ -539,7 +557,7 @@ static unsigned int __startup_pirq(unsigned int irq) | |||
539 | 557 | ||
540 | out: | 558 | out: |
541 | unmask_evtchn(evtchn); | 559 | unmask_evtchn(evtchn); |
542 | pirq_unmask_notify(irq); | 560 | eoi_pirq(irq_get_irq_data(irq)); |
543 | 561 | ||
544 | return 0; | 562 | return 0; |
545 | } | 563 | } |
@@ -579,18 +597,7 @@ static void enable_pirq(struct irq_data *data) | |||
579 | 597 | ||
580 | static void disable_pirq(struct irq_data *data) | 598 | static void disable_pirq(struct irq_data *data) |
581 | { | 599 | { |
582 | } | 600 | disable_dynirq(data); |
583 | |||
584 | static void ack_pirq(struct irq_data *data) | ||
585 | { | ||
586 | int evtchn = evtchn_from_irq(data->irq); | ||
587 | |||
588 | irq_move_irq(data); | ||
589 | |||
590 | if (VALID_EVTCHN(evtchn)) { | ||
591 | mask_evtchn(evtchn); | ||
592 | clear_evtchn(evtchn); | ||
593 | } | ||
594 | } | 601 | } |
595 | 602 | ||
596 | static int find_irq_by_gsi(unsigned gsi) | 603 | static int find_irq_by_gsi(unsigned gsi) |
@@ -639,9 +646,6 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
639 | if (irq < 0) | 646 | if (irq < 0) |
640 | goto out; | 647 | goto out; |
641 | 648 | ||
642 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, | ||
643 | name); | ||
644 | |||
645 | irq_op.irq = irq; | 649 | irq_op.irq = irq; |
646 | irq_op.vector = 0; | 650 | irq_op.vector = 0; |
647 | 651 | ||
@@ -655,9 +659,35 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
655 | goto out; | 659 | goto out; |
656 | } | 660 | } |
657 | 661 | ||
658 | xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, | 662 | xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF, |
659 | shareable ? PIRQ_SHAREABLE : 0); | 663 | shareable ? PIRQ_SHAREABLE : 0); |
660 | 664 | ||
665 | pirq_query_unmask(irq); | ||
666 | /* We try to use the handler with the appropriate semantic for the | ||
667 | * type of interrupt: if the interrupt doesn't need an eoi | ||
668 | * (pirq_needs_eoi returns false), we treat it like an edge | ||
669 | * triggered interrupt so we use handle_edge_irq. | ||
670 | * As a matter of fact this only happens when the corresponding | ||
671 | * physical interrupt is edge triggered or an msi. | ||
672 | * | ||
673 | * On the other hand if the interrupt needs an eoi (pirq_needs_eoi | ||
674 | * returns true) we treat it like a level triggered interrupt so we | ||
675 | * use handle_fasteoi_irq like the native code does for this kind of | ||
676 | * interrupts. | ||
677 | * Depending on the Xen version, pirq_needs_eoi might return true | ||
678 | * not only for level triggered interrupts but for edge triggered | ||
679 | * interrupts too. In any case Xen always honors the eoi mechanism, | ||
680 | * not injecting any more pirqs of the same kind if the first one | ||
681 | * hasn't received an eoi yet. Therefore using the fasteoi handler | ||
682 | * is the right choice either way. | ||
683 | */ | ||
684 | if (pirq_needs_eoi(irq)) | ||
685 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, | ||
686 | handle_fasteoi_irq, name); | ||
687 | else | ||
688 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, | ||
689 | handle_edge_irq, name); | ||
690 | |||
661 | out: | 691 | out: |
662 | spin_unlock(&irq_mapping_update_lock); | 692 | spin_unlock(&irq_mapping_update_lock); |
663 | 693 | ||
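The handler choice above follows the generic-IRQ contract: handle_fasteoi_irq expects the chip to provide an irq_eoi callback and invokes it once per interrupt, while handle_edge_irq acks early through irq_ack and lets further edges be latched during handling; selecting between them from pirq_needs_eoi() keeps pirqs on the semantics Xen actually enforces. A minimal, purely illustrative chip/handler pairing (this is not the real xen_pirq_chip):

#include <linux/irq.h>

static void demo_eoi(struct irq_data *data) { /* tell the controller we are done */ }
static void demo_ack(struct irq_data *data) { /* clear the latched edge */ }

static struct irq_chip demo_chip = {
	.name		= "demo",
	.irq_eoi	= demo_eoi,	/* consumed by handle_fasteoi_irq */
	.irq_ack	= demo_ack,	/* consumed by handle_edge_irq */
};

static void demo_set_handler(unsigned int irq, bool level_like)
{
	if (level_like)
		irq_set_chip_and_handler_name(irq, &demo_chip,
					      handle_fasteoi_irq, "demo-fasteoi");
	else
		irq_set_chip_and_handler_name(irq, &demo_chip,
					      handle_edge_irq, "demo-edge");
}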
@@ -680,7 +710,8 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) | |||
680 | } | 710 | } |
681 | 711 | ||
682 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | 712 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
683 | int pirq, int vector, const char *name) | 713 | int pirq, int vector, const char *name, |
714 | domid_t domid) | ||
684 | { | 715 | { |
685 | int irq, ret; | 716 | int irq, ret; |
686 | 717 | ||
@@ -690,10 +721,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | |||
690 | if (irq == -1) | 721 | if (irq == -1) |
691 | goto out; | 722 | goto out; |
692 | 723 | ||
693 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, | 724 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, |
694 | name); | 725 | name); |
695 | 726 | ||
696 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); | 727 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0); |
697 | ret = irq_set_msi_desc(irq, msidesc); | 728 | ret = irq_set_msi_desc(irq, msidesc); |
698 | if (ret < 0) | 729 | if (ret < 0) |
699 | goto error_irq; | 730 | goto error_irq; |
@@ -722,9 +753,16 @@ int xen_destroy_irq(int irq) | |||
722 | 753 | ||
723 | if (xen_initial_domain()) { | 754 | if (xen_initial_domain()) { |
724 | unmap_irq.pirq = info->u.pirq.pirq; | 755 | unmap_irq.pirq = info->u.pirq.pirq; |
725 | unmap_irq.domid = DOMID_SELF; | 756 | unmap_irq.domid = info->u.pirq.domid; |
726 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); | 757 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); |
727 | if (rc) { | 758 | /* If another domain quits without making the pci_disable_msix |
759 | * call, the Xen hypervisor takes care of freeing the PIRQs | ||
760 | * (free_domain_pirqs). | ||
761 | */ | ||
762 | if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)) | ||
763 | printk(KERN_INFO "domain %d does not have %d anymore\n", | ||
764 | info->u.pirq.domid, info->u.pirq.pirq); | ||
765 | else if (rc) { | ||
728 | printk(KERN_WARNING "unmap irq failed %d\n", rc); | 766 | printk(KERN_WARNING "unmap irq failed %d\n", rc); |
729 | goto out; | 767 | goto out; |
730 | } | 768 | } |
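A hedged sketch of the error triage this hunk adds: when the unmapped PIRQ belongs to a foreign domain, -ESRCH only means that domain has already exited and Xen freed its PIRQs, so it is logged rather than treated as a failure. The helper below is illustrative; the DOMID_SELF value is taken from the Xen public headers.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define DOMID_SELF 0x7FF0U  /* per Xen's public headers */

    /* -ESRCH for a foreign domain is benign (the domain is gone and the
     * hypervisor already freed its PIRQs); anything else non-zero is still
     * a real unmap failure. */
    static bool unmap_pirq_failed(int rc, unsigned int domid)
    {
        if (rc == -ESRCH && domid != DOMID_SELF) {
            printf("info: domain %u already released its pirq\n", domid);
            return false;
        }
        return rc != 0;
    }

    int main(void)
    {
        printf("%d\n", unmap_pirq_failed(-ESRCH, 5));      /* benign: 0 */
        printf("%d\n", unmap_pirq_failed(-EINVAL, 5));     /* failure: 1 */
        printf("%d\n", unmap_pirq_failed(0, DOMID_SELF));  /* success: 0 */
        return 0;
    }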
@@ -759,6 +797,12 @@ out: | |||
759 | return irq; | 797 | return irq; |
760 | } | 798 | } |
761 | 799 | ||
800 | |||
801 | int xen_pirq_from_irq(unsigned irq) | ||
802 | { | ||
803 | return pirq_from_irq(irq); | ||
804 | } | ||
805 | EXPORT_SYMBOL_GPL(xen_pirq_from_irq); | ||
762 | int bind_evtchn_to_irq(unsigned int evtchn) | 806 | int bind_evtchn_to_irq(unsigned int evtchn) |
763 | { | 807 | { |
764 | int irq; | 808 | int irq; |
@@ -773,7 +817,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
773 | goto out; | 817 | goto out; |
774 | 818 | ||
775 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, | 819 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, |
776 | handle_fasteoi_irq, "event"); | 820 | handle_edge_irq, "event"); |
777 | 821 | ||
778 | xen_irq_info_evtchn_init(irq, evtchn); | 822 | xen_irq_info_evtchn_init(irq, evtchn); |
779 | } | 823 | } |
@@ -1179,9 +1223,6 @@ static void __xen_evtchn_do_upcall(void) | |||
1179 | port = (word_idx * BITS_PER_LONG) + bit_idx; | 1223 | port = (word_idx * BITS_PER_LONG) + bit_idx; |
1180 | irq = evtchn_to_irq[port]; | 1224 | irq = evtchn_to_irq[port]; |
1181 | 1225 | ||
1182 | mask_evtchn(port); | ||
1183 | clear_evtchn(port); | ||
1184 | |||
1185 | if (irq != -1) { | 1226 | if (irq != -1) { |
1186 | desc = irq_to_desc(irq); | 1227 | desc = irq_to_desc(irq); |
1187 | if (desc) | 1228 | if (desc) |
@@ -1337,10 +1378,16 @@ static void ack_dynirq(struct irq_data *data) | |||
1337 | { | 1378 | { |
1338 | int evtchn = evtchn_from_irq(data->irq); | 1379 | int evtchn = evtchn_from_irq(data->irq); |
1339 | 1380 | ||
1340 | irq_move_masked_irq(data); | 1381 | irq_move_irq(data); |
1341 | 1382 | ||
1342 | if (VALID_EVTCHN(evtchn)) | 1383 | if (VALID_EVTCHN(evtchn)) |
1343 | unmask_evtchn(evtchn); | 1384 | clear_evtchn(evtchn); |
1385 | } | ||
1386 | |||
1387 | static void mask_ack_dynirq(struct irq_data *data) | ||
1388 | { | ||
1389 | disable_dynirq(data); | ||
1390 | ack_dynirq(data); | ||
1344 | } | 1391 | } |
1345 | 1392 | ||
1346 | static int retrigger_dynirq(struct irq_data *data) | 1393 | static int retrigger_dynirq(struct irq_data *data) |
@@ -1502,6 +1549,18 @@ void xen_poll_irq(int irq) | |||
1502 | xen_poll_irq_timeout(irq, 0 /* no timeout */); | 1549 | xen_poll_irq_timeout(irq, 0 /* no timeout */); |
1503 | } | 1550 | } |
1504 | 1551 | ||
1552 | /* Check whether the IRQ line is shared with other guests. */ | ||
1553 | int xen_test_irq_shared(int irq) | ||
1554 | { | ||
1555 | struct irq_info *info = info_for_irq(irq); | ||
1556 | struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq }; | ||
1557 | |||
1558 | if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) | ||
1559 | return 0; | ||
1560 | return !(irq_status.flags & XENIRQSTAT_shared); | ||
1561 | } | ||
1562 | EXPORT_SYMBOL_GPL(xen_test_irq_shared); | ||
1563 | |||
1505 | void xen_irq_resume(void) | 1564 | void xen_irq_resume(void) |
1506 | { | 1565 | { |
1507 | unsigned int cpu, evtchn; | 1566 | unsigned int cpu, evtchn; |
@@ -1535,7 +1594,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { | |||
1535 | .irq_mask = disable_dynirq, | 1594 | .irq_mask = disable_dynirq, |
1536 | .irq_unmask = enable_dynirq, | 1595 | .irq_unmask = enable_dynirq, |
1537 | 1596 | ||
1538 | .irq_eoi = ack_dynirq, | 1597 | .irq_ack = ack_dynirq, |
1598 | .irq_mask_ack = mask_ack_dynirq, | ||
1599 | |||
1539 | .irq_set_affinity = set_affinity_irq, | 1600 | .irq_set_affinity = set_affinity_irq, |
1540 | .irq_retrigger = retrigger_dynirq, | 1601 | .irq_retrigger = retrigger_dynirq, |
1541 | }; | 1602 | }; |
@@ -1545,14 +1606,15 @@ static struct irq_chip xen_pirq_chip __read_mostly = { | |||
1545 | 1606 | ||
1546 | .irq_startup = startup_pirq, | 1607 | .irq_startup = startup_pirq, |
1547 | .irq_shutdown = shutdown_pirq, | 1608 | .irq_shutdown = shutdown_pirq, |
1548 | |||
1549 | .irq_enable = enable_pirq, | 1609 | .irq_enable = enable_pirq, |
1550 | .irq_unmask = enable_pirq, | ||
1551 | |||
1552 | .irq_disable = disable_pirq, | 1610 | .irq_disable = disable_pirq, |
1553 | .irq_mask = disable_pirq, | ||
1554 | 1611 | ||
1555 | .irq_ack = ack_pirq, | 1612 | .irq_mask = disable_dynirq, |
1613 | .irq_unmask = enable_dynirq, | ||
1614 | |||
1615 | .irq_ack = eoi_pirq, | ||
1616 | .irq_eoi = eoi_pirq, | ||
1617 | .irq_mask_ack = mask_ack_pirq, | ||
1556 | 1618 | ||
1557 | .irq_set_affinity = set_affinity_irq, | 1619 | .irq_set_affinity = set_affinity_irq, |
1558 | 1620 | ||
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index a7ffdfe19fc9..f6832f46aea4 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c | |||
@@ -427,6 +427,17 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd, | |||
427 | return 0; | 427 | return 0; |
428 | } | 428 | } |
429 | 429 | ||
430 | static void gntalloc_vma_open(struct vm_area_struct *vma) | ||
431 | { | ||
432 | struct gntalloc_gref *gref = vma->vm_private_data; | ||
433 | if (!gref) | ||
434 | return; | ||
435 | |||
436 | spin_lock(&gref_lock); | ||
437 | gref->users++; | ||
438 | spin_unlock(&gref_lock); | ||
439 | } | ||
440 | |||
430 | static void gntalloc_vma_close(struct vm_area_struct *vma) | 441 | static void gntalloc_vma_close(struct vm_area_struct *vma) |
431 | { | 442 | { |
432 | struct gntalloc_gref *gref = vma->vm_private_data; | 443 | struct gntalloc_gref *gref = vma->vm_private_data; |
@@ -441,6 +452,7 @@ static void gntalloc_vma_close(struct vm_area_struct *vma) | |||
441 | } | 452 | } |
442 | 453 | ||
443 | static struct vm_operations_struct gntalloc_vmops = { | 454 | static struct vm_operations_struct gntalloc_vmops = { |
455 | .open = gntalloc_vma_open, | ||
444 | .close = gntalloc_vma_close, | 456 | .close = gntalloc_vma_close, |
445 | }; | 457 | }; |
446 | 458 | ||
@@ -471,8 +483,6 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma) | |||
471 | vma->vm_private_data = gref; | 483 | vma->vm_private_data = gref; |
472 | 484 | ||
473 | vma->vm_flags |= VM_RESERVED; | 485 | vma->vm_flags |= VM_RESERVED; |
474 | vma->vm_flags |= VM_DONTCOPY; | ||
475 | vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP; | ||
476 | 486 | ||
477 | vma->vm_ops = &gntalloc_vmops; | 487 | vma->vm_ops = &gntalloc_vmops; |
478 | 488 | ||
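The new .open callback exists so that a duplicated mapping (for example across fork()) takes its own reference on the grant, keeping the close path balanced. A minimal userspace model of that pairing, assuming a plain counter under a mutex (gref_model and the helpers are made-up names, not gntalloc symbols):

    #include <pthread.h>
    #include <stdio.h>

    struct gref_model {
        pthread_mutex_t lock;
        int users;
    };

    static void vma_open(struct gref_model *g)
    {
        /* mirrors gntalloc_vma_open(): a duplicated mapping takes a reference */
        pthread_mutex_lock(&g->lock);
        g->users++;
        pthread_mutex_unlock(&g->lock);
    }

    static void vma_close(struct gref_model *g)
    {
        int release;

        pthread_mutex_lock(&g->lock);
        release = (--g->users == 0);
        pthread_mutex_unlock(&g->lock);
        if (release)
            printf("last user gone, grant can be released\n");
    }

    int main(void)
    {
        struct gref_model g = { .users = 1 };   /* reference taken at mmap() time */

        pthread_mutex_init(&g.lock, NULL);
        vma_open(&g);    /* e.g. fork() duplicating the VMA */
        vma_close(&g);   /* child unmaps */
        vma_close(&g);   /* parent unmaps -> released */
        return 0;
    }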
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index b0f9e8fb0052..f914b26cf0c2 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -330,17 +330,26 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages) | |||
330 | 330 | ||
331 | /* ------------------------------------------------------------------ */ | 331 | /* ------------------------------------------------------------------ */ |
332 | 332 | ||
333 | static void gntdev_vma_open(struct vm_area_struct *vma) | ||
334 | { | ||
335 | struct grant_map *map = vma->vm_private_data; | ||
336 | |||
337 | pr_debug("gntdev_vma_open %p\n", vma); | ||
338 | atomic_inc(&map->users); | ||
339 | } | ||
340 | |||
333 | static void gntdev_vma_close(struct vm_area_struct *vma) | 341 | static void gntdev_vma_close(struct vm_area_struct *vma) |
334 | { | 342 | { |
335 | struct grant_map *map = vma->vm_private_data; | 343 | struct grant_map *map = vma->vm_private_data; |
336 | 344 | ||
337 | pr_debug("close %p\n", vma); | 345 | pr_debug("gntdev_vma_close %p\n", vma); |
338 | map->vma = NULL; | 346 | map->vma = NULL; |
339 | vma->vm_private_data = NULL; | 347 | vma->vm_private_data = NULL; |
340 | gntdev_put_map(map); | 348 | gntdev_put_map(map); |
341 | } | 349 | } |
342 | 350 | ||
343 | static struct vm_operations_struct gntdev_vmops = { | 351 | static struct vm_operations_struct gntdev_vmops = { |
352 | .open = gntdev_vma_open, | ||
344 | .close = gntdev_vma_close, | 353 | .close = gntdev_vma_close, |
345 | }; | 354 | }; |
346 | 355 | ||
@@ -652,7 +661,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
652 | 661 | ||
653 | vma->vm_ops = &gntdev_vmops; | 662 | vma->vm_ops = &gntdev_vmops; |
654 | 663 | ||
655 | vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP; | 664 | vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND; |
665 | |||
666 | if (use_ptemod) | ||
667 | vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP; | ||
656 | 668 | ||
657 | vma->vm_private_data = map; | 669 | vma->vm_private_data = map; |
658 | 670 | ||
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 3745a318defc..fd725cde6ad1 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -466,13 +466,30 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
466 | if (map_ops[i].status) | 466 | if (map_ops[i].status) |
467 | continue; | 467 | continue; |
468 | 468 | ||
469 | /* m2p override only supported for GNTMAP_contains_pte mappings */ | 469 | if (map_ops[i].flags & GNTMAP_contains_pte) { |
470 | if (!(map_ops[i].flags & GNTMAP_contains_pte)) | 470 | pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + |
471 | continue; | ||
472 | pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + | ||
473 | (map_ops[i].host_addr & ~PAGE_MASK)); | 471 | (map_ops[i].host_addr & ~PAGE_MASK)); |
474 | mfn = pte_mfn(*pte); | 472 | mfn = pte_mfn(*pte); |
475 | ret = m2p_add_override(mfn, pages[i]); | 473 | } else { |
474 | /* If you really wanted to do this: | ||
475 | * mfn = PFN_DOWN(map_ops[i].dev_bus_addr); | ||
476 | * | ||
477 | * The reason we do not implement it is b/c on the | ||
478 | * unmap path (gnttab_unmap_refs) we have no means of | ||
479 | * checking whether the page is !GNTMAP_contains_pte. | ||
480 | * | ||
481 | * That is without some extra data-structure to carry | ||
482 | * the struct page, bool clear_pte, and list_head next | ||
483 | * tuples and deal with allocation/deallocation, etc. | ||
484 | * | ||
485 | * The users of this API set the GNTMAP_contains_pte | ||
486 | * flag, so let's just return not supported until it | ||
487 | * becomes necessary to implement. | ||
488 | */ | ||
489 | return -EOPNOTSUPP; | ||
490 | } | ||
491 | ret = m2p_add_override(mfn, pages[i], | ||
492 | map_ops[i].flags & GNTMAP_contains_pte); | ||
476 | if (ret) | 493 | if (ret) |
477 | return ret; | 494 | return ret; |
478 | } | 495 | } |
@@ -494,7 +511,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, | |||
494 | return ret; | 511 | return ret; |
495 | 512 | ||
496 | for (i = 0; i < count; i++) { | 513 | for (i = 0; i < count; i++) { |
497 | ret = m2p_remove_override(pages[i]); | 514 | ret = m2p_remove_override(pages[i], true /* clear the PTE */); |
498 | if (ret) | 515 | if (ret) |
499 | return ret; | 516 | return ret; |
500 | } | 517 | } |
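A small standalone model of the policy this hunk introduces: only GNTMAP_contains_pte mappings are handed to m2p_add_override(); everything else is refused with -EOPNOTSUPP because, as the comment explains, the unmap path could not tell the two cases apart. The bit value mirrors Xen's grant_table.h; map_one() is illustrative only.

    #include <errno.h>
    #include <stdio.h>

    #define GNTMAP_contains_pte (1 << 4)   /* bit position per Xen's grant_table.h */

    /* Mappings that do not carry a PTE cannot currently be tracked for the
     * unmap side, so they are refused instead of silently skipped as before. */
    static int map_one(unsigned int flags)
    {
        if (!(flags & GNTMAP_contains_pte))
            return -EOPNOTSUPP;
        /* real code: derive the MFN from the PTE and call
         * m2p_add_override(mfn, page, flags & GNTMAP_contains_pte) */
        return 0;
    }

    int main(void)
    {
        printf("pte mapping:     %d\n", map_one(GNTMAP_contains_pte));
        printf("non-pte mapping: %d\n", map_one(0));
        return 0;
    }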
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index a2eee574784e..0b5366b5be20 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -70,12 +70,7 @@ static int xen_suspend(void *data) | |||
70 | 70 | ||
71 | BUG_ON(!irqs_disabled()); | 71 | BUG_ON(!irqs_disabled()); |
72 | 72 | ||
73 | err = sysdev_suspend(PMSG_FREEZE); | 73 | err = syscore_suspend(); |
74 | if (!err) { | ||
75 | err = syscore_suspend(); | ||
76 | if (err) | ||
77 | sysdev_resume(); | ||
78 | } | ||
79 | if (err) { | 74 | if (err) { |
80 | printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n", | 75 | printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n", |
81 | err); | 76 | err); |
@@ -102,7 +97,6 @@ static int xen_suspend(void *data) | |||
102 | } | 97 | } |
103 | 98 | ||
104 | syscore_resume(); | 99 | syscore_resume(); |
105 | sysdev_resume(); | ||
106 | 100 | ||
107 | return 0; | 101 | return 0; |
108 | } | 102 | } |
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c index 60f1827a32cb..1e0fe01eb670 100644 --- a/drivers/xen/sys-hypervisor.c +++ b/drivers/xen/sys-hypervisor.c | |||
@@ -215,7 +215,7 @@ static struct attribute_group xen_compilation_group = { | |||
215 | .attrs = xen_compile_attrs, | 215 | .attrs = xen_compile_attrs, |
216 | }; | 216 | }; |
217 | 217 | ||
218 | int __init static xen_compilation_init(void) | 218 | static int __init xen_compilation_init(void) |
219 | { | 219 | { |
220 | return sysfs_create_group(hypervisor_kobj, &xen_compilation_group); | 220 | return sysfs_create_group(hypervisor_kobj, &xen_compilation_group); |
221 | } | 221 | } |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 5147bdd3b8e1..257b00e98428 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1102,6 +1102,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1102 | if (!bdev->bd_part) | 1102 | if (!bdev->bd_part) |
1103 | goto out_clear; | 1103 | goto out_clear; |
1104 | 1104 | ||
1105 | ret = 0; | ||
1105 | if (disk->fops->open) { | 1106 | if (disk->fops->open) { |
1106 | ret = disk->fops->open(bdev, mode); | 1107 | ret = disk->fops->open(bdev, mode); |
1107 | if (ret == -ERESTARTSYS) { | 1108 | if (ret == -ERESTARTSYS) { |
@@ -1118,9 +1119,18 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1118 | put_disk(disk); | 1119 | put_disk(disk); |
1119 | goto restart; | 1120 | goto restart; |
1120 | } | 1121 | } |
1121 | if (ret) | ||
1122 | goto out_clear; | ||
1123 | } | 1122 | } |
1123 | /* | ||
1124 | * If the device is invalidated, rescan partition | ||
1125 | * if open succeeded or failed with -ENOMEDIUM. | ||
1126 | * The latter is necessary to prevent ghost | ||
1127 | * partitions on a removed medium. | ||
1128 | */ | ||
1129 | if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) | ||
1130 | rescan_partitions(disk, bdev); | ||
1131 | if (ret) | ||
1132 | goto out_clear; | ||
1133 | |||
1124 | if (!bdev->bd_openers) { | 1134 | if (!bdev->bd_openers) { |
1125 | bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); | 1135 | bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); |
1126 | bdi = blk_get_backing_dev_info(bdev); | 1136 | bdi = blk_get_backing_dev_info(bdev); |
@@ -1128,8 +1138,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1128 | bdi = &default_backing_dev_info; | 1138 | bdi = &default_backing_dev_info; |
1129 | bdev_inode_switch_bdi(bdev->bd_inode, bdi); | 1139 | bdev_inode_switch_bdi(bdev->bd_inode, bdi); |
1130 | } | 1140 | } |
1131 | if (bdev->bd_invalidated) | ||
1132 | rescan_partitions(disk, bdev); | ||
1133 | } else { | 1141 | } else { |
1134 | struct block_device *whole; | 1142 | struct block_device *whole; |
1135 | whole = bdget_disk(disk, 0); | 1143 | whole = bdget_disk(disk, 0); |
@@ -1153,13 +1161,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1153 | } | 1161 | } |
1154 | } else { | 1162 | } else { |
1155 | if (bdev->bd_contains == bdev) { | 1163 | if (bdev->bd_contains == bdev) { |
1156 | if (bdev->bd_disk->fops->open) { | 1164 | ret = 0; |
1165 | if (bdev->bd_disk->fops->open) | ||
1157 | ret = bdev->bd_disk->fops->open(bdev, mode); | 1166 | ret = bdev->bd_disk->fops->open(bdev, mode); |
1158 | if (ret) | 1167 | /* the same as first opener case, read comment there */ |
1159 | goto out_unlock_bdev; | 1168 | if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) |
1160 | } | ||
1161 | if (bdev->bd_invalidated) | ||
1162 | rescan_partitions(bdev->bd_disk, bdev); | 1169 | rescan_partitions(bdev->bd_disk, bdev); |
1170 | if (ret) | ||
1171 | goto out_unlock_bdev; | ||
1163 | } | 1172 | } |
1164 | /* only one opener holds refs to the module and disk */ | 1173 | /* only one opener holds refs to the module and disk */ |
1165 | module_put(disk->fops->owner); | 1174 | module_put(disk->fops->owner); |
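The rescan condition now appears twice in __blkdev_get(); expressed as a standalone predicate (should_rescan() is a made-up name, not a kernel function), it reads:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Rescan when the device is flagged invalidated and open() either worked
     * or reported that the medium is gone (-ENOMEDIUM); the latter is what
     * clears ghost partitions after a medium is removed. */
    static bool should_rescan(bool bd_invalidated, int open_ret)
    {
        return bd_invalidated && (open_ret == 0 || open_ret == -ENOMEDIUM);
    }

    int main(void)
    {
        printf("%d\n", should_rescan(true, 0));          /* 1: rescan */
        printf("%d\n", should_rescan(true, -ENOMEDIUM)); /* 1: rescan */
        printf("%d\n", should_rescan(true, -EIO));       /* 0: keep state */
        printf("%d\n", should_rescan(false, 0));         /* 0: not invalidated */
        return 0;
    }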
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 5d505aaa72fb..44ea5b92e1ba 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c | |||
@@ -178,12 +178,13 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, | |||
178 | 178 | ||
179 | if (value) { | 179 | if (value) { |
180 | acl = posix_acl_from_xattr(value, size); | 180 | acl = posix_acl_from_xattr(value, size); |
181 | if (IS_ERR(acl)) | ||
182 | return PTR_ERR(acl); | ||
183 | |||
181 | if (acl) { | 184 | if (acl) { |
182 | ret = posix_acl_valid(acl); | 185 | ret = posix_acl_valid(acl); |
183 | if (ret) | 186 | if (ret) |
184 | goto out; | 187 | goto out; |
185 | } else if (IS_ERR(acl)) { | ||
186 | return PTR_ERR(acl); | ||
187 | } | 188 | } |
188 | } | 189 | } |
189 | 190 | ||
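The fix above moves the IS_ERR() test in front of the pointer use, since an ERR_PTR value is non-NULL and would otherwise be dereferenced. A simplified userspace model of the kernel's ERR_PTR/IS_ERR idiom, with made-up acl helpers, shows the required ordering:

    #include <errno.h>
    #include <stdio.h>

    /* Cut-down take on the kernel's ERR_PTR/IS_ERR idiom, only to show why
     * IS_ERR must be tested before the pointer is treated as valid. */
    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(ptr) ((long)(ptr))

    struct acl { int valid; };

    static struct acl *acl_from_xattr(int simulate_error)
    {
        static struct acl a = { 1 };
        return simulate_error ? ERR_PTR(-EINVAL) : &a;
    }

    static int set_acl(int simulate_error)
    {
        struct acl *acl = acl_from_xattr(simulate_error);

        if (IS_ERR(acl))            /* must come first, as in the fix above */
            return PTR_ERR(acl);
        if (acl)
            return acl->valid ? 0 : -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("ok path:    %d\n", set_acl(0));
        printf("error path: %d\n", set_acl(1));
        return 0;
    }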
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cd52f7f556ef..9ee6bd55e16c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -8856,23 +8856,38 @@ out: | |||
8856 | int btrfs_init_space_info(struct btrfs_fs_info *fs_info) | 8856 | int btrfs_init_space_info(struct btrfs_fs_info *fs_info) |
8857 | { | 8857 | { |
8858 | struct btrfs_space_info *space_info; | 8858 | struct btrfs_space_info *space_info; |
8859 | struct btrfs_super_block *disk_super; | ||
8860 | u64 features; | ||
8861 | u64 flags; | ||
8862 | int mixed = 0; | ||
8859 | int ret; | 8863 | int ret; |
8860 | 8864 | ||
8861 | ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, | 8865 | disk_super = &fs_info->super_copy; |
8862 | &space_info); | 8866 | if (!btrfs_super_root(disk_super)) |
8863 | if (ret) | 8867 | return 1; |
8864 | return ret; | ||
8865 | 8868 | ||
8866 | ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, | 8869 | features = btrfs_super_incompat_flags(disk_super); |
8867 | &space_info); | 8870 | if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) |
8868 | if (ret) | 8871 | mixed = 1; |
8869 | return ret; | ||
8870 | 8872 | ||
8871 | ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, | 8873 | flags = BTRFS_BLOCK_GROUP_SYSTEM; |
8872 | &space_info); | 8874 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); |
8873 | if (ret) | 8875 | if (ret) |
8874 | return ret; | 8876 | goto out; |
8875 | 8877 | ||
8878 | if (mixed) { | ||
8879 | flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; | ||
8880 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); | ||
8881 | } else { | ||
8882 | flags = BTRFS_BLOCK_GROUP_METADATA; | ||
8883 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); | ||
8884 | if (ret) | ||
8885 | goto out; | ||
8886 | |||
8887 | flags = BTRFS_BLOCK_GROUP_DATA; | ||
8888 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); | ||
8889 | } | ||
8890 | out: | ||
8876 | return ret; | 8891 | return ret; |
8877 | } | 8892 | } |
8878 | 8893 | ||
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ffb48d6c5433..2616f7ed4799 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -81,6 +81,13 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags) | |||
81 | iflags |= FS_NOATIME_FL; | 81 | iflags |= FS_NOATIME_FL; |
82 | if (flags & BTRFS_INODE_DIRSYNC) | 82 | if (flags & BTRFS_INODE_DIRSYNC) |
83 | iflags |= FS_DIRSYNC_FL; | 83 | iflags |= FS_DIRSYNC_FL; |
84 | if (flags & BTRFS_INODE_NODATACOW) | ||
85 | iflags |= FS_NOCOW_FL; | ||
86 | |||
87 | if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS)) | ||
88 | iflags |= FS_COMPR_FL; | ||
89 | else if (flags & BTRFS_INODE_NOCOMPRESS) | ||
90 | iflags |= FS_NOCOMP_FL; | ||
84 | 91 | ||
85 | return iflags; | 92 | return iflags; |
86 | } | 93 | } |
@@ -144,16 +151,13 @@ static int check_flags(unsigned int flags) | |||
144 | if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ | 151 | if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ |
145 | FS_NOATIME_FL | FS_NODUMP_FL | \ | 152 | FS_NOATIME_FL | FS_NODUMP_FL | \ |
146 | FS_SYNC_FL | FS_DIRSYNC_FL | \ | 153 | FS_SYNC_FL | FS_DIRSYNC_FL | \ |
147 | FS_NOCOMP_FL | FS_COMPR_FL | \ | 154 | FS_NOCOMP_FL | FS_COMPR_FL | |
148 | FS_NOCOW_FL | FS_COW_FL)) | 155 | FS_NOCOW_FL)) |
149 | return -EOPNOTSUPP; | 156 | return -EOPNOTSUPP; |
150 | 157 | ||
151 | if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) | 158 | if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) |
152 | return -EINVAL; | 159 | return -EINVAL; |
153 | 160 | ||
154 | if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL)) | ||
155 | return -EINVAL; | ||
156 | |||
157 | return 0; | 161 | return 0; |
158 | } | 162 | } |
159 | 163 | ||
@@ -218,6 +222,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
218 | ip->flags |= BTRFS_INODE_DIRSYNC; | 222 | ip->flags |= BTRFS_INODE_DIRSYNC; |
219 | else | 223 | else |
220 | ip->flags &= ~BTRFS_INODE_DIRSYNC; | 224 | ip->flags &= ~BTRFS_INODE_DIRSYNC; |
225 | if (flags & FS_NOCOW_FL) | ||
226 | ip->flags |= BTRFS_INODE_NODATACOW; | ||
227 | else | ||
228 | ip->flags &= ~BTRFS_INODE_NODATACOW; | ||
221 | 229 | ||
222 | /* | 230 | /* |
223 | * The COMPRESS flag can only be changed by users, while the NOCOMPRESS | 231 | * The COMPRESS flag can only be changed by users, while the NOCOMPRESS |
@@ -230,11 +238,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
230 | } else if (flags & FS_COMPR_FL) { | 238 | } else if (flags & FS_COMPR_FL) { |
231 | ip->flags |= BTRFS_INODE_COMPRESS; | 239 | ip->flags |= BTRFS_INODE_COMPRESS; |
232 | ip->flags &= ~BTRFS_INODE_NOCOMPRESS; | 240 | ip->flags &= ~BTRFS_INODE_NOCOMPRESS; |
241 | } else { | ||
242 | ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); | ||
233 | } | 243 | } |
234 | if (flags & FS_NOCOW_FL) | ||
235 | ip->flags |= BTRFS_INODE_NODATACOW; | ||
236 | else if (flags & FS_COW_FL) | ||
237 | ip->flags &= ~BTRFS_INODE_NODATACOW; | ||
238 | 244 | ||
239 | trans = btrfs_join_transaction(root, 1); | 245 | trans = btrfs_join_transaction(root, 1); |
240 | BUG_ON(IS_ERR(trans)); | 246 | BUG_ON(IS_ERR(trans)); |
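The flag translation added to btrfs_flags_to_ioctl() can be read as a small pure function. The sketch below mirrors that mapping with illustrative bit values (the real BTRFS_INODE_* and FS_*_FL constants come from the kernel headers):

    #include <stdio.h>

    #define INODE_NODATACOW  (1 << 0)
    #define INODE_COMPRESS   (1 << 1)
    #define INODE_NOCOMPRESS (1 << 2)

    #define FL_NOCOW  (1 << 0)
    #define FL_COMPR  (1 << 1)
    #define FL_NOCOMP (1 << 2)

    /* NODATACOW maps to NOCOW; COMPRESS without NOCOMPRESS maps to COMPR;
     * NOCOMPRESS alone maps to NOCOMP, exactly as in the hunk above. */
    static unsigned int to_ioctl_flags(unsigned int flags)
    {
        unsigned int iflags = 0;

        if (flags & INODE_NODATACOW)
            iflags |= FL_NOCOW;
        if ((flags & INODE_COMPRESS) && !(flags & INODE_NOCOMPRESS))
            iflags |= FL_COMPR;
        else if (flags & INODE_NOCOMPRESS)
            iflags |= FL_NOCOMP;
        return iflags;
    }

    int main(void)
    {
        printf("%x\n", to_ioctl_flags(INODE_COMPRESS));                    /* FL_COMPR */
        printf("%x\n", to_ioctl_flags(INODE_COMPRESS | INODE_NOCOMPRESS)); /* FL_NOCOMP */
        printf("%x\n", to_ioctl_flags(INODE_NODATACOW));                   /* FL_NOCOW */
        return 0;
    }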
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 9fa08662a88d..2a5404c1c42f 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -819,7 +819,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci) | |||
819 | used |= CEPH_CAP_FILE_CACHE; | 819 | used |= CEPH_CAP_FILE_CACHE; |
820 | if (ci->i_wr_ref) | 820 | if (ci->i_wr_ref) |
821 | used |= CEPH_CAP_FILE_WR; | 821 | used |= CEPH_CAP_FILE_WR; |
822 | if (ci->i_wrbuffer_ref) | 822 | if (ci->i_wb_ref || ci->i_wrbuffer_ref) |
823 | used |= CEPH_CAP_FILE_BUFFER; | 823 | used |= CEPH_CAP_FILE_BUFFER; |
824 | return used; | 824 | return used; |
825 | } | 825 | } |
@@ -1990,11 +1990,11 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got) | |||
1990 | if (got & CEPH_CAP_FILE_WR) | 1990 | if (got & CEPH_CAP_FILE_WR) |
1991 | ci->i_wr_ref++; | 1991 | ci->i_wr_ref++; |
1992 | if (got & CEPH_CAP_FILE_BUFFER) { | 1992 | if (got & CEPH_CAP_FILE_BUFFER) { |
1993 | if (ci->i_wrbuffer_ref == 0) | 1993 | if (ci->i_wb_ref == 0) |
1994 | ihold(&ci->vfs_inode); | 1994 | ihold(&ci->vfs_inode); |
1995 | ci->i_wrbuffer_ref++; | 1995 | ci->i_wb_ref++; |
1996 | dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n", | 1996 | dout("__take_cap_refs %p wb %d -> %d (?)\n", |
1997 | &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref); | 1997 | &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref); |
1998 | } | 1998 | } |
1999 | } | 1999 | } |
2000 | 2000 | ||
@@ -2169,12 +2169,12 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) | |||
2169 | if (--ci->i_rdcache_ref == 0) | 2169 | if (--ci->i_rdcache_ref == 0) |
2170 | last++; | 2170 | last++; |
2171 | if (had & CEPH_CAP_FILE_BUFFER) { | 2171 | if (had & CEPH_CAP_FILE_BUFFER) { |
2172 | if (--ci->i_wrbuffer_ref == 0) { | 2172 | if (--ci->i_wb_ref == 0) { |
2173 | last++; | 2173 | last++; |
2174 | put++; | 2174 | put++; |
2175 | } | 2175 | } |
2176 | dout("put_cap_refs %p wrbuffer %d -> %d (?)\n", | 2176 | dout("put_cap_refs %p wb %d -> %d (?)\n", |
2177 | inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref); | 2177 | inode, ci->i_wb_ref+1, ci->i_wb_ref); |
2178 | } | 2178 | } |
2179 | if (had & CEPH_CAP_FILE_WR) | 2179 | if (had & CEPH_CAP_FILE_WR) |
2180 | if (--ci->i_wr_ref == 0) { | 2180 | if (--ci->i_wr_ref == 0) { |
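The new i_wb_ref counter tracks buffered-write cap references separately from i_wrbuffer_ref: the inode is pinned when the count goes from 0 to 1 and released again when it returns to 0. A hedged userspace model of that take/put pairing (struct and helper names are invented):

    #include <stdbool.h>
    #include <stdio.h>

    struct inode_model {
        int i_wb_ref;
        int pinned;
    };

    static void take_wb_ref(struct inode_model *ci)
    {
        if (ci->i_wb_ref == 0)
            ci->pinned++;           /* stands in for ihold() */
        ci->i_wb_ref++;
    }

    static bool put_wb_ref(struct inode_model *ci)
    {
        if (--ci->i_wb_ref == 0) {
            ci->pinned--;           /* caller then performs the iput() */
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct inode_model ci = { 0, 0 };

        take_wb_ref(&ci);
        take_wb_ref(&ci);
        printf("last put? %d\n", put_wb_ref(&ci));  /* 0 */
        printf("last put? %d\n", put_wb_ref(&ci));  /* 1: drop the pin */
        return 0;
    }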
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 03d6dafda61f..70b6a4839c38 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -355,6 +355,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb) | |||
355 | ci->i_rd_ref = 0; | 355 | ci->i_rd_ref = 0; |
356 | ci->i_rdcache_ref = 0; | 356 | ci->i_rdcache_ref = 0; |
357 | ci->i_wr_ref = 0; | 357 | ci->i_wr_ref = 0; |
358 | ci->i_wb_ref = 0; | ||
358 | ci->i_wrbuffer_ref = 0; | 359 | ci->i_wrbuffer_ref = 0; |
359 | ci->i_wrbuffer_ref_head = 0; | 360 | ci->i_wrbuffer_ref_head = 0; |
360 | ci->i_shared_gen = 0; | 361 | ci->i_shared_gen = 0; |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index f60b07b0feb0..d0fae4ce9ba5 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -3304,8 +3304,8 @@ static void con_put(struct ceph_connection *con) | |||
3304 | { | 3304 | { |
3305 | struct ceph_mds_session *s = con->private; | 3305 | struct ceph_mds_session *s = con->private; |
3306 | 3306 | ||
3307 | dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1); | ||
3307 | ceph_put_mds_session(s); | 3308 | ceph_put_mds_session(s); |
3308 | dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref)); | ||
3309 | } | 3309 | } |
3310 | 3310 | ||
3311 | /* | 3311 | /* |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index e86ec1155f8f..24067d68a554 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
@@ -206,7 +206,7 @@ void ceph_put_snap_realm(struct ceph_mds_client *mdsc, | |||
206 | up_write(&mdsc->snap_rwsem); | 206 | up_write(&mdsc->snap_rwsem); |
207 | } else { | 207 | } else { |
208 | spin_lock(&mdsc->snap_empty_lock); | 208 | spin_lock(&mdsc->snap_empty_lock); |
209 | list_add(&mdsc->snap_empty, &realm->empty_item); | 209 | list_add(&realm->empty_item, &mdsc->snap_empty); |
210 | spin_unlock(&mdsc->snap_empty_lock); | 210 | spin_unlock(&mdsc->snap_empty_lock); |
211 | } | 211 | } |
212 | } | 212 | } |
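The one-line snap.c change swaps the list_add() arguments so the realm's empty_item is added onto mdsc->snap_empty rather than the reverse. A tiny stand-in for the kernel's list_add(new, head) convention makes the argument order explicit (simplified types, no locking):

    #include <stdio.h>

    /* list_add(new, head) inserts 'new' right after 'head', so the first
     * argument must be the item and the second the list it joins. */
    struct list_head {
        struct list_head *next, *prev;
    };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *new, struct list_head *head)
    {
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
    }

    struct realm {
        int id;
        struct list_head empty_item;
    };

    int main(void)
    {
        struct list_head snap_empty;
        struct realm r = { 42, { 0, 0 } };

        INIT_LIST_HEAD(&snap_empty);
        INIT_LIST_HEAD(&r.empty_item);

        list_add(&r.empty_item, &snap_empty);   /* item first, list second */
        printf("list non-empty: %d\n", snap_empty.next != &snap_empty);
        return 0;
    }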
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index b1f1b8bb1271..f5cabefa98dc 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -293,7 +293,7 @@ struct ceph_inode_info { | |||
293 | 293 | ||
294 | /* held references to caps */ | 294 | /* held references to caps */ |
295 | int i_pin_ref; | 295 | int i_pin_ref; |
296 | int i_rd_ref, i_rdcache_ref, i_wr_ref; | 296 | int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref; |
297 | int i_wrbuffer_ref, i_wrbuffer_ref_head; | 297 | int i_wrbuffer_ref, i_wrbuffer_ref_head; |
298 | u32 i_shared_gen; /* increment each time we get FILE_SHARED */ | 298 | u32 i_shared_gen; /* increment each time we get FILE_SHARED */ |
299 | u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */ | 299 | u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */ |
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 23d43cde4306..1b2e180b018d 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
@@ -277,6 +277,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, | |||
277 | 277 | ||
278 | for (i = 0, j = 0; i < srclen; j++) { | 278 | for (i = 0, j = 0; i < srclen; j++) { |
279 | src_char = source[i]; | 279 | src_char = source[i]; |
280 | charlen = 1; | ||
280 | switch (src_char) { | 281 | switch (src_char) { |
281 | case 0: | 282 | case 0: |
282 | put_unaligned(0, &target[j]); | 283 | put_unaligned(0, &target[j]); |
@@ -316,16 +317,13 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, | |||
316 | dst_char = cpu_to_le16(0x003f); | 317 | dst_char = cpu_to_le16(0x003f); |
317 | charlen = 1; | 318 | charlen = 1; |
318 | } | 319 | } |
319 | /* | ||
320 | * character may take more than one byte in the source | ||
321 | * string, but will take exactly two bytes in the | ||
322 | * target string | ||
323 | */ | ||
324 | i += charlen; | ||
325 | continue; | ||
326 | } | 320 | } |
321 | /* | ||
322 | * character may take more than one byte in the source string, | ||
323 | * but will take exactly two bytes in the target string | ||
324 | */ | ||
325 | i += charlen; | ||
327 | put_unaligned(dst_char, &target[j]); | 326 | put_unaligned(dst_char, &target[j]); |
328 | i++; /* move to next char in source string */ | ||
329 | } | 327 | } |
330 | 328 | ||
331 | ctoUCS_out: | 329 | ctoUCS_out: |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 05f1dcf7d79a..277262a8e82f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -2673,6 +2673,11 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon, | |||
2673 | 0 /* not legacy */, cifs_sb->local_nls, | 2673 | 0 /* not legacy */, cifs_sb->local_nls, |
2674 | cifs_sb->mnt_cifs_flags & | 2674 | cifs_sb->mnt_cifs_flags & |
2675 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 2675 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
2676 | |||
2677 | if (rc == -EOPNOTSUPP || rc == -EINVAL) | ||
2678 | rc = SMBQueryInformation(xid, tcon, full_path, pfile_info, | ||
2679 | cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & | ||
2680 | CIFS_MOUNT_MAP_SPECIAL_CHR); | ||
2676 | kfree(pfile_info); | 2681 | kfree(pfile_info); |
2677 | return rc; | 2682 | return rc; |
2678 | } | 2683 | } |
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 3313dd19f543..9a37a9b6de3a 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c | |||
@@ -53,11 +53,14 @@ DEFINE_SPINLOCK(configfs_dirent_lock); | |||
53 | static void configfs_d_iput(struct dentry * dentry, | 53 | static void configfs_d_iput(struct dentry * dentry, |
54 | struct inode * inode) | 54 | struct inode * inode) |
55 | { | 55 | { |
56 | struct configfs_dirent * sd = dentry->d_fsdata; | 56 | struct configfs_dirent *sd = dentry->d_fsdata; |
57 | 57 | ||
58 | if (sd) { | 58 | if (sd) { |
59 | BUG_ON(sd->s_dentry != dentry); | 59 | BUG_ON(sd->s_dentry != dentry); |
60 | /* Coordinate with configfs_readdir */ | ||
61 | spin_lock(&configfs_dirent_lock); | ||
60 | sd->s_dentry = NULL; | 62 | sd->s_dentry = NULL; |
63 | spin_unlock(&configfs_dirent_lock); | ||
61 | configfs_put(sd); | 64 | configfs_put(sd); |
62 | } | 65 | } |
63 | iput(inode); | 66 | iput(inode); |
@@ -689,7 +692,8 @@ static int create_default_group(struct config_group *parent_group, | |||
689 | sd = child->d_fsdata; | 692 | sd = child->d_fsdata; |
690 | sd->s_type |= CONFIGFS_USET_DEFAULT; | 693 | sd->s_type |= CONFIGFS_USET_DEFAULT; |
691 | } else { | 694 | } else { |
692 | d_delete(child); | 695 | BUG_ON(child->d_inode); |
696 | d_drop(child); | ||
693 | dput(child); | 697 | dput(child); |
694 | } | 698 | } |
695 | } | 699 | } |
@@ -1545,7 +1549,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir | |||
1545 | struct configfs_dirent * parent_sd = dentry->d_fsdata; | 1549 | struct configfs_dirent * parent_sd = dentry->d_fsdata; |
1546 | struct configfs_dirent *cursor = filp->private_data; | 1550 | struct configfs_dirent *cursor = filp->private_data; |
1547 | struct list_head *p, *q = &cursor->s_sibling; | 1551 | struct list_head *p, *q = &cursor->s_sibling; |
1548 | ino_t ino; | 1552 | ino_t ino = 0; |
1549 | int i = filp->f_pos; | 1553 | int i = filp->f_pos; |
1550 | 1554 | ||
1551 | switch (i) { | 1555 | switch (i) { |
@@ -1573,6 +1577,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir | |||
1573 | struct configfs_dirent *next; | 1577 | struct configfs_dirent *next; |
1574 | const char * name; | 1578 | const char * name; |
1575 | int len; | 1579 | int len; |
1580 | struct inode *inode = NULL; | ||
1576 | 1581 | ||
1577 | next = list_entry(p, struct configfs_dirent, | 1582 | next = list_entry(p, struct configfs_dirent, |
1578 | s_sibling); | 1583 | s_sibling); |
@@ -1581,9 +1586,28 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir | |||
1581 | 1586 | ||
1582 | name = configfs_get_name(next); | 1587 | name = configfs_get_name(next); |
1583 | len = strlen(name); | 1588 | len = strlen(name); |
1584 | if (next->s_dentry) | 1589 | |
1585 | ino = next->s_dentry->d_inode->i_ino; | 1590 | /* |
1586 | else | 1591 | * We'll have a dentry and an inode for |
1592 | * PINNED items and for open attribute | ||
1593 | * files. We lock here to prevent a race | ||
1594 | * with configfs_d_iput() clearing | ||
1595 | * s_dentry before calling iput(). | ||
1596 | * | ||
1597 | * Why do we go to the trouble? If | ||
1598 | * someone has an attribute file open, | ||
1599 | * the inode number should match until | ||
1600 | * they close it. Beyond that, we don't | ||
1601 | * care. | ||
1602 | */ | ||
1603 | spin_lock(&configfs_dirent_lock); | ||
1604 | dentry = next->s_dentry; | ||
1605 | if (dentry) | ||
1606 | inode = dentry->d_inode; | ||
1607 | if (inode) | ||
1608 | ino = inode->i_ino; | ||
1609 | spin_unlock(&configfs_dirent_lock); | ||
1610 | if (!inode) | ||
1587 | ino = iunique(configfs_sb, 2); | 1611 | ino = iunique(configfs_sb, 2); |
1588 | 1612 | ||
1589 | if (filldir(dirent, name, len, filp->f_pos, ino, | 1613 | if (filldir(dirent, name, len, filp->f_pos, ino, |
@@ -1683,7 +1707,8 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) | |||
1683 | err = configfs_attach_group(sd->s_element, &group->cg_item, | 1707 | err = configfs_attach_group(sd->s_element, &group->cg_item, |
1684 | dentry); | 1708 | dentry); |
1685 | if (err) { | 1709 | if (err) { |
1686 | d_delete(dentry); | 1710 | BUG_ON(dentry->d_inode); |
1711 | d_drop(dentry); | ||
1687 | dput(dentry); | 1712 | dput(dentry); |
1688 | } else { | 1713 | } else { |
1689 | spin_lock(&configfs_dirent_lock); | 1714 | spin_lock(&configfs_dirent_lock); |
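The readdir path now snapshots s_dentry and its inode under configfs_dirent_lock, because configfs_d_iput() clears s_dentry under the same lock. A hedged pthread model of the pattern, reading the pointer under the lock and falling back to a generated inode number when it is gone (all names below are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t dirent_lock = PTHREAD_MUTEX_INITIALIZER;

    struct inode_model  { unsigned long i_ino; };
    struct dentry_model { struct inode_model *d_inode; };

    /* Either see a valid dentry/inode pair, or use the fallback number;
     * the lock guarantees we never dereference a half-torn-down dentry. */
    static unsigned long pick_ino(struct dentry_model **slot, unsigned long fallback)
    {
        struct dentry_model *dentry;
        struct inode_model *inode = NULL;
        unsigned long ino = fallback;

        pthread_mutex_lock(&dirent_lock);
        dentry = *slot;                 /* may be cleared by teardown */
        if (dentry)
            inode = dentry->d_inode;
        if (inode)
            ino = inode->i_ino;
        pthread_mutex_unlock(&dirent_lock);
        return ino;
    }

    int main(void)
    {
        struct inode_model  ino = { 1234 };
        struct dentry_model d   = { &ino };
        struct dentry_model *slot = &d;

        printf("%lu\n", pick_ino(&slot, 99));   /* 1234 */
        slot = NULL;                            /* as configfs_d_iput() does */
        printf("%lu\n", pick_ino(&slot, 99));   /* 99: iunique() stand-in */
        return 0;
    }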
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 89d394d8fe24..568304d058a3 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c | |||
@@ -429,25 +429,16 @@ static ssize_t write_file_bool(struct file *file, const char __user *user_buf, | |||
429 | { | 429 | { |
430 | char buf[32]; | 430 | char buf[32]; |
431 | int buf_size; | 431 | int buf_size; |
432 | bool bv; | ||
432 | u32 *val = file->private_data; | 433 | u32 *val = file->private_data; |
433 | 434 | ||
434 | buf_size = min(count, (sizeof(buf)-1)); | 435 | buf_size = min(count, (sizeof(buf)-1)); |
435 | if (copy_from_user(buf, user_buf, buf_size)) | 436 | if (copy_from_user(buf, user_buf, buf_size)) |
436 | return -EFAULT; | 437 | return -EFAULT; |
437 | 438 | ||
438 | switch (buf[0]) { | 439 | if (strtobool(buf, &bv) == 0) |
439 | case 'y': | 440 | *val = bv; |
440 | case 'Y': | 441 | |
441 | case '1': | ||
442 | *val = 1; | ||
443 | break; | ||
444 | case 'n': | ||
445 | case 'N': | ||
446 | case '0': | ||
447 | *val = 0; | ||
448 | break; | ||
449 | } | ||
450 | |||
451 | return count; | 442 | return count; |
452 | } | 443 | } |
453 | 444 | ||
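write_file_bool() now delegates parsing to the in-kernel strtobool() helper, which accepts the same leading y/Y/1/n/N/0 characters the old switch did and rejects anything else with -EINVAL. A userspace re-implementation of those semantics for reference (strtobool_model() is not the kernel function itself):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* First character decides; anything else is rejected and the stored
     * value is left untouched, matching the behaviour after the change. */
    static int strtobool_model(const char *s, bool *res)
    {
        switch (s[0]) {
        case 'y': case 'Y': case '1':
            *res = true;
            return 0;
        case 'n': case 'N': case '0':
            *res = false;
            return 0;
        default:
            return -EINVAL;
        }
    }

    int main(void)
    {
        bool v = false;

        if (strtobool_model("yes\n", &v) == 0)
            printf("parsed: %d\n", v);                        /* 1 */
        printf("garbage rc: %d\n", strtobool_model("maybe", &v)); /* -EINVAL */
        return 0;
    }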
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index c6ba49bd95b3..b32eb29a4e6f 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -174,7 +174,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) | |||
174 | if (!inode) | 174 | if (!inode) |
175 | return 0; | 175 | return 0; |
176 | 176 | ||
177 | if (nd->flags & LOOKUP_RCU) | 177 | if (nd && (nd->flags & LOOKUP_RCU)) |
178 | return -ECHILD; | 178 | return -ECHILD; |
179 | 179 | ||
180 | fc = get_fuse_conn(inode); | 180 | fc = get_fuse_conn(inode); |
diff --git a/fs/namei.c b/fs/namei.c index 54fc993e3027..e3c4f112ebf7 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(putname); | |||
179 | static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, | 179 | static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, |
180 | int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) | 180 | int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) |
181 | { | 181 | { |
182 | umode_t mode = inode->i_mode; | 182 | unsigned int mode = inode->i_mode; |
183 | 183 | ||
184 | mask &= MAY_READ | MAY_WRITE | MAY_EXEC; | 184 | mask &= MAY_READ | MAY_WRITE | MAY_EXEC; |
185 | 185 | ||
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 6f8192f4cfc7..be79dc9f386d 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c | |||
@@ -117,6 +117,8 @@ static int filelayout_async_handle_error(struct rpc_task *task, | |||
117 | case -EKEYEXPIRED: | 117 | case -EKEYEXPIRED: |
118 | rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); | 118 | rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); |
119 | break; | 119 | break; |
120 | case -NFS4ERR_RETRY_UNCACHED_REP: | ||
121 | break; | ||
120 | default: | 122 | default: |
121 | dprintk("%s DS error. Retry through MDS %d\n", __func__, | 123 | dprintk("%s DS error. Retry through MDS %d\n", __func__, |
122 | task->tk_status); | 124 | task->tk_status); |
@@ -416,7 +418,8 @@ static int | |||
416 | filelayout_check_layout(struct pnfs_layout_hdr *lo, | 418 | filelayout_check_layout(struct pnfs_layout_hdr *lo, |
417 | struct nfs4_filelayout_segment *fl, | 419 | struct nfs4_filelayout_segment *fl, |
418 | struct nfs4_layoutget_res *lgr, | 420 | struct nfs4_layoutget_res *lgr, |
419 | struct nfs4_deviceid *id) | 421 | struct nfs4_deviceid *id, |
422 | gfp_t gfp_flags) | ||
420 | { | 423 | { |
421 | struct nfs4_file_layout_dsaddr *dsaddr; | 424 | struct nfs4_file_layout_dsaddr *dsaddr; |
422 | int status = -EINVAL; | 425 | int status = -EINVAL; |
@@ -439,7 +442,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo, | |||
439 | /* find and reference the deviceid */ | 442 | /* find and reference the deviceid */ |
440 | dsaddr = nfs4_fl_find_get_deviceid(id); | 443 | dsaddr = nfs4_fl_find_get_deviceid(id); |
441 | if (dsaddr == NULL) { | 444 | if (dsaddr == NULL) { |
442 | dsaddr = get_device_info(lo->plh_inode, id); | 445 | dsaddr = get_device_info(lo->plh_inode, id, gfp_flags); |
443 | if (dsaddr == NULL) | 446 | if (dsaddr == NULL) |
444 | goto out; | 447 | goto out; |
445 | } | 448 | } |
@@ -500,7 +503,8 @@ static int | |||
500 | filelayout_decode_layout(struct pnfs_layout_hdr *flo, | 503 | filelayout_decode_layout(struct pnfs_layout_hdr *flo, |
501 | struct nfs4_filelayout_segment *fl, | 504 | struct nfs4_filelayout_segment *fl, |
502 | struct nfs4_layoutget_res *lgr, | 505 | struct nfs4_layoutget_res *lgr, |
503 | struct nfs4_deviceid *id) | 506 | struct nfs4_deviceid *id, |
507 | gfp_t gfp_flags) | ||
504 | { | 508 | { |
505 | struct xdr_stream stream; | 509 | struct xdr_stream stream; |
506 | struct xdr_buf buf = { | 510 | struct xdr_buf buf = { |
@@ -516,7 +520,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, | |||
516 | 520 | ||
517 | dprintk("%s: set_layout_map Begin\n", __func__); | 521 | dprintk("%s: set_layout_map Begin\n", __func__); |
518 | 522 | ||
519 | scratch = alloc_page(GFP_KERNEL); | 523 | scratch = alloc_page(gfp_flags); |
520 | if (!scratch) | 524 | if (!scratch) |
521 | return -ENOMEM; | 525 | return -ENOMEM; |
522 | 526 | ||
@@ -554,13 +558,13 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, | |||
554 | goto out_err; | 558 | goto out_err; |
555 | 559 | ||
556 | fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), | 560 | fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), |
557 | GFP_KERNEL); | 561 | gfp_flags); |
558 | if (!fl->fh_array) | 562 | if (!fl->fh_array) |
559 | goto out_err; | 563 | goto out_err; |
560 | 564 | ||
561 | for (i = 0; i < fl->num_fh; i++) { | 565 | for (i = 0; i < fl->num_fh; i++) { |
562 | /* Do we want to use a mempool here? */ | 566 | /* Do we want to use a mempool here? */ |
563 | fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL); | 567 | fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); |
564 | if (!fl->fh_array[i]) | 568 | if (!fl->fh_array[i]) |
565 | goto out_err_free; | 569 | goto out_err_free; |
566 | 570 | ||
@@ -605,19 +609,20 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg) | |||
605 | 609 | ||
606 | static struct pnfs_layout_segment * | 610 | static struct pnfs_layout_segment * |
607 | filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, | 611 | filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, |
608 | struct nfs4_layoutget_res *lgr) | 612 | struct nfs4_layoutget_res *lgr, |
613 | gfp_t gfp_flags) | ||
609 | { | 614 | { |
610 | struct nfs4_filelayout_segment *fl; | 615 | struct nfs4_filelayout_segment *fl; |
611 | int rc; | 616 | int rc; |
612 | struct nfs4_deviceid id; | 617 | struct nfs4_deviceid id; |
613 | 618 | ||
614 | dprintk("--> %s\n", __func__); | 619 | dprintk("--> %s\n", __func__); |
615 | fl = kzalloc(sizeof(*fl), GFP_KERNEL); | 620 | fl = kzalloc(sizeof(*fl), gfp_flags); |
616 | if (!fl) | 621 | if (!fl) |
617 | return NULL; | 622 | return NULL; |
618 | 623 | ||
619 | rc = filelayout_decode_layout(layoutid, fl, lgr, &id); | 624 | rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); |
620 | if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) { | 625 | if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { |
621 | _filelayout_free_lseg(fl); | 626 | _filelayout_free_lseg(fl); |
622 | return NULL; | 627 | return NULL; |
623 | } | 628 | } |
@@ -633,7 +638,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, | |||
633 | int size = (fl->stripe_type == STRIPE_SPARSE) ? | 638 | int size = (fl->stripe_type == STRIPE_SPARSE) ? |
634 | fl->dsaddr->ds_num : fl->dsaddr->stripe_count; | 639 | fl->dsaddr->ds_num : fl->dsaddr->stripe_count; |
635 | 640 | ||
636 | fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL); | 641 | fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags); |
637 | if (!fl->commit_buckets) { | 642 | if (!fl->commit_buckets) { |
638 | filelayout_free_lseg(&fl->generic_hdr); | 643 | filelayout_free_lseg(&fl->generic_hdr); |
639 | return NULL; | 644 | return NULL; |
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h index 7c44579f5832..2b461d77b43a 100644 --- a/fs/nfs/nfs4filelayout.h +++ b/fs/nfs/nfs4filelayout.h | |||
@@ -104,6 +104,6 @@ extern struct nfs4_file_layout_dsaddr * | |||
104 | nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id); | 104 | nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id); |
105 | extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); | 105 | extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); |
106 | struct nfs4_file_layout_dsaddr * | 106 | struct nfs4_file_layout_dsaddr * |
107 | get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id); | 107 | get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags); |
108 | 108 | ||
109 | #endif /* FS_NFS_NFS4FILELAYOUT_H */ | 109 | #endif /* FS_NFS_NFS4FILELAYOUT_H */ |
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index de5350f2b249..db07c7af1395 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c | |||
@@ -225,11 +225,11 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) | |||
225 | } | 225 | } |
226 | 226 | ||
227 | static struct nfs4_pnfs_ds * | 227 | static struct nfs4_pnfs_ds * |
228 | nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port) | 228 | nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags) |
229 | { | 229 | { |
230 | struct nfs4_pnfs_ds *tmp_ds, *ds; | 230 | struct nfs4_pnfs_ds *tmp_ds, *ds; |
231 | 231 | ||
232 | ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL); | 232 | ds = kzalloc(sizeof(*tmp_ds), gfp_flags); |
233 | if (!ds) | 233 | if (!ds) |
234 | goto out; | 234 | goto out; |
235 | 235 | ||
@@ -261,7 +261,7 @@ out: | |||
261 | * Currently only support ipv4, and one multi-path address. | 261 | * Currently only support ipv4, and one multi-path address. |
262 | */ | 262 | */ |
263 | static struct nfs4_pnfs_ds * | 263 | static struct nfs4_pnfs_ds * |
264 | decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) | 264 | decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags) |
265 | { | 265 | { |
266 | struct nfs4_pnfs_ds *ds = NULL; | 266 | struct nfs4_pnfs_ds *ds = NULL; |
267 | char *buf; | 267 | char *buf; |
@@ -303,7 +303,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) | |||
303 | rlen); | 303 | rlen); |
304 | goto out_err; | 304 | goto out_err; |
305 | } | 305 | } |
306 | buf = kmalloc(rlen + 1, GFP_KERNEL); | 306 | buf = kmalloc(rlen + 1, gfp_flags); |
307 | if (!buf) { | 307 | if (!buf) { |
308 | dprintk("%s: Not enough memory\n", __func__); | 308 | dprintk("%s: Not enough memory\n", __func__); |
309 | goto out_err; | 309 | goto out_err; |
@@ -333,7 +333,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) | |||
333 | sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]); | 333 | sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]); |
334 | port = htons((tmp[0] << 8) | (tmp[1])); | 334 | port = htons((tmp[0] << 8) | (tmp[1])); |
335 | 335 | ||
336 | ds = nfs4_pnfs_ds_add(inode, ip_addr, port); | 336 | ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags); |
337 | dprintk("%s: Decoded address and port %s\n", __func__, buf); | 337 | dprintk("%s: Decoded address and port %s\n", __func__, buf); |
338 | out_free: | 338 | out_free: |
339 | kfree(buf); | 339 | kfree(buf); |
@@ -343,7 +343,7 @@ out_err: | |||
343 | 343 | ||
344 | /* Decode opaque device data and return the result */ | 344 | /* Decode opaque device data and return the result */ |
345 | static struct nfs4_file_layout_dsaddr* | 345 | static struct nfs4_file_layout_dsaddr* |
346 | decode_device(struct inode *ino, struct pnfs_device *pdev) | 346 | decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) |
347 | { | 347 | { |
348 | int i; | 348 | int i; |
349 | u32 cnt, num; | 349 | u32 cnt, num; |
@@ -362,7 +362,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) | |||
362 | struct page *scratch; | 362 | struct page *scratch; |
363 | 363 | ||
364 | /* set up xdr stream */ | 364 | /* set up xdr stream */ |
365 | scratch = alloc_page(GFP_KERNEL); | 365 | scratch = alloc_page(gfp_flags); |
366 | if (!scratch) | 366 | if (!scratch) |
367 | goto out_err; | 367 | goto out_err; |
368 | 368 | ||
@@ -384,7 +384,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) | |||
384 | } | 384 | } |
385 | 385 | ||
386 | /* read stripe indices */ | 386 | /* read stripe indices */ |
387 | stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL); | 387 | stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags); |
388 | if (!stripe_indices) | 388 | if (!stripe_indices) |
389 | goto out_err_free_scratch; | 389 | goto out_err_free_scratch; |
390 | 390 | ||
@@ -423,7 +423,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) | |||
423 | 423 | ||
424 | dsaddr = kzalloc(sizeof(*dsaddr) + | 424 | dsaddr = kzalloc(sizeof(*dsaddr) + |
425 | (sizeof(struct nfs4_pnfs_ds *) * (num - 1)), | 425 | (sizeof(struct nfs4_pnfs_ds *) * (num - 1)), |
426 | GFP_KERNEL); | 426 | gfp_flags); |
427 | if (!dsaddr) | 427 | if (!dsaddr) |
428 | goto out_err_free_stripe_indices; | 428 | goto out_err_free_stripe_indices; |
429 | 429 | ||
@@ -452,7 +452,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev) | |||
452 | for (j = 0; j < mp_count; j++) { | 452 | for (j = 0; j < mp_count; j++) { |
453 | if (j == 0) { | 453 | if (j == 0) { |
454 | dsaddr->ds_list[i] = decode_and_add_ds(&stream, | 454 | dsaddr->ds_list[i] = decode_and_add_ds(&stream, |
455 | ino); | 455 | ino, gfp_flags); |
456 | if (dsaddr->ds_list[i] == NULL) | 456 | if (dsaddr->ds_list[i] == NULL) |
457 | goto out_err_free_deviceid; | 457 | goto out_err_free_deviceid; |
458 | } else { | 458 | } else { |
@@ -503,12 +503,12 @@ out_err: | |||
503 | * available devices. | 503 | * available devices. |
504 | */ | 504 | */ |
505 | static struct nfs4_file_layout_dsaddr * | 505 | static struct nfs4_file_layout_dsaddr * |
506 | decode_and_add_device(struct inode *inode, struct pnfs_device *dev) | 506 | decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags) |
507 | { | 507 | { |
508 | struct nfs4_file_layout_dsaddr *d, *new; | 508 | struct nfs4_file_layout_dsaddr *d, *new; |
509 | long hash; | 509 | long hash; |
510 | 510 | ||
511 | new = decode_device(inode, dev); | 511 | new = decode_device(inode, dev, gfp_flags); |
512 | if (!new) { | 512 | if (!new) { |
513 | printk(KERN_WARNING "%s: Could not decode or add device\n", | 513 | printk(KERN_WARNING "%s: Could not decode or add device\n", |
514 | __func__); | 514 | __func__); |
@@ -537,7 +537,7 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev) | |||
537 | * of available devices, and return it. | 537 | * of available devices, and return it. |
538 | */ | 538 | */ |
539 | struct nfs4_file_layout_dsaddr * | 539 | struct nfs4_file_layout_dsaddr * |
540 | get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) | 540 | get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags) |
541 | { | 541 | { |
542 | struct pnfs_device *pdev = NULL; | 542 | struct pnfs_device *pdev = NULL; |
543 | u32 max_resp_sz; | 543 | u32 max_resp_sz; |
@@ -556,17 +556,17 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) | |||
556 | dprintk("%s inode %p max_resp_sz %u max_pages %d\n", | 556 | dprintk("%s inode %p max_resp_sz %u max_pages %d\n", |
557 | __func__, inode, max_resp_sz, max_pages); | 557 | __func__, inode, max_resp_sz, max_pages); |
558 | 558 | ||
559 | pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL); | 559 | pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags); |
560 | if (pdev == NULL) | 560 | if (pdev == NULL) |
561 | return NULL; | 561 | return NULL; |
562 | 562 | ||
563 | pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); | 563 | pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); |
564 | if (pages == NULL) { | 564 | if (pages == NULL) { |
565 | kfree(pdev); | 565 | kfree(pdev); |
566 | return NULL; | 566 | return NULL; |
567 | } | 567 | } |
568 | for (i = 0; i < max_pages; i++) { | 568 | for (i = 0; i < max_pages; i++) { |
569 | pages[i] = alloc_page(GFP_KERNEL); | 569 | pages[i] = alloc_page(gfp_flags); |
570 | if (!pages[i]) | 570 | if (!pages[i]) |
571 | goto out_free; | 571 | goto out_free; |
572 | } | 572 | } |
@@ -587,7 +587,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) | |||
587 | * Found new device, need to decode it and then add it to the | 587 | * Found new device, need to decode it and then add it to the |
588 | * list of known devices for this mountpoint. | 588 | * list of known devices for this mountpoint. |
589 | */ | 589 | */ |
590 | dsaddr = decode_and_add_device(inode, pdev); | 590 | dsaddr = decode_and_add_device(inode, pdev, gfp_flags); |
591 | out_free: | 591 | out_free: |
592 | for (i = 0; i < max_pages; i++) | 592 | for (i = 0; i < max_pages; i++) |
593 | __free_page(pages[i]); | 593 | __free_page(pages[i]); |
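Across the filelayout and pnfs changes the allocation context is no longer hard-coded as GFP_KERNEL; a gfp_t parameter is threaded from the entry points down to every kzalloc()/alloc_page(), presumably so callers on writeback or reclaim paths can pass a more restrictive mask. A userspace model of the pattern (the flag names and helpers below are stand-ins, not kernel symbols):

    #include <stdio.h>
    #include <stdlib.h>

    /* Instead of each helper choosing its own allocation context, the caller
     * decides once and the value rides down the call chain. */
    typedef unsigned int gfp_model_t;
    #define GFP_MODEL_KERNEL 0x1   /* may sleep, may recurse into the FS */
    #define GFP_MODEL_NOFS   0x2   /* may sleep, must not recurse into the FS */

    static void *alloc_device_info(size_t sz, gfp_model_t gfp)
    {
        printf("allocating %zu bytes with context %#x\n", sz, gfp);
        return calloc(1, sz);
    }

    static void *alloc_layout_segment(size_t sz, gfp_model_t gfp)
    {
        /* deeper helper just forwards the caller's context */
        return alloc_device_info(sz, gfp);
    }

    int main(void)
    {
        void *a = alloc_layout_segment(64, GFP_MODEL_KERNEL); /* normal path */
        void *b = alloc_layout_segment(64, GFP_MODEL_NOFS);   /* writeback path */

        free(a);
        free(b);
        return 0;
    }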
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 69c0f3c5ee7a..cf1b339c3937 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -300,6 +300,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc | |||
300 | ret = nfs4_delay(server->client, &exception->timeout); | 300 | ret = nfs4_delay(server->client, &exception->timeout); |
301 | if (ret != 0) | 301 | if (ret != 0) |
302 | break; | 302 | break; |
303 | case -NFS4ERR_RETRY_UNCACHED_REP: | ||
303 | case -NFS4ERR_OLD_STATEID: | 304 | case -NFS4ERR_OLD_STATEID: |
304 | exception->retry = 1; | 305 | exception->retry = 1; |
305 | break; | 306 | break; |
@@ -3695,6 +3696,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3695 | rpc_delay(task, NFS4_POLL_RETRY_MAX); | 3696 | rpc_delay(task, NFS4_POLL_RETRY_MAX); |
3696 | task->tk_status = 0; | 3697 | task->tk_status = 0; |
3697 | return -EAGAIN; | 3698 | return -EAGAIN; |
3699 | case -NFS4ERR_RETRY_UNCACHED_REP: | ||
3698 | case -NFS4ERR_OLD_STATEID: | 3700 | case -NFS4ERR_OLD_STATEID: |
3699 | task->tk_status = 0; | 3701 | task->tk_status = 0; |
3700 | return -EAGAIN; | 3702 | return -EAGAIN; |
@@ -4844,6 +4846,8 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) | |||
4844 | dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); | 4846 | dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); |
4845 | rpc_delay(task, NFS4_POLL_RETRY_MIN); | 4847 | rpc_delay(task, NFS4_POLL_RETRY_MIN); |
4846 | task->tk_status = 0; | 4848 | task->tk_status = 0; |
4849 | /* fall through */ | ||
4850 | case -NFS4ERR_RETRY_UNCACHED_REP: | ||
4847 | nfs_restart_rpc(task, data->clp); | 4851 | nfs_restart_rpc(task, data->clp); |
4848 | return; | 4852 | return; |
4849 | } | 4853 | } |
@@ -5479,6 +5483,8 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf | |||
5479 | break; | 5483 | break; |
5480 | case -NFS4ERR_DELAY: | 5484 | case -NFS4ERR_DELAY: |
5481 | rpc_delay(task, NFS4_POLL_RETRY_MAX); | 5485 | rpc_delay(task, NFS4_POLL_RETRY_MAX); |
5486 | /* fall through */ | ||
5487 | case -NFS4ERR_RETRY_UNCACHED_REP: | ||
5482 | return -EAGAIN; | 5488 | return -EAGAIN; |
5483 | default: | 5489 | default: |
5484 | nfs4_schedule_lease_recovery(clp); | 5490 | nfs4_schedule_lease_recovery(clp); |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index ff681ab65d31..f57f5281a520 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -383,6 +383,7 @@ pnfs_destroy_all_layouts(struct nfs_client *clp) | |||
383 | plh_layouts); | 383 | plh_layouts); |
384 | dprintk("%s freeing layout for inode %lu\n", __func__, | 384 | dprintk("%s freeing layout for inode %lu\n", __func__, |
385 | lo->plh_inode->i_ino); | 385 | lo->plh_inode->i_ino); |
386 | list_del_init(&lo->plh_layouts); | ||
386 | pnfs_destroy_layout(NFS_I(lo->plh_inode)); | 387 | pnfs_destroy_layout(NFS_I(lo->plh_inode)); |
387 | } | 388 | } |
388 | } | 389 | } |
@@ -466,7 +467,8 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, | |||
466 | static struct pnfs_layout_segment * | 467 | static struct pnfs_layout_segment * |
467 | send_layoutget(struct pnfs_layout_hdr *lo, | 468 | send_layoutget(struct pnfs_layout_hdr *lo, |
468 | struct nfs_open_context *ctx, | 469 | struct nfs_open_context *ctx, |
469 | u32 iomode) | 470 | u32 iomode, |
471 | gfp_t gfp_flags) | ||
470 | { | 472 | { |
471 | struct inode *ino = lo->plh_inode; | 473 | struct inode *ino = lo->plh_inode; |
472 | struct nfs_server *server = NFS_SERVER(ino); | 474 | struct nfs_server *server = NFS_SERVER(ino); |
@@ -479,7 +481,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, | |||
479 | dprintk("--> %s\n", __func__); | 481 | dprintk("--> %s\n", __func__); |
480 | 482 | ||
481 | BUG_ON(ctx == NULL); | 483 | BUG_ON(ctx == NULL); |
482 | lgp = kzalloc(sizeof(*lgp), GFP_KERNEL); | 484 | lgp = kzalloc(sizeof(*lgp), gfp_flags); |
483 | if (lgp == NULL) | 485 | if (lgp == NULL) |
484 | return NULL; | 486 | return NULL; |
485 | 487 | ||
@@ -487,12 +489,12 @@ send_layoutget(struct pnfs_layout_hdr *lo, | |||
487 | max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; | 489 | max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; |
488 | max_pages = max_resp_sz >> PAGE_SHIFT; | 490 | max_pages = max_resp_sz >> PAGE_SHIFT; |
489 | 491 | ||
490 | pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); | 492 | pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); |
491 | if (!pages) | 493 | if (!pages) |
492 | goto out_err_free; | 494 | goto out_err_free; |
493 | 495 | ||
494 | for (i = 0; i < max_pages; i++) { | 496 | for (i = 0; i < max_pages; i++) { |
495 | pages[i] = alloc_page(GFP_KERNEL); | 497 | pages[i] = alloc_page(gfp_flags); |
496 | if (!pages[i]) | 498 | if (!pages[i]) |
497 | goto out_err_free; | 499 | goto out_err_free; |
498 | } | 500 | } |
@@ -508,6 +510,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, | |||
508 | lgp->args.layout.pages = pages; | 510 | lgp->args.layout.pages = pages; |
509 | lgp->args.layout.pglen = max_pages * PAGE_SIZE; | 511 | lgp->args.layout.pglen = max_pages * PAGE_SIZE; |
510 | lgp->lsegpp = &lseg; | 512 | lgp->lsegpp = &lseg; |
513 | lgp->gfp_flags = gfp_flags; | ||
511 | 514 | ||
512 | /* Synchronously retrieve layout information from server and | 515 | /* Synchronously retrieve layout information from server and |
513 | * store in lseg. | 516 | * store in lseg. |
@@ -665,11 +668,11 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo, | |||
665 | } | 668 | } |
666 | 669 | ||
667 | static struct pnfs_layout_hdr * | 670 | static struct pnfs_layout_hdr * |
668 | alloc_init_layout_hdr(struct inode *ino) | 671 | alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags) |
669 | { | 672 | { |
670 | struct pnfs_layout_hdr *lo; | 673 | struct pnfs_layout_hdr *lo; |
671 | 674 | ||
672 | lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL); | 675 | lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags); |
673 | if (!lo) | 676 | if (!lo) |
674 | return NULL; | 677 | return NULL; |
675 | atomic_set(&lo->plh_refcount, 1); | 678 | atomic_set(&lo->plh_refcount, 1); |
@@ -681,7 +684,7 @@ alloc_init_layout_hdr(struct inode *ino) | |||
681 | } | 684 | } |
682 | 685 | ||
683 | static struct pnfs_layout_hdr * | 686 | static struct pnfs_layout_hdr * |
684 | pnfs_find_alloc_layout(struct inode *ino) | 687 | pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags) |
685 | { | 688 | { |
686 | struct nfs_inode *nfsi = NFS_I(ino); | 689 | struct nfs_inode *nfsi = NFS_I(ino); |
687 | struct pnfs_layout_hdr *new = NULL; | 690 | struct pnfs_layout_hdr *new = NULL; |
@@ -696,7 +699,7 @@ pnfs_find_alloc_layout(struct inode *ino) | |||
696 | return nfsi->layout; | 699 | return nfsi->layout; |
697 | } | 700 | } |
698 | spin_unlock(&ino->i_lock); | 701 | spin_unlock(&ino->i_lock); |
699 | new = alloc_init_layout_hdr(ino); | 702 | new = alloc_init_layout_hdr(ino, gfp_flags); |
700 | spin_lock(&ino->i_lock); | 703 | spin_lock(&ino->i_lock); |
701 | 704 | ||
702 | if (likely(nfsi->layout == NULL)) /* Won the race? */ | 705 | if (likely(nfsi->layout == NULL)) /* Won the race? */ |
@@ -756,7 +759,8 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode) | |||
756 | struct pnfs_layout_segment * | 759 | struct pnfs_layout_segment * |
757 | pnfs_update_layout(struct inode *ino, | 760 | pnfs_update_layout(struct inode *ino, |
758 | struct nfs_open_context *ctx, | 761 | struct nfs_open_context *ctx, |
759 | enum pnfs_iomode iomode) | 762 | enum pnfs_iomode iomode, |
763 | gfp_t gfp_flags) | ||
760 | { | 764 | { |
761 | struct nfs_inode *nfsi = NFS_I(ino); | 765 | struct nfs_inode *nfsi = NFS_I(ino); |
762 | struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; | 766 | struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; |
@@ -767,7 +771,7 @@ pnfs_update_layout(struct inode *ino, | |||
767 | if (!pnfs_enabled_sb(NFS_SERVER(ino))) | 771 | if (!pnfs_enabled_sb(NFS_SERVER(ino))) |
768 | return NULL; | 772 | return NULL; |
769 | spin_lock(&ino->i_lock); | 773 | spin_lock(&ino->i_lock); |
770 | lo = pnfs_find_alloc_layout(ino); | 774 | lo = pnfs_find_alloc_layout(ino, gfp_flags); |
771 | if (lo == NULL) { | 775 | if (lo == NULL) { |
772 | dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__); | 776 | dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__); |
773 | goto out_unlock; | 777 | goto out_unlock; |
@@ -807,7 +811,7 @@ pnfs_update_layout(struct inode *ino, | |||
807 | spin_unlock(&clp->cl_lock); | 811 | spin_unlock(&clp->cl_lock); |
808 | } | 812 | } |
809 | 813 | ||
810 | lseg = send_layoutget(lo, ctx, iomode); | 814 | lseg = send_layoutget(lo, ctx, iomode, gfp_flags); |
811 | if (!lseg && first) { | 815 | if (!lseg && first) { |
812 | spin_lock(&clp->cl_lock); | 816 | spin_lock(&clp->cl_lock); |
813 | list_del_init(&lo->plh_layouts); | 817 | list_del_init(&lo->plh_layouts); |
@@ -846,7 +850,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) | |||
846 | goto out; | 850 | goto out; |
847 | } | 851 | } |
848 | /* Inject layout blob into I/O device driver */ | 852 | /* Inject layout blob into I/O device driver */ |
849 | lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res); | 853 | lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags); |
850 | if (!lseg || IS_ERR(lseg)) { | 854 | if (!lseg || IS_ERR(lseg)) { |
851 | if (!lseg) | 855 | if (!lseg) |
852 | status = -ENOMEM; | 856 | status = -ENOMEM; |
@@ -899,7 +903,8 @@ static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio, | |||
899 | /* This is first coelesce call for a series of nfs_pages */ | 903 | /* This is first coelesce call for a series of nfs_pages */ |
900 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, | 904 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, |
901 | prev->wb_context, | 905 | prev->wb_context, |
902 | IOMODE_READ); | 906 | IOMODE_READ, |
907 | GFP_KERNEL); | ||
903 | } | 908 | } |
904 | return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); | 909 | return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); |
905 | } | 910 | } |
@@ -921,7 +926,8 @@ static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio, | |||
921 | /* This is first coelesce call for a series of nfs_pages */ | 926 | /* This is first coelesce call for a series of nfs_pages */ |
922 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, | 927 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, |
923 | prev->wb_context, | 928 | prev->wb_context, |
924 | IOMODE_RW); | 929 | IOMODE_RW, |
930 | GFP_NOFS); | ||
925 | } | 931 | } |
926 | return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); | 932 | return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); |
927 | } | 933 | } |
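The fs/nfs/pnfs.c hunks above stop hard-coding GFP_KERNEL in the layoutget path and instead thread a gfp_t argument down from the I/O entry points: the read coalescing path passes GFP_KERNEL while the write path passes GFP_NOFS, so allocations made during writeback cannot recurse back into the filesystem. A minimal userspace sketch of the same "decide the allocation context once, pass it everywhere" pattern, with invented names, is:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int alloc_flags_t;
#define ALLOC_NORMAL	0x1	/* stands in for GFP_KERNEL */
#define ALLOC_NO_FS	0x2	/* stands in for GFP_NOFS   */

/* A real allocator would consult the flags; here we only trace them. */
static void *ctx_alloc(size_t size, alloc_flags_t flags)
{
	printf("alloc %zu bytes with flags 0x%x\n", size, flags);
	return calloc(1, size);
}

struct layout { char blob[64]; };

/* Every helper takes the flags instead of hard-coding an allocation mode. */
static struct layout *get_layout(alloc_flags_t flags)
{
	return ctx_alloc(sizeof(struct layout), flags);
}

static void do_read(void)  { free(get_layout(ALLOC_NORMAL)); }
static void do_write(void) { free(get_layout(ALLOC_NO_FS)); }	/* writeback must not recurse */

int main(void)
{
	do_read();
	do_write();
	return 0;
}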
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index bc4827202e7a..0c015bad9e7a 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
@@ -70,7 +70,7 @@ struct pnfs_layoutdriver_type { | |||
70 | const u32 id; | 70 | const u32 id; |
71 | const char *name; | 71 | const char *name; |
72 | struct module *owner; | 72 | struct module *owner; |
73 | struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr); | 73 | struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); |
74 | void (*free_lseg) (struct pnfs_layout_segment *lseg); | 74 | void (*free_lseg) (struct pnfs_layout_segment *lseg); |
75 | 75 | ||
76 | /* test for nfs page cache coalescing */ | 76 | /* test for nfs page cache coalescing */ |
@@ -126,7 +126,7 @@ void get_layout_hdr(struct pnfs_layout_hdr *lo); | |||
126 | void put_lseg(struct pnfs_layout_segment *lseg); | 126 | void put_lseg(struct pnfs_layout_segment *lseg); |
127 | struct pnfs_layout_segment * | 127 | struct pnfs_layout_segment * |
128 | pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, | 128 | pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, |
129 | enum pnfs_iomode access_type); | 129 | enum pnfs_iomode access_type, gfp_t gfp_flags); |
130 | void set_pnfs_layoutdriver(struct nfs_server *, u32 id); | 130 | void set_pnfs_layoutdriver(struct nfs_server *, u32 id); |
131 | void unset_pnfs_layoutdriver(struct nfs_server *); | 131 | void unset_pnfs_layoutdriver(struct nfs_server *); |
132 | enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *, | 132 | enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *, |
@@ -245,7 +245,7 @@ static inline void put_lseg(struct pnfs_layout_segment *lseg) | |||
245 | 245 | ||
246 | static inline struct pnfs_layout_segment * | 246 | static inline struct pnfs_layout_segment * |
247 | pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, | 247 | pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, |
248 | enum pnfs_iomode access_type) | 248 | enum pnfs_iomode access_type, gfp_t gfp_flags) |
249 | { | 249 | { |
250 | return NULL; | 250 | return NULL; |
251 | } | 251 | } |
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 7cded2b12a05..2bcf0dc306a1 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -288,7 +288,7 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc) | |||
288 | atomic_set(&req->wb_complete, requests); | 288 | atomic_set(&req->wb_complete, requests); |
289 | 289 | ||
290 | BUG_ON(desc->pg_lseg != NULL); | 290 | BUG_ON(desc->pg_lseg != NULL); |
291 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); | 291 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL); |
292 | ClearPageError(page); | 292 | ClearPageError(page); |
293 | offset = 0; | 293 | offset = 0; |
294 | nbytes = desc->pg_count; | 294 | nbytes = desc->pg_count; |
@@ -351,7 +351,7 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc) | |||
351 | } | 351 | } |
352 | req = nfs_list_entry(data->pages.next); | 352 | req = nfs_list_entry(data->pages.next); |
353 | if ((!lseg) && list_is_singular(&data->pages)) | 353 | if ((!lseg) && list_is_singular(&data->pages)) |
354 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); | 354 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL); |
355 | 355 | ||
356 | ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count, | 356 | ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count, |
357 | 0, lseg); | 357 | 0, lseg); |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 3bd5d7e80f6c..49c715b4ac92 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -939,7 +939,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc) | |||
939 | atomic_set(&req->wb_complete, requests); | 939 | atomic_set(&req->wb_complete, requests); |
940 | 940 | ||
941 | BUG_ON(desc->pg_lseg); | 941 | BUG_ON(desc->pg_lseg); |
942 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); | 942 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS); |
943 | ClearPageError(page); | 943 | ClearPageError(page); |
944 | offset = 0; | 944 | offset = 0; |
945 | nbytes = desc->pg_count; | 945 | nbytes = desc->pg_count; |
@@ -1013,7 +1013,7 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc) | |||
1013 | } | 1013 | } |
1014 | req = nfs_list_entry(data->pages.next); | 1014 | req = nfs_list_entry(data->pages.next); |
1015 | if ((!lseg) && list_is_singular(&data->pages)) | 1015 | if ((!lseg) && list_is_singular(&data->pages)) |
1016 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); | 1016 | lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS); |
1017 | 1017 | ||
1018 | if ((desc->pg_ioflags & FLUSH_COND_STABLE) && | 1018 | if ((desc->pg_ioflags & FLUSH_COND_STABLE) && |
1019 | (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) | 1019 | (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) |
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 0a0a66d98cce..f7684483785e 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c | |||
@@ -646,7 +646,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) | |||
646 | unsigned long group, group_offset; | 646 | unsigned long group, group_offset; |
647 | int i, j, n, ret; | 647 | int i, j, n, ret; |
648 | 648 | ||
649 | for (i = 0; i < nitems; i += n) { | 649 | for (i = 0; i < nitems; i = j) { |
650 | group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset); | 650 | group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset); |
651 | ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh); | 651 | ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh); |
652 | if (ret < 0) | 652 | if (ret < 0) |
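The nilfs2 change above is a one-liner: the outer loop in nilfs_palloc_freev() now resumes from the index j that the inner scan actually reached, instead of advancing by a separately tracked count n, which removes any chance of the two getting out of step. A generic userspace sketch of that batch-processing shape, with made-up data, is:

#include <stdio.h>

int main(void)
{
	int keys[] = { 1, 1, 2, 2, 2, 3 };
	int n = sizeof(keys) / sizeof(keys[0]);
	int i, j;

	for (i = 0; i < n; i = j) {		/* resume from j, not i += batch_len */
		for (j = i + 1; j < n && keys[j] == keys[i]; j++)
			;			/* scan to the end of this batch */
		printf("batch of %d item(s) with key %d\n", j - i, keys[i]);
	}
	return 0;
}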
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 643720209a98..9a3e6bbff27b 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -539,25 +539,41 @@ static int o2hb_verify_crc(struct o2hb_region *reg, | |||
539 | 539 | ||
540 | /* We want to make sure that nobody is heartbeating on top of us -- | 540 | /* We want to make sure that nobody is heartbeating on top of us -- |
541 | * this will help detect an invalid configuration. */ | 541 | * this will help detect an invalid configuration. */ |
542 | static int o2hb_check_last_timestamp(struct o2hb_region *reg) | 542 | static void o2hb_check_last_timestamp(struct o2hb_region *reg) |
543 | { | 543 | { |
544 | int node_num, ret; | ||
545 | struct o2hb_disk_slot *slot; | 544 | struct o2hb_disk_slot *slot; |
546 | struct o2hb_disk_heartbeat_block *hb_block; | 545 | struct o2hb_disk_heartbeat_block *hb_block; |
546 | char *errstr; | ||
547 | 547 | ||
548 | node_num = o2nm_this_node(); | 548 | slot = ®->hr_slots[o2nm_this_node()]; |
549 | |||
550 | ret = 1; | ||
551 | slot = ®->hr_slots[node_num]; | ||
552 | /* Don't check on our 1st timestamp */ | 549 | /* Don't check on our 1st timestamp */ |
553 | if (slot->ds_last_time) { | 550 | if (!slot->ds_last_time) |
554 | hb_block = slot->ds_raw_block; | 551 | return; |
555 | 552 | ||
556 | if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time) | 553 | hb_block = slot->ds_raw_block; |
557 | ret = 0; | 554 | if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && |
558 | } | 555 | le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && |
556 | hb_block->hb_node == slot->ds_node_num) | ||
557 | return; | ||
559 | 558 | ||
560 | return ret; | 559 | #define ERRSTR1 "Another node is heartbeating on device" |
560 | #define ERRSTR2 "Heartbeat generation mismatch on device" | ||
561 | #define ERRSTR3 "Heartbeat sequence mismatch on device" | ||
562 | |||
563 | if (hb_block->hb_node != slot->ds_node_num) | ||
564 | errstr = ERRSTR1; | ||
565 | else if (le64_to_cpu(hb_block->hb_generation) != | ||
566 | slot->ds_last_generation) | ||
567 | errstr = ERRSTR2; | ||
568 | else | ||
569 | errstr = ERRSTR3; | ||
570 | |||
571 | mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), " | ||
572 | "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name, | ||
573 | slot->ds_node_num, (unsigned long long)slot->ds_last_generation, | ||
574 | (unsigned long long)slot->ds_last_time, hb_block->hb_node, | ||
575 | (unsigned long long)le64_to_cpu(hb_block->hb_generation), | ||
576 | (unsigned long long)le64_to_cpu(hb_block->hb_seq)); | ||
561 | } | 577 | } |
562 | 578 | ||
563 | static inline void o2hb_prepare_block(struct o2hb_region *reg, | 579 | static inline void o2hb_prepare_block(struct o2hb_region *reg, |
@@ -983,9 +999,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
983 | /* With an up to date view of the slots, we can check that no | 999 | /* With an up to date view of the slots, we can check that no |
984 | * other node has been improperly configured to heartbeat in | 1000 | * other node has been improperly configured to heartbeat in |
985 | * our slot. */ | 1001 | * our slot. */ |
986 | if (!o2hb_check_last_timestamp(reg)) | 1002 | o2hb_check_last_timestamp(reg); |
987 | mlog(ML_ERROR, "Device \"%s\": another node is heartbeating " | ||
988 | "in our slot!\n", reg->hr_dev_name); | ||
989 | 1003 | ||
990 | /* fill in the proper info for our next heartbeat */ | 1004 | /* fill in the proper info for our next heartbeat */ |
991 | o2hb_prepare_block(reg, reg->hr_generation); | 1005 | o2hb_prepare_block(reg, reg->hr_generation); |
@@ -999,8 +1013,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
999 | } | 1013 | } |
1000 | 1014 | ||
1001 | i = -1; | 1015 | i = -1; |
1002 | while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { | 1016 | while((i = find_next_bit(configured_nodes, |
1003 | 1017 | O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { | |
1004 | change |= o2hb_check_slot(reg, ®->hr_slots[i]); | 1018 | change |= o2hb_check_slot(reg, ®->hr_slots[i]); |
1005 | } | 1019 | } |
1006 | 1020 | ||
@@ -1690,6 +1704,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1690 | struct file *filp = NULL; | 1704 | struct file *filp = NULL; |
1691 | struct inode *inode = NULL; | 1705 | struct inode *inode = NULL; |
1692 | ssize_t ret = -EINVAL; | 1706 | ssize_t ret = -EINVAL; |
1707 | int live_threshold; | ||
1693 | 1708 | ||
1694 | if (reg->hr_bdev) | 1709 | if (reg->hr_bdev) |
1695 | goto out; | 1710 | goto out; |
@@ -1766,8 +1781,18 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1766 | * A node is considered live after it has beat LIVE_THRESHOLD | 1781 | * A node is considered live after it has beat LIVE_THRESHOLD |
1767 | * times. We're not steady until we've given them a chance | 1782 | * times. We're not steady until we've given them a chance |
1768 | * _after_ our first read. | 1783 | * _after_ our first read. |
1784 | * The default threshold is bare minimum so as to limit the delay | ||
1785 | * during mounts. For global heartbeat, the threshold doubled for the | ||
1786 | * first region. | ||
1769 | */ | 1787 | */ |
1770 | atomic_set(®->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1); | 1788 | live_threshold = O2HB_LIVE_THRESHOLD; |
1789 | if (o2hb_global_heartbeat_active()) { | ||
1790 | spin_lock(&o2hb_live_lock); | ||
1791 | if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1) | ||
1792 | live_threshold <<= 1; | ||
1793 | spin_unlock(&o2hb_live_lock); | ||
1794 | } | ||
1795 | atomic_set(®->hr_steady_iterations, live_threshold + 1); | ||
1771 | 1796 | ||
1772 | hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", | 1797 | hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", |
1773 | reg->hr_item.ci_name); | 1798 | reg->hr_item.ci_name); |
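In the heartbeat.c hunks above, o2hb_check_last_timestamp() stops returning a pass/fail flag and instead logs which field of the on-disk heartbeat block disagrees with what this node last wrote, checking the most telling field (the node number) first, then the generation, then the sequence; the caller no longer needs a return value. A small userspace sketch of that most-specific-error-first classification, with stand-in field names, is:

#include <stdio.h>

struct slot  { unsigned node, gen, seq; };	/* what we last wrote */
struct block { unsigned node, gen, seq; };	/* what is on disk now */

static const char *mismatch(const struct slot *s, const struct block *b)
{
	if (b->node != s->node)
		return "another node is heartbeating in our slot";
	if (b->gen != s->gen)
		return "generation mismatch";
	if (b->seq != s->seq)
		return "sequence mismatch";
	return NULL;				/* everything matches: nothing to report */
}

int main(void)
{
	struct slot  s = { .node = 3, .gen = 7, .seq = 42 };
	struct block b = { .node = 3, .gen = 7, .seq = 41 };
	const char *err = mismatch(&s, &b);

	if (err)
		printf("heartbeat check failed: %s\n", err);
	return 0;
}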
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 9fe5b8fd658f..8582e3f4f120 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -2868,7 +2868,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, | |||
2868 | bytes = blocks_wanted << sb->s_blocksize_bits; | 2868 | bytes = blocks_wanted << sb->s_blocksize_bits; |
2869 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); | 2869 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); |
2870 | struct ocfs2_inode_info *oi = OCFS2_I(dir); | 2870 | struct ocfs2_inode_info *oi = OCFS2_I(dir); |
2871 | struct ocfs2_alloc_context *data_ac; | 2871 | struct ocfs2_alloc_context *data_ac = NULL; |
2872 | struct ocfs2_alloc_context *meta_ac = NULL; | 2872 | struct ocfs2_alloc_context *meta_ac = NULL; |
2873 | struct buffer_head *dirdata_bh = NULL; | 2873 | struct buffer_head *dirdata_bh = NULL; |
2874 | struct buffer_head *dx_root_bh = NULL; | 2874 | struct buffer_head *dx_root_bh = NULL; |
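The ocfs2/dir.c change above initializes data_ac to NULL, matching meta_ac, so the function's shared cleanup path can release both pointers unconditionally even when an early failure jumps there before data_ac was ever assigned. The usual shape of that goto-cleanup idiom, sketched in plain C with hypothetical helpers, is:

#include <stdlib.h>

static int do_setup(void)
{
	char *data_ac = NULL;	/* the fix: initialize before any goto out */
	char *meta_ac = NULL;
	int ret = -1;

	meta_ac = malloc(32);
	if (!meta_ac)
		goto out;	/* data_ac has not been assigned yet */

	data_ac = malloc(32);
	if (!data_ac)
		goto out;

	ret = 0;
out:
	free(data_ac);		/* safe: free(NULL) is a no-op */
	free(meta_ac);
	return ret;
}

int main(void) { return do_setup() ? 1 : 0; }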
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 7540a492eaba..3b179d6cbde0 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -1614,7 +1614,8 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) | |||
1614 | spin_unlock(&dlm->spinlock); | 1614 | spin_unlock(&dlm->spinlock); |
1615 | 1615 | ||
1616 | /* Support for global heartbeat and node info was added in 1.1 */ | 1616 | /* Support for global heartbeat and node info was added in 1.1 */ |
1617 | if (dlm_protocol.pv_major > 1 || dlm_protocol.pv_minor > 0) { | 1617 | if (dlm->dlm_locking_proto.pv_major > 1 || |
1618 | dlm->dlm_locking_proto.pv_minor > 0) { | ||
1618 | status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); | 1619 | status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); |
1619 | if (status) { | 1620 | if (status) { |
1620 | mlog_errno(status); | 1621 | mlog_errno(status); |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index fede57ed005f..84d166328cf7 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -2574,6 +2574,9 @@ fail: | |||
2574 | res->state &= ~DLM_LOCK_RES_MIGRATING; | 2574 | res->state &= ~DLM_LOCK_RES_MIGRATING; |
2575 | wake = 1; | 2575 | wake = 1; |
2576 | spin_unlock(&res->spinlock); | 2576 | spin_unlock(&res->spinlock); |
2577 | if (dlm_is_host_down(ret)) | ||
2578 | dlm_wait_for_node_death(dlm, target, | ||
2579 | DLM_NODE_DEATH_WAIT_MAX); | ||
2577 | goto leave; | 2580 | goto leave; |
2578 | } | 2581 | } |
2579 | 2582 | ||
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 41565ae52856..89659d6dc206 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -1607,6 +1607,9 @@ static void ocfs2_calc_trunc_pos(struct inode *inode, | |||
1607 | range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); | 1607 | range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); |
1608 | 1608 | ||
1609 | if (le32_to_cpu(rec->e_cpos) >= trunc_start) { | 1609 | if (le32_to_cpu(rec->e_cpos) >= trunc_start) { |
1610 | /* | ||
1611 | * remove an entire extent record. | ||
1612 | */ | ||
1610 | *trunc_cpos = le32_to_cpu(rec->e_cpos); | 1613 | *trunc_cpos = le32_to_cpu(rec->e_cpos); |
1611 | /* | 1614 | /* |
1612 | * Skip holes if any. | 1615 | * Skip holes if any. |
@@ -1617,7 +1620,16 @@ static void ocfs2_calc_trunc_pos(struct inode *inode, | |||
1617 | *blkno = le64_to_cpu(rec->e_blkno); | 1620 | *blkno = le64_to_cpu(rec->e_blkno); |
1618 | *trunc_end = le32_to_cpu(rec->e_cpos); | 1621 | *trunc_end = le32_to_cpu(rec->e_cpos); |
1619 | } else if (range > trunc_start) { | 1622 | } else if (range > trunc_start) { |
1623 | /* | ||
1624 | * remove a partial extent record, which means we're | ||
1625 | * removing the last extent record. | ||
1626 | */ | ||
1620 | *trunc_cpos = trunc_start; | 1627 | *trunc_cpos = trunc_start; |
1628 | /* | ||
1629 | * skip hole if any. | ||
1630 | */ | ||
1631 | if (range < *trunc_end) | ||
1632 | *trunc_end = range; | ||
1621 | *trunc_len = *trunc_end - trunc_start; | 1633 | *trunc_len = *trunc_end - trunc_start; |
1622 | coff = trunc_start - le32_to_cpu(rec->e_cpos); | 1634 | coff = trunc_start - le32_to_cpu(rec->e_cpos); |
1623 | *blkno = le64_to_cpu(rec->e_blkno) + | 1635 | *blkno = le64_to_cpu(rec->e_blkno) + |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index b141a44605ca..295d56454e8b 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -1260,6 +1260,9 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) | |||
1260 | { | 1260 | { |
1261 | struct ocfs2_journal *journal = osb->journal; | 1261 | struct ocfs2_journal *journal = osb->journal; |
1262 | 1262 | ||
1263 | if (ocfs2_is_hard_readonly(osb)) | ||
1264 | return; | ||
1265 | |||
1263 | /* No need to queue up our truncate_log as regular cleanup will catch | 1266 | /* No need to queue up our truncate_log as regular cleanup will catch |
1264 | * that */ | 1267 | * that */ |
1265 | ocfs2_queue_recovery_completion(journal, osb->slot_num, | 1268 | ocfs2_queue_recovery_completion(journal, osb->slot_num, |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index e4f9c1b0836c..3e898a48122d 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
@@ -926,6 +926,7 @@ restart: | |||
926 | XFS_LOOKUP_BATCH, | 926 | XFS_LOOKUP_BATCH, |
927 | XFS_ICI_RECLAIM_TAG); | 927 | XFS_ICI_RECLAIM_TAG); |
928 | if (!nr_found) { | 928 | if (!nr_found) { |
929 | done = 1; | ||
929 | rcu_read_unlock(); | 930 | rcu_read_unlock(); |
930 | break; | 931 | break; |
931 | } | 932 | } |
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index acdb92f14d51..5fc2380092c8 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c | |||
@@ -346,20 +346,23 @@ xfs_ail_delete( | |||
346 | */ | 346 | */ |
347 | STATIC void | 347 | STATIC void |
348 | xfs_ail_worker( | 348 | xfs_ail_worker( |
349 | struct work_struct *work) | 349 | struct work_struct *work) |
350 | { | 350 | { |
351 | struct xfs_ail *ailp = container_of(to_delayed_work(work), | 351 | struct xfs_ail *ailp = container_of(to_delayed_work(work), |
352 | struct xfs_ail, xa_work); | 352 | struct xfs_ail, xa_work); |
353 | long tout; | 353 | xfs_mount_t *mp = ailp->xa_mount; |
354 | xfs_lsn_t target = ailp->xa_target; | ||
355 | xfs_lsn_t lsn; | ||
356 | xfs_log_item_t *lip; | ||
357 | int flush_log, count, stuck; | ||
358 | xfs_mount_t *mp = ailp->xa_mount; | ||
359 | struct xfs_ail_cursor *cur = &ailp->xa_cursors; | 354 | struct xfs_ail_cursor *cur = &ailp->xa_cursors; |
360 | int push_xfsbufd = 0; | 355 | xfs_log_item_t *lip; |
356 | xfs_lsn_t lsn; | ||
357 | xfs_lsn_t target; | ||
358 | long tout = 10; | ||
359 | int flush_log = 0; | ||
360 | int stuck = 0; | ||
361 | int count = 0; | ||
362 | int push_xfsbufd = 0; | ||
361 | 363 | ||
362 | spin_lock(&ailp->xa_lock); | 364 | spin_lock(&ailp->xa_lock); |
365 | target = ailp->xa_target; | ||
363 | xfs_trans_ail_cursor_init(ailp, cur); | 366 | xfs_trans_ail_cursor_init(ailp, cur); |
364 | lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); | 367 | lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); |
365 | if (!lip || XFS_FORCED_SHUTDOWN(mp)) { | 368 | if (!lip || XFS_FORCED_SHUTDOWN(mp)) { |
@@ -368,8 +371,7 @@ xfs_ail_worker( | |||
368 | */ | 371 | */ |
369 | xfs_trans_ail_cursor_done(ailp, cur); | 372 | xfs_trans_ail_cursor_done(ailp, cur); |
370 | spin_unlock(&ailp->xa_lock); | 373 | spin_unlock(&ailp->xa_lock); |
371 | ailp->xa_last_pushed_lsn = 0; | 374 | goto out_done; |
372 | return; | ||
373 | } | 375 | } |
374 | 376 | ||
375 | XFS_STATS_INC(xs_push_ail); | 377 | XFS_STATS_INC(xs_push_ail); |
@@ -386,8 +388,7 @@ xfs_ail_worker( | |||
386 | * lots of contention on the AIL lists. | 388 | * lots of contention on the AIL lists. |
387 | */ | 389 | */ |
388 | lsn = lip->li_lsn; | 390 | lsn = lip->li_lsn; |
389 | flush_log = stuck = count = 0; | 391 | while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) { |
390 | while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { | ||
391 | int lock_result; | 392 | int lock_result; |
392 | /* | 393 | /* |
393 | * If we can lock the item without sleeping, unlock the AIL | 394 | * If we can lock the item without sleeping, unlock the AIL |
@@ -480,21 +481,25 @@ xfs_ail_worker( | |||
480 | } | 481 | } |
481 | 482 | ||
482 | /* assume we have more work to do in a short while */ | 483 | /* assume we have more work to do in a short while */ |
483 | tout = 10; | 484 | out_done: |
484 | if (!count) { | 485 | if (!count) { |
485 | /* We're past our target or empty, so idle */ | 486 | /* We're past our target or empty, so idle */ |
486 | ailp->xa_last_pushed_lsn = 0; | 487 | ailp->xa_last_pushed_lsn = 0; |
487 | 488 | ||
488 | /* | 489 | /* |
489 | * Check for an updated push target before clearing the | 490 | * We clear the XFS_AIL_PUSHING_BIT first before checking |
490 | * XFS_AIL_PUSHING_BIT. If the target changed, we've got more | 491 | * whether the target has changed. If the target has changed, |
491 | * work to do. Wait a bit longer before starting that work. | 492 | * this pushes the requeue race directly onto the result of the |
493 | * atomic test/set bit, so we are guaranteed that either the | ||
494 | * the pusher that changed the target or ourselves will requeue | ||
495 | * the work (but not both). | ||
492 | */ | 496 | */ |
497 | clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); | ||
493 | smp_rmb(); | 498 | smp_rmb(); |
494 | if (ailp->xa_target == target) { | 499 | if (XFS_LSN_CMP(ailp->xa_target, target) == 0 || |
495 | clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); | 500 | test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) |
496 | return; | 501 | return; |
497 | } | 502 | |
498 | tout = 50; | 503 | tout = 50; |
499 | } else if (XFS_LSN_CMP(lsn, target) >= 0) { | 504 | } else if (XFS_LSN_CMP(lsn, target) >= 0) { |
500 | /* | 505 | /* |
@@ -553,7 +558,7 @@ xfs_ail_push( | |||
553 | * the XFS_AIL_PUSHING_BIT. | 558 | * the XFS_AIL_PUSHING_BIT. |
554 | */ | 559 | */ |
555 | smp_wmb(); | 560 | smp_wmb(); |
556 | ailp->xa_target = threshold_lsn; | 561 | xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); |
557 | if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) | 562 | if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) |
558 | queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); | 563 | queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); |
559 | } | 564 | } |
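The xfs_trans_ail.c rework above closes a requeue race in the AIL push worker: when the worker finds nothing left to push it now clears XFS_AIL_PUSHING_BIT first, then re-reads the push target, and re-arms itself with test_and_set_bit() only if the target moved, so either the worker or a concurrent xfs_ail_push() requeues the work, but never neither and never both. A userspace sketch of that handshake using C11 atomics, with invented names and a single-threaded trace standing in for the real concurrency, is:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long target;
static atomic_bool pushing;

static void queue_work(const char *who)
{
	printf("%s queued the worker for target %ld\n", who, atomic_load(&target));
}

/* Caller side, like xfs_ail_push(): publish the target, then try to claim
 * the PUSHING flag; only the winner queues the work. */
static void push(long new_target)
{
	atomic_store(&target, new_target);
	if (!atomic_exchange(&pushing, true))		/* test_and_set_bit() */
		queue_work("pusher");
}

/* Worker's idle path: drop the flag before rechecking, so a pusher that
 * updates the target concurrently cannot have its update silently lost. */
static void worker_idle(long target_seen)
{
	atomic_store(&pushing, false);			/* clear_bit() first */
	if (atomic_load(&target) == target_seen)
		return;					/* nothing new; stay idle */
	if (!atomic_exchange(&pushing, true))		/* target moved: requeue once */
		queue_work("worker");
}

int main(void)
{
	push(100);		/* pusher queues the worker */
	worker_idle(100);	/* target unchanged: clear PUSHING and go idle */

	push(200);		/* queues the worker again */
	push(300);		/* PUSHING already set: only updates the target */
	worker_idle(200);	/* worker sees the newer target and requeues itself */
	return 0;
}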
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index bd297a20ab98..077c00d94f6e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -170,6 +170,10 @@ | |||
170 | STRUCT_ALIGN(); \ | 170 | STRUCT_ALIGN(); \ |
171 | *(__tracepoints) \ | 171 | *(__tracepoints) \ |
172 | /* implement dynamic printk debug */ \ | 172 | /* implement dynamic printk debug */ \ |
173 | . = ALIGN(8); \ | ||
174 | VMLINUX_SYMBOL(__start___jump_table) = .; \ | ||
175 | *(__jump_table) \ | ||
176 | VMLINUX_SYMBOL(__stop___jump_table) = .; \ | ||
173 | . = ALIGN(8); \ | 177 | . = ALIGN(8); \ |
174 | VMLINUX_SYMBOL(__start___verbose) = .; \ | 178 | VMLINUX_SYMBOL(__start___verbose) = .; \ |
175 | *(__verbose) \ | 179 | *(__verbose) \ |
@@ -228,8 +232,6 @@ | |||
228 | \ | 232 | \ |
229 | BUG_TABLE \ | 233 | BUG_TABLE \ |
230 | \ | 234 | \ |
231 | JUMP_TABLE \ | ||
232 | \ | ||
233 | /* PCI quirks */ \ | 235 | /* PCI quirks */ \ |
234 | .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ | 236 | .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ |
235 | VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ | 237 | VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ |
@@ -274,70 +276,70 @@ | |||
274 | /* Kernel symbol table: Normal symbols */ \ | 276 | /* Kernel symbol table: Normal symbols */ \ |
275 | __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ | 277 | __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ |
276 | VMLINUX_SYMBOL(__start___ksymtab) = .; \ | 278 | VMLINUX_SYMBOL(__start___ksymtab) = .; \ |
277 | *(__ksymtab) \ | 279 | *(SORT(___ksymtab+*)) \ |
278 | VMLINUX_SYMBOL(__stop___ksymtab) = .; \ | 280 | VMLINUX_SYMBOL(__stop___ksymtab) = .; \ |
279 | } \ | 281 | } \ |
280 | \ | 282 | \ |
281 | /* Kernel symbol table: GPL-only symbols */ \ | 283 | /* Kernel symbol table: GPL-only symbols */ \ |
282 | __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ | 284 | __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ |
283 | VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ | 285 | VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ |
284 | *(__ksymtab_gpl) \ | 286 | *(SORT(___ksymtab_gpl+*)) \ |
285 | VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ | 287 | VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ |
286 | } \ | 288 | } \ |
287 | \ | 289 | \ |
288 | /* Kernel symbol table: Normal unused symbols */ \ | 290 | /* Kernel symbol table: Normal unused symbols */ \ |
289 | __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ | 291 | __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ |
290 | VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ | 292 | VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ |
291 | *(__ksymtab_unused) \ | 293 | *(SORT(___ksymtab_unused+*)) \ |
292 | VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ | 294 | VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ |
293 | } \ | 295 | } \ |
294 | \ | 296 | \ |
295 | /* Kernel symbol table: GPL-only unused symbols */ \ | 297 | /* Kernel symbol table: GPL-only unused symbols */ \ |
296 | __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ | 298 | __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ |
297 | VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ | 299 | VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ |
298 | *(__ksymtab_unused_gpl) \ | 300 | *(SORT(___ksymtab_unused_gpl+*)) \ |
299 | VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ | 301 | VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ |
300 | } \ | 302 | } \ |
301 | \ | 303 | \ |
302 | /* Kernel symbol table: GPL-future-only symbols */ \ | 304 | /* Kernel symbol table: GPL-future-only symbols */ \ |
303 | __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ | 305 | __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ |
304 | VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ | 306 | VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ |
305 | *(__ksymtab_gpl_future) \ | 307 | *(SORT(___ksymtab_gpl_future+*)) \ |
306 | VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ | 308 | VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ |
307 | } \ | 309 | } \ |
308 | \ | 310 | \ |
309 | /* Kernel symbol table: Normal symbols */ \ | 311 | /* Kernel symbol table: Normal symbols */ \ |
310 | __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ | 312 | __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ |
311 | VMLINUX_SYMBOL(__start___kcrctab) = .; \ | 313 | VMLINUX_SYMBOL(__start___kcrctab) = .; \ |
312 | *(__kcrctab) \ | 314 | *(SORT(___kcrctab+*)) \ |
313 | VMLINUX_SYMBOL(__stop___kcrctab) = .; \ | 315 | VMLINUX_SYMBOL(__stop___kcrctab) = .; \ |
314 | } \ | 316 | } \ |
315 | \ | 317 | \ |
316 | /* Kernel symbol table: GPL-only symbols */ \ | 318 | /* Kernel symbol table: GPL-only symbols */ \ |
317 | __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ | 319 | __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ |
318 | VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ | 320 | VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ |
319 | *(__kcrctab_gpl) \ | 321 | *(SORT(___kcrctab_gpl+*)) \ |
320 | VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ | 322 | VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ |
321 | } \ | 323 | } \ |
322 | \ | 324 | \ |
323 | /* Kernel symbol table: Normal unused symbols */ \ | 325 | /* Kernel symbol table: Normal unused symbols */ \ |
324 | __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ | 326 | __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ |
325 | VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ | 327 | VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ |
326 | *(__kcrctab_unused) \ | 328 | *(SORT(___kcrctab_unused+*)) \ |
327 | VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ | 329 | VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ |
328 | } \ | 330 | } \ |
329 | \ | 331 | \ |
330 | /* Kernel symbol table: GPL-only unused symbols */ \ | 332 | /* Kernel symbol table: GPL-only unused symbols */ \ |
331 | __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ | 333 | __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ |
332 | VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ | 334 | VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ |
333 | *(__kcrctab_unused_gpl) \ | 335 | *(SORT(___kcrctab_unused_gpl+*)) \ |
334 | VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ | 336 | VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ |
335 | } \ | 337 | } \ |
336 | \ | 338 | \ |
337 | /* Kernel symbol table: GPL-future-only symbols */ \ | 339 | /* Kernel symbol table: GPL-future-only symbols */ \ |
338 | __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ | 340 | __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ |
339 | VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ | 341 | VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ |
340 | *(__kcrctab_gpl_future) \ | 342 | *(SORT(___kcrctab_gpl_future+*)) \ |
341 | VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ | 343 | VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ |
342 | } \ | 344 | } \ |
343 | \ | 345 | \ |
@@ -589,14 +591,6 @@ | |||
589 | #define BUG_TABLE | 591 | #define BUG_TABLE |
590 | #endif | 592 | #endif |
591 | 593 | ||
592 | #define JUMP_TABLE \ | ||
593 | . = ALIGN(8); \ | ||
594 | __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \ | ||
595 | VMLINUX_SYMBOL(__start___jump_table) = .; \ | ||
596 | *(__jump_table) \ | ||
597 | VMLINUX_SYMBOL(__stop___jump_table) = .; \ | ||
598 | } | ||
599 | |||
600 | #ifdef CONFIG_PM_TRACE | 594 | #ifdef CONFIG_PM_TRACE |
601 | #define TRACEDATA \ | 595 | #define TRACEDATA \ |
602 | . = ALIGN(4); \ | 596 | . = ALIGN(4); \ |
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index ade09d7b4271..c99c3d3e7811 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h | |||
@@ -127,7 +127,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, | |||
127 | 127 | ||
128 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); | 128 | int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); |
129 | 129 | ||
130 | bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); | 130 | int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); |
131 | bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); | 131 | bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); |
132 | int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); | 132 | int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); |
133 | int drm_fb_helper_debug_enter(struct fb_info *info); | 133 | int drm_fb_helper_debug_enter(struct fb_info *info); |
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index b8613e806aa9..01eca1794e14 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h | |||
@@ -111,6 +111,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, | |||
111 | __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) | 111 | __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) |
112 | #define alloc_bootmem_node(pgdat, x) \ | 112 | #define alloc_bootmem_node(pgdat, x) \ |
113 | __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) | 113 | __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) |
114 | #define alloc_bootmem_node_nopanic(pgdat, x) \ | ||
115 | __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) | ||
114 | #define alloc_bootmem_pages_node(pgdat, x) \ | 116 | #define alloc_bootmem_pages_node(pgdat, x) \ |
115 | __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) | 117 | __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) |
116 | #define alloc_bootmem_pages_node_nopanic(pgdat, x) \ | 118 | #define alloc_bootmem_pages_node_nopanic(pgdat, x) \ |
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h new file mode 100644 index 000000000000..90b1aa867224 --- /dev/null +++ b/include/linux/bsearch.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _LINUX_BSEARCH_H | ||
2 | #define _LINUX_BSEARCH_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | void *bsearch(const void *key, const void *base, size_t num, size_t size, | ||
7 | int (*cmp)(const void *key, const void *elt)); | ||
8 | |||
9 | #endif /* _LINUX_BSEARCH_H */ | ||
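The new include/linux/bsearch.h above declares a kernel binary search with the same shape as the C library routine: a sorted table, an element size, and a comparator that receives the search key first and a table element second. As a userspace illustration of that calling convention (using libc's bsearch, which takes the same arguments):

#include <stdio.h>
#include <stdlib.h>

/* Comparator: negative if key sorts before the element, positive if after. */
static int cmp_int(const void *key, const void *elt)
{
	int k = *(const int *)key;
	int e = *(const int *)elt;

	return (k > e) - (k < e);
}

int main(void)
{
	int table[] = { 2, 3, 5, 7, 11, 13 };	/* must already be sorted */
	int key = 7;
	int *hit = bsearch(&key, table, sizeof(table) / sizeof(table[0]),
			   sizeof(table[0]), cmp_int);

	printf("%d %s found\n", key, hit ? "was" : "was not");
	return 0;
}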
diff --git a/include/linux/capability.h b/include/linux/capability.h index 16ee8b49a200..d4675af963fa 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
@@ -546,18 +546,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap); | |||
546 | extern bool capable(int cap); | 546 | extern bool capable(int cap); |
547 | extern bool ns_capable(struct user_namespace *ns, int cap); | 547 | extern bool ns_capable(struct user_namespace *ns, int cap); |
548 | extern bool task_ns_capable(struct task_struct *t, int cap); | 548 | extern bool task_ns_capable(struct task_struct *t, int cap); |
549 | 549 | extern bool nsown_capable(int cap); | |
550 | /** | ||
551 | * nsown_capable - Check superior capability to one's own user_ns | ||
552 | * @cap: The capability in question | ||
553 | * | ||
554 | * Return true if the current task has the given superior capability | ||
555 | * targeted at its own user namespace. | ||
556 | */ | ||
557 | static inline bool nsown_capable(int cap) | ||
558 | { | ||
559 | return ns_capable(current_user_ns(), cap); | ||
560 | } | ||
561 | 550 | ||
562 | /* audit system wants to get cap info from files as well */ | 551 | /* audit system wants to get cap info from files as well */ |
563 | extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); | 552 | extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 9343dd3de858..11be48e0d168 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2001 Russell King | 4 | * Copyright (C) 2001 Russell King |
5 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | 5 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
@@ -56,9 +56,9 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb, | |||
56 | #define CPUFREQ_POLICY_POWERSAVE (1) | 56 | #define CPUFREQ_POLICY_POWERSAVE (1) |
57 | #define CPUFREQ_POLICY_PERFORMANCE (2) | 57 | #define CPUFREQ_POLICY_PERFORMANCE (2) |
58 | 58 | ||
59 | /* Frequency values here are CPU kHz so that hardware which doesn't run | 59 | /* Frequency values here are CPU kHz so that hardware which doesn't run |
60 | * with some frequencies can complain without having to guess what per | 60 | * with some frequencies can complain without having to guess what per |
61 | * cent / per mille means. | 61 | * cent / per mille means. |
62 | * Maximum transition latency is in nanoseconds - if it's unknown, | 62 | * Maximum transition latency is in nanoseconds - if it's unknown, |
63 | * CPUFREQ_ETERNAL shall be used. | 63 | * CPUFREQ_ETERNAL shall be used. |
64 | */ | 64 | */ |
@@ -72,13 +72,15 @@ extern struct kobject *cpufreq_global_kobject; | |||
72 | struct cpufreq_cpuinfo { | 72 | struct cpufreq_cpuinfo { |
73 | unsigned int max_freq; | 73 | unsigned int max_freq; |
74 | unsigned int min_freq; | 74 | unsigned int min_freq; |
75 | unsigned int transition_latency; /* in 10^(-9) s = nanoseconds */ | 75 | |
76 | /* in 10^(-9) s = nanoseconds */ | ||
77 | unsigned int transition_latency; | ||
76 | }; | 78 | }; |
77 | 79 | ||
78 | struct cpufreq_real_policy { | 80 | struct cpufreq_real_policy { |
79 | unsigned int min; /* in kHz */ | 81 | unsigned int min; /* in kHz */ |
80 | unsigned int max; /* in kHz */ | 82 | unsigned int max; /* in kHz */ |
81 | unsigned int policy; /* see above */ | 83 | unsigned int policy; /* see above */ |
82 | struct cpufreq_governor *governor; /* see below */ | 84 | struct cpufreq_governor *governor; /* see below */ |
83 | }; | 85 | }; |
84 | 86 | ||
@@ -94,7 +96,7 @@ struct cpufreq_policy { | |||
94 | unsigned int max; /* in kHz */ | 96 | unsigned int max; /* in kHz */ |
95 | unsigned int cur; /* in kHz, only needed if cpufreq | 97 | unsigned int cur; /* in kHz, only needed if cpufreq |
96 | * governors are used */ | 98 | * governors are used */ |
97 | unsigned int policy; /* see above */ | 99 | unsigned int policy; /* see above */ |
98 | struct cpufreq_governor *governor; /* see below */ | 100 | struct cpufreq_governor *governor; /* see below */ |
99 | 101 | ||
100 | struct work_struct update; /* if update_policy() needs to be | 102 | struct work_struct update; /* if update_policy() needs to be |
@@ -167,11 +169,11 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu | |||
167 | 169 | ||
168 | struct cpufreq_governor { | 170 | struct cpufreq_governor { |
169 | char name[CPUFREQ_NAME_LEN]; | 171 | char name[CPUFREQ_NAME_LEN]; |
170 | int (*governor) (struct cpufreq_policy *policy, | 172 | int (*governor) (struct cpufreq_policy *policy, |
171 | unsigned int event); | 173 | unsigned int event); |
172 | ssize_t (*show_setspeed) (struct cpufreq_policy *policy, | 174 | ssize_t (*show_setspeed) (struct cpufreq_policy *policy, |
173 | char *buf); | 175 | char *buf); |
174 | int (*store_setspeed) (struct cpufreq_policy *policy, | 176 | int (*store_setspeed) (struct cpufreq_policy *policy, |
175 | unsigned int freq); | 177 | unsigned int freq); |
176 | unsigned int max_transition_latency; /* HW must be able to switch to | 178 | unsigned int max_transition_latency; /* HW must be able to switch to |
177 | next freq faster than this value in nano secs or we | 179 | next freq faster than this value in nano secs or we |
@@ -180,7 +182,8 @@ struct cpufreq_governor { | |||
180 | struct module *owner; | 182 | struct module *owner; |
181 | }; | 183 | }; |
182 | 184 | ||
183 | /* pass a target to the cpufreq driver | 185 | /* |
186 | * Pass a target to the cpufreq driver. | ||
184 | */ | 187 | */ |
185 | extern int cpufreq_driver_target(struct cpufreq_policy *policy, | 188 | extern int cpufreq_driver_target(struct cpufreq_policy *policy, |
186 | unsigned int target_freq, | 189 | unsigned int target_freq, |
@@ -237,9 +240,9 @@ struct cpufreq_driver { | |||
237 | 240 | ||
238 | /* flags */ | 241 | /* flags */ |
239 | 242 | ||
240 | #define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if | 243 | #define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if |
241 | * all ->init() calls failed */ | 244 | * all ->init() calls failed */ |
242 | #define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel | 245 | #define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel |
243 | * "constants" aren't affected by | 246 | * "constants" aren't affected by |
244 | * frequency transitions */ | 247 | * frequency transitions */ |
245 | #define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed | 248 | #define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed |
@@ -252,7 +255,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); | |||
252 | void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); | 255 | void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); |
253 | 256 | ||
254 | 257 | ||
255 | static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) | 258 | static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) |
256 | { | 259 | { |
257 | if (policy->min < min) | 260 | if (policy->min < min) |
258 | policy->min = min; | 261 | policy->min = min; |
@@ -386,34 +389,15 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | |||
386 | /* the following 3 funtions are for cpufreq core use only */ | 389 | /* the following 3 funtions are for cpufreq core use only */ |
387 | struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); | 390 | struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); |
388 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); | 391 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); |
389 | void cpufreq_cpu_put (struct cpufreq_policy *data); | 392 | void cpufreq_cpu_put(struct cpufreq_policy *data); |
390 | 393 | ||
391 | /* the following are really really optional */ | 394 | /* the following are really really optional */ |
392 | extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; | 395 | extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; |
393 | 396 | ||
394 | void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, | 397 | void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, |
395 | unsigned int cpu); | 398 | unsigned int cpu); |
396 | 399 | ||
397 | void cpufreq_frequency_table_put_attr(unsigned int cpu); | 400 | void cpufreq_frequency_table_put_attr(unsigned int cpu); |
398 | 401 | ||
399 | 402 | ||
400 | /********************************************************************* | ||
401 | * UNIFIED DEBUG HELPERS * | ||
402 | *********************************************************************/ | ||
403 | |||
404 | #define CPUFREQ_DEBUG_CORE 1 | ||
405 | #define CPUFREQ_DEBUG_DRIVER 2 | ||
406 | #define CPUFREQ_DEBUG_GOVERNOR 4 | ||
407 | |||
408 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
409 | |||
410 | extern void cpufreq_debug_printk(unsigned int type, const char *prefix, | ||
411 | const char *fmt, ...); | ||
412 | |||
413 | #else | ||
414 | |||
415 | #define cpufreq_debug_printk(msg...) do { } while(0) | ||
416 | |||
417 | #endif /* CONFIG_CPU_FREQ_DEBUG */ | ||
418 | |||
419 | #endif /* _LINUX_CPUFREQ_H */ | 403 | #endif /* _LINUX_CPUFREQ_H */ |
diff --git a/include/linux/cred.h b/include/linux/cred.h index 9aeeb0ba2003..be16b61283cc 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
@@ -146,6 +146,7 @@ struct cred { | |||
146 | void *security; /* subjective LSM security */ | 146 | void *security; /* subjective LSM security */ |
147 | #endif | 147 | #endif |
148 | struct user_struct *user; /* real user ID subscription */ | 148 | struct user_struct *user; /* real user ID subscription */ |
149 | struct user_namespace *user_ns; /* cached user->user_ns */ | ||
149 | struct group_info *group_info; /* supplementary groups for euid/fsgid */ | 150 | struct group_info *group_info; /* supplementary groups for euid/fsgid */ |
150 | struct rcu_head rcu; /* RCU deletion hook */ | 151 | struct rcu_head rcu; /* RCU deletion hook */ |
151 | }; | 152 | }; |
@@ -354,10 +355,15 @@ static inline void put_cred(const struct cred *_cred) | |||
354 | #define current_fsgid() (current_cred_xxx(fsgid)) | 355 | #define current_fsgid() (current_cred_xxx(fsgid)) |
355 | #define current_cap() (current_cred_xxx(cap_effective)) | 356 | #define current_cap() (current_cred_xxx(cap_effective)) |
356 | #define current_user() (current_cred_xxx(user)) | 357 | #define current_user() (current_cred_xxx(user)) |
357 | #define _current_user_ns() (current_cred_xxx(user)->user_ns) | ||
358 | #define current_security() (current_cred_xxx(security)) | 358 | #define current_security() (current_cred_xxx(security)) |
359 | 359 | ||
360 | extern struct user_namespace *current_user_ns(void); | 360 | #ifdef CONFIG_USER_NS |
361 | #define current_user_ns() (current_cred_xxx(user_ns)) | ||
362 | #else | ||
363 | extern struct user_namespace init_user_ns; | ||
364 | #define current_user_ns() (&init_user_ns) | ||
365 | #endif | ||
366 | |||
361 | 367 | ||
362 | #define current_uid_gid(_uid, _gid) \ | 368 | #define current_uid_gid(_uid, _gid) \ |
363 | do { \ | 369 | do { \ |
diff --git a/include/linux/device.h b/include/linux/device.h index ab8dfc095709..0d7535000821 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -442,7 +442,6 @@ struct device { | |||
442 | struct dev_archdata archdata; | 442 | struct dev_archdata archdata; |
443 | 443 | ||
444 | struct device_node *of_node; /* associated device tree node */ | 444 | struct device_node *of_node; /* associated device tree node */ |
445 | const struct of_device_id *of_match; /* matching of_device_id from driver */ | ||
446 | 445 | ||
447 | dev_t devt; /* dev_t, creates the sysfs "dev" */ | 446 | dev_t devt; /* dev_t, creates the sysfs "dev" */ |
448 | 447 | ||
@@ -633,13 +632,6 @@ static inline int devtmpfs_mount(const char *mountpoint) { return 0; } | |||
633 | /* drivers/base/power/shutdown.c */ | 632 | /* drivers/base/power/shutdown.c */ |
634 | extern void device_shutdown(void); | 633 | extern void device_shutdown(void); |
635 | 634 | ||
636 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
637 | /* drivers/base/sys.c */ | ||
638 | extern void sysdev_shutdown(void); | ||
639 | #else | ||
640 | static inline void sysdev_shutdown(void) { } | ||
641 | #endif | ||
642 | |||
643 | /* debugging and troubleshooting/diagnostic helpers. */ | 635 | /* debugging and troubleshooting/diagnostic helpers. */ |
644 | extern const char *dev_driver_string(const struct device *dev); | 636 | extern const char *dev_driver_string(const struct device *dev); |
645 | 637 | ||
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 0c9653f11c18..e747ecd48e1c 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _DYNAMIC_DEBUG_H | 1 | #ifndef _DYNAMIC_DEBUG_H |
2 | #define _DYNAMIC_DEBUG_H | 2 | #define _DYNAMIC_DEBUG_H |
3 | 3 | ||
4 | #include <linux/jump_label.h> | ||
5 | |||
6 | /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which | 4 | /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which |
7 | * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They | 5 | * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They |
8 | * use independent hash functions, to reduce the chance of false positives. | 6 | * use independent hash functions, to reduce the chance of false positives. |
diff --git a/include/linux/fb.h b/include/linux/fb.h index df728c1c29ed..6a8274877171 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -832,6 +832,7 @@ struct fb_tile_ops { | |||
832 | #define FBINFO_CAN_FORCE_OUTPUT 0x200000 | 832 | #define FBINFO_CAN_FORCE_OUTPUT 0x200000 |
833 | 833 | ||
834 | struct fb_info { | 834 | struct fb_info { |
835 | atomic_t count; | ||
835 | int node; | 836 | int node; |
836 | int flags; | 837 | int flags; |
837 | struct mutex lock; /* Lock for open/release/ioctl funcs */ | 838 | struct mutex lock; /* Lock for open/release/ioctl funcs */ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index dbd860af0804..cdf9495df204 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -358,7 +358,6 @@ struct inodes_stat_t { | |||
358 | #define FS_EXTENT_FL 0x00080000 /* Extents */ | 358 | #define FS_EXTENT_FL 0x00080000 /* Extents */ |
359 | #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ | 359 | #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ |
360 | #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ | 360 | #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ |
361 | #define FS_COW_FL 0x02000000 /* Cow file */ | ||
362 | #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ | 361 | #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ |
363 | 362 | ||
364 | #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ | 363 | #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ca29e03c1fac..9d88e1cb5dbb 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -29,9 +29,22 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
29 | 29 | ||
30 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); | 30 | typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); |
31 | 31 | ||
32 | struct ftrace_hash; | ||
33 | |||
34 | enum { | ||
35 | FTRACE_OPS_FL_ENABLED = 1 << 0, | ||
36 | FTRACE_OPS_FL_GLOBAL = 1 << 1, | ||
37 | FTRACE_OPS_FL_DYNAMIC = 1 << 2, | ||
38 | }; | ||
39 | |||
32 | struct ftrace_ops { | 40 | struct ftrace_ops { |
33 | ftrace_func_t func; | 41 | ftrace_func_t func; |
34 | struct ftrace_ops *next; | 42 | struct ftrace_ops *next; |
43 | unsigned long flags; | ||
44 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
45 | struct ftrace_hash *notrace_hash; | ||
46 | struct ftrace_hash *filter_hash; | ||
47 | #endif | ||
35 | }; | 48 | }; |
36 | 49 | ||
37 | extern int function_trace_stop; | 50 | extern int function_trace_stop; |
@@ -146,14 +159,13 @@ extern void unregister_ftrace_function_probe_all(char *glob); | |||
146 | extern int ftrace_text_reserved(void *start, void *end); | 159 | extern int ftrace_text_reserved(void *start, void *end); |
147 | 160 | ||
148 | enum { | 161 | enum { |
149 | FTRACE_FL_FREE = (1 << 0), | 162 | FTRACE_FL_ENABLED = (1 << 30), |
150 | FTRACE_FL_FAILED = (1 << 1), | 163 | FTRACE_FL_FREE = (1 << 31), |
151 | FTRACE_FL_FILTER = (1 << 2), | ||
152 | FTRACE_FL_ENABLED = (1 << 3), | ||
153 | FTRACE_FL_NOTRACE = (1 << 4), | ||
154 | FTRACE_FL_CONVERTED = (1 << 5), | ||
155 | }; | 164 | }; |
156 | 165 | ||
166 | #define FTRACE_FL_MASK (0x3UL << 30) | ||
167 | #define FTRACE_REF_MAX ((1 << 30) - 1) | ||
168 | |||
157 | struct dyn_ftrace { | 169 | struct dyn_ftrace { |
158 | union { | 170 | union { |
159 | unsigned long ip; /* address of mcount call-site */ | 171 | unsigned long ip; /* address of mcount call-site */ |
@@ -167,7 +179,12 @@ struct dyn_ftrace { | |||
167 | }; | 179 | }; |
168 | 180 | ||
169 | int ftrace_force_update(void); | 181 | int ftrace_force_update(void); |
170 | void ftrace_set_filter(unsigned char *buf, int len, int reset); | 182 | void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
183 | int len, int reset); | ||
184 | void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, | ||
185 | int len, int reset); | ||
186 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset); | ||
187 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); | ||
171 | 188 | ||
172 | int register_ftrace_command(struct ftrace_func_command *cmd); | 189 | int register_ftrace_command(struct ftrace_func_command *cmd); |
173 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | 190 | int unregister_ftrace_command(struct ftrace_func_command *cmd); |
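The ftrace_ops change above moves filtering out of a single global list and into per-ops hashes (filter_hash/notrace_hash), adds the flags word, and gives ftrace_set_filter()/ftrace_set_notrace() an ops argument. A minimal sketch of a caller of the new interface, assuming register_ftrace_function()/unregister_ftrace_function() keep their usual signatures; the callback name and the "schedule*" pattern are illustrative:

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/module.h>

static void my_trace_callback(unsigned long ip, unsigned long parent_ip)
{
        /* runs on every filtered function entry; must be fast and reentrant */
}

static struct ftrace_ops my_trace_ops = {
        .func = my_trace_callback,
        /* .flags: the FTRACE_OPS_FL_* bits are managed by the core */
};

static unsigned char my_filter[] = "schedule*";

static int __init my_tracer_init(void)
{
        /* the filter now lives in my_trace_ops' filter_hash, not a global list */
        ftrace_set_filter(&my_trace_ops, my_filter, sizeof(my_filter) - 1, 1);
        return register_ftrace_function(&my_trace_ops);
}

static void __exit my_tracer_exit(void)
{
        unregister_ftrace_function(&my_trace_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");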
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index bfb8f934521e..56d8fc87fbbc 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -353,6 +353,8 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask); | |||
353 | 353 | ||
354 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask); | 354 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask); |
355 | void free_pages_exact(void *virt, size_t size); | 355 | void free_pages_exact(void *virt, size_t size); |
356 | /* This is different from alloc_pages_exact_node !!! */ | ||
357 | void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); | ||
356 | 358 | ||
357 | #define __get_free_page(gfp_mask) \ | 359 | #define __get_free_page(gfp_mask) \ |
358 | __get_free_pages((gfp_mask), 0) | 360 | __get_free_pages((gfp_mask), 0) |
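The new alloc_pages_exact_nid() is the node-aware sibling of alloc_pages_exact(): despite the name it returns a kernel virtual address rather than a struct page * like alloc_pages_exact_node(), hence the warning comment. A minimal sketch with illustrative helper names:

#include <linux/gfp.h>

/* Allocate an exactly-sized, physically contiguous, zeroed buffer on node
 * @nid; pairs with free_pages_exact(), exactly like alloc_pages_exact(). */
static void *my_alloc_node_buffer(int nid, size_t size)
{
        return alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_ZERO);
}

static void my_free_node_buffer(void *buf, size_t size)
{
        if (buf)
                free_pages_exact(buf, size);
}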
diff --git a/include/linux/init.h b/include/linux/init.h index 577671c55153..9146f39cdddf 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -79,29 +79,29 @@ | |||
79 | #define __exitused __used | 79 | #define __exitused __used |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | #define __exit __section(.exit.text) __exitused __cold | 82 | #define __exit __section(.exit.text) __exitused __cold notrace |
83 | 83 | ||
84 | /* Used for HOTPLUG */ | 84 | /* Used for HOTPLUG */ |
85 | #define __devinit __section(.devinit.text) __cold | 85 | #define __devinit __section(.devinit.text) __cold notrace |
86 | #define __devinitdata __section(.devinit.data) | 86 | #define __devinitdata __section(.devinit.data) |
87 | #define __devinitconst __section(.devinit.rodata) | 87 | #define __devinitconst __section(.devinit.rodata) |
88 | #define __devexit __section(.devexit.text) __exitused __cold | 88 | #define __devexit __section(.devexit.text) __exitused __cold notrace |
89 | #define __devexitdata __section(.devexit.data) | 89 | #define __devexitdata __section(.devexit.data) |
90 | #define __devexitconst __section(.devexit.rodata) | 90 | #define __devexitconst __section(.devexit.rodata) |
91 | 91 | ||
92 | /* Used for HOTPLUG_CPU */ | 92 | /* Used for HOTPLUG_CPU */ |
93 | #define __cpuinit __section(.cpuinit.text) __cold | 93 | #define __cpuinit __section(.cpuinit.text) __cold notrace |
94 | #define __cpuinitdata __section(.cpuinit.data) | 94 | #define __cpuinitdata __section(.cpuinit.data) |
95 | #define __cpuinitconst __section(.cpuinit.rodata) | 95 | #define __cpuinitconst __section(.cpuinit.rodata) |
96 | #define __cpuexit __section(.cpuexit.text) __exitused __cold | 96 | #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace |
97 | #define __cpuexitdata __section(.cpuexit.data) | 97 | #define __cpuexitdata __section(.cpuexit.data) |
98 | #define __cpuexitconst __section(.cpuexit.rodata) | 98 | #define __cpuexitconst __section(.cpuexit.rodata) |
99 | 99 | ||
100 | /* Used for MEMORY_HOTPLUG */ | 100 | /* Used for MEMORY_HOTPLUG */ |
101 | #define __meminit __section(.meminit.text) __cold | 101 | #define __meminit __section(.meminit.text) __cold notrace |
102 | #define __meminitdata __section(.meminit.data) | 102 | #define __meminitdata __section(.meminit.data) |
103 | #define __meminitconst __section(.meminit.rodata) | 103 | #define __meminitconst __section(.meminit.rodata) |
104 | #define __memexit __section(.memexit.text) __exitused __cold | 104 | #define __memexit __section(.memexit.text) __exitused __cold notrace |
105 | #define __memexitdata __section(.memexit.data) | 105 | #define __memexitdata __section(.memexit.data) |
106 | #define __memexitconst __section(.memexit.rodata) | 106 | #define __memexitconst __section(.memexit.rodata) |
107 | 107 | ||
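Adding notrace to the init/exit/hotplug text annotations keeps ftrace from patching mcount call sites in code whose text may be discarded at runtime. Nothing changes for users of the macros; a trivial, illustrative sketch:

#include <linux/init.h>

/* Placed in .cpuinit.text; with the change above it is also implicitly
 * notrace, so the function tracer never touches its mcount site even though
 * the section can be freed when CPU hotplug is not configured. */
static int __cpuinit my_cpu_prepare(unsigned int cpu)
{
        /* per-CPU setup work would go here */
        return 0;
}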
diff --git a/include/linux/irq.h b/include/linux/irq.h index 09a308072f56..8b4538446636 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -53,12 +53,13 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data); | |||
53 | * Bits which can be modified via irq_set/clear/modify_status_flags() | 53 | * Bits which can be modified via irq_set/clear/modify_status_flags() |
54 | * IRQ_LEVEL - Interrupt is level type. Will be also | 54 | * IRQ_LEVEL - Interrupt is level type. Will be also |
55 | * updated in the code when the above trigger | 55 | * updated in the code when the above trigger |
56 | * bits are modified via set_irq_type() | 56 | * bits are modified via irq_set_irq_type() |
57 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect | 57 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect |
58 | * it from affinity setting | 58 | * it from affinity setting |
59 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing | 59 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing |
60 | * IRQ_NOREQUEST - Interrupt cannot be requested via | 60 | * IRQ_NOREQUEST - Interrupt cannot be requested via |
61 | * request_irq() | 61 | * request_irq() |
62 | * IRQ_NOTHREAD - Interrupt cannot be threaded | ||
62 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in | 63 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in |
63 | * request/setup_irq() | 64 | * request/setup_irq() |
64 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) | 65 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) |
@@ -85,6 +86,7 @@ enum { | |||
85 | IRQ_NO_BALANCING = (1 << 13), | 86 | IRQ_NO_BALANCING = (1 << 13), |
86 | IRQ_MOVE_PCNTXT = (1 << 14), | 87 | IRQ_MOVE_PCNTXT = (1 << 14), |
87 | IRQ_NESTED_THREAD = (1 << 15), | 88 | IRQ_NESTED_THREAD = (1 << 15), |
89 | IRQ_NOTHREAD = (1 << 16), | ||
88 | }; | 90 | }; |
89 | 91 | ||
90 | #define IRQF_MODIFY_MASK \ | 92 | #define IRQF_MODIFY_MASK \ |
@@ -261,23 +263,6 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | |||
261 | * struct irq_chip - hardware interrupt chip descriptor | 263 | * struct irq_chip - hardware interrupt chip descriptor |
262 | * | 264 | * |
263 | * @name: name for /proc/interrupts | 265 | * @name: name for /proc/interrupts |
264 | * @startup: deprecated, replaced by irq_startup | ||
265 | * @shutdown: deprecated, replaced by irq_shutdown | ||
266 | * @enable: deprecated, replaced by irq_enable | ||
267 | * @disable: deprecated, replaced by irq_disable | ||
268 | * @ack: deprecated, replaced by irq_ack | ||
269 | * @mask: deprecated, replaced by irq_mask | ||
270 | * @mask_ack: deprecated, replaced by irq_mask_ack | ||
271 | * @unmask: deprecated, replaced by irq_unmask | ||
272 | * @eoi: deprecated, replaced by irq_eoi | ||
273 | * @end: deprecated, will go away with __do_IRQ() | ||
274 | * @set_affinity: deprecated, replaced by irq_set_affinity | ||
275 | * @retrigger: deprecated, replaced by irq_retrigger | ||
276 | * @set_type: deprecated, replaced by irq_set_type | ||
277 | * @set_wake: deprecated, replaced by irq_wake | ||
278 | * @bus_lock: deprecated, replaced by irq_bus_lock | ||
279 | * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock | ||
280 | * | ||
281 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) | 266 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) |
282 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) | 267 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) |
283 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) | 268 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) |
@@ -295,6 +280,9 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | |||
295 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips | 280 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips |
296 | * @irq_cpu_online: configure an interrupt source for a secondary CPU | 281 | * @irq_cpu_online: configure an interrupt source for a secondary CPU |
297 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU | 282 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU |
283 | * @irq_suspend: function called from core code on suspend once per chip | ||
284 | * @irq_resume: function called from core code on resume once per chip | ||
285 | * @irq_pm_shutdown: function called from core code on shutdown once per chip | ||
298 | * @irq_print_chip: optional to print special chip info in show_interrupts | 286 | * @irq_print_chip: optional to print special chip info in show_interrupts |
299 | * @flags: chip specific flags | 287 | * @flags: chip specific flags |
300 | * | 288 | * |
@@ -324,6 +312,10 @@ struct irq_chip { | |||
324 | void (*irq_cpu_online)(struct irq_data *data); | 312 | void (*irq_cpu_online)(struct irq_data *data); |
325 | void (*irq_cpu_offline)(struct irq_data *data); | 313 | void (*irq_cpu_offline)(struct irq_data *data); |
326 | 314 | ||
315 | void (*irq_suspend)(struct irq_data *data); | ||
316 | void (*irq_resume)(struct irq_data *data); | ||
317 | void (*irq_pm_shutdown)(struct irq_data *data); | ||
318 | |||
327 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); | 319 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); |
328 | 320 | ||
329 | unsigned long flags; | 321 | unsigned long flags; |
@@ -439,7 +431,7 @@ irq_set_handler(unsigned int irq, irq_flow_handler_t handle) | |||
439 | /* | 431 | /* |
440 | * Set a highlevel chained flow handler for a given IRQ. | 432 | * Set a highlevel chained flow handler for a given IRQ. |
441 | * (a chained handler is automatically enabled and set to | 433 | * (a chained handler is automatically enabled and set to |
442 | * IRQ_NOREQUEST and IRQ_NOPROBE) | 434 | * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) |
443 | */ | 435 | */ |
444 | static inline void | 436 | static inline void |
445 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) | 437 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) |
@@ -469,6 +461,16 @@ static inline void irq_set_probe(unsigned int irq) | |||
469 | irq_modify_status(irq, IRQ_NOPROBE, 0); | 461 | irq_modify_status(irq, IRQ_NOPROBE, 0); |
470 | } | 462 | } |
471 | 463 | ||
464 | static inline void irq_set_nothread(unsigned int irq) | ||
465 | { | ||
466 | irq_modify_status(irq, 0, IRQ_NOTHREAD); | ||
467 | } | ||
468 | |||
469 | static inline void irq_set_thread(unsigned int irq) | ||
470 | { | ||
471 | irq_modify_status(irq, IRQ_NOTHREAD, 0); | ||
472 | } | ||
473 | |||
472 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) | 474 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) |
473 | { | 475 | { |
474 | if (nest) | 476 | if (nest) |
@@ -573,6 +575,145 @@ static inline int irq_reserve_irq(unsigned int irq) | |||
573 | return irq_reserve_irqs(irq, 1); | 575 | return irq_reserve_irqs(irq, 1); |
574 | } | 576 | } |
575 | 577 | ||
578 | #ifndef irq_reg_writel | ||
579 | # define irq_reg_writel(val, addr) writel(val, addr) | ||
580 | #endif | ||
581 | #ifndef irq_reg_readl | ||
582 | # define irq_reg_readl(addr) readl(addr) | ||
583 | #endif | ||
584 | |||
585 | /** | ||
586 | * struct irq_chip_regs - register offsets for struct irq_chip_generic | ||
587 | * @enable: Enable register offset to reg_base | ||
588 | * @disable: Disable register offset to reg_base | ||
589 | * @mask: Mask register offset to reg_base | ||
590 | * @ack: Ack register offset to reg_base | ||
591 | * @eoi: Eoi register offset to reg_base | ||
592 | * @type: Type configuration register offset to reg_base | ||
593 | * @polarity: Polarity configuration register offset to reg_base | ||
594 | */ | ||
595 | struct irq_chip_regs { | ||
596 | unsigned long enable; | ||
597 | unsigned long disable; | ||
598 | unsigned long mask; | ||
599 | unsigned long ack; | ||
600 | unsigned long eoi; | ||
601 | unsigned long type; | ||
602 | unsigned long polarity; | ||
603 | }; | ||
604 | |||
605 | /** | ||
606 | * struct irq_chip_type - Generic interrupt chip instance for a flow type | ||
607 | * @chip: The real interrupt chip which provides the callbacks | ||
608 | * @regs: Register offsets for this chip | ||
609 | * @handler: Flow handler associated with this chip | ||
610 | * @type: Chip can handle these flow types | ||
611 | * | ||
612 | * An irq_chip_generic can have several instances of irq_chip_type when | ||
613 | * it requires different functions and register offsets for different | ||
614 | * flow types. | ||
615 | */ | ||
616 | struct irq_chip_type { | ||
617 | struct irq_chip chip; | ||
618 | struct irq_chip_regs regs; | ||
619 | irq_flow_handler_t handler; | ||
620 | u32 type; | ||
621 | }; | ||
622 | |||
623 | /** | ||
624 | * struct irq_chip_generic - Generic irq chip data structure | ||
625 | * @lock: Lock to protect register and cache data access | ||
626 | * @reg_base: Register base address (virtual) | ||
627 | * @irq_base: Interrupt base nr for this chip | ||
628 | * @irq_cnt: Number of interrupts handled by this chip | ||
629 | * @mask_cache: Cached mask register | ||
630 | * @type_cache: Cached type register | ||
631 | * @polarity_cache: Cached polarity register | ||
632 | * @wake_enabled: Interrupt can wakeup from suspend | ||
633 | * @wake_active: Interrupt is marked as a wakeup from suspend source | ||
634 | * @num_ct: Number of available irq_chip_type instances (usually 1) | ||
635 | * @private: Private data for non generic chip callbacks | ||
636 | * @list: List head for keeping track of instances | ||
637 | * @chip_types: Array of interrupt irq_chip_types | ||
638 | * | ||
639 | * Note that irq_chip_generic can have multiple irq_chip_type | ||
640 | * implementations which can be associated with a particular irq line of | ||
641 | * an irq_chip_generic instance. That allows state in an irq_chip_generic | ||
642 | * instance to be shared and protected when we need to implement | ||
643 | * different flow mechanisms (level/edge) for it. | ||
644 | */ | ||
645 | struct irq_chip_generic { | ||
646 | raw_spinlock_t lock; | ||
647 | void __iomem *reg_base; | ||
648 | unsigned int irq_base; | ||
649 | unsigned int irq_cnt; | ||
650 | u32 mask_cache; | ||
651 | u32 type_cache; | ||
652 | u32 polarity_cache; | ||
653 | u32 wake_enabled; | ||
654 | u32 wake_active; | ||
655 | unsigned int num_ct; | ||
656 | void *private; | ||
657 | struct list_head list; | ||
658 | struct irq_chip_type chip_types[0]; | ||
659 | }; | ||
660 | |||
661 | /** | ||
662 | * enum irq_gc_flags - Initialization flags for generic irq chips | ||
663 | * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg | ||
664 | * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for | ||
665 | * irq chips which need to call irq_set_wake() on | ||
666 | * the parent irq (typically GPIO implementations) | ||
667 | */ | ||
668 | enum irq_gc_flags { | ||
669 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, | ||
670 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, | ||
671 | }; | ||
672 | |||
673 | /* Generic chip callback functions */ | ||
674 | void irq_gc_noop(struct irq_data *d); | ||
675 | void irq_gc_mask_disable_reg(struct irq_data *d); | ||
676 | void irq_gc_mask_set_bit(struct irq_data *d); | ||
677 | void irq_gc_mask_clr_bit(struct irq_data *d); | ||
678 | void irq_gc_unmask_enable_reg(struct irq_data *d); | ||
679 | void irq_gc_ack(struct irq_data *d); | ||
680 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); | ||
681 | void irq_gc_eoi(struct irq_data *d); | ||
682 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); | ||
683 | |||
684 | /* Setup functions for irq_chip_generic */ | ||
685 | struct irq_chip_generic * | ||
686 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, | ||
687 | void __iomem *reg_base, irq_flow_handler_t handler); | ||
688 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | ||
689 | enum irq_gc_flags flags, unsigned int clr, | ||
690 | unsigned int set); | ||
691 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); | ||
692 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | ||
693 | unsigned int clr, unsigned int set); | ||
694 | |||
695 | static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) | ||
696 | { | ||
697 | return container_of(d->chip, struct irq_chip_type, chip); | ||
698 | } | ||
699 | |||
700 | #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) | ||
701 | |||
702 | #ifdef CONFIG_SMP | ||
703 | static inline void irq_gc_lock(struct irq_chip_generic *gc) | ||
704 | { | ||
705 | raw_spin_lock(&gc->lock); | ||
706 | } | ||
707 | |||
708 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) | ||
709 | { | ||
710 | raw_spin_unlock(&gc->lock); | ||
711 | } | ||
712 | #else | ||
713 | static inline void irq_gc_lock(struct irq_chip_generic *gc) { } | ||
714 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } | ||
715 | #endif | ||
716 | |||
576 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 717 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
577 | 718 | ||
578 | #endif /* !CONFIG_S390 */ | 719 | #endif /* !CONFIG_S390 */ |
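The bulk of this hunk introduces the generic irq chip infrastructure, which lets simple memory-mapped interrupt controllers share their mask/ack boilerplate. A rough sketch of how a controller driver might wire it up; the controller name, register offsets and the 32-interrupt bank size are made-up values for illustration:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>

static void __init my_intc_init(void __iomem *base, unsigned int irq_base)
{
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;

        gc = irq_alloc_generic_chip("MYINTC", 1, irq_base, base,
                                    handle_level_irq);
        if (!gc)
                return;

        ct = gc->chip_types;
        ct->chip.irq_mask   = irq_gc_mask_disable_reg;
        ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
        ct->chip.irq_ack    = irq_gc_ack;
        ct->regs.enable     = 0x00;     /* hypothetical ENABLE register */
        ct->regs.disable    = 0x04;     /* hypothetical DISABLE register */
        ct->regs.ack        = 0x08;     /* hypothetical ACK register */

        /* Install the chip for a 32-interrupt bank, prime the mask cache
         * and allow the lines to be requested. */
        irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
                               IRQ_NOREQUEST, 0);
}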
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index a082905b5ebe..2d921b35212c 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
@@ -16,16 +16,18 @@ struct timer_rand_state; | |||
16 | * @irq_data: per irq and chip data passed down to chip functions | 16 | * @irq_data: per irq and chip data passed down to chip functions |
17 | * @timer_rand_state: pointer to timer rand state struct | 17 | * @timer_rand_state: pointer to timer rand state struct |
18 | * @kstat_irqs: irq stats per cpu | 18 | * @kstat_irqs: irq stats per cpu |
19 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | 19 | * @handle_irq: highlevel irq-events handler |
20 | * @preflow_handler: handler called before the flow handler (currently used by sparc) | ||
20 | * @action: the irq action chain | 21 | * @action: the irq action chain |
21 | * @status: status information | 22 | * @status: status information |
22 | * @core_internal_state__do_not_mess_with_it: core internal status information | 23 | * @core_internal_state__do_not_mess_with_it: core internal status information |
23 | * @depth: disable-depth, for nested irq_disable() calls | 24 | * @depth: disable-depth, for nested irq_disable() calls |
24 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | 25 | * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers |
25 | * @irq_count: stats field to detect stalled irqs | 26 | * @irq_count: stats field to detect stalled irqs |
26 | * @last_unhandled: aging timer for unhandled count | 27 | * @last_unhandled: aging timer for unhandled count |
27 | * @irqs_unhandled: stats field for spurious unhandled interrupts | 28 | * @irqs_unhandled: stats field for spurious unhandled interrupts |
28 | * @lock: locking for SMP | 29 | * @lock: locking for SMP |
30 | * @affinity_hint: hint to user space for preferred irq affinity | ||
29 | * @affinity_notify: context for notification of affinity changes | 31 | * @affinity_notify: context for notification of affinity changes |
30 | * @pending_mask: pending rebalanced interrupts | 32 | * @pending_mask: pending rebalanced interrupts |
31 | * @threads_oneshot: bitfield to handle shared oneshot threads | 33 | * @threads_oneshot: bitfield to handle shared oneshot threads |
@@ -109,10 +111,7 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de | |||
109 | desc->handle_irq(irq, desc); | 111 | desc->handle_irq(irq, desc); |
110 | } | 112 | } |
111 | 113 | ||
112 | static inline void generic_handle_irq(unsigned int irq) | 114 | int generic_handle_irq(unsigned int irq); |
113 | { | ||
114 | generic_handle_irq_desc(irq, irq_to_desc(irq)); | ||
115 | } | ||
116 | 115 | ||
117 | /* Test to see if a driver has successfully requested an irq */ | 116 | /* Test to see if a driver has successfully requested an irq */ |
118 | static inline int irq_has_action(unsigned int irq) | 117 | static inline int irq_has_action(unsigned int irq) |
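generic_handle_irq() is no longer a static inline wrapper around generic_handle_irq_desc() but an out-of-line function returning int, so it can validate the irq number and be exported. A sketch of the typical caller, a chained demultiplex handler; the status register, IRQ base and handler-data wiring are illustrative:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#define MY_GPIO_IRQ_BASE        64      /* hypothetical base for the bank */

static void my_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        void __iomem *status = irq_desc_get_handler_data(desc);
        unsigned long pending = readl(status); /* hypothetical status register */
        int bit;

        for_each_set_bit(bit, &pending, 32)
                generic_handle_irq(MY_GPIO_IRQ_BASE + bit);
}

/* installed with something like:
 *      irq_set_handler_data(parent_irq, status_reg_addr);
 *      irq_set_chained_handler(parent_irq, my_gpio_demux_handler);
 */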
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 7880f18e4b86..83e745f3ead7 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
@@ -1,20 +1,43 @@ | |||
1 | #ifndef _LINUX_JUMP_LABEL_H | 1 | #ifndef _LINUX_JUMP_LABEL_H |
2 | #define _LINUX_JUMP_LABEL_H | 2 | #define _LINUX_JUMP_LABEL_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | #include <linux/compiler.h> | ||
6 | |||
4 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) | 7 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) |
8 | |||
9 | struct jump_label_key { | ||
10 | atomic_t enabled; | ||
11 | struct jump_entry *entries; | ||
12 | #ifdef CONFIG_MODULES | ||
13 | struct jump_label_mod *next; | ||
14 | #endif | ||
15 | }; | ||
16 | |||
5 | # include <asm/jump_label.h> | 17 | # include <asm/jump_label.h> |
6 | # define HAVE_JUMP_LABEL | 18 | # define HAVE_JUMP_LABEL |
7 | #endif | 19 | #endif |
8 | 20 | ||
9 | enum jump_label_type { | 21 | enum jump_label_type { |
22 | JUMP_LABEL_DISABLE = 0, | ||
10 | JUMP_LABEL_ENABLE, | 23 | JUMP_LABEL_ENABLE, |
11 | JUMP_LABEL_DISABLE | ||
12 | }; | 24 | }; |
13 | 25 | ||
14 | struct module; | 26 | struct module; |
15 | 27 | ||
16 | #ifdef HAVE_JUMP_LABEL | 28 | #ifdef HAVE_JUMP_LABEL |
17 | 29 | ||
30 | #ifdef CONFIG_MODULES | ||
31 | #define JUMP_LABEL_INIT {{ 0 }, NULL, NULL} | ||
32 | #else | ||
33 | #define JUMP_LABEL_INIT {{ 0 }, NULL} | ||
34 | #endif | ||
35 | |||
36 | static __always_inline bool static_branch(struct jump_label_key *key) | ||
37 | { | ||
38 | return arch_static_branch(key); | ||
39 | } | ||
40 | |||
18 | extern struct jump_entry __start___jump_table[]; | 41 | extern struct jump_entry __start___jump_table[]; |
19 | extern struct jump_entry __stop___jump_table[]; | 42 | extern struct jump_entry __stop___jump_table[]; |
20 | 43 | ||
@@ -23,37 +46,37 @@ extern void jump_label_unlock(void); | |||
23 | extern void arch_jump_label_transform(struct jump_entry *entry, | 46 | extern void arch_jump_label_transform(struct jump_entry *entry, |
24 | enum jump_label_type type); | 47 | enum jump_label_type type); |
25 | extern void arch_jump_label_text_poke_early(jump_label_t addr); | 48 | extern void arch_jump_label_text_poke_early(jump_label_t addr); |
26 | extern void jump_label_update(unsigned long key, enum jump_label_type type); | ||
27 | extern void jump_label_apply_nops(struct module *mod); | ||
28 | extern int jump_label_text_reserved(void *start, void *end); | 49 | extern int jump_label_text_reserved(void *start, void *end); |
50 | extern void jump_label_inc(struct jump_label_key *key); | ||
51 | extern void jump_label_dec(struct jump_label_key *key); | ||
52 | extern bool jump_label_enabled(struct jump_label_key *key); | ||
53 | extern void jump_label_apply_nops(struct module *mod); | ||
29 | 54 | ||
30 | #define jump_label_enable(key) \ | 55 | #else |
31 | jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); | ||
32 | 56 | ||
33 | #define jump_label_disable(key) \ | 57 | #include <asm/atomic.h> |
34 | jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); | ||
35 | 58 | ||
36 | #else | 59 | #define JUMP_LABEL_INIT {ATOMIC_INIT(0)} |
37 | 60 | ||
38 | #define JUMP_LABEL(key, label) \ | 61 | struct jump_label_key { |
39 | do { \ | 62 | atomic_t enabled; |
40 | if (unlikely(*key)) \ | 63 | }; |
41 | goto label; \ | ||
42 | } while (0) | ||
43 | 64 | ||
44 | #define jump_label_enable(cond_var) \ | 65 | static __always_inline bool static_branch(struct jump_label_key *key) |
45 | do { \ | 66 | { |
46 | *(cond_var) = 1; \ | 67 | if (unlikely(atomic_read(&key->enabled))) |
47 | } while (0) | 68 | return true; |
69 | return false; | ||
70 | } | ||
48 | 71 | ||
49 | #define jump_label_disable(cond_var) \ | 72 | static inline void jump_label_inc(struct jump_label_key *key) |
50 | do { \ | 73 | { |
51 | *(cond_var) = 0; \ | 74 | atomic_inc(&key->enabled); |
52 | } while (0) | 75 | } |
53 | 76 | ||
54 | static inline int jump_label_apply_nops(struct module *mod) | 77 | static inline void jump_label_dec(struct jump_label_key *key) |
55 | { | 78 | { |
56 | return 0; | 79 | atomic_dec(&key->enabled); |
57 | } | 80 | } |
58 | 81 | ||
59 | static inline int jump_label_text_reserved(void *start, void *end) | 82 | static inline int jump_label_text_reserved(void *start, void *end) |
@@ -64,16 +87,16 @@ static inline int jump_label_text_reserved(void *start, void *end) | |||
64 | static inline void jump_label_lock(void) {} | 87 | static inline void jump_label_lock(void) {} |
65 | static inline void jump_label_unlock(void) {} | 88 | static inline void jump_label_unlock(void) {} |
66 | 89 | ||
67 | #endif | 90 | static inline bool jump_label_enabled(struct jump_label_key *key) |
91 | { | ||
92 | return !!atomic_read(&key->enabled); | ||
93 | } | ||
68 | 94 | ||
69 | #define COND_STMT(key, stmt) \ | 95 | static inline int jump_label_apply_nops(struct module *mod) |
70 | do { \ | 96 | { |
71 | __label__ jl_enabled; \ | 97 | return 0; |
72 | JUMP_LABEL(key, jl_enabled); \ | 98 | } |
73 | if (0) { \ | 99 | |
74 | jl_enabled: \ | 100 | #endif |
75 | stmt; \ | ||
76 | } \ | ||
77 | } while (0) | ||
78 | 101 | ||
79 | #endif | 102 | #endif |
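The rework above replaces the raw JUMP_LABEL()/COND_STMT() macros with a typed struct jump_label_key plus a static_branch() test that behaves the same with or without asm goto support (falling back to an atomic_read()). A minimal sketch of a feature flag built on it; all names are illustrative:

#include <linux/jump_label.h>

static struct jump_label_key my_feature_key = JUMP_LABEL_INIT;

static void my_slow_path(void)
{
        /* expensive work, only wanted while the feature is enabled */
}

static inline void my_fast_path_hook(void)
{
        /* a patched NOP/jump with asm goto, an atomic_read() otherwise */
        if (static_branch(&my_feature_key))
                my_slow_path();
}

static void my_feature_set(bool enable)
{
        if (enable)
                jump_label_inc(&my_feature_key);
        else
                jump_label_dec(&my_feature_key);
}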
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h deleted file mode 100644 index e5d012ad92c6..000000000000 --- a/include/linux/jump_label_ref.h +++ /dev/null | |||
@@ -1,44 +0,0 @@ | |||
1 | #ifndef _LINUX_JUMP_LABEL_REF_H | ||
2 | #define _LINUX_JUMP_LABEL_REF_H | ||
3 | |||
4 | #include <linux/jump_label.h> | ||
5 | #include <asm/atomic.h> | ||
6 | |||
7 | #ifdef HAVE_JUMP_LABEL | ||
8 | |||
9 | static inline void jump_label_inc(atomic_t *key) | ||
10 | { | ||
11 | if (atomic_add_return(1, key) == 1) | ||
12 | jump_label_enable(key); | ||
13 | } | ||
14 | |||
15 | static inline void jump_label_dec(atomic_t *key) | ||
16 | { | ||
17 | if (atomic_dec_and_test(key)) | ||
18 | jump_label_disable(key); | ||
19 | } | ||
20 | |||
21 | #else /* !HAVE_JUMP_LABEL */ | ||
22 | |||
23 | static inline void jump_label_inc(atomic_t *key) | ||
24 | { | ||
25 | atomic_inc(key); | ||
26 | } | ||
27 | |||
28 | static inline void jump_label_dec(atomic_t *key) | ||
29 | { | ||
30 | atomic_dec(key); | ||
31 | } | ||
32 | |||
33 | #undef JUMP_LABEL | ||
34 | #define JUMP_LABEL(key, label) \ | ||
35 | do { \ | ||
36 | if (unlikely(__builtin_choose_expr( \ | ||
37 | __builtin_types_compatible_p(typeof(key), atomic_t *), \ | ||
38 | atomic_read((atomic_t *)(key)), *(key)))) \ | ||
39 | goto label; \ | ||
40 | } while (0) | ||
41 | |||
42 | #endif /* HAVE_JUMP_LABEL */ | ||
43 | |||
44 | #endif /* _LINUX_JUMP_LABEL_REF_H */ | ||
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 00cec4dc0ae2..f37ba716ef8b 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -283,6 +283,7 @@ extern char *get_options(const char *str, int nints, int *ints); | |||
283 | extern unsigned long long memparse(const char *ptr, char **retptr); | 283 | extern unsigned long long memparse(const char *ptr, char **retptr); |
284 | 284 | ||
285 | extern int core_kernel_text(unsigned long addr); | 285 | extern int core_kernel_text(unsigned long addr); |
286 | extern int core_kernel_data(unsigned long addr); | ||
286 | extern int __kernel_text_address(unsigned long addr); | 287 | extern int __kernel_text_address(unsigned long addr); |
287 | extern int kernel_text_address(unsigned long addr); | 288 | extern int kernel_text_address(unsigned long addr); |
288 | extern int func_ptr_is_kernel_text(void *ptr); | 289 | extern int func_ptr_is_kernel_text(void *ptr); |
diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 6efd7a78de6a..310231823852 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h | |||
@@ -113,5 +113,6 @@ extern void usermodehelper_init(void); | |||
113 | 113 | ||
114 | extern int usermodehelper_disable(void); | 114 | extern int usermodehelper_disable(void); |
115 | extern void usermodehelper_enable(void); | 115 | extern void usermodehelper_enable(void); |
116 | extern bool usermodehelper_is_disabled(void); | ||
116 | 117 | ||
117 | #endif /* __LINUX_KMOD_H__ */ | 118 | #endif /* __LINUX_KMOD_H__ */ |
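usermodehelper_is_disabled() lets drivers detect the window (for example during suspend/hibernation) in which user-mode helpers are disabled, instead of queuing a helper that cannot run. A hedged sketch; the helper path, arguments and error code are illustrative:

#include <linux/errno.h>
#include <linux/kmod.h>

static int my_notify_userspace(void)
{
        char *argv[] = { "/sbin/my-agent", "event", NULL };     /* illustrative */
        char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

        /* Don't even try while helpers are disabled (system sleep transition). */
        if (usermodehelper_is_disabled())
                return -EBUSY;

        return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
}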
diff --git a/include/linux/list.h b/include/linux/list.h index 3a54266a1e85..cc6d2aa6b415 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/poison.h> | 6 | #include <linux/poison.h> |
7 | #include <linux/prefetch.h> | 7 | #include <linux/const.h> |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Simple doubly linked list implementation. | 10 | * Simple doubly linked list implementation. |
@@ -367,18 +367,15 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
367 | * @head: the head for your list. | 367 | * @head: the head for your list. |
368 | */ | 368 | */ |
369 | #define list_for_each(pos, head) \ | 369 | #define list_for_each(pos, head) \ |
370 | for (pos = (head)->next; prefetch(pos->next), pos != (head); \ | 370 | for (pos = (head)->next; pos != (head); pos = pos->next) |
371 | pos = pos->next) | ||
372 | 371 | ||
373 | /** | 372 | /** |
374 | * __list_for_each - iterate over a list | 373 | * __list_for_each - iterate over a list |
375 | * @pos: the &struct list_head to use as a loop cursor. | 374 | * @pos: the &struct list_head to use as a loop cursor. |
376 | * @head: the head for your list. | 375 | * @head: the head for your list. |
377 | * | 376 | * |
378 | * This variant differs from list_for_each() in that it's the | 377 | * This variant doesn't differ from list_for_each() any more. |
379 | * simplest possible list iteration code, no prefetching is done. | 378 | * We don't do prefetching in either case. |
380 | * Use this for code that knows the list to be very short (empty | ||
381 | * or 1 entry) most of the time. | ||
382 | */ | 379 | */ |
383 | #define __list_for_each(pos, head) \ | 380 | #define __list_for_each(pos, head) \ |
384 | for (pos = (head)->next; pos != (head); pos = pos->next) | 381 | for (pos = (head)->next; pos != (head); pos = pos->next) |
@@ -389,8 +386,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
389 | * @head: the head for your list. | 386 | * @head: the head for your list. |
390 | */ | 387 | */ |
391 | #define list_for_each_prev(pos, head) \ | 388 | #define list_for_each_prev(pos, head) \ |
392 | for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ | 389 | for (pos = (head)->prev; pos != (head); pos = pos->prev) |
393 | pos = pos->prev) | ||
394 | 390 | ||
395 | /** | 391 | /** |
396 | * list_for_each_safe - iterate over a list safe against removal of list entry | 392 | * list_for_each_safe - iterate over a list safe against removal of list entry |
@@ -410,7 +406,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
410 | */ | 406 | */ |
411 | #define list_for_each_prev_safe(pos, n, head) \ | 407 | #define list_for_each_prev_safe(pos, n, head) \ |
412 | for (pos = (head)->prev, n = pos->prev; \ | 408 | for (pos = (head)->prev, n = pos->prev; \ |
413 | prefetch(pos->prev), pos != (head); \ | 409 | pos != (head); \ |
414 | pos = n, n = pos->prev) | 410 | pos = n, n = pos->prev) |
415 | 411 | ||
416 | /** | 412 | /** |
@@ -421,7 +417,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
421 | */ | 417 | */ |
422 | #define list_for_each_entry(pos, head, member) \ | 418 | #define list_for_each_entry(pos, head, member) \ |
423 | for (pos = list_entry((head)->next, typeof(*pos), member); \ | 419 | for (pos = list_entry((head)->next, typeof(*pos), member); \ |
424 | prefetch(pos->member.next), &pos->member != (head); \ | 420 | &pos->member != (head); \ |
425 | pos = list_entry(pos->member.next, typeof(*pos), member)) | 421 | pos = list_entry(pos->member.next, typeof(*pos), member)) |
426 | 422 | ||
427 | /** | 423 | /** |
@@ -432,7 +428,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
432 | */ | 428 | */ |
433 | #define list_for_each_entry_reverse(pos, head, member) \ | 429 | #define list_for_each_entry_reverse(pos, head, member) \ |
434 | for (pos = list_entry((head)->prev, typeof(*pos), member); \ | 430 | for (pos = list_entry((head)->prev, typeof(*pos), member); \ |
435 | prefetch(pos->member.prev), &pos->member != (head); \ | 431 | &pos->member != (head); \ |
436 | pos = list_entry(pos->member.prev, typeof(*pos), member)) | 432 | pos = list_entry(pos->member.prev, typeof(*pos), member)) |
437 | 433 | ||
438 | /** | 434 | /** |
@@ -457,7 +453,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
457 | */ | 453 | */ |
458 | #define list_for_each_entry_continue(pos, head, member) \ | 454 | #define list_for_each_entry_continue(pos, head, member) \ |
459 | for (pos = list_entry(pos->member.next, typeof(*pos), member); \ | 455 | for (pos = list_entry(pos->member.next, typeof(*pos), member); \ |
460 | prefetch(pos->member.next), &pos->member != (head); \ | 456 | &pos->member != (head); \ |
461 | pos = list_entry(pos->member.next, typeof(*pos), member)) | 457 | pos = list_entry(pos->member.next, typeof(*pos), member)) |
462 | 458 | ||
463 | /** | 459 | /** |
@@ -471,7 +467,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
471 | */ | 467 | */ |
472 | #define list_for_each_entry_continue_reverse(pos, head, member) \ | 468 | #define list_for_each_entry_continue_reverse(pos, head, member) \ |
473 | for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ | 469 | for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ |
474 | prefetch(pos->member.prev), &pos->member != (head); \ | 470 | &pos->member != (head); \ |
475 | pos = list_entry(pos->member.prev, typeof(*pos), member)) | 471 | pos = list_entry(pos->member.prev, typeof(*pos), member)) |
476 | 472 | ||
477 | /** | 473 | /** |
@@ -483,7 +479,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
483 | * Iterate over list of given type, continuing from current position. | 479 | * Iterate over list of given type, continuing from current position. |
484 | */ | 480 | */ |
485 | #define list_for_each_entry_from(pos, head, member) \ | 481 | #define list_for_each_entry_from(pos, head, member) \ |
486 | for (; prefetch(pos->member.next), &pos->member != (head); \ | 482 | for (; &pos->member != (head); \ |
487 | pos = list_entry(pos->member.next, typeof(*pos), member)) | 483 | pos = list_entry(pos->member.next, typeof(*pos), member)) |
488 | 484 | ||
489 | /** | 485 | /** |
@@ -664,8 +660,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
664 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) | 660 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) |
665 | 661 | ||
666 | #define hlist_for_each(pos, head) \ | 662 | #define hlist_for_each(pos, head) \ |
667 | for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ | 663 | for (pos = (head)->first; pos ; pos = pos->next) |
668 | pos = pos->next) | ||
669 | 664 | ||
670 | #define hlist_for_each_safe(pos, n, head) \ | 665 | #define hlist_for_each_safe(pos, n, head) \ |
671 | for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ | 666 | for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ |
@@ -680,7 +675,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
680 | */ | 675 | */ |
681 | #define hlist_for_each_entry(tpos, pos, head, member) \ | 676 | #define hlist_for_each_entry(tpos, pos, head, member) \ |
682 | for (pos = (head)->first; \ | 677 | for (pos = (head)->first; \ |
683 | pos && ({ prefetch(pos->next); 1;}) && \ | 678 | pos && \ |
684 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | 679 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
685 | pos = pos->next) | 680 | pos = pos->next) |
686 | 681 | ||
@@ -692,7 +687,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
692 | */ | 687 | */ |
693 | #define hlist_for_each_entry_continue(tpos, pos, member) \ | 688 | #define hlist_for_each_entry_continue(tpos, pos, member) \ |
694 | for (pos = (pos)->next; \ | 689 | for (pos = (pos)->next; \ |
695 | pos && ({ prefetch(pos->next); 1;}) && \ | 690 | pos && \ |
696 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | 691 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
697 | pos = pos->next) | 692 | pos = pos->next) |
698 | 693 | ||
@@ -703,7 +698,7 @@ static inline void hlist_move_list(struct hlist_head *old, | |||
703 | * @member: the name of the hlist_node within the struct. | 698 | * @member: the name of the hlist_node within the struct. |
704 | */ | 699 | */ |
705 | #define hlist_for_each_entry_from(tpos, pos, member) \ | 700 | #define hlist_for_each_entry_from(tpos, pos, member) \ |
706 | for (; pos && ({ prefetch(pos->next); 1;}) && \ | 701 | for (; pos && \ |
707 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ | 702 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
708 | pos = pos->next) | 703 | pos = pos->next) |
709 | 704 | ||
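Dropping the speculative prefetch() calls also removes list.h's implicit include of <linux/prefetch.h>; the iterators behave exactly as before, but files that still call prefetch()/prefetchw() themselves now need the include explicitly. A small sketch with an illustrative element type:

#include <linux/list.h>
#include <linux/prefetch.h>     /* no longer pulled in via list.h */

struct my_item {
        struct list_head node;
        int value;
};

/* Unchanged usage: the macro simply no longer prefetches the next element. */
static int my_sum(struct list_head *head)
{
        struct my_item *item;
        int sum = 0;

        list_for_each_entry(item, head, node)
                sum += item->value;
        return sum;
}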
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index eb792cb6d745..bcb793ec7374 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -183,6 +183,7 @@ struct mmc_host { | |||
183 | struct work_struct clk_gate_work; /* delayed clock gate */ | 183 | struct work_struct clk_gate_work; /* delayed clock gate */ |
184 | unsigned int clk_old; /* old clock value cache */ | 184 | unsigned int clk_old; /* old clock value cache */ |
185 | spinlock_t clk_lock; /* lock for clk fields */ | 185 | spinlock_t clk_lock; /* lock for clk fields */ |
186 | struct mutex clk_gate_mutex; /* mutex for clock gating */ | ||
186 | #endif | 187 | #endif |
187 | 188 | ||
188 | /* host specific block data */ | 189 | /* host specific block data */ |
diff --git a/include/linux/module.h b/include/linux/module.h index 5de42043dff0..d9ca2d5dc6d0 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -64,6 +64,9 @@ struct module_version_attribute { | |||
64 | const char *version; | 64 | const char *version; |
65 | } __attribute__ ((__aligned__(sizeof(void *)))); | 65 | } __attribute__ ((__aligned__(sizeof(void *)))); |
66 | 66 | ||
67 | extern ssize_t __modver_version_show(struct module_attribute *, | ||
68 | struct module *, char *); | ||
69 | |||
67 | struct module_kobject | 70 | struct module_kobject |
68 | { | 71 | { |
69 | struct kobject kobj; | 72 | struct kobject kobj; |
@@ -172,12 +175,7 @@ extern struct module __this_module; | |||
172 | #define MODULE_VERSION(_version) MODULE_INFO(version, _version) | 175 | #define MODULE_VERSION(_version) MODULE_INFO(version, _version) |
173 | #else | 176 | #else |
174 | #define MODULE_VERSION(_version) \ | 177 | #define MODULE_VERSION(_version) \ |
175 | extern ssize_t __modver_version_show(struct module_attribute *, \ | 178 | static struct module_version_attribute ___modver_attr = { \ |
176 | struct module *, char *); \ | ||
177 | static struct module_version_attribute __modver_version_attr \ | ||
178 | __used \ | ||
179 | __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \ | ||
180 | = { \ | ||
181 | .mattr = { \ | 179 | .mattr = { \ |
182 | .attr = { \ | 180 | .attr = { \ |
183 | .name = "version", \ | 181 | .name = "version", \ |
@@ -187,7 +185,10 @@ extern struct module __this_module; | |||
187 | }, \ | 185 | }, \ |
188 | .module_name = KBUILD_MODNAME, \ | 186 | .module_name = KBUILD_MODNAME, \ |
189 | .version = _version, \ | 187 | .version = _version, \ |
190 | } | 188 | }; \ |
189 | static const struct module_version_attribute \ | ||
190 | __used __attribute__ ((__section__ ("__modver"))) \ | ||
191 | * __moduleparam_const __modver_attr = &___modver_attr | ||
191 | #endif | 192 | #endif |
192 | 193 | ||
193 | /* Optional firmware file (or files) needed by the module | 194 | /* Optional firmware file (or files) needed by the module |
@@ -223,7 +224,7 @@ struct module_use { | |||
223 | extern void *__crc_##sym __attribute__((weak)); \ | 224 | extern void *__crc_##sym __attribute__((weak)); \ |
224 | static const unsigned long __kcrctab_##sym \ | 225 | static const unsigned long __kcrctab_##sym \ |
225 | __used \ | 226 | __used \ |
226 | __attribute__((section("__kcrctab" sec), unused)) \ | 227 | __attribute__((section("___kcrctab" sec "+" #sym), unused)) \ |
227 | = (unsigned long) &__crc_##sym; | 228 | = (unsigned long) &__crc_##sym; |
228 | #else | 229 | #else |
229 | #define __CRC_SYMBOL(sym, sec) | 230 | #define __CRC_SYMBOL(sym, sec) |
@@ -238,7 +239,7 @@ struct module_use { | |||
238 | = MODULE_SYMBOL_PREFIX #sym; \ | 239 | = MODULE_SYMBOL_PREFIX #sym; \ |
239 | static const struct kernel_symbol __ksymtab_##sym \ | 240 | static const struct kernel_symbol __ksymtab_##sym \ |
240 | __used \ | 241 | __used \ |
241 | __attribute__((section("__ksymtab" sec), unused)) \ | 242 | __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ |
242 | = { (unsigned long)&sym, __kstrtab_##sym } | 243 | = { (unsigned long)&sym, __kstrtab_##sym } |
243 | 244 | ||
244 | #define EXPORT_SYMBOL(sym) \ | 245 | #define EXPORT_SYMBOL(sym) \ |
@@ -367,34 +368,35 @@ struct module | |||
367 | struct module_notes_attrs *notes_attrs; | 368 | struct module_notes_attrs *notes_attrs; |
368 | #endif | 369 | #endif |
369 | 370 | ||
371 | /* The command line arguments (may be mangled). People like | ||
372 | keeping pointers to this stuff */ | ||
373 | char *args; | ||
374 | |||
370 | #ifdef CONFIG_SMP | 375 | #ifdef CONFIG_SMP |
371 | /* Per-cpu data. */ | 376 | /* Per-cpu data. */ |
372 | void __percpu *percpu; | 377 | void __percpu *percpu; |
373 | unsigned int percpu_size; | 378 | unsigned int percpu_size; |
374 | #endif | 379 | #endif |
375 | 380 | ||
376 | /* The command line arguments (may be mangled). People like | ||
377 | keeping pointers to this stuff */ | ||
378 | char *args; | ||
379 | #ifdef CONFIG_TRACEPOINTS | 381 | #ifdef CONFIG_TRACEPOINTS |
380 | struct tracepoint * const *tracepoints_ptrs; | ||
381 | unsigned int num_tracepoints; | 382 | unsigned int num_tracepoints; |
383 | struct tracepoint * const *tracepoints_ptrs; | ||
382 | #endif | 384 | #endif |
383 | #ifdef HAVE_JUMP_LABEL | 385 | #ifdef HAVE_JUMP_LABEL |
384 | struct jump_entry *jump_entries; | 386 | struct jump_entry *jump_entries; |
385 | unsigned int num_jump_entries; | 387 | unsigned int num_jump_entries; |
386 | #endif | 388 | #endif |
387 | #ifdef CONFIG_TRACING | 389 | #ifdef CONFIG_TRACING |
388 | const char **trace_bprintk_fmt_start; | ||
389 | unsigned int num_trace_bprintk_fmt; | 390 | unsigned int num_trace_bprintk_fmt; |
391 | const char **trace_bprintk_fmt_start; | ||
390 | #endif | 392 | #endif |
391 | #ifdef CONFIG_EVENT_TRACING | 393 | #ifdef CONFIG_EVENT_TRACING |
392 | struct ftrace_event_call **trace_events; | 394 | struct ftrace_event_call **trace_events; |
393 | unsigned int num_trace_events; | 395 | unsigned int num_trace_events; |
394 | #endif | 396 | #endif |
395 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 397 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
396 | unsigned long *ftrace_callsites; | ||
397 | unsigned int num_ftrace_callsites; | 398 | unsigned int num_ftrace_callsites; |
399 | unsigned long *ftrace_callsites; | ||
398 | #endif | 400 | #endif |
399 | 401 | ||
400 | #ifdef CONFIG_MODULE_UNLOAD | 402 | #ifdef CONFIG_MODULE_UNLOAD |
@@ -475,8 +477,9 @@ const struct kernel_symbol *find_symbol(const char *name, | |||
475 | bool warn); | 477 | bool warn); |
476 | 478 | ||
477 | /* Walk the exported symbol table */ | 479 | /* Walk the exported symbol table */ |
478 | bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, | 480 | bool each_symbol_section(bool (*fn)(const struct symsearch *arr, |
479 | unsigned int symnum, void *data), void *data); | 481 | struct module *owner, |
482 | void *data), void *data); | ||
480 | 483 | ||
481 | /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if | 484 | /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if |
482 | symnum out of range. */ | 485 | symnum out of range. */ |
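For module authors nothing changes here: MODULE_VERSION() is used exactly as before; the macro now just records a pointer to the attribute in the __modver section instead of placing the structure there directly, and the per-symbol ___ksymtab/___kcrctab section names are a build-internal detail. A trivial reminder of the unchanged usage, with an illustrative version string:

#include <linux/module.h>

MODULE_DESCRIPTION("Example module");   /* illustrative */
MODULE_VERSION("1.2.3");
MODULE_LICENSE("GPL");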
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 07b41951e3fa..ddaae98c53f9 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -67,9 +67,9 @@ struct kparam_string { | |||
67 | struct kparam_array | 67 | struct kparam_array |
68 | { | 68 | { |
69 | unsigned int max; | 69 | unsigned int max; |
70 | unsigned int elemsize; | ||
70 | unsigned int *num; | 71 | unsigned int *num; |
71 | const struct kernel_param_ops *ops; | 72 | const struct kernel_param_ops *ops; |
72 | unsigned int elemsize; | ||
73 | void *elem; | 73 | void *elem; |
74 | }; | 74 | }; |
75 | 75 | ||
@@ -371,8 +371,9 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp); | |||
371 | */ | 371 | */ |
372 | #define module_param_array_named(name, array, type, nump, perm) \ | 372 | #define module_param_array_named(name, array, type, nump, perm) \ |
373 | static const struct kparam_array __param_arr_##name \ | 373 | static const struct kparam_array __param_arr_##name \ |
374 | = { ARRAY_SIZE(array), nump, ¶m_ops_##type, \ | 374 | = { .max = ARRAY_SIZE(array), .num = nump, \ |
375 | sizeof(array[0]), array }; \ | 375 | .ops = ¶m_ops_##type, \ |
376 | .elemsize = sizeof(array[0]), .elem = array }; \ | ||
376 | __module_param_call(MODULE_PARAM_PREFIX, name, \ | 377 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
377 | ¶m_array_ops, \ | 378 | ¶m_array_ops, \ |
378 | .arr = &__param_arr_##name, \ | 379 | .arr = &__param_arr_##name, \ |
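The kparam_array change is purely an initializer fix (switching to designated form so the field reordering cannot silently mis-initialize), so array parameters are declared as before. A short sketch with illustrative names:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int my_channels[4] = { 0, 1, 2, 3 };
static unsigned int my_nr_channels = 4;

/* Read-only in sysfs; up to ARRAY_SIZE(my_channels) values may be supplied
 * at load time via channels=... */
module_param_array_named(channels, my_channels, int, &my_nr_channels, 0444);
MODULE_PARM_DESC(channels, "Channel numbers to enable");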
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 890dce242639..7e371f7df9c4 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -233,6 +233,7 @@ struct nfs4_layoutget { | |||
233 | struct nfs4_layoutget_args args; | 233 | struct nfs4_layoutget_args args; |
234 | struct nfs4_layoutget_res res; | 234 | struct nfs4_layoutget_res res; |
235 | struct pnfs_layout_segment **lsegpp; | 235 | struct pnfs_layout_segment **lsegpp; |
236 | gfp_t gfp_flags; | ||
236 | }; | 237 | }; |
237 | 238 | ||
238 | struct nfs4_getdeviceinfo_args { | 239 | struct nfs4_getdeviceinfo_args { |
diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 8bfe6c1d4365..ae5638480ef2 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h | |||
@@ -21,8 +21,7 @@ extern void of_device_make_bus_id(struct device *dev); | |||
21 | static inline int of_driver_match_device(struct device *dev, | 21 | static inline int of_driver_match_device(struct device *dev, |
22 | const struct device_driver *drv) | 22 | const struct device_driver *drv) |
23 | { | 23 | { |
24 | dev->of_match = of_match_device(drv->of_match_table, dev); | 24 | return of_match_device(drv->of_match_table, dev) != NULL; |
25 | return dev->of_match != NULL; | ||
26 | } | 25 | } |
27 | 26 | ||
28 | extern struct platform_device *of_dev_get(struct platform_device *dev); | 27 | extern struct platform_device *of_dev_get(struct platform_device *dev); |
@@ -58,6 +57,11 @@ static inline int of_device_uevent(struct device *dev, | |||
58 | 57 | ||
59 | static inline void of_device_node_put(struct device *dev) { } | 58 | static inline void of_device_node_put(struct device *dev) { } |
60 | 59 | ||
60 | static inline const struct of_device_id *of_match_device( | ||
61 | const struct of_device_id *matches, const struct device *dev) | ||
62 | { | ||
63 | return NULL; | ||
64 | } | ||
61 | #endif /* CONFIG_OF_DEVICE */ | 65 | #endif /* CONFIG_OF_DEVICE */ |
62 | 66 | ||
63 | #endif /* _LINUX_OF_DEVICE_H */ | 67 | #endif /* _LINUX_OF_DEVICE_H */ |
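With dev->of_match gone, of_driver_match_device() only answers yes/no, and drivers that need the matched entry call of_match_device() themselves in probe(); the new !CONFIG_OF_DEVICE stub returning NULL keeps such code building everywhere. A sketch with an illustrative compatible string:

#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id my_of_match[] = {
        { .compatible = "acme,my-device" },     /* illustrative */
        { }
};

static int __devinit my_probe(struct platform_device *pdev)
{
        const struct of_device_id *match =
                of_match_device(my_of_match, &pdev->dev);

        if (match) {
                /* bound via the device tree; match->data can carry
                 * per-compatible configuration */
        }
        return 0;
}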
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h new file mode 100644 index 000000000000..655824fa4c76 --- /dev/null +++ b/include/linux/pci-ats.h | |||
@@ -0,0 +1,52 @@ | |||
1 | #ifndef LINUX_PCI_ATS_H | ||
2 | #define LINUX_PCI_ATS_H | ||
3 | |||
4 | /* Address Translation Service */ | ||
5 | struct pci_ats { | ||
6 | int pos; /* capability position */ | ||
7 | int stu; /* Smallest Translation Unit */ | ||
8 | int qdep; /* Invalidate Queue Depth */ | ||
9 | int ref_cnt; /* Physical Function reference count */ | ||
10 | unsigned int is_enabled:1; /* Enable bit is set */ | ||
11 | }; | ||
12 | |||
13 | #ifdef CONFIG_PCI_IOV | ||
14 | |||
15 | extern int pci_enable_ats(struct pci_dev *dev, int ps); | ||
16 | extern void pci_disable_ats(struct pci_dev *dev); | ||
17 | extern int pci_ats_queue_depth(struct pci_dev *dev); | ||
18 | /** | ||
19 | * pci_ats_enabled - query the ATS status | ||
20 | * @dev: the PCI device | ||
21 | * | ||
22 | * Returns 1 if ATS capability is enabled, or 0 if not. | ||
23 | */ | ||
24 | static inline int pci_ats_enabled(struct pci_dev *dev) | ||
25 | { | ||
26 | return dev->ats && dev->ats->is_enabled; | ||
27 | } | ||
28 | |||
29 | #else /* CONFIG_PCI_IOV */ | ||
30 | |||
31 | static inline int pci_enable_ats(struct pci_dev *dev, int ps) | ||
32 | { | ||
33 | return -ENODEV; | ||
34 | } | ||
35 | |||
36 | static inline void pci_disable_ats(struct pci_dev *dev) | ||
37 | { | ||
38 | } | ||
39 | |||
40 | static inline int pci_ats_queue_depth(struct pci_dev *dev) | ||
41 | { | ||
42 | return -ENODEV; | ||
43 | } | ||
44 | |||
45 | static inline int pci_ats_enabled(struct pci_dev *dev) | ||
46 | { | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | #endif /* CONFIG_PCI_IOV */ | ||
51 | |||
52 | #endif /* LINUX_PCI_ATS_H*/ | ||
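The new header gives the ATS helpers a public home (with stubs that keep non-IOV configurations building). A sketch of how an IOMMU driver might use them; choosing PAGE_SHIFT as the smallest translation unit and the debug message are illustrative:

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>

static int my_iommu_enable_ats(struct pci_dev *pdev)
{
        int ret;

        if (pci_ats_enabled(pdev))
                return 0;                       /* nothing to do */

        ret = pci_enable_ats(pdev, PAGE_SHIFT); /* smallest translation unit */
        if (ret)
                return ret;

        pr_debug("ATS on %s, invalidate queue depth %d\n",
                 pci_name(pdev), pci_ats_queue_depth(pdev));
        return 0;
}

static void my_iommu_disable_ats(struct pci_dev *pdev)
{
        if (pci_ats_enabled(pdev))
                pci_disable_ats(pdev);
}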
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index ee9f1e782800..3412684ce5d5 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -2,8 +2,8 @@ | |||
2 | * Performance events: | 2 | * Performance events: |
3 | * | 3 | * |
4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | 4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> |
5 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | 5 | * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar |
6 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra | 6 | * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra |
7 | * | 7 | * |
8 | * Data type definitions, declarations, prototypes. | 8 | * Data type definitions, declarations, prototypes. |
9 | * | 9 | * |
@@ -52,6 +52,8 @@ enum perf_hw_id { | |||
52 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | 52 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, |
53 | PERF_COUNT_HW_BRANCH_MISSES = 5, | 53 | PERF_COUNT_HW_BRANCH_MISSES = 5, |
54 | PERF_COUNT_HW_BUS_CYCLES = 6, | 54 | PERF_COUNT_HW_BUS_CYCLES = 6, |
55 | PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, | ||
56 | PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, | ||
55 | 57 | ||
56 | PERF_COUNT_HW_MAX, /* non-ABI */ | 58 | PERF_COUNT_HW_MAX, /* non-ABI */ |
57 | }; | 59 | }; |
@@ -468,9 +470,9 @@ enum perf_callchain_context { | |||
468 | PERF_CONTEXT_MAX = (__u64)-4095, | 470 | PERF_CONTEXT_MAX = (__u64)-4095, |
469 | }; | 471 | }; |
470 | 472 | ||
471 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) | 473 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) |
472 | #define PERF_FLAG_FD_OUTPUT (1U << 1) | 474 | #define PERF_FLAG_FD_OUTPUT (1U << 1) |
473 | #define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ | 475 | #define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ |
474 | 476 | ||
475 | #ifdef __KERNEL__ | 477 | #ifdef __KERNEL__ |
476 | /* | 478 | /* |
@@ -484,9 +486,9 @@ enum perf_callchain_context { | |||
484 | #endif | 486 | #endif |
485 | 487 | ||
486 | struct perf_guest_info_callbacks { | 488 | struct perf_guest_info_callbacks { |
487 | int (*is_in_guest) (void); | 489 | int (*is_in_guest)(void); |
488 | int (*is_user_mode) (void); | 490 | int (*is_user_mode)(void); |
489 | unsigned long (*get_guest_ip) (void); | 491 | unsigned long (*get_guest_ip)(void); |
490 | }; | 492 | }; |
491 | 493 | ||
492 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 494 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
@@ -505,7 +507,7 @@ struct perf_guest_info_callbacks { | |||
505 | #include <linux/ftrace.h> | 507 | #include <linux/ftrace.h> |
506 | #include <linux/cpu.h> | 508 | #include <linux/cpu.h> |
507 | #include <linux/irq_work.h> | 509 | #include <linux/irq_work.h> |
508 | #include <linux/jump_label_ref.h> | 510 | #include <linux/jump_label.h> |
509 | #include <asm/atomic.h> | 511 | #include <asm/atomic.h> |
510 | #include <asm/local.h> | 512 | #include <asm/local.h> |
511 | 513 | ||
@@ -652,19 +654,19 @@ struct pmu { | |||
652 | * Start the transaction, after this ->add() doesn't need to | 654 | * Start the transaction, after this ->add() doesn't need to |
653 | * do schedulability tests. | 655 | * do schedulability tests. |
654 | */ | 656 | */ |
655 | void (*start_txn) (struct pmu *pmu); /* optional */ | 657 | void (*start_txn) (struct pmu *pmu); /* optional */ |
656 | /* | 658 | /* |
657 | * If ->start_txn() disabled the ->add() schedulability test | 659 | * If ->start_txn() disabled the ->add() schedulability test |
658 | * then ->commit_txn() is required to perform one. On success | 660 | * then ->commit_txn() is required to perform one. On success |
659 | * the transaction is closed. On error the transaction is kept | 661 | * the transaction is closed. On error the transaction is kept |
660 | * open until ->cancel_txn() is called. | 662 | * open until ->cancel_txn() is called. |
661 | */ | 663 | */ |
662 | int (*commit_txn) (struct pmu *pmu); /* optional */ | 664 | int (*commit_txn) (struct pmu *pmu); /* optional */ |
663 | /* | 665 | /* |
664 | * Will cancel the transaction, assumes ->del() is called | 666 | * Will cancel the transaction, assumes ->del() is called |
665 | * for each successful ->add() during the transaction. | 667 | * for each successful ->add() during the transaction. |
666 | */ | 668 | */ |
667 | void (*cancel_txn) (struct pmu *pmu); /* optional */ | 669 | void (*cancel_txn) (struct pmu *pmu); /* optional */ |
668 | }; | 670 | }; |
669 | 671 | ||
670 | /** | 672 | /** |
@@ -712,15 +714,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int, | |||
712 | struct pt_regs *regs); | 714 | struct pt_regs *regs); |
713 | 715 | ||
714 | enum perf_group_flag { | 716 | enum perf_group_flag { |
715 | PERF_GROUP_SOFTWARE = 0x1, | 717 | PERF_GROUP_SOFTWARE = 0x1, |
716 | }; | 718 | }; |
717 | 719 | ||
718 | #define SWEVENT_HLIST_BITS 8 | 720 | #define SWEVENT_HLIST_BITS 8 |
719 | #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) | 721 | #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) |
720 | 722 | ||
721 | struct swevent_hlist { | 723 | struct swevent_hlist { |
722 | struct hlist_head heads[SWEVENT_HLIST_SIZE]; | 724 | struct hlist_head heads[SWEVENT_HLIST_SIZE]; |
723 | struct rcu_head rcu_head; | 725 | struct rcu_head rcu_head; |
724 | }; | 726 | }; |
725 | 727 | ||
726 | #define PERF_ATTACH_CONTEXT 0x01 | 728 | #define PERF_ATTACH_CONTEXT 0x01 |
@@ -733,13 +735,13 @@ struct swevent_hlist { | |||
733 | * This is a per-cpu dynamically allocated data structure. | 735 | * This is a per-cpu dynamically allocated data structure. |
734 | */ | 736 | */ |
735 | struct perf_cgroup_info { | 737 | struct perf_cgroup_info { |
736 | u64 time; | 738 | u64 time; |
737 | u64 timestamp; | 739 | u64 timestamp; |
738 | }; | 740 | }; |
739 | 741 | ||
740 | struct perf_cgroup { | 742 | struct perf_cgroup { |
741 | struct cgroup_subsys_state css; | 743 | struct cgroup_subsys_state css; |
742 | struct perf_cgroup_info *info; /* timing info, one per cpu */ | 744 | struct perf_cgroup_info *info; /* timing info, one per cpu */ |
743 | }; | 745 | }; |
744 | #endif | 746 | #endif |
745 | 747 | ||
@@ -923,7 +925,7 @@ struct perf_event_context { | |||
923 | 925 | ||
924 | /* | 926 | /* |
925 | * Number of contexts where an event can trigger: | 927 | * Number of contexts where an event can trigger: |
926 | * task, softirq, hardirq, nmi. | 928 | * task, softirq, hardirq, nmi. |
927 | */ | 929 | */ |
928 | #define PERF_NR_CONTEXTS 4 | 930 | #define PERF_NR_CONTEXTS 4 |
929 | 931 | ||
@@ -1001,8 +1003,7 @@ struct perf_sample_data { | |||
1001 | struct perf_raw_record *raw; | 1003 | struct perf_raw_record *raw; |
1002 | }; | 1004 | }; |
1003 | 1005 | ||
1004 | static inline | 1006 | static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) |
1005 | void perf_sample_data_init(struct perf_sample_data *data, u64 addr) | ||
1006 | { | 1007 | { |
1007 | data->addr = addr; | 1008 | data->addr = addr; |
1008 | data->raw = NULL; | 1009 | data->raw = NULL; |
@@ -1034,13 +1035,12 @@ static inline int is_software_event(struct perf_event *event) | |||
1034 | return event->pmu->task_ctx_nr == perf_sw_context; | 1035 | return event->pmu->task_ctx_nr == perf_sw_context; |
1035 | } | 1036 | } |
1036 | 1037 | ||
1037 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 1038 | extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
1038 | 1039 | ||
1039 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); | 1040 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); |
1040 | 1041 | ||
1041 | #ifndef perf_arch_fetch_caller_regs | 1042 | #ifndef perf_arch_fetch_caller_regs |
1042 | static inline void | 1043 | static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } |
1043 | perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } | ||
1044 | #endif | 1044 | #endif |
1045 | 1045 | ||
1046 | /* | 1046 | /* |
@@ -1063,26 +1063,24 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | |||
1063 | { | 1063 | { |
1064 | struct pt_regs hot_regs; | 1064 | struct pt_regs hot_regs; |
1065 | 1065 | ||
1066 | JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); | 1066 | if (static_branch(&perf_swevent_enabled[event_id])) { |
1067 | return; | 1067 | if (!regs) { |
1068 | 1068 | perf_fetch_caller_regs(&hot_regs); | |
1069 | have_event: | 1069 | regs = &hot_regs; |
1070 | if (!regs) { | 1070 | } |
1071 | perf_fetch_caller_regs(&hot_regs); | 1071 | __perf_sw_event(event_id, nr, nmi, regs, addr); |
1072 | regs = &hot_regs; | ||
1073 | } | 1072 | } |
1074 | __perf_sw_event(event_id, nr, nmi, regs, addr); | ||
1075 | } | 1073 | } |
1076 | 1074 | ||
1077 | extern atomic_t perf_sched_events; | 1075 | extern struct jump_label_key perf_sched_events; |
1078 | 1076 | ||
1079 | static inline void perf_event_task_sched_in(struct task_struct *task) | 1077 | static inline void perf_event_task_sched_in(struct task_struct *task) |
1080 | { | 1078 | { |
1081 | COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task)); | 1079 | if (static_branch(&perf_sched_events)) |
1080 | __perf_event_task_sched_in(task); | ||
1082 | } | 1081 | } |
1083 | 1082 | ||
1084 | static inline | 1083 | static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) |
1085 | void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) | ||
1086 | { | 1084 | { |
1087 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); | 1085 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); |
1088 | 1086 | ||
@@ -1100,14 +1098,10 @@ extern void perf_event_fork(struct task_struct *tsk); | |||
1100 | /* Callchains */ | 1098 | /* Callchains */ |
1101 | DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); | 1099 | DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); |
1102 | 1100 | ||
1103 | extern void perf_callchain_user(struct perf_callchain_entry *entry, | 1101 | extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); |
1104 | struct pt_regs *regs); | 1102 | extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); |
1105 | extern void perf_callchain_kernel(struct perf_callchain_entry *entry, | ||
1106 | struct pt_regs *regs); | ||
1107 | |||
1108 | 1103 | ||
1109 | static inline void | 1104 | static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) |
1110 | perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
1111 | { | 1105 | { |
1112 | if (entry->nr < PERF_MAX_STACK_DEPTH) | 1106 | if (entry->nr < PERF_MAX_STACK_DEPTH) |
1113 | entry->ip[entry->nr++] = ip; | 1107 | entry->ip[entry->nr++] = ip; |
@@ -1143,9 +1137,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record, | |||
1143 | extern void perf_bp_event(struct perf_event *event, void *data); | 1137 | extern void perf_bp_event(struct perf_event *event, void *data); |
1144 | 1138 | ||
1145 | #ifndef perf_misc_flags | 1139 | #ifndef perf_misc_flags |
1146 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ | 1140 | # define perf_misc_flags(regs) \ |
1147 | PERF_RECORD_MISC_KERNEL) | 1141 | (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) |
1148 | #define perf_instruction_pointer(regs) instruction_pointer(regs) | 1142 | # define perf_instruction_pointer(regs) instruction_pointer(regs) |
1149 | #endif | 1143 | #endif |
1150 | 1144 | ||
1151 | extern int perf_output_begin(struct perf_output_handle *handle, | 1145 | extern int perf_output_begin(struct perf_output_handle *handle, |
@@ -1180,9 +1174,9 @@ static inline void | |||
1180 | perf_bp_event(struct perf_event *event, void *data) { } | 1174 | perf_bp_event(struct perf_event *event, void *data) { } |
1181 | 1175 | ||
1182 | static inline int perf_register_guest_info_callbacks | 1176 | static inline int perf_register_guest_info_callbacks |
1183 | (struct perf_guest_info_callbacks *callbacks) { return 0; } | 1177 | (struct perf_guest_info_callbacks *callbacks) { return 0; } |
1184 | static inline int perf_unregister_guest_info_callbacks | 1178 | static inline int perf_unregister_guest_info_callbacks |
1185 | (struct perf_guest_info_callbacks *callbacks) { return 0; } | 1179 | (struct perf_guest_info_callbacks *callbacks) { return 0; } |
1186 | 1180 | ||
1187 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } | 1181 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } |
1188 | static inline void perf_event_comm(struct task_struct *tsk) { } | 1182 | static inline void perf_event_comm(struct task_struct *tsk) { } |
@@ -1195,23 +1189,22 @@ static inline void perf_event_disable(struct perf_event *event) { } | |||
1195 | static inline void perf_event_task_tick(void) { } | 1189 | static inline void perf_event_task_tick(void) { } |
1196 | #endif | 1190 | #endif |
1197 | 1191 | ||
1198 | #define perf_output_put(handle, x) \ | 1192 | #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) |
1199 | perf_output_copy((handle), &(x), sizeof(x)) | ||
1200 | 1193 | ||
1201 | /* | 1194 | /* |
1202 | * This has to have a higher priority than migration_notifier in sched.c. | 1195 | * This has to have a higher priority than migration_notifier in sched.c. |
1203 | */ | 1196 | */ |
1204 | #define perf_cpu_notifier(fn) \ | 1197 | #define perf_cpu_notifier(fn) \ |
1205 | do { \ | 1198 | do { \ |
1206 | static struct notifier_block fn##_nb __cpuinitdata = \ | 1199 | static struct notifier_block fn##_nb __cpuinitdata = \ |
1207 | { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ | 1200 | { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ |
1208 | fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ | 1201 | fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ |
1209 | (void *)(unsigned long)smp_processor_id()); \ | 1202 | (void *)(unsigned long)smp_processor_id()); \ |
1210 | fn(&fn##_nb, (unsigned long)CPU_STARTING, \ | 1203 | fn(&fn##_nb, (unsigned long)CPU_STARTING, \ |
1211 | (void *)(unsigned long)smp_processor_id()); \ | 1204 | (void *)(unsigned long)smp_processor_id()); \ |
1212 | fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ | 1205 | fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ |
1213 | (void *)(unsigned long)smp_processor_id()); \ | 1206 | (void *)(unsigned long)smp_processor_id()); \ |
1214 | register_cpu_notifier(&fn##_nb); \ | 1207 | register_cpu_notifier(&fn##_nb); \ |
1215 | } while (0) | 1208 | } while (0) |
1216 | 1209 | ||
1217 | #endif /* __KERNEL__ */ | 1210 | #endif /* __KERNEL__ */ |
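Several of the perf_event.h hunks above replace the old atomic_t counters and the JUMP_LABEL()/COND_STMT() goto pattern with struct jump_label_key and static_branch(). A hedged sketch of the resulting idiom for an arbitrary fast path; the foo_* names are illustrative, while static_branch(), jump_label_inc() and jump_label_dec() are the interfaces this series relies on:

#include <linux/jump_label.h>

static struct jump_label_key foo_active;	/* starts disabled */

static void foo_slow_path(void)
{
	/* bookkeeping that only matters while foo is enabled */
}

static inline void foo_hook(void)
{
	/* patched down to a no-op while foo_active has no users */
	if (static_branch(&foo_active))
		foo_slow_path();
}

void foo_enable(void)
{
	jump_label_inc(&foo_active);
}

void foo_disable(void)
{
	jump_label_dec(&foo_active);
}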
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 744942c95fec..ede1a80e3358 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -150,9 +150,6 @@ extern struct platform_device *platform_create_bundle(struct platform_driver *dr | |||
150 | struct resource *res, unsigned int n_res, | 150 | struct resource *res, unsigned int n_res, |
151 | const void *data, size_t size); | 151 | const void *data, size_t size); |
152 | 152 | ||
153 | extern const struct dev_pm_ops * platform_bus_get_pm_ops(void); | ||
154 | extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm); | ||
155 | |||
156 | /* early platform driver interface */ | 153 | /* early platform driver interface */ |
157 | struct early_platform_driver { | 154 | struct early_platform_driver { |
158 | const char *class_str; | 155 | const char *class_str; |
@@ -205,4 +202,64 @@ static inline char *early_platform_driver_setup_func(void) \ | |||
205 | } | 202 | } |
206 | #endif /* MODULE */ | 203 | #endif /* MODULE */ |
207 | 204 | ||
205 | #ifdef CONFIG_PM_SLEEP | ||
206 | extern int platform_pm_prepare(struct device *dev); | ||
207 | extern void platform_pm_complete(struct device *dev); | ||
208 | #else | ||
209 | #define platform_pm_prepare NULL | ||
210 | #define platform_pm_complete NULL | ||
211 | #endif | ||
212 | |||
213 | #ifdef CONFIG_SUSPEND | ||
214 | extern int platform_pm_suspend(struct device *dev); | ||
215 | extern int platform_pm_suspend_noirq(struct device *dev); | ||
216 | extern int platform_pm_resume(struct device *dev); | ||
217 | extern int platform_pm_resume_noirq(struct device *dev); | ||
218 | #else | ||
219 | #define platform_pm_suspend NULL | ||
220 | #define platform_pm_resume NULL | ||
221 | #define platform_pm_suspend_noirq NULL | ||
222 | #define platform_pm_resume_noirq NULL | ||
223 | #endif | ||
224 | |||
225 | #ifdef CONFIG_HIBERNATE_CALLBACKS | ||
226 | extern int platform_pm_freeze(struct device *dev); | ||
227 | extern int platform_pm_freeze_noirq(struct device *dev); | ||
228 | extern int platform_pm_thaw(struct device *dev); | ||
229 | extern int platform_pm_thaw_noirq(struct device *dev); | ||
230 | extern int platform_pm_poweroff(struct device *dev); | ||
231 | extern int platform_pm_poweroff_noirq(struct device *dev); | ||
232 | extern int platform_pm_restore(struct device *dev); | ||
233 | extern int platform_pm_restore_noirq(struct device *dev); | ||
234 | #else | ||
235 | #define platform_pm_freeze NULL | ||
236 | #define platform_pm_thaw NULL | ||
237 | #define platform_pm_poweroff NULL | ||
238 | #define platform_pm_restore NULL | ||
239 | #define platform_pm_freeze_noirq NULL | ||
240 | #define platform_pm_thaw_noirq NULL | ||
241 | #define platform_pm_poweroff_noirq NULL | ||
242 | #define platform_pm_restore_noirq NULL | ||
243 | #endif | ||
244 | |||
245 | #ifdef CONFIG_PM_SLEEP | ||
246 | #define USE_PLATFORM_PM_SLEEP_OPS \ | ||
247 | .prepare = platform_pm_prepare, \ | ||
248 | .complete = platform_pm_complete, \ | ||
249 | .suspend = platform_pm_suspend, \ | ||
250 | .resume = platform_pm_resume, \ | ||
251 | .freeze = platform_pm_freeze, \ | ||
252 | .thaw = platform_pm_thaw, \ | ||
253 | .poweroff = platform_pm_poweroff, \ | ||
254 | .restore = platform_pm_restore, \ | ||
255 | .suspend_noirq = platform_pm_suspend_noirq, \ | ||
256 | .resume_noirq = platform_pm_resume_noirq, \ | ||
257 | .freeze_noirq = platform_pm_freeze_noirq, \ | ||
258 | .thaw_noirq = platform_pm_thaw_noirq, \ | ||
259 | .poweroff_noirq = platform_pm_poweroff_noirq, \ | ||
260 | .restore_noirq = platform_pm_restore_noirq, | ||
261 | #else | ||
262 | #define USE_PLATFORM_PM_SLEEP_OPS | ||
263 | #endif | ||
264 | |||
208 | #endif /* _PLATFORM_DEVICE_H_ */ | 265 | #endif /* _PLATFORM_DEVICE_H_ */ |
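USE_PLATFORM_PM_SLEEP_OPS is aimed at dev_pm_ops tables that override the runtime PM callbacks but still want the standard platform system-sleep behaviour, typically in a power domain attached to platform devices. A hedged sketch with an illustrative "foo" domain; the foo_* names are not part of this patch, and the clock handling is only indicated by comments:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
	/* domain-specific clock gating would go here */
	return pm_generic_runtime_suspend(dev);
}

static int foo_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
	/* domain-specific clock ungating would go here */
	return pm_generic_runtime_resume(dev);
}

static struct dev_power_domain foo_pwr_domain = {
	.ops = {
		.runtime_suspend = foo_runtime_suspend,
		.runtime_resume  = foo_runtime_resume,
		USE_PLATFORM_PM_SLEEP_OPS
	},
};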
diff --git a/include/linux/pm.h b/include/linux/pm.h index 512e09177e57..3160648ccdda 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -460,6 +460,7 @@ struct dev_pm_info { | |||
460 | unsigned long active_jiffies; | 460 | unsigned long active_jiffies; |
461 | unsigned long suspended_jiffies; | 461 | unsigned long suspended_jiffies; |
462 | unsigned long accounting_timestamp; | 462 | unsigned long accounting_timestamp; |
463 | void *subsys_data; /* Owned by the subsystem. */ | ||
463 | #endif | 464 | #endif |
464 | }; | 465 | }; |
465 | 466 | ||
@@ -529,21 +530,17 @@ struct dev_power_domain { | |||
529 | */ | 530 | */ |
530 | 531 | ||
531 | #ifdef CONFIG_PM_SLEEP | 532 | #ifdef CONFIG_PM_SLEEP |
532 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
533 | extern int sysdev_suspend(pm_message_t state); | ||
534 | extern int sysdev_resume(void); | ||
535 | #else | ||
536 | static inline int sysdev_suspend(pm_message_t state) { return 0; } | ||
537 | static inline int sysdev_resume(void) { return 0; } | ||
538 | #endif | ||
539 | |||
540 | extern void device_pm_lock(void); | 533 | extern void device_pm_lock(void); |
541 | extern void dpm_resume_noirq(pm_message_t state); | 534 | extern void dpm_resume_noirq(pm_message_t state); |
542 | extern void dpm_resume_end(pm_message_t state); | 535 | extern void dpm_resume_end(pm_message_t state); |
536 | extern void dpm_resume(pm_message_t state); | ||
537 | extern void dpm_complete(pm_message_t state); | ||
543 | 538 | ||
544 | extern void device_pm_unlock(void); | 539 | extern void device_pm_unlock(void); |
545 | extern int dpm_suspend_noirq(pm_message_t state); | 540 | extern int dpm_suspend_noirq(pm_message_t state); |
546 | extern int dpm_suspend_start(pm_message_t state); | 541 | extern int dpm_suspend_start(pm_message_t state); |
542 | extern int dpm_suspend(pm_message_t state); | ||
543 | extern int dpm_prepare(pm_message_t state); | ||
547 | 544 | ||
548 | extern void __suspend_report_result(const char *function, void *fn, int ret); | 545 | extern void __suspend_report_result(const char *function, void *fn, int ret); |
549 | 546 | ||
@@ -553,6 +550,16 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); | |||
553 | } while (0) | 550 | } while (0) |
554 | 551 | ||
555 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); | 552 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); |
553 | |||
554 | extern int pm_generic_prepare(struct device *dev); | ||
555 | extern int pm_generic_suspend(struct device *dev); | ||
556 | extern int pm_generic_resume(struct device *dev); | ||
557 | extern int pm_generic_freeze(struct device *dev); | ||
558 | extern int pm_generic_thaw(struct device *dev); | ||
559 | extern int pm_generic_restore(struct device *dev); | ||
560 | extern int pm_generic_poweroff(struct device *dev); | ||
561 | extern void pm_generic_complete(struct device *dev); | ||
562 | |||
556 | #else /* !CONFIG_PM_SLEEP */ | 563 | #else /* !CONFIG_PM_SLEEP */ |
557 | 564 | ||
558 | #define device_pm_lock() do {} while (0) | 565 | #define device_pm_lock() do {} while (0) |
@@ -569,6 +576,15 @@ static inline int device_pm_wait_for_dev(struct device *a, struct device *b) | |||
569 | { | 576 | { |
570 | return 0; | 577 | return 0; |
571 | } | 578 | } |
579 | |||
580 | #define pm_generic_prepare NULL | ||
581 | #define pm_generic_suspend NULL | ||
582 | #define pm_generic_resume NULL | ||
583 | #define pm_generic_freeze NULL | ||
584 | #define pm_generic_thaw NULL | ||
585 | #define pm_generic_restore NULL | ||
586 | #define pm_generic_poweroff NULL | ||
587 | #define pm_generic_complete NULL | ||
572 | #endif /* !CONFIG_PM_SLEEP */ | 588 | #endif /* !CONFIG_PM_SLEEP */ |
573 | 589 | ||
574 | /* How to reorder dpm_list after device_move() */ | 590 | /* How to reorder dpm_list after device_move() */ |
@@ -579,11 +595,4 @@ enum dpm_order { | |||
579 | DPM_ORDER_DEV_LAST, | 595 | DPM_ORDER_DEV_LAST, |
580 | }; | 596 | }; |
581 | 597 | ||
582 | extern int pm_generic_suspend(struct device *dev); | ||
583 | extern int pm_generic_resume(struct device *dev); | ||
584 | extern int pm_generic_freeze(struct device *dev); | ||
585 | extern int pm_generic_thaw(struct device *dev); | ||
586 | extern int pm_generic_restore(struct device *dev); | ||
587 | extern int pm_generic_poweroff(struct device *dev); | ||
588 | |||
589 | #endif /* _LINUX_PM_H */ | 598 | #endif /* _LINUX_PM_H */ |
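The pm_generic_* declarations now live under CONFIG_PM_SLEEP, with NULL fallbacks otherwise, so a subsystem can drop them straight into its dev_pm_ops and still build without sleep support. A hedged sketch for an illustrative foo_bus_type:

#include <linux/device.h>
#include <linux/pm.h>

static const struct dev_pm_ops foo_bus_pm_ops = {
	.prepare  = pm_generic_prepare,
	.complete = pm_generic_complete,
	.suspend  = pm_generic_suspend,
	.resume   = pm_generic_resume,
	.freeze   = pm_generic_freeze,
	.thaw     = pm_generic_thaw,
	.poweroff = pm_generic_poweroff,
	.restore  = pm_generic_restore,
};

struct bus_type foo_bus_type = {
	.name = "foo",
	.pm   = &foo_bus_pm_ops,
};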
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 8de9aa6e7def..878cf84baeb1 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -245,4 +245,46 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) | |||
245 | __pm_runtime_use_autosuspend(dev, false); | 245 | __pm_runtime_use_autosuspend(dev, false); |
246 | } | 246 | } |
247 | 247 | ||
248 | struct pm_clk_notifier_block { | ||
249 | struct notifier_block nb; | ||
250 | struct dev_power_domain *pwr_domain; | ||
251 | char *con_ids[]; | ||
252 | }; | ||
253 | |||
254 | #ifdef CONFIG_PM_RUNTIME_CLK | ||
255 | extern int pm_runtime_clk_init(struct device *dev); | ||
256 | extern void pm_runtime_clk_destroy(struct device *dev); | ||
257 | extern int pm_runtime_clk_add(struct device *dev, const char *con_id); | ||
258 | extern void pm_runtime_clk_remove(struct device *dev, const char *con_id); | ||
259 | extern int pm_runtime_clk_suspend(struct device *dev); | ||
260 | extern int pm_runtime_clk_resume(struct device *dev); | ||
261 | #else | ||
262 | static inline int pm_runtime_clk_init(struct device *dev) | ||
263 | { | ||
264 | return -EINVAL; | ||
265 | } | ||
266 | static inline void pm_runtime_clk_destroy(struct device *dev) | ||
267 | { | ||
268 | } | ||
269 | static inline int pm_runtime_clk_add(struct device *dev, const char *con_id) | ||
270 | { | ||
271 | return -EINVAL; | ||
272 | } | ||
273 | static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id) | ||
274 | { | ||
275 | } | ||
276 | #define pm_runtime_clock_suspend NULL | ||
277 | #define pm_runtime_clock_resume NULL | ||
278 | #endif | ||
279 | |||
280 | #ifdef CONFIG_HAVE_CLK | ||
281 | extern void pm_runtime_clk_add_notifier(struct bus_type *bus, | ||
282 | struct pm_clk_notifier_block *clknb); | ||
283 | #else | ||
284 | static inline void pm_runtime_clk_add_notifier(struct bus_type *bus, | ||
285 | struct pm_clk_notifier_block *clknb) | ||
286 | { | ||
287 | } | ||
288 | #endif | ||
289 | |||
248 | #endif | 290 | #endif |
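pm_runtime_clk_add_notifier() hooks the new per-device clock management into a bus type's device add/remove notifications. A hedged sketch of registering one for the platform bus, reusing the illustrative foo_pwr_domain from the earlier sketch; the "fck"/"ick" connection IDs are placeholders:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static struct pm_clk_notifier_block foo_clk_notifier = {
	.pwr_domain = &foo_pwr_domain,		/* from the earlier sketch */
	.con_ids = { "fck", "ick", NULL },
};

static int __init foo_pm_init(void)
{
	pm_runtime_clk_add_notifier(&platform_bus_type, &foo_clk_notifier);
	return 0;
}
core_initcall(foo_pm_init);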
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 838c1149251a..eaf4350c0f90 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -208,6 +208,8 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, | |||
208 | struct proc_dir_entry *parent,const char *dest) {return NULL;} | 208 | struct proc_dir_entry *parent,const char *dest) {return NULL;} |
209 | static inline struct proc_dir_entry *proc_mkdir(const char *name, | 209 | static inline struct proc_dir_entry *proc_mkdir(const char *name, |
210 | struct proc_dir_entry *parent) {return NULL;} | 210 | struct proc_dir_entry *parent) {return NULL;} |
211 | static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, | ||
212 | mode_t mode, struct proc_dir_entry *parent) { return NULL; } | ||
211 | 213 | ||
212 | static inline struct proc_dir_entry *create_proc_read_entry(const char *name, | 214 | static inline struct proc_dir_entry *create_proc_read_entry(const char *name, |
213 | mode_t mode, struct proc_dir_entry *base, | 215 | mode_t mode, struct proc_dir_entry *base, |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 2dea94fc4402..e3beb315517a 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -253,7 +253,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
253 | */ | 253 | */ |
254 | #define list_for_each_entry_rcu(pos, head, member) \ | 254 | #define list_for_each_entry_rcu(pos, head, member) \ |
255 | for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ | 255 | for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ |
256 | prefetch(pos->member.next), &pos->member != (head); \ | 256 | &pos->member != (head); \ |
257 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) | 257 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
258 | 258 | ||
259 | 259 | ||
@@ -270,7 +270,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
270 | */ | 270 | */ |
271 | #define list_for_each_continue_rcu(pos, head) \ | 271 | #define list_for_each_continue_rcu(pos, head) \ |
272 | for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ | 272 | for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ |
273 | prefetch((pos)->next), (pos) != (head); \ | 273 | (pos) != (head); \ |
274 | (pos) = rcu_dereference_raw(list_next_rcu(pos))) | 274 | (pos) = rcu_dereference_raw(list_next_rcu(pos))) |
275 | 275 | ||
276 | /** | 276 | /** |
@@ -284,7 +284,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
284 | */ | 284 | */ |
285 | #define list_for_each_entry_continue_rcu(pos, head, member) \ | 285 | #define list_for_each_entry_continue_rcu(pos, head, member) \ |
286 | for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ | 286 | for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ |
287 | prefetch(pos->member.next), &pos->member != (head); \ | 287 | &pos->member != (head); \ |
288 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) | 288 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
289 | 289 | ||
290 | /** | 290 | /** |
@@ -427,7 +427,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
427 | 427 | ||
428 | #define __hlist_for_each_rcu(pos, head) \ | 428 | #define __hlist_for_each_rcu(pos, head) \ |
429 | for (pos = rcu_dereference(hlist_first_rcu(head)); \ | 429 | for (pos = rcu_dereference(hlist_first_rcu(head)); \ |
430 | pos && ({ prefetch(pos->next); 1; }); \ | 430 | pos; \ |
431 | pos = rcu_dereference(hlist_next_rcu(pos))) | 431 | pos = rcu_dereference(hlist_next_rcu(pos))) |
432 | 432 | ||
433 | /** | 433 | /** |
@@ -443,7 +443,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
443 | */ | 443 | */ |
444 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ | 444 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ |
445 | for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ | 445 | for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ |
446 | pos && ({ prefetch(pos->next); 1; }) && \ | 446 | pos && \ |
447 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 447 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
448 | pos = rcu_dereference_raw(hlist_next_rcu(pos))) | 448 | pos = rcu_dereference_raw(hlist_next_rcu(pos))) |
449 | 449 | ||
@@ -460,7 +460,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
460 | */ | 460 | */ |
461 | #define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ | 461 | #define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ |
462 | for (pos = rcu_dereference_bh((head)->first); \ | 462 | for (pos = rcu_dereference_bh((head)->first); \ |
463 | pos && ({ prefetch(pos->next); 1; }) && \ | 463 | pos && \ |
464 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 464 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
465 | pos = rcu_dereference_bh(pos->next)) | 465 | pos = rcu_dereference_bh(pos->next)) |
466 | 466 | ||
@@ -472,7 +472,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
472 | */ | 472 | */ |
473 | #define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ | 473 | #define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ |
474 | for (pos = rcu_dereference((pos)->next); \ | 474 | for (pos = rcu_dereference((pos)->next); \ |
475 | pos && ({ prefetch(pos->next); 1; }) && \ | 475 | pos && \ |
476 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 476 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
477 | pos = rcu_dereference(pos->next)) | 477 | pos = rcu_dereference(pos->next)) |
478 | 478 | ||
@@ -484,7 +484,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
484 | */ | 484 | */ |
485 | #define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ | 485 | #define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ |
486 | for (pos = rcu_dereference_bh((pos)->next); \ | 486 | for (pos = rcu_dereference_bh((pos)->next); \ |
487 | pos && ({ prefetch(pos->next); 1; }) && \ | 487 | pos && \ |
488 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 488 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
489 | pos = rcu_dereference_bh(pos->next)) | 489 | pos = rcu_dereference_bh(pos->next)) |
490 | 490 | ||
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index e98cd2e57194..06d69648fc86 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
@@ -88,12 +88,12 @@ static __always_inline unsigned read_seqbegin(const seqlock_t *sl) | |||
88 | unsigned ret; | 88 | unsigned ret; |
89 | 89 | ||
90 | repeat: | 90 | repeat: |
91 | ret = sl->sequence; | 91 | ret = ACCESS_ONCE(sl->sequence); |
92 | smp_rmb(); | ||
93 | if (unlikely(ret & 1)) { | 92 | if (unlikely(ret & 1)) { |
94 | cpu_relax(); | 93 | cpu_relax(); |
95 | goto repeat; | 94 | goto repeat; |
96 | } | 95 | } |
96 | smp_rmb(); | ||
97 | 97 | ||
98 | return ret; | 98 | return ret; |
99 | } | 99 | } |
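The reordered barrier above still guarantees that a reader's data loads happen after a stable, even sequence value: the ACCESS_ONCE() load and odd check retry first, and the smp_rmb() then orders the sequence read before the data reads that sit between read_seqbegin() and read_seqretry(). A minimal reader/writer sketch; foo_lock and foo_value are illustrative:

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(foo_lock);
static u64 foo_value;

static u64 foo_read(void)
{
	unsigned seq;
	u64 v;

	do {
		seq = read_seqbegin(&foo_lock);
		v = foo_value;
	} while (read_seqretry(&foo_lock, seq));

	return v;
}

static void foo_write(u64 v)
{
	write_seqlock(&foo_lock);
	foo_value = v;
	write_sequnlock(&foo_lock);
}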
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 9659eff52ca2..045f72ab5dfd 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h | |||
@@ -404,7 +404,9 @@ extern bool ssb_is_sprom_available(struct ssb_bus *bus); | |||
404 | 404 | ||
405 | /* Set a fallback SPROM. | 405 | /* Set a fallback SPROM. |
406 | * See kdoc at the function definition for complete documentation. */ | 406 | * See kdoc at the function definition for complete documentation. */ |
407 | extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom); | 407 | extern int ssb_arch_register_fallback_sprom( |
408 | int (*sprom_callback)(struct ssb_bus *bus, | ||
409 | struct ssb_sprom *out)); | ||
408 | 410 | ||
409 | /* Suspend a SSB bus. | 411 | /* Suspend a SSB bus. |
410 | * Call this from the parent bus suspend routine. */ | 412 | * Call this from the parent bus suspend routine. */ |
diff --git a/include/linux/string.h b/include/linux/string.h index a716ee2a8adb..a176db2f2c85 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -123,6 +123,7 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); | |||
123 | extern void argv_free(char **argv); | 123 | extern void argv_free(char **argv); |
124 | 124 | ||
125 | extern bool sysfs_streq(const char *s1, const char *s2); | 125 | extern bool sysfs_streq(const char *s1, const char *s2); |
126 | extern int strtobool(const char *s, bool *res); | ||
126 | 127 | ||
127 | #ifdef CONFIG_BINARY_PRINTF | 128 | #ifdef CONFIG_BINARY_PRINTF |
128 | int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); | 129 | int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); |
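strtobool() parses the usual "y"/"n"/"1"/"0" spellings user space writes to boolean attributes. A hedged sketch of the kind of sysfs store handler it is meant to shorten; the "enabled" attribute and foo_enabled flag are illustrative:

#include <linux/device.h>
#include <linux/string.h>

static bool foo_enabled;

static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int ret = strtobool(buf, &foo_enabled);

	return ret ? ret : count;
}
static DEVICE_ATTR(enabled, 0200, NULL, enabled_store);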
diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h index dfb078db8ebb..d35e783a598c 100644 --- a/include/linux/sysdev.h +++ b/include/linux/sysdev.h | |||
@@ -34,12 +34,6 @@ struct sysdev_class { | |||
34 | struct list_head drivers; | 34 | struct list_head drivers; |
35 | struct sysdev_class_attribute **attrs; | 35 | struct sysdev_class_attribute **attrs; |
36 | struct kset kset; | 36 | struct kset kset; |
37 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
38 | /* Default operations for these types of devices */ | ||
39 | int (*shutdown)(struct sys_device *); | ||
40 | int (*suspend)(struct sys_device *, pm_message_t state); | ||
41 | int (*resume)(struct sys_device *); | ||
42 | #endif | ||
43 | }; | 37 | }; |
44 | 38 | ||
45 | struct sysdev_class_attribute { | 39 | struct sysdev_class_attribute { |
@@ -77,11 +71,6 @@ struct sysdev_driver { | |||
77 | struct list_head entry; | 71 | struct list_head entry; |
78 | int (*add)(struct sys_device *); | 72 | int (*add)(struct sys_device *); |
79 | int (*remove)(struct sys_device *); | 73 | int (*remove)(struct sys_device *); |
80 | #ifndef CONFIG_ARCH_NO_SYSDEV_OPS | ||
81 | int (*shutdown)(struct sys_device *); | ||
82 | int (*suspend)(struct sys_device *, pm_message_t state); | ||
83 | int (*resume)(struct sys_device *); | ||
84 | #endif | ||
85 | }; | 74 | }; |
86 | 75 | ||
87 | 76 | ||
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 97c84a58efb8..d530a4460a0b 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -29,7 +29,7 @@ struct tracepoint_func { | |||
29 | 29 | ||
30 | struct tracepoint { | 30 | struct tracepoint { |
31 | const char *name; /* Tracepoint name */ | 31 | const char *name; /* Tracepoint name */ |
32 | int state; /* State. */ | 32 | struct jump_label_key key; |
33 | void (*regfunc)(void); | 33 | void (*regfunc)(void); |
34 | void (*unregfunc)(void); | 34 | void (*unregfunc)(void); |
35 | struct tracepoint_func __rcu *funcs; | 35 | struct tracepoint_func __rcu *funcs; |
@@ -146,9 +146,7 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin, | |||
146 | extern struct tracepoint __tracepoint_##name; \ | 146 | extern struct tracepoint __tracepoint_##name; \ |
147 | static inline void trace_##name(proto) \ | 147 | static inline void trace_##name(proto) \ |
148 | { \ | 148 | { \ |
149 | JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ | 149 | if (static_branch(&__tracepoint_##name.key)) \ |
150 | return; \ | ||
151 | do_trace: \ | ||
152 | __DO_TRACE(&__tracepoint_##name, \ | 150 | __DO_TRACE(&__tracepoint_##name, \ |
153 | TP_PROTO(data_proto), \ | 151 | TP_PROTO(data_proto), \ |
154 | TP_ARGS(data_args), \ | 152 | TP_ARGS(data_args), \ |
@@ -176,14 +174,14 @@ do_trace: \ | |||
176 | * structures, so we create an array of pointers that will be used for iteration | 174 | * structures, so we create an array of pointers that will be used for iteration |
177 | * on the tracepoints. | 175 | * on the tracepoints. |
178 | */ | 176 | */ |
179 | #define DEFINE_TRACE_FN(name, reg, unreg) \ | 177 | #define DEFINE_TRACE_FN(name, reg, unreg) \ |
180 | static const char __tpstrtab_##name[] \ | 178 | static const char __tpstrtab_##name[] \ |
181 | __attribute__((section("__tracepoints_strings"))) = #name; \ | 179 | __attribute__((section("__tracepoints_strings"))) = #name; \ |
182 | struct tracepoint __tracepoint_##name \ | 180 | struct tracepoint __tracepoint_##name \ |
183 | __attribute__((section("__tracepoints"))) = \ | 181 | __attribute__((section("__tracepoints"))) = \ |
184 | { __tpstrtab_##name, 0, reg, unreg, NULL }; \ | 182 | { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\ |
185 | static struct tracepoint * const __tracepoint_ptr_##name __used \ | 183 | static struct tracepoint * const __tracepoint_ptr_##name __used \ |
186 | __attribute__((section("__tracepoints_ptrs"))) = \ | 184 | __attribute__((section("__tracepoints_ptrs"))) = \ |
187 | &__tracepoint_##name; | 185 | &__tracepoint_##name; |
188 | 186 | ||
189 | #define DEFINE_TRACE(name) \ | 187 | #define DEFINE_TRACE(name) \ |
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 88bdd010d65d..2fa8d1341a0a 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h | |||
@@ -38,9 +38,19 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) | |||
38 | return outer; | 38 | return outer; |
39 | } | 39 | } |
40 | 40 | ||
41 | #define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0) | 41 | static inline void INET_ECN_xmit(struct sock *sk) |
42 | #define INET_ECN_dontxmit(sk) \ | 42 | { |
43 | do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0) | 43 | inet_sk(sk)->tos |= INET_ECN_ECT_0; |
44 | if (inet6_sk(sk) != NULL) | ||
45 | inet6_sk(sk)->tclass |= INET_ECN_ECT_0; | ||
46 | } | ||
47 | |||
48 | static inline void INET_ECN_dontxmit(struct sock *sk) | ||
49 | { | ||
50 | inet_sk(sk)->tos &= ~INET_ECN_MASK; | ||
51 | if (inet6_sk(sk) != NULL) | ||
52 | inet6_sk(sk)->tclass &= ~INET_ECN_MASK; | ||
53 | } | ||
44 | 54 | ||
45 | #define IP6_ECN_flow_init(label) do { \ | 55 | #define IP6_ECN_flow_init(label) do { \ |
46 | (label) &= ~htonl(INET_ECN_MASK << 20); \ | 56 | (label) &= ~htonl(INET_ECN_MASK << 20); \ |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index d516f00c8e0f..86aefed6140b 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -791,6 +791,7 @@ struct ip_vs_app { | |||
791 | /* IPVS in network namespace */ | 791 | /* IPVS in network namespace */ |
792 | struct netns_ipvs { | 792 | struct netns_ipvs { |
793 | int gen; /* Generation */ | 793 | int gen; /* Generation */ |
794 | int enable; /* enable like nf_hooks do */ | ||
794 | /* | 795 | /* |
795 | * Hash table: for real service lookups | 796 | * Hash table: for real service lookups |
796 | */ | 797 | */ |
@@ -1089,6 +1090,22 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) | |||
1089 | atomic_inc(&ctl_cp->n_control); | 1090 | atomic_inc(&ctl_cp->n_control); |
1090 | } | 1091 | } |
1091 | 1092 | ||
1093 | /* | ||
1094 | * IPVS netns init & cleanup functions | ||
1095 | */ | ||
1096 | extern int __ip_vs_estimator_init(struct net *net); | ||
1097 | extern int __ip_vs_control_init(struct net *net); | ||
1098 | extern int __ip_vs_protocol_init(struct net *net); | ||
1099 | extern int __ip_vs_app_init(struct net *net); | ||
1100 | extern int __ip_vs_conn_init(struct net *net); | ||
1101 | extern int __ip_vs_sync_init(struct net *net); | ||
1102 | extern void __ip_vs_conn_cleanup(struct net *net); | ||
1103 | extern void __ip_vs_app_cleanup(struct net *net); | ||
1104 | extern void __ip_vs_protocol_cleanup(struct net *net); | ||
1105 | extern void __ip_vs_control_cleanup(struct net *net); | ||
1106 | extern void __ip_vs_estimator_cleanup(struct net *net); | ||
1107 | extern void __ip_vs_sync_cleanup(struct net *net); | ||
1108 | extern void __ip_vs_service_cleanup(struct net *net); | ||
1092 | 1109 | ||
1093 | /* | 1110 | /* |
1094 | * IPVS application functions | 1111 | * IPVS application functions |
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 75b8e2968c9b..f57e7d46a453 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h | |||
@@ -199,7 +199,7 @@ struct llc_pdu_sn { | |||
199 | u8 ssap; | 199 | u8 ssap; |
200 | u8 ctrl_1; | 200 | u8 ctrl_1; |
201 | u8 ctrl_2; | 201 | u8 ctrl_2; |
202 | }; | 202 | } __packed; |
203 | 203 | ||
204 | static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) | 204 | static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) |
205 | { | 205 | { |
@@ -211,7 +211,7 @@ struct llc_pdu_un { | |||
211 | u8 dsap; | 211 | u8 dsap; |
212 | u8 ssap; | 212 | u8 ssap; |
213 | u8 ctrl_1; | 213 | u8 ctrl_1; |
214 | }; | 214 | } __packed; |
215 | 215 | ||
216 | static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) | 216 | static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) |
217 | { | 217 | { |
@@ -359,7 +359,7 @@ struct llc_xid_info { | |||
359 | u8 fmt_id; /* always 0x81 for LLC */ | 359 | u8 fmt_id; /* always 0x81 for LLC */ |
360 | u8 type; /* different if NULL/non-NULL LSAP */ | 360 | u8 type; /* different if NULL/non-NULL LSAP */ |
361 | u8 rw; /* sender receive window */ | 361 | u8 rw; /* sender receive window */ |
362 | }; | 362 | } __packed; |
363 | 363 | ||
364 | /** | 364 | /** |
365 | * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID | 365 | * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID |
@@ -415,7 +415,7 @@ struct llc_frmr_info { | |||
415 | u8 curr_ssv; /* current send state variable val */ | 415 | u8 curr_ssv; /* current send state variable val */ |
416 | u8 curr_rsv; /* current receive state variable */ | 416 | u8 curr_rsv; /* current receive state variable */ |
417 | u8 ind_bits; /* indicator bits set with macro */ | 417 | u8 ind_bits; /* indicator bits set with macro */ |
418 | }; | 418 | } __packed; |
419 | 419 | ||
420 | extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); | 420 | extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); |
421 | extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); | 421 | extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6ae4bc5ce8a7..20afeaa39395 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -324,6 +324,7 @@ struct xfrm_state_afinfo { | |||
324 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); | 324 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); |
325 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); | 325 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); |
326 | int (*output)(struct sk_buff *skb); | 326 | int (*output)(struct sk_buff *skb); |
327 | int (*output_finish)(struct sk_buff *skb); | ||
327 | int (*extract_input)(struct xfrm_state *x, | 328 | int (*extract_input)(struct xfrm_state *x, |
328 | struct sk_buff *skb); | 329 | struct sk_buff *skb); |
329 | int (*extract_output)(struct xfrm_state *x, | 330 | int (*extract_output)(struct xfrm_state *x, |
@@ -1454,6 +1455,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) | |||
1454 | extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1455 | extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
1455 | extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); | 1456 | extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); |
1456 | extern int xfrm4_output(struct sk_buff *skb); | 1457 | extern int xfrm4_output(struct sk_buff *skb); |
1458 | extern int xfrm4_output_finish(struct sk_buff *skb); | ||
1457 | extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); | 1459 | extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); |
1458 | extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); | 1460 | extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); |
1459 | extern int xfrm6_extract_header(struct sk_buff *skb); | 1461 | extern int xfrm6_extract_header(struct sk_buff *skb); |
@@ -1470,6 +1472,7 @@ extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr); | |||
1470 | extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1472 | extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
1471 | extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); | 1473 | extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); |
1472 | extern int xfrm6_output(struct sk_buff *skb); | 1474 | extern int xfrm6_output(struct sk_buff *skb); |
1475 | extern int xfrm6_output_finish(struct sk_buff *skb); | ||
1473 | extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, | 1476 | extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, |
1474 | u8 **prevhdr); | 1477 | u8 **prevhdr); |
1475 | 1478 | ||
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h index cbb822e8d791..2d0191c90f9e 100644 --- a/include/rdma/iw_cm.h +++ b/include/rdma/iw_cm.h | |||
@@ -46,18 +46,9 @@ enum iw_cm_event_type { | |||
46 | IW_CM_EVENT_CLOSE /* close complete */ | 46 | IW_CM_EVENT_CLOSE /* close complete */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | enum iw_cm_event_status { | ||
50 | IW_CM_EVENT_STATUS_OK = 0, /* request successful */ | ||
51 | IW_CM_EVENT_STATUS_ACCEPTED = 0, /* connect request accepted */ | ||
52 | IW_CM_EVENT_STATUS_REJECTED, /* connect request rejected */ | ||
53 | IW_CM_EVENT_STATUS_TIMEOUT, /* the operation timed out */ | ||
54 | IW_CM_EVENT_STATUS_RESET, /* reset from remote peer */ | ||
55 | IW_CM_EVENT_STATUS_EINVAL, /* asynchronous failure for bad parm */ | ||
56 | }; | ||
57 | |||
58 | struct iw_cm_event { | 49 | struct iw_cm_event { |
59 | enum iw_cm_event_type event; | 50 | enum iw_cm_event_type event; |
60 | enum iw_cm_event_status status; | 51 | int status; |
61 | struct sockaddr_in local_addr; | 52 | struct sockaddr_in local_addr; |
62 | struct sockaddr_in remote_addr; | 53 | struct sockaddr_in remote_addr; |
63 | void *private_data; | 54 | void *private_data; |
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 4fae90304648..169f7a53fb0c 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h | |||
@@ -329,4 +329,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr); | |||
329 | */ | 329 | */ |
330 | void rdma_set_service_type(struct rdma_cm_id *id, int tos); | 330 | void rdma_set_service_type(struct rdma_cm_id *id, int tos); |
331 | 331 | ||
332 | /** | ||
333 | * rdma_set_reuseaddr - Allow the reuse of local addresses when binding | ||
334 | * the rdma_cm_id. | ||
335 | * @id: Communication identifier to configure. | ||
336 | * @reuse: Value indicating if the bound address is reusable. | ||
337 | * | ||
338 | * Reuse must be set before an address is bound to the id. | ||
339 | */ | ||
340 | int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse); | ||
341 | |||
332 | #endif /* RDMA_CM_H */ | 342 | #endif /* RDMA_CM_H */ |
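rdma_set_reuseaddr() gives rdma_cm_id binds behaviour roughly analogous to SO_REUSEADDR and, as the kdoc above notes, must be called before the address is bound. A hedged sketch of a listener using it; bind_listener() and the address handling are illustrative:

#include <rdma/rdma_cm.h>

static int bind_listener(struct rdma_cm_id *id, struct sockaddr_in *sin)
{
	int ret;

	ret = rdma_set_reuseaddr(id, 1);
	if (ret)
		return ret;

	ret = rdma_bind_addr(id, (struct sockaddr *)sin);
	if (ret)
		return ret;

	return rdma_listen(id, 0);
}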
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h index 1d165022c02d..fc82c1896f75 100644 --- a/include/rdma/rdma_user_cm.h +++ b/include/rdma/rdma_user_cm.h | |||
@@ -221,8 +221,9 @@ enum { | |||
221 | 221 | ||
222 | /* Option details */ | 222 | /* Option details */ |
223 | enum { | 223 | enum { |
224 | RDMA_OPTION_ID_TOS = 0, | 224 | RDMA_OPTION_ID_TOS = 0, |
225 | RDMA_OPTION_IB_PATH = 1 | 225 | RDMA_OPTION_ID_REUSEADDR = 1, |
226 | RDMA_OPTION_IB_PATH = 1 | ||
226 | }; | 227 | }; |
227 | 228 | ||
228 | struct rdma_ucm_set_option { | 229 | struct rdma_ucm_set_option { |
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 2d3ec5094685..dd82e02ddde3 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h | |||
@@ -169,6 +169,7 @@ struct scsi_device { | |||
169 | sdev_dev; | 169 | sdev_dev; |
170 | 170 | ||
171 | struct execute_work ew; /* used to get process context on put */ | 171 | struct execute_work ew; /* used to get process context on put */ |
172 | struct work_struct requeue_work; | ||
172 | 173 | ||
173 | struct scsi_dh_data *scsi_dh_data; | 174 | struct scsi_dh_data *scsi_dh_data; |
174 | enum scsi_device_state sdev_state; | 175 | enum scsi_device_state sdev_state; |
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h index e3615c093741..9fe3a36646e9 100644 --- a/include/trace/events/gfpflags.h +++ b/include/trace/events/gfpflags.h | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | #define show_gfp_flags(flags) \ | 11 | #define show_gfp_flags(flags) \ |
12 | (flags) ? __print_flags(flags, "|", \ | 12 | (flags) ? __print_flags(flags, "|", \ |
13 | {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ | ||
13 | {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ | 14 | {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ |
14 | {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ | 15 | {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ |
15 | {(unsigned long)GFP_USER, "GFP_USER"}, \ | 16 | {(unsigned long)GFP_USER, "GFP_USER"}, \ |
@@ -32,6 +33,9 @@ | |||
32 | {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ | 33 | {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ |
33 | {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ | 34 | {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ |
34 | {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ | 35 | {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ |
35 | {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ | 36 | {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ |
37 | {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ | ||
38 | {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \ | ||
39 | {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \ | ||
36 | ) : "GFP_NOWAIT" | 40 | ) : "GFP_NOWAIT" |
37 | 41 | ||
diff --git a/include/xen/events.h b/include/xen/events.h index f1b87ad48ac7..9af21e19545a 100644 --- a/include/xen/events.h +++ b/include/xen/events.h | |||
@@ -85,7 +85,8 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
85 | int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); | 85 | int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); |
86 | /* Bind an PSI pirq to an irq. */ | 86 | /* Bind an PSI pirq to an irq. */ |
87 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | 87 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
88 | int pirq, int vector, const char *name); | 88 | int pirq, int vector, const char *name, |
89 | domid_t domid); | ||
89 | #endif | 90 | #endif |
90 | 91 | ||
91 | /* De-allocates the above mentioned physical interrupt. */ | 92 | /* De-allocates the above mentioned physical interrupt. */ |
@@ -94,4 +95,10 @@ int xen_destroy_irq(int irq); | |||
94 | /* Return irq from pirq */ | 95 | /* Return irq from pirq */ |
95 | int xen_irq_from_pirq(unsigned pirq); | 96 | int xen_irq_from_pirq(unsigned pirq); |
96 | 97 | ||
98 | /* Return the pirq allocated to the irq. */ | ||
99 | int xen_pirq_from_irq(unsigned irq); | ||
100 | |||
101 | /* Determine whether to ignore this IRQ if it is passed to a guest. */ | ||
102 | int xen_test_irq_shared(int irq); | ||
103 | |||
97 | #endif /* _XEN_EVENTS_H */ | 104 | #endif /* _XEN_EVENTS_H */ |
diff --git a/init/Kconfig b/init/Kconfig index 9c812c5fb6e8..af958ad26d60 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -1231,7 +1231,6 @@ config SLAB | |||
1231 | per cpu and per node queues. | 1231 | per cpu and per node queues. |
1232 | 1232 | ||
1233 | config SLUB | 1233 | config SLUB |
1234 | depends on BROKEN || NUMA || !DISCONTIGMEM | ||
1235 | bool "SLUB (Unqueued Allocator)" | 1234 | bool "SLUB (Unqueued Allocator)" |
1236 | help | 1235 | help |
1237 | SLUB is a slab allocator that minimizes cache line usage | 1236 | SLUB is a slab allocator that minimizes cache line usage |
diff --git a/init/main.c b/init/main.c index 4a9479ef4540..48df882d51d2 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -580,8 +580,8 @@ asmlinkage void __init start_kernel(void) | |||
580 | #endif | 580 | #endif |
581 | page_cgroup_init(); | 581 | page_cgroup_init(); |
582 | enable_debug_pagealloc(); | 582 | enable_debug_pagealloc(); |
583 | kmemleak_init(); | ||
584 | debug_objects_mem_init(); | 583 | debug_objects_mem_init(); |
584 | kmemleak_init(); | ||
585 | setup_per_cpu_pageset(); | 585 | setup_per_cpu_pageset(); |
586 | numa_policy_init(); | 586 | numa_policy_init(); |
587 | if (late_time_init) | 587 | if (late_time_init) |
diff --git a/kernel/Makefile b/kernel/Makefile index 85cbfb31e73e..e9cf19155b46 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -21,7 +21,6 @@ CFLAGS_REMOVE_mutex-debug.o = -pg | |||
21 | CFLAGS_REMOVE_rtmutex-debug.o = -pg | 21 | CFLAGS_REMOVE_rtmutex-debug.o = -pg |
22 | CFLAGS_REMOVE_cgroup-debug.o = -pg | 22 | CFLAGS_REMOVE_cgroup-debug.o = -pg |
23 | CFLAGS_REMOVE_sched_clock.o = -pg | 23 | CFLAGS_REMOVE_sched_clock.o = -pg |
24 | CFLAGS_REMOVE_perf_event.o = -pg | ||
25 | CFLAGS_REMOVE_irq_work.o = -pg | 24 | CFLAGS_REMOVE_irq_work.o = -pg |
26 | endif | 25 | endif |
27 | 26 | ||
@@ -103,8 +102,9 @@ obj-$(CONFIG_RING_BUFFER) += trace/ | |||
103 | obj-$(CONFIG_TRACEPOINTS) += trace/ | 102 | obj-$(CONFIG_TRACEPOINTS) += trace/ |
104 | obj-$(CONFIG_SMP) += sched_cpupri.o | 103 | obj-$(CONFIG_SMP) += sched_cpupri.o |
105 | obj-$(CONFIG_IRQ_WORK) += irq_work.o | 104 | obj-$(CONFIG_IRQ_WORK) += irq_work.o |
106 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o | 105 | |
107 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o | 106 | obj-$(CONFIG_PERF_EVENTS) += events/ |
107 | |||
108 | obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o | 108 | obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o |
109 | obj-$(CONFIG_PADATA) += padata.o | 109 | obj-$(CONFIG_PADATA) += padata.o |
110 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 110 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
diff --git a/kernel/capability.c b/kernel/capability.c index bf0c734d0c12..32a80e08ff4b 100644 --- a/kernel/capability.c +++ b/kernel/capability.c | |||
@@ -399,3 +399,15 @@ bool task_ns_capable(struct task_struct *t, int cap) | |||
399 | return ns_capable(task_cred_xxx(t, user)->user_ns, cap); | 399 | return ns_capable(task_cred_xxx(t, user)->user_ns, cap); |
400 | } | 400 | } |
401 | EXPORT_SYMBOL(task_ns_capable); | 401 | EXPORT_SYMBOL(task_ns_capable); |
402 | |||
403 | /** | ||
404 | * nsown_capable - Check superior capability to one's own user_ns | ||
405 | * @cap: The capability in question | ||
406 | * | ||
407 | * Return true if the current task has the given superior capability | ||
408 | * targeted at its own user namespace. | ||
409 | */ | ||
410 | bool nsown_capable(int cap) | ||
411 | { | ||
412 | return ns_capable(current_user_ns(), cap); | ||
413 | } | ||
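nsown_capable() is shorthand for ns_capable(current_user_ns(), cap), intended for checks that only guard state owned by the caller's own user namespace. A hedged sketch of the call pattern; foo_limit and foo_set_limit() are illustrative:

#include <linux/capability.h>
#include <linux/errno.h>

static unsigned long foo_limit;

static int foo_set_limit(unsigned long limit)
{
	if (!nsown_capable(CAP_SYS_RESOURCE))
		return -EPERM;

	foo_limit = limit;
	return 0;
}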
diff --git a/kernel/cred.c b/kernel/cred.c index 5557b55048df..8093c16b84b1 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -54,6 +54,7 @@ struct cred init_cred = { | |||
54 | .cap_effective = CAP_INIT_EFF_SET, | 54 | .cap_effective = CAP_INIT_EFF_SET, |
55 | .cap_bset = CAP_INIT_BSET, | 55 | .cap_bset = CAP_INIT_BSET, |
56 | .user = INIT_USER, | 56 | .user = INIT_USER, |
57 | .user_ns = &init_user_ns, | ||
57 | .group_info = &init_groups, | 58 | .group_info = &init_groups, |
58 | #ifdef CONFIG_KEYS | 59 | #ifdef CONFIG_KEYS |
59 | .tgcred = &init_tgcred, | 60 | .tgcred = &init_tgcred, |
@@ -410,6 +411,11 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) | |||
410 | goto error_put; | 411 | goto error_put; |
411 | } | 412 | } |
412 | 413 | ||
414 | /* cache user_ns in cred. Doesn't need a refcount because it will | ||
415 | * stay pinned by cred->user | ||
416 | */ | ||
417 | new->user_ns = new->user->user_ns; | ||
418 | |||
413 | #ifdef CONFIG_KEYS | 419 | #ifdef CONFIG_KEYS |
414 | /* new threads get their own thread keyrings if their parent already | 420 | /* new threads get their own thread keyrings if their parent already |
415 | * had one */ | 421 | * had one */ |
@@ -741,12 +747,6 @@ int set_create_files_as(struct cred *new, struct inode *inode) | |||
741 | } | 747 | } |
742 | EXPORT_SYMBOL(set_create_files_as); | 748 | EXPORT_SYMBOL(set_create_files_as); |
743 | 749 | ||
744 | struct user_namespace *current_user_ns(void) | ||
745 | { | ||
746 | return _current_user_ns(); | ||
747 | } | ||
748 | EXPORT_SYMBOL(current_user_ns); | ||
749 | |||
750 | #ifdef CONFIG_DEBUG_CREDENTIALS | 750 | #ifdef CONFIG_DEBUG_CREDENTIALS |
751 | 751 | ||
752 | bool creds_are_invalid(const struct cred *cred) | 752 | bool creds_are_invalid(const struct cred *cred) |
diff --git a/kernel/events/Makefile b/kernel/events/Makefile new file mode 100644 index 000000000000..1ce23d3d8394 --- /dev/null +++ b/kernel/events/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | ifdef CONFIG_FUNCTION_TRACER | ||
2 | CFLAGS_REMOVE_core.o = -pg | ||
3 | endif | ||
4 | |||
5 | obj-y := core.o | ||
6 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o | ||
diff --git a/kernel/perf_event.c b/kernel/events/core.c index 8e81a9860a0d..0fc34a370ba4 100644 --- a/kernel/perf_event.c +++ b/kernel/events/core.c | |||
@@ -2,8 +2,8 @@ | |||
2 | * Performance events core code: | 2 | * Performance events core code: |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | 4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> |
5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | 5 | * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar |
6 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 6 | * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
7 | * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | 7 | * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> |
8 | * | 8 | * |
9 | * For licensing details see kernel-base/COPYING | 9 | * For licensing details see kernel-base/COPYING |
@@ -39,10 +39,10 @@ | |||
39 | #include <asm/irq_regs.h> | 39 | #include <asm/irq_regs.h> |
40 | 40 | ||
41 | struct remote_function_call { | 41 | struct remote_function_call { |
42 | struct task_struct *p; | 42 | struct task_struct *p; |
43 | int (*func)(void *info); | 43 | int (*func)(void *info); |
44 | void *info; | 44 | void *info; |
45 | int ret; | 45 | int ret; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static void remote_function(void *data) | 48 | static void remote_function(void *data) |
@@ -76,10 +76,10 @@ static int | |||
76 | task_function_call(struct task_struct *p, int (*func) (void *info), void *info) | 76 | task_function_call(struct task_struct *p, int (*func) (void *info), void *info) |
77 | { | 77 | { |
78 | struct remote_function_call data = { | 78 | struct remote_function_call data = { |
79 | .p = p, | 79 | .p = p, |
80 | .func = func, | 80 | .func = func, |
81 | .info = info, | 81 | .info = info, |
82 | .ret = -ESRCH, /* No such (running) process */ | 82 | .ret = -ESRCH, /* No such (running) process */ |
83 | }; | 83 | }; |
84 | 84 | ||
85 | if (task_curr(p)) | 85 | if (task_curr(p)) |
@@ -100,10 +100,10 @@ task_function_call(struct task_struct *p, int (*func) (void *info), void *info) | |||
100 | static int cpu_function_call(int cpu, int (*func) (void *info), void *info) | 100 | static int cpu_function_call(int cpu, int (*func) (void *info), void *info) |
101 | { | 101 | { |
102 | struct remote_function_call data = { | 102 | struct remote_function_call data = { |
103 | .p = NULL, | 103 | .p = NULL, |
104 | .func = func, | 104 | .func = func, |
105 | .info = info, | 105 | .info = info, |
106 | .ret = -ENXIO, /* No such CPU */ | 106 | .ret = -ENXIO, /* No such CPU */ |
107 | }; | 107 | }; |
108 | 108 | ||
109 | smp_call_function_single(cpu, remote_function, &data, 1); | 109 | smp_call_function_single(cpu, remote_function, &data, 1); |
@@ -125,7 +125,7 @@ enum event_type_t { | |||
125 | * perf_sched_events : >0 events exist | 125 | * perf_sched_events : >0 events exist |
126 | * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu | 126 | * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu |
127 | */ | 127 | */ |
128 | atomic_t perf_sched_events __read_mostly; | 128 | struct jump_label_key perf_sched_events __read_mostly; |
129 | static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); | 129 | static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); |
130 | 130 | ||
131 | static atomic_t nr_mmap_events __read_mostly; | 131 | static atomic_t nr_mmap_events __read_mostly; |
@@ -5429,7 +5429,7 @@ fail: | |||
5429 | return err; | 5429 | return err; |
5430 | } | 5430 | } |
5431 | 5431 | ||
5432 | atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 5432 | struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
5433 | 5433 | ||
5434 | static void sw_perf_event_destroy(struct perf_event *event) | 5434 | static void sw_perf_event_destroy(struct perf_event *event) |
5435 | { | 5435 | { |
@@ -7445,11 +7445,11 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, | |||
7445 | } | 7445 | } |
7446 | 7446 | ||
7447 | struct cgroup_subsys perf_subsys = { | 7447 | struct cgroup_subsys perf_subsys = { |
7448 | .name = "perf_event", | 7448 | .name = "perf_event", |
7449 | .subsys_id = perf_subsys_id, | 7449 | .subsys_id = perf_subsys_id, |
7450 | .create = perf_cgroup_create, | 7450 | .create = perf_cgroup_create, |
7451 | .destroy = perf_cgroup_destroy, | 7451 | .destroy = perf_cgroup_destroy, |
7452 | .exit = perf_cgroup_exit, | 7452 | .exit = perf_cgroup_exit, |
7453 | .attach = perf_cgroup_attach, | 7453 | .attach = perf_cgroup_attach, |
7454 | }; | 7454 | }; |
7455 | #endif /* CONFIG_CGROUP_PERF */ | 7455 | #endif /* CONFIG_CGROUP_PERF */ |
diff --git a/kernel/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 086adf25a55e..086adf25a55e 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c | |||
diff --git a/kernel/extable.c b/kernel/extable.c index 7f8f263f8524..c2d625fcda77 100644 --- a/kernel/extable.c +++ b/kernel/extable.c | |||
@@ -72,6 +72,14 @@ int core_kernel_text(unsigned long addr) | |||
72 | return 0; | 72 | return 0; |
73 | } | 73 | } |
74 | 74 | ||
75 | int core_kernel_data(unsigned long addr) | ||
76 | { | ||
77 | if (addr >= (unsigned long)_sdata && | ||
78 | addr < (unsigned long)_edata) | ||
79 | return 1; | ||
80 | return 0; | ||
81 | } | ||
82 | |||
75 | int __kernel_text_address(unsigned long addr) | 83 | int __kernel_text_address(unsigned long addr) |
76 | { | 84 | { |
77 | if (core_kernel_text(addr)) | 85 | if (core_kernel_text(addr)) |
diff --git a/kernel/freezer.c b/kernel/freezer.c index 66ecd2ead215..7b01de98bb6a 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c | |||
@@ -17,7 +17,7 @@ static inline void frozen_process(void) | |||
17 | { | 17 | { |
18 | if (!unlikely(current->flags & PF_NOFREEZE)) { | 18 | if (!unlikely(current->flags & PF_NOFREEZE)) { |
19 | current->flags |= PF_FROZEN; | 19 | current->flags |= PF_FROZEN; |
20 | wmb(); | 20 | smp_wmb(); |
21 | } | 21 | } |
22 | clear_freeze_flag(current); | 22 | clear_freeze_flag(current); |
23 | } | 23 | } |
@@ -93,7 +93,7 @@ bool freeze_task(struct task_struct *p, bool sig_only) | |||
93 | * the task as frozen and next clears its TIF_FREEZE. | 93 | * the task as frozen and next clears its TIF_FREEZE. |
94 | */ | 94 | */ |
95 | if (!freezing(p)) { | 95 | if (!freezing(p)) { |
96 | rmb(); | 96 | smp_rmb(); |
97 | if (frozen(p)) | 97 | if (frozen(p)) |
98 | return false; | 98 | return false; |
99 | 99 | ||
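The freezer hunks swap the bare wmb()/rmb() calls for smp_wmb()/smp_rmb(), which compile away on UP kernels but still pair up on SMP: the writer orders the PF_FROZEN store before clearing the freeze flag, and the reader orders its flag test against the later frozen() check. The general pairing pattern, sketched as a kernel-internal fragment with invented flag and data names (usual kernel headers assumed):

/* Writer side: publish the data, then the flag the reader polls. */
static int payload;
static int ready;

static void publish(void)
{
	payload = 42;
	smp_wmb();		/* order the payload store before the ready store */
	ready = 1;
}

/* Reader side: pairs with the writer's smp_wmb(). */
static void consume(void)
{
	if (ready) {
		smp_rmb();	/* order the ready load before the payload load */
		WARN_ON(payload != 42);
	}
}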
diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 53ead174da2f..ea640120ab86 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c | |||
@@ -33,7 +33,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT; | |||
33 | /* | 33 | /* |
34 | * Zero means infinite timeout - no checking done: | 34 | * Zero means infinite timeout - no checking done: |
35 | */ | 35 | */ |
36 | unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; | 36 | unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT; |
37 | 37 | ||
38 | unsigned long __read_mostly sysctl_hung_task_warnings = 10; | 38 | unsigned long __read_mostly sysctl_hung_task_warnings = 10; |
39 | 39 | ||
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index c574f9a12c48..d1d051b38e0b 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig | |||
@@ -48,6 +48,10 @@ config IRQ_PREFLOW_FASTEOI | |||
48 | config IRQ_EDGE_EOI_HANDLER | 48 | config IRQ_EDGE_EOI_HANDLER |
49 | bool | 49 | bool |
50 | 50 | ||
51 | # Generic configurable interrupt chip implementation | ||
52 | config GENERIC_IRQ_CHIP | ||
53 | bool | ||
54 | |||
51 | # Support forced irq threading | 55 | # Support forced irq threading |
52 | config IRQ_FORCED_THREADING | 56 | config IRQ_FORCED_THREADING |
53 | bool | 57 | bool |
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index 54329cd7b3ee..73290056cfb6 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile | |||
@@ -1,5 +1,6 @@ | |||
1 | 1 | ||
2 | obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o | 2 | obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o |
3 | obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o | ||
3 | obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o | 4 | obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o |
4 | obj-$(CONFIG_PROC_FS) += proc.o | 5 | obj-$(CONFIG_PROC_FS) += proc.o |
5 | obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o | 6 | obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 4af1e2b244cb..d5a3009da71a 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -310,6 +310,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) | |||
310 | out_unlock: | 310 | out_unlock: |
311 | raw_spin_unlock(&desc->lock); | 311 | raw_spin_unlock(&desc->lock); |
312 | } | 312 | } |
313 | EXPORT_SYMBOL_GPL(handle_simple_irq); | ||
313 | 314 | ||
314 | /** | 315 | /** |
315 | * handle_level_irq - Level type irq handler | 316 | * handle_level_irq - Level type irq handler |
@@ -573,6 +574,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
573 | if (handle != handle_bad_irq && is_chained) { | 574 | if (handle != handle_bad_irq && is_chained) { |
574 | irq_settings_set_noprobe(desc); | 575 | irq_settings_set_noprobe(desc); |
575 | irq_settings_set_norequest(desc); | 576 | irq_settings_set_norequest(desc); |
577 | irq_settings_set_nothread(desc); | ||
576 | irq_startup(desc); | 578 | irq_startup(desc); |
577 | } | 579 | } |
578 | out: | 580 | out: |
@@ -612,6 +614,7 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) | |||
612 | 614 | ||
613 | irq_put_desc_unlock(desc, flags); | 615 | irq_put_desc_unlock(desc, flags); |
614 | } | 616 | } |
617 | EXPORT_SYMBOL_GPL(irq_modify_status); | ||
615 | 618 | ||
616 | /** | 619 | /** |
617 | * irq_cpu_online - Invoke all irq_cpu_online functions. | 620 | * irq_cpu_online - Invoke all irq_cpu_online functions. |
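handle_simple_irq() and irq_modify_status() are now exported GPL symbols, so a modular interrupt-chip or demux driver can install the simple flow handler and adjust per-IRQ status bits without out-of-tree patches. A hedged sketch of such a setup path; the driver and variable names are invented:

#include <linux/irq.h>

/* Illustrative mapping helper for a modular demux driver. */
static void my_demux_map_irq(unsigned int virq, struct irq_chip *chip, void *data)
{
	irq_set_chip_and_handler(virq, chip, handle_simple_irq);
	irq_set_chip_data(virq, data);
	/* Allow request_irq() on the line, but keep it out of autoprobing. */
	irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOPROBE);
}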
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h index 306cba37e9a5..97a8bfadc88a 100644 --- a/kernel/irq/debug.h +++ b/kernel/irq/debug.h | |||
@@ -27,6 +27,7 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | |||
27 | P(IRQ_PER_CPU); | 27 | P(IRQ_PER_CPU); |
28 | P(IRQ_NOPROBE); | 28 | P(IRQ_NOPROBE); |
29 | P(IRQ_NOREQUEST); | 29 | P(IRQ_NOREQUEST); |
30 | P(IRQ_NOTHREAD); | ||
30 | P(IRQ_NOAUTOEN); | 31 | P(IRQ_NOAUTOEN); |
31 | 32 | ||
32 | PS(IRQS_AUTODETECT); | 33 | PS(IRQS_AUTODETECT); |
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c new file mode 100644 index 000000000000..31a9db711906 --- /dev/null +++ b/kernel/irq/generic-chip.c | |||
@@ -0,0 +1,354 @@ | |||
1 | /* | ||
2 | * Library implementing the most common irq chip callback functions | ||
3 | * | ||
4 | * Copyright (C) 2011, Thomas Gleixner | ||
5 | */ | ||
6 | #include <linux/io.h> | ||
7 | #include <linux/irq.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/kernel_stat.h> | ||
11 | #include <linux/syscore_ops.h> | ||
12 | |||
13 | #include "internals.h" | ||
14 | |||
15 | static LIST_HEAD(gc_list); | ||
16 | static DEFINE_RAW_SPINLOCK(gc_lock); | ||
17 | |||
18 | static inline struct irq_chip_regs *cur_regs(struct irq_data *d) | ||
19 | { | ||
20 | return &container_of(d->chip, struct irq_chip_type, chip)->regs; | ||
21 | } | ||
22 | |||
23 | /** | ||
24 | * irq_gc_noop - NOOP function | ||
25 | * @d: irq_data | ||
26 | */ | ||
27 | void irq_gc_noop(struct irq_data *d) | ||
28 | { | ||
29 | } | ||
30 | |||
31 | /** | ||
32 | * irq_gc_mask_disable_reg - Mask chip via disable register | ||
33 | * @d: irq_data | ||
34 | * | ||
35 | * Chip has separate enable/disable registers instead of a single mask | ||
36 | * register. | ||
37 | */ | ||
38 | void irq_gc_mask_disable_reg(struct irq_data *d) | ||
39 | { | ||
40 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
41 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
42 | |||
43 | irq_gc_lock(gc); | ||
44 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable); | ||
45 | gc->mask_cache &= ~mask; | ||
46 | irq_gc_unlock(gc); | ||
47 | } | ||
48 | |||
49 | /** | ||
50 | * irq_gc_mask_set_bit - Mask chip via setting bit in mask register | ||
51 | * @d: irq_data | ||
52 | * | ||
53 | * Chip has a single mask register. Values of this register are cached | ||
54 | * and protected by gc->lock | ||
55 | */ | ||
56 | void irq_gc_mask_set_bit(struct irq_data *d) | ||
57 | { | ||
58 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
59 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
60 | |||
61 | irq_gc_lock(gc); | ||
62 | gc->mask_cache |= mask; | ||
63 | irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); | ||
64 | irq_gc_unlock(gc); | ||
65 | } | ||
66 | |||
67 | /** | ||
68 | * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register | ||
69 | * @d: irq_data | ||
70 | * | ||
71 | * Chip has a single mask register. Values of this register are cached | ||
72 | * and protected by gc->lock | ||
73 | */ | ||
74 | void irq_gc_mask_clr_bit(struct irq_data *d) | ||
75 | { | ||
76 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
77 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
78 | |||
79 | irq_gc_lock(gc); | ||
80 | gc->mask_cache &= ~mask; | ||
81 | irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); | ||
82 | irq_gc_unlock(gc); | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * irq_gc_unmask_enable_reg - Unmask chip via enable register | ||
87 | * @d: irq_data | ||
88 | * | ||
89 | * Chip has separate enable/disable registers instead of a single mask | ||
90 | * register. | ||
91 | */ | ||
92 | void irq_gc_unmask_enable_reg(struct irq_data *d) | ||
93 | { | ||
94 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
95 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
96 | |||
97 | irq_gc_lock(gc); | ||
98 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable); | ||
99 | gc->mask_cache |= mask; | ||
100 | irq_gc_unlock(gc); | ||
101 | } | ||
102 | |||
103 | /** | ||
104 | * irq_gc_ack - Ack pending interrupt | ||
105 | * @d: irq_data | ||
106 | */ | ||
107 | void irq_gc_ack(struct irq_data *d) | ||
108 | { | ||
109 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
110 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
111 | |||
112 | irq_gc_lock(gc); | ||
113 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | ||
114 | irq_gc_unlock(gc); | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt | ||
119 | * @d: irq_data | ||
120 | */ | ||
121 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) | ||
122 | { | ||
123 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
124 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
125 | |||
126 | irq_gc_lock(gc); | ||
127 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask); | ||
128 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | ||
129 | irq_gc_unlock(gc); | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * irq_gc_eoi - EOI interrupt | ||
134 | * @d: irq_data | ||
135 | */ | ||
136 | void irq_gc_eoi(struct irq_data *d) | ||
137 | { | ||
138 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
139 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
140 | |||
141 | irq_gc_lock(gc); | ||
142 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi); | ||
143 | irq_gc_unlock(gc); | ||
144 | } | ||
145 | |||
146 | /** | ||
147 | * irq_gc_set_wake - Set/clr wake bit for an interrupt | ||
148 | * @d: irq_data | ||
149 | * | ||
150 | * For chips where the wake from suspend functionality is not | ||
151 | * configured in a separate register and the wakeup active state is | ||
152 | * just stored in a bitmask. | ||
153 | */ | ||
154 | int irq_gc_set_wake(struct irq_data *d, unsigned int on) | ||
155 | { | ||
156 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
157 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
158 | |||
159 | if (!(mask & gc->wake_enabled)) | ||
160 | return -EINVAL; | ||
161 | |||
162 | irq_gc_lock(gc); | ||
163 | if (on) | ||
164 | gc->wake_active |= mask; | ||
165 | else | ||
166 | gc->wake_active &= ~mask; | ||
167 | irq_gc_unlock(gc); | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | /** | ||
172 | * irq_alloc_generic_chip - Allocate a generic chip and initialize it | ||
173 | * @name: Name of the irq chip | ||
174 | * @num_ct: Number of irq_chip_type instances associated with this chip | ||
175 | * @irq_base: Interrupt base nr for this chip | ||
176 | * @reg_base: Register base address (virtual) | ||
177 | * @handler: Default flow handler associated with this chip | ||
178 | * | ||
179 | * Returns an initialized irq_chip_generic structure. The chip defaults | ||
180 | * to the primary (index 0) irq_chip_type and @handler | ||
181 | */ | ||
182 | struct irq_chip_generic * | ||
183 | irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base, | ||
184 | void __iomem *reg_base, irq_flow_handler_t handler) | ||
185 | { | ||
186 | struct irq_chip_generic *gc; | ||
187 | unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type); | ||
188 | |||
189 | gc = kzalloc(sz, GFP_KERNEL); | ||
190 | if (gc) { | ||
191 | raw_spin_lock_init(&gc->lock); | ||
192 | gc->num_ct = num_ct; | ||
193 | gc->irq_base = irq_base; | ||
194 | gc->reg_base = reg_base; | ||
195 | gc->chip_types->chip.name = name; | ||
196 | gc->chip_types->handler = handler; | ||
197 | } | ||
198 | return gc; | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * Separate lockdep class for interrupt chip which can nest irq_desc | ||
203 | * lock. | ||
204 | */ | ||
205 | static struct lock_class_key irq_nested_lock_class; | ||
206 | |||
207 | /** | ||
208 | * irq_setup_generic_chip - Setup a range of interrupts with a generic chip | ||
209 | * @gc: Generic irq chip holding all data | ||
210 | * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base | ||
211 | * @flags: Flags for initialization | ||
212 | * @clr: IRQ_* bits to clear | ||
213 | * @set: IRQ_* bits to set | ||
214 | * | ||
215 | * Set up max. 32 interrupts starting from gc->irq_base. Note, this | ||
216 | * initializes all interrupts to the primary irq_chip_type and its | ||
217 | * associated handler. | ||
218 | */ | ||
219 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | ||
220 | enum irq_gc_flags flags, unsigned int clr, | ||
221 | unsigned int set) | ||
222 | { | ||
223 | struct irq_chip_type *ct = gc->chip_types; | ||
224 | unsigned int i; | ||
225 | |||
226 | raw_spin_lock(&gc_lock); | ||
227 | list_add_tail(&gc->list, &gc_list); | ||
228 | raw_spin_unlock(&gc_lock); | ||
229 | |||
230 | /* Init mask cache ? */ | ||
231 | if (flags & IRQ_GC_INIT_MASK_CACHE) | ||
232 | gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask); | ||
233 | |||
234 | for (i = gc->irq_base; msk; msk >>= 1, i++) { | ||
235 | if (!(msk & 0x01)) | ||
236 | continue; | ||
237 | |||
238 | if (flags & IRQ_GC_INIT_NESTED_LOCK) | ||
239 | irq_set_lockdep_class(i, &irq_nested_lock_class); | ||
240 | |||
241 | irq_set_chip_and_handler(i, &ct->chip, ct->handler); | ||
242 | irq_set_chip_data(i, gc); | ||
243 | irq_modify_status(i, clr, set); | ||
244 | } | ||
245 | gc->irq_cnt = i - gc->irq_base; | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * irq_setup_alt_chip - Switch to alternative chip | ||
250 | * @d: irq_data for this interrupt | ||
251 | * @type: Flow type to be initialized | ||
252 | * | ||
253 | * Only to be called from chip->irq_set_type() callbacks. | ||
254 | */ | ||
255 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type) | ||
256 | { | ||
257 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
258 | struct irq_chip_type *ct = gc->chip_types; | ||
259 | unsigned int i; | ||
260 | |||
261 | for (i = 0; i < gc->num_ct; i++, ct++) { | ||
262 | if (ct->type & type) { | ||
263 | d->chip = &ct->chip; | ||
264 | irq_data_to_desc(d)->handle_irq = ct->handler; | ||
265 | return 0; | ||
266 | } | ||
267 | } | ||
268 | return -EINVAL; | ||
269 | } | ||
270 | |||
271 | /** | ||
272 | * irq_remove_generic_chip - Remove a chip | ||
273 | * @gc: Generic irq chip holding all data | ||
274 | * @msk: Bitmask holding the irqs to remove relative to gc->irq_base | ||
275 | * @clr: IRQ_* bits to clear | ||
276 | * @set: IRQ_* bits to set | ||
277 | * | ||
278 | * Remove up to 32 interrupts starting from gc->irq_base. | ||
279 | */ | ||
280 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | ||
281 | unsigned int clr, unsigned int set) | ||
282 | { | ||
283 | unsigned int i = gc->irq_base; | ||
284 | |||
285 | raw_spin_lock(&gc_lock); | ||
286 | list_del(&gc->list); | ||
287 | raw_spin_unlock(&gc_lock); | ||
288 | |||
289 | for (; msk; msk >>= 1, i++) { | ||
290 | if (!(msk & 0x01)) | ||
291 | continue; | ||
292 | |||
293 | /* Remove handler first. That will mask the irq line */ | ||
294 | irq_set_handler(i, NULL); | ||
295 | irq_set_chip(i, &no_irq_chip); | ||
296 | irq_set_chip_data(i, NULL); | ||
297 | irq_modify_status(i, clr, set); | ||
298 | } | ||
299 | } | ||
300 | |||
301 | #ifdef CONFIG_PM | ||
302 | static int irq_gc_suspend(void) | ||
303 | { | ||
304 | struct irq_chip_generic *gc; | ||
305 | |||
306 | list_for_each_entry(gc, &gc_list, list) { | ||
307 | struct irq_chip_type *ct = gc->chip_types; | ||
308 | |||
309 | if (ct->chip.irq_suspend) | ||
310 | ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base)); | ||
311 | } | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static void irq_gc_resume(void) | ||
316 | { | ||
317 | struct irq_chip_generic *gc; | ||
318 | |||
319 | list_for_each_entry(gc, &gc_list, list) { | ||
320 | struct irq_chip_type *ct = gc->chip_types; | ||
321 | |||
322 | if (ct->chip.irq_resume) | ||
323 | ct->chip.irq_resume(irq_get_irq_data(gc->irq_base)); | ||
324 | } | ||
325 | } | ||
326 | #else | ||
327 | #define irq_gc_suspend NULL | ||
328 | #define irq_gc_resume NULL | ||
329 | #endif | ||
330 | |||
331 | static void irq_gc_shutdown(void) | ||
332 | { | ||
333 | struct irq_chip_generic *gc; | ||
334 | |||
335 | list_for_each_entry(gc, &gc_list, list) { | ||
336 | struct irq_chip_type *ct = gc->chip_types; | ||
337 | |||
338 | if (ct->chip.irq_pm_shutdown) | ||
339 | ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base)); | ||
340 | } | ||
341 | } | ||
342 | |||
343 | static struct syscore_ops irq_gc_syscore_ops = { | ||
344 | .suspend = irq_gc_suspend, | ||
345 | .resume = irq_gc_resume, | ||
346 | .shutdown = irq_gc_shutdown, | ||
347 | }; | ||
348 | |||
349 | static int __init irq_gc_init_ops(void) | ||
350 | { | ||
351 | register_syscore_ops(&irq_gc_syscore_ops); | ||
352 | return 0; | ||
353 | } | ||
354 | device_initcall(irq_gc_init_ops); | ||
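Taken together, kernel/irq/generic-chip.c lets a driver describe its registers once and reuse the canned mask/ack callbacks instead of open-coding yet another irq_chip; the syscore hooks at the end then handle suspend/resume/shutdown for every registered generic chip without per-driver code. A hedged sketch of a platform wiring it up; the register offsets, IRQ count and names are invented:

#include <linux/irq.h>
#include <linux/io.h>

/* Illustrative SoC interrupt controller init using the new library. */
static void __init my_soc_init_irq(void __iomem *regs, unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MYSOC", 1, irq_base, regs, handle_level_irq);
	if (!gc)
		return;

	ct = gc->chip_types;
	ct->regs.mask = 0x04;			/* hypothetical mask register */
	ct->regs.ack  = 0x08;			/* hypothetical ack register  */
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_ack = irq_gc_ack;

	/* Wire up 16 irqs from irq_base, seeding the mask cache from hardware. */
	irq_setup_generic_chip(gc, 0xffff, IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
}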
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 2c039c9b9383..886e80347b32 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
@@ -22,7 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | static struct lock_class_key irq_desc_lock_class; | 23 | static struct lock_class_key irq_desc_lock_class; |
24 | 24 | ||
25 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) | 25 | #if defined(CONFIG_SMP) |
26 | static void __init init_irq_default_affinity(void) | 26 | static void __init init_irq_default_affinity(void) |
27 | { | 27 | { |
28 | alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); | 28 | alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); |
@@ -290,6 +290,22 @@ static int irq_expand_nr_irqs(unsigned int nr) | |||
290 | 290 | ||
291 | #endif /* !CONFIG_SPARSE_IRQ */ | 291 | #endif /* !CONFIG_SPARSE_IRQ */ |
292 | 292 | ||
293 | /** | ||
294 | * generic_handle_irq - Invoke the handler for a particular irq | ||
295 | * @irq: The irq number to handle | ||
296 | * | ||
297 | */ | ||
298 | int generic_handle_irq(unsigned int irq) | ||
299 | { | ||
300 | struct irq_desc *desc = irq_to_desc(irq); | ||
301 | |||
302 | if (!desc) | ||
303 | return -EINVAL; | ||
304 | generic_handle_irq_desc(irq, desc); | ||
305 | return 0; | ||
306 | } | ||
307 | EXPORT_SYMBOL_GPL(generic_handle_irq); | ||
308 | |||
293 | /* Dynamic interrupt handling */ | 309 | /* Dynamic interrupt handling */ |
294 | 310 | ||
295 | /** | 311 | /** |
@@ -311,6 +327,7 @@ void irq_free_descs(unsigned int from, unsigned int cnt) | |||
311 | bitmap_clear(allocated_irqs, from, cnt); | 327 | bitmap_clear(allocated_irqs, from, cnt); |
312 | mutex_unlock(&sparse_irq_lock); | 328 | mutex_unlock(&sparse_irq_lock); |
313 | } | 329 | } |
330 | EXPORT_SYMBOL_GPL(irq_free_descs); | ||
314 | 331 | ||
315 | /** | 332 | /** |
316 | * irq_alloc_descs - allocate and initialize a range of irq descriptors | 333 | * irq_alloc_descs - allocate and initialize a range of irq descriptors |
@@ -351,6 +368,7 @@ err: | |||
351 | mutex_unlock(&sparse_irq_lock); | 368 | mutex_unlock(&sparse_irq_lock); |
352 | return ret; | 369 | return ret; |
353 | } | 370 | } |
371 | EXPORT_SYMBOL_GPL(irq_alloc_descs); | ||
354 | 372 | ||
355 | /** | 373 | /** |
356 | * irq_reserve_irqs - mark irqs allocated | 374 | * irq_reserve_irqs - mark irqs allocated |
@@ -430,7 +448,6 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | |||
430 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; | 448 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; |
431 | } | 449 | } |
432 | 450 | ||
433 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
434 | unsigned int kstat_irqs(unsigned int irq) | 451 | unsigned int kstat_irqs(unsigned int irq) |
435 | { | 452 | { |
436 | struct irq_desc *desc = irq_to_desc(irq); | 453 | struct irq_desc *desc = irq_to_desc(irq); |
@@ -443,4 +460,3 @@ unsigned int kstat_irqs(unsigned int irq) | |||
443 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); | 460 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); |
444 | return sum; | 461 | return sum; |
445 | } | 462 | } |
446 | #endif /* CONFIG_GENERIC_HARDIRQS */ | ||
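With generic_handle_irq(), irq_alloc_descs() and irq_free_descs() now exported, a modular GPIO or MFD driver can allocate a block of descriptors and fan out its secondary interrupts without reaching into irq_desc internals. A hedged sketch of a chained demux handler built on the new export; the chip structure and register offset are invented:

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

struct my_gpio {
	unsigned int irq_base;		/* e.g. obtained from irq_alloc_descs() */
	void __iomem *regs;
};

/* Illustrative flow handler, installed with irq_set_chained_handler(). */
static void my_gpio_demux(unsigned int irq, struct irq_desc *desc)
{
	struct my_gpio *chip = irq_desc_get_handler_data(desc);
	unsigned long pending = readl(chip->regs + 0x10);	/* invented offset */
	int bit;

	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(chip->irq_base + bit);
}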
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 07c1611f3899..f7ce0021e1c4 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -900,7 +900,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
900 | */ | 900 | */ |
901 | new->handler = irq_nested_primary_handler; | 901 | new->handler = irq_nested_primary_handler; |
902 | } else { | 902 | } else { |
903 | irq_setup_forced_threading(new); | 903 | if (irq_settings_can_thread(desc)) |
904 | irq_setup_forced_threading(new); | ||
904 | } | 905 | } |
905 | 906 | ||
906 | /* | 907 | /* |
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 0d91730b6330..f1667833d444 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h | |||
@@ -8,6 +8,7 @@ enum { | |||
8 | _IRQ_LEVEL = IRQ_LEVEL, | 8 | _IRQ_LEVEL = IRQ_LEVEL, |
9 | _IRQ_NOPROBE = IRQ_NOPROBE, | 9 | _IRQ_NOPROBE = IRQ_NOPROBE, |
10 | _IRQ_NOREQUEST = IRQ_NOREQUEST, | 10 | _IRQ_NOREQUEST = IRQ_NOREQUEST, |
11 | _IRQ_NOTHREAD = IRQ_NOTHREAD, | ||
11 | _IRQ_NOAUTOEN = IRQ_NOAUTOEN, | 12 | _IRQ_NOAUTOEN = IRQ_NOAUTOEN, |
12 | _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT, | 13 | _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT, |
13 | _IRQ_NO_BALANCING = IRQ_NO_BALANCING, | 14 | _IRQ_NO_BALANCING = IRQ_NO_BALANCING, |
@@ -20,6 +21,7 @@ enum { | |||
20 | #define IRQ_LEVEL GOT_YOU_MORON | 21 | #define IRQ_LEVEL GOT_YOU_MORON |
21 | #define IRQ_NOPROBE GOT_YOU_MORON | 22 | #define IRQ_NOPROBE GOT_YOU_MORON |
22 | #define IRQ_NOREQUEST GOT_YOU_MORON | 23 | #define IRQ_NOREQUEST GOT_YOU_MORON |
24 | #define IRQ_NOTHREAD GOT_YOU_MORON | ||
23 | #define IRQ_NOAUTOEN GOT_YOU_MORON | 25 | #define IRQ_NOAUTOEN GOT_YOU_MORON |
24 | #define IRQ_NESTED_THREAD GOT_YOU_MORON | 26 | #define IRQ_NESTED_THREAD GOT_YOU_MORON |
25 | #undef IRQF_MODIFY_MASK | 27 | #undef IRQF_MODIFY_MASK |
@@ -94,6 +96,21 @@ static inline void irq_settings_set_norequest(struct irq_desc *desc) | |||
94 | desc->status_use_accessors |= _IRQ_NOREQUEST; | 96 | desc->status_use_accessors |= _IRQ_NOREQUEST; |
95 | } | 97 | } |
96 | 98 | ||
99 | static inline bool irq_settings_can_thread(struct irq_desc *desc) | ||
100 | { | ||
101 | return !(desc->status_use_accessors & _IRQ_NOTHREAD); | ||
102 | } | ||
103 | |||
104 | static inline void irq_settings_clr_nothread(struct irq_desc *desc) | ||
105 | { | ||
106 | desc->status_use_accessors &= ~_IRQ_NOTHREAD; | ||
107 | } | ||
108 | |||
109 | static inline void irq_settings_set_nothread(struct irq_desc *desc) | ||
110 | { | ||
111 | desc->status_use_accessors |= _IRQ_NOTHREAD; | ||
112 | } | ||
113 | |||
97 | static inline bool irq_settings_can_probe(struct irq_desc *desc) | 114 | static inline bool irq_settings_can_probe(struct irq_desc *desc) |
98 | { | 115 | { |
99 | return !(desc->status_use_accessors & _IRQ_NOPROBE); | 116 | return !(desc->status_use_accessors & _IRQ_NOPROBE); |
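The new IRQ_NOTHREAD bit plus irq_settings_can_thread() give core code a way to exempt individual lines from forced interrupt threading: __setup_irq() in the manage.c hunk above only calls irq_setup_forced_threading() when the descriptor allows it, and chained handlers now set the bit automatically. A hedged sketch of marking a line explicitly; the irq number, name and handler are illustrative:

#include <linux/irq.h>
#include <linux/interrupt.h>

/* Illustrative: keep a latency-critical line out of forced threading. */
static int my_fast_irq_init(unsigned int irq, irq_handler_t handler)
{
	irq_modify_status(irq, 0, IRQ_NOTHREAD);	/* clear nothing, set NOTHREAD */
	return request_irq(irq, handler, 0, "my-fast-irq", NULL);
}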
diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 3b79bd938330..74d1c099fbd1 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c | |||
@@ -2,43 +2,23 @@ | |||
2 | * jump label support | 2 | * jump label support |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> | 4 | * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> |
5 | * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com> | ||
5 | * | 6 | * |
6 | */ | 7 | */ |
7 | #include <linux/jump_label.h> | ||
8 | #include <linux/memory.h> | 8 | #include <linux/memory.h> |
9 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/jhash.h> | ||
13 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
14 | #include <linux/sort.h> | 13 | #include <linux/sort.h> |
15 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/jump_label.h> | ||
16 | 16 | ||
17 | #ifdef HAVE_JUMP_LABEL | 17 | #ifdef HAVE_JUMP_LABEL |
18 | 18 | ||
19 | #define JUMP_LABEL_HASH_BITS 6 | ||
20 | #define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS) | ||
21 | static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE]; | ||
22 | |||
23 | /* mutex to protect coming/going of the jump_label table */ | 19 | /* mutex to protect coming/going of the jump_label table */ |
24 | static DEFINE_MUTEX(jump_label_mutex); | 20 | static DEFINE_MUTEX(jump_label_mutex); |
25 | 21 | ||
26 | struct jump_label_entry { | ||
27 | struct hlist_node hlist; | ||
28 | struct jump_entry *table; | ||
29 | int nr_entries; | ||
30 | /* hang modules off here */ | ||
31 | struct hlist_head modules; | ||
32 | unsigned long key; | ||
33 | }; | ||
34 | |||
35 | struct jump_label_module_entry { | ||
36 | struct hlist_node hlist; | ||
37 | struct jump_entry *table; | ||
38 | int nr_entries; | ||
39 | struct module *mod; | ||
40 | }; | ||
41 | |||
42 | void jump_label_lock(void) | 22 | void jump_label_lock(void) |
43 | { | 23 | { |
44 | mutex_lock(&jump_label_mutex); | 24 | mutex_lock(&jump_label_mutex); |
@@ -49,6 +29,11 @@ void jump_label_unlock(void) | |||
49 | mutex_unlock(&jump_label_mutex); | 29 | mutex_unlock(&jump_label_mutex); |
50 | } | 30 | } |
51 | 31 | ||
32 | bool jump_label_enabled(struct jump_label_key *key) | ||
33 | { | ||
34 | return !!atomic_read(&key->enabled); | ||
35 | } | ||
36 | |||
52 | static int jump_label_cmp(const void *a, const void *b) | 37 | static int jump_label_cmp(const void *a, const void *b) |
53 | { | 38 | { |
54 | const struct jump_entry *jea = a; | 39 | const struct jump_entry *jea = a; |
@@ -64,7 +49,7 @@ static int jump_label_cmp(const void *a, const void *b) | |||
64 | } | 49 | } |
65 | 50 | ||
66 | static void | 51 | static void |
67 | sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) | 52 | jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) |
68 | { | 53 | { |
69 | unsigned long size; | 54 | unsigned long size; |
70 | 55 | ||
@@ -73,118 +58,25 @@ sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) | |||
73 | sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); | 58 | sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); |
74 | } | 59 | } |
75 | 60 | ||
76 | static struct jump_label_entry *get_jump_label_entry(jump_label_t key) | 61 | static void jump_label_update(struct jump_label_key *key, int enable); |
77 | { | ||
78 | struct hlist_head *head; | ||
79 | struct hlist_node *node; | ||
80 | struct jump_label_entry *e; | ||
81 | u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0); | ||
82 | |||
83 | head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; | ||
84 | hlist_for_each_entry(e, node, head, hlist) { | ||
85 | if (key == e->key) | ||
86 | return e; | ||
87 | } | ||
88 | return NULL; | ||
89 | } | ||
90 | 62 | ||
91 | static struct jump_label_entry * | 63 | void jump_label_inc(struct jump_label_key *key) |
92 | add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table) | ||
93 | { | 64 | { |
94 | struct hlist_head *head; | 65 | if (atomic_inc_not_zero(&key->enabled)) |
95 | struct jump_label_entry *e; | 66 | return; |
96 | u32 hash; | ||
97 | |||
98 | e = get_jump_label_entry(key); | ||
99 | if (e) | ||
100 | return ERR_PTR(-EEXIST); | ||
101 | |||
102 | e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL); | ||
103 | if (!e) | ||
104 | return ERR_PTR(-ENOMEM); | ||
105 | |||
106 | hash = jhash((void *)&key, sizeof(jump_label_t), 0); | ||
107 | head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; | ||
108 | e->key = key; | ||
109 | e->table = table; | ||
110 | e->nr_entries = nr_entries; | ||
111 | INIT_HLIST_HEAD(&(e->modules)); | ||
112 | hlist_add_head(&e->hlist, head); | ||
113 | return e; | ||
114 | } | ||
115 | 67 | ||
116 | static int | 68 | jump_label_lock(); |
117 | build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop) | 69 | if (atomic_add_return(1, &key->enabled) == 1) |
118 | { | 70 | jump_label_update(key, JUMP_LABEL_ENABLE); |
119 | struct jump_entry *iter, *iter_begin; | 71 | jump_label_unlock(); |
120 | struct jump_label_entry *entry; | ||
121 | int count; | ||
122 | |||
123 | sort_jump_label_entries(start, stop); | ||
124 | iter = start; | ||
125 | while (iter < stop) { | ||
126 | entry = get_jump_label_entry(iter->key); | ||
127 | if (!entry) { | ||
128 | iter_begin = iter; | ||
129 | count = 0; | ||
130 | while ((iter < stop) && | ||
131 | (iter->key == iter_begin->key)) { | ||
132 | iter++; | ||
133 | count++; | ||
134 | } | ||
135 | entry = add_jump_label_entry(iter_begin->key, | ||
136 | count, iter_begin); | ||
137 | if (IS_ERR(entry)) | ||
138 | return PTR_ERR(entry); | ||
139 | } else { | ||
140 | WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n"); | ||
141 | return -1; | ||
142 | } | ||
143 | } | ||
144 | return 0; | ||
145 | } | 72 | } |
146 | 73 | ||
147 | /*** | 74 | void jump_label_dec(struct jump_label_key *key) |
148 | * jump_label_update - update jump label text | ||
149 | * @key - key value associated with a a jump label | ||
150 | * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE | ||
151 | * | ||
152 | * Will enable/disable the jump for jump label @key, depending on the | ||
153 | * value of @type. | ||
154 | * | ||
155 | */ | ||
156 | |||
157 | void jump_label_update(unsigned long key, enum jump_label_type type) | ||
158 | { | 75 | { |
159 | struct jump_entry *iter; | 76 | if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) |
160 | struct jump_label_entry *entry; | 77 | return; |
161 | struct hlist_node *module_node; | ||
162 | struct jump_label_module_entry *e_module; | ||
163 | int count; | ||
164 | 78 | ||
165 | jump_label_lock(); | 79 | jump_label_update(key, JUMP_LABEL_DISABLE); |
166 | entry = get_jump_label_entry((jump_label_t)key); | ||
167 | if (entry) { | ||
168 | count = entry->nr_entries; | ||
169 | iter = entry->table; | ||
170 | while (count--) { | ||
171 | if (kernel_text_address(iter->code)) | ||
172 | arch_jump_label_transform(iter, type); | ||
173 | iter++; | ||
174 | } | ||
175 | /* enable/disable jump labels in modules */ | ||
176 | hlist_for_each_entry(e_module, module_node, &(entry->modules), | ||
177 | hlist) { | ||
178 | count = e_module->nr_entries; | ||
179 | iter = e_module->table; | ||
180 | while (count--) { | ||
181 | if (iter->key && | ||
182 | kernel_text_address(iter->code)) | ||
183 | arch_jump_label_transform(iter, type); | ||
184 | iter++; | ||
185 | } | ||
186 | } | ||
187 | } | ||
188 | jump_label_unlock(); | 80 | jump_label_unlock(); |
189 | } | 81 | } |
190 | 82 | ||
@@ -197,77 +89,33 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end) | |||
197 | return 0; | 89 | return 0; |
198 | } | 90 | } |
199 | 91 | ||
200 | #ifdef CONFIG_MODULES | 92 | static int __jump_label_text_reserved(struct jump_entry *iter_start, |
201 | 93 | struct jump_entry *iter_stop, void *start, void *end) | |
202 | static int module_conflict(void *start, void *end) | ||
203 | { | 94 | { |
204 | struct hlist_head *head; | ||
205 | struct hlist_node *node, *node_next, *module_node, *module_node_next; | ||
206 | struct jump_label_entry *e; | ||
207 | struct jump_label_module_entry *e_module; | ||
208 | struct jump_entry *iter; | 95 | struct jump_entry *iter; |
209 | int i, count; | ||
210 | int conflict = 0; | ||
211 | |||
212 | for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { | ||
213 | head = &jump_label_table[i]; | ||
214 | hlist_for_each_entry_safe(e, node, node_next, head, hlist) { | ||
215 | hlist_for_each_entry_safe(e_module, module_node, | ||
216 | module_node_next, | ||
217 | &(e->modules), hlist) { | ||
218 | count = e_module->nr_entries; | ||
219 | iter = e_module->table; | ||
220 | while (count--) { | ||
221 | if (addr_conflict(iter, start, end)) { | ||
222 | conflict = 1; | ||
223 | goto out; | ||
224 | } | ||
225 | iter++; | ||
226 | } | ||
227 | } | ||
228 | } | ||
229 | } | ||
230 | out: | ||
231 | return conflict; | ||
232 | } | ||
233 | |||
234 | #endif | ||
235 | |||
236 | /*** | ||
237 | * jump_label_text_reserved - check if addr range is reserved | ||
238 | * @start: start text addr | ||
239 | * @end: end text addr | ||
240 | * | ||
241 | * checks if the text addr located between @start and @end | ||
242 | * overlaps with any of the jump label patch addresses. Code | ||
243 | * that wants to modify kernel text should first verify that | ||
244 | * it does not overlap with any of the jump label addresses. | ||
245 | * Caller must hold jump_label_mutex. | ||
246 | * | ||
247 | * returns 1 if there is an overlap, 0 otherwise | ||
248 | */ | ||
249 | int jump_label_text_reserved(void *start, void *end) | ||
250 | { | ||
251 | struct jump_entry *iter; | ||
252 | struct jump_entry *iter_start = __start___jump_table; | ||
253 | struct jump_entry *iter_stop = __start___jump_table; | ||
254 | int conflict = 0; | ||
255 | 96 | ||
256 | iter = iter_start; | 97 | iter = iter_start; |
257 | while (iter < iter_stop) { | 98 | while (iter < iter_stop) { |
258 | if (addr_conflict(iter, start, end)) { | 99 | if (addr_conflict(iter, start, end)) |
259 | conflict = 1; | 100 | return 1; |
260 | goto out; | ||
261 | } | ||
262 | iter++; | 101 | iter++; |
263 | } | 102 | } |
264 | 103 | ||
265 | /* now check modules */ | 104 | return 0; |
266 | #ifdef CONFIG_MODULES | 105 | } |
267 | conflict = module_conflict(start, end); | 106 | |
268 | #endif | 107 | static void __jump_label_update(struct jump_label_key *key, |
269 | out: | 108 | struct jump_entry *entry, int enable) |
270 | return conflict; | 109 | { |
110 | for (; entry->key == (jump_label_t)(unsigned long)key; entry++) { | ||
111 | /* | ||
112 | * entry->code set to 0 invalidates module init text sections | ||
113 | * kernel_text_address() verifies we are not in core kernel | ||
114 | * init code, see jump_label_invalidate_module_init(). | ||
115 | */ | ||
116 | if (entry->code && kernel_text_address(entry->code)) | ||
117 | arch_jump_label_transform(entry, enable); | ||
118 | } | ||
271 | } | 119 | } |
272 | 120 | ||
273 | /* | 121 | /* |
@@ -277,142 +125,173 @@ void __weak arch_jump_label_text_poke_early(jump_label_t addr) | |||
277 | { | 125 | { |
278 | } | 126 | } |
279 | 127 | ||
280 | static __init int init_jump_label(void) | 128 | static __init int jump_label_init(void) |
281 | { | 129 | { |
282 | int ret; | ||
283 | struct jump_entry *iter_start = __start___jump_table; | 130 | struct jump_entry *iter_start = __start___jump_table; |
284 | struct jump_entry *iter_stop = __stop___jump_table; | 131 | struct jump_entry *iter_stop = __stop___jump_table; |
132 | struct jump_label_key *key = NULL; | ||
285 | struct jump_entry *iter; | 133 | struct jump_entry *iter; |
286 | 134 | ||
287 | jump_label_lock(); | 135 | jump_label_lock(); |
288 | ret = build_jump_label_hashtable(__start___jump_table, | 136 | jump_label_sort_entries(iter_start, iter_stop); |
289 | __stop___jump_table); | 137 | |
290 | iter = iter_start; | 138 | for (iter = iter_start; iter < iter_stop; iter++) { |
291 | while (iter < iter_stop) { | ||
292 | arch_jump_label_text_poke_early(iter->code); | 139 | arch_jump_label_text_poke_early(iter->code); |
293 | iter++; | 140 | if (iter->key == (jump_label_t)(unsigned long)key) |
141 | continue; | ||
142 | |||
143 | key = (struct jump_label_key *)(unsigned long)iter->key; | ||
144 | atomic_set(&key->enabled, 0); | ||
145 | key->entries = iter; | ||
146 | #ifdef CONFIG_MODULES | ||
147 | key->next = NULL; | ||
148 | #endif | ||
294 | } | 149 | } |
295 | jump_label_unlock(); | 150 | jump_label_unlock(); |
296 | return ret; | 151 | |
152 | return 0; | ||
297 | } | 153 | } |
298 | early_initcall(init_jump_label); | 154 | early_initcall(jump_label_init); |
299 | 155 | ||
300 | #ifdef CONFIG_MODULES | 156 | #ifdef CONFIG_MODULES |
301 | 157 | ||
302 | static struct jump_label_module_entry * | 158 | struct jump_label_mod { |
303 | add_jump_label_module_entry(struct jump_label_entry *entry, | 159 | struct jump_label_mod *next; |
304 | struct jump_entry *iter_begin, | 160 | struct jump_entry *entries; |
305 | int count, struct module *mod) | 161 | struct module *mod; |
162 | }; | ||
163 | |||
164 | static int __jump_label_mod_text_reserved(void *start, void *end) | ||
165 | { | ||
166 | struct module *mod; | ||
167 | |||
168 | mod = __module_text_address((unsigned long)start); | ||
169 | if (!mod) | ||
170 | return 0; | ||
171 | |||
172 | WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); | ||
173 | |||
174 | return __jump_label_text_reserved(mod->jump_entries, | ||
175 | mod->jump_entries + mod->num_jump_entries, | ||
176 | start, end); | ||
177 | } | ||
178 | |||
179 | static void __jump_label_mod_update(struct jump_label_key *key, int enable) | ||
180 | { | ||
181 | struct jump_label_mod *mod = key->next; | ||
182 | |||
183 | while (mod) { | ||
184 | __jump_label_update(key, mod->entries, enable); | ||
185 | mod = mod->next; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | /*** | ||
190 | * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop() | ||
191 | * @mod: module to patch | ||
192 | * | ||
193 | * Allow for run-time selection of the optimal nops. Before the module | ||
194 | * loads patch these with arch_get_jump_label_nop(), which is specified by | ||
195 | * the arch specific jump label code. | ||
196 | */ | ||
197 | void jump_label_apply_nops(struct module *mod) | ||
306 | { | 198 | { |
307 | struct jump_label_module_entry *e; | 199 | struct jump_entry *iter_start = mod->jump_entries; |
308 | 200 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; | |
309 | e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL); | 201 | struct jump_entry *iter; |
310 | if (!e) | 202 | |
311 | return ERR_PTR(-ENOMEM); | 203 | /* if the module doesn't have jump label entries, just return */ |
312 | e->mod = mod; | 204 | if (iter_start == iter_stop) |
313 | e->nr_entries = count; | 205 | return; |
314 | e->table = iter_begin; | 206 | |
315 | hlist_add_head(&e->hlist, &entry->modules); | 207 | for (iter = iter_start; iter < iter_stop; iter++) |
316 | return e; | 208 | arch_jump_label_text_poke_early(iter->code); |
317 | } | 209 | } |
318 | 210 | ||
319 | static int add_jump_label_module(struct module *mod) | 211 | static int jump_label_add_module(struct module *mod) |
320 | { | 212 | { |
321 | struct jump_entry *iter, *iter_begin; | 213 | struct jump_entry *iter_start = mod->jump_entries; |
322 | struct jump_label_entry *entry; | 214 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
323 | struct jump_label_module_entry *module_entry; | 215 | struct jump_entry *iter; |
324 | int count; | 216 | struct jump_label_key *key = NULL; |
217 | struct jump_label_mod *jlm; | ||
325 | 218 | ||
326 | /* if the module doesn't have jump label entries, just return */ | 219 | /* if the module doesn't have jump label entries, just return */ |
327 | if (!mod->num_jump_entries) | 220 | if (iter_start == iter_stop) |
328 | return 0; | 221 | return 0; |
329 | 222 | ||
330 | sort_jump_label_entries(mod->jump_entries, | 223 | jump_label_sort_entries(iter_start, iter_stop); |
331 | mod->jump_entries + mod->num_jump_entries); | 224 | |
332 | iter = mod->jump_entries; | 225 | for (iter = iter_start; iter < iter_stop; iter++) { |
333 | while (iter < mod->jump_entries + mod->num_jump_entries) { | 226 | if (iter->key == (jump_label_t)(unsigned long)key) |
334 | entry = get_jump_label_entry(iter->key); | 227 | continue; |
335 | iter_begin = iter; | 228 | |
336 | count = 0; | 229 | key = (struct jump_label_key *)(unsigned long)iter->key; |
337 | while ((iter < mod->jump_entries + mod->num_jump_entries) && | 230 | |
338 | (iter->key == iter_begin->key)) { | 231 | if (__module_address(iter->key) == mod) { |
339 | iter++; | 232 | atomic_set(&key->enabled, 0); |
340 | count++; | 233 | key->entries = iter; |
341 | } | 234 | key->next = NULL; |
342 | if (!entry) { | 235 | continue; |
343 | entry = add_jump_label_entry(iter_begin->key, 0, NULL); | ||
344 | if (IS_ERR(entry)) | ||
345 | return PTR_ERR(entry); | ||
346 | } | 236 | } |
347 | module_entry = add_jump_label_module_entry(entry, iter_begin, | 237 | |
348 | count, mod); | 238 | jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL); |
349 | if (IS_ERR(module_entry)) | 239 | if (!jlm) |
350 | return PTR_ERR(module_entry); | 240 | return -ENOMEM; |
241 | |||
242 | jlm->mod = mod; | ||
243 | jlm->entries = iter; | ||
244 | jlm->next = key->next; | ||
245 | key->next = jlm; | ||
246 | |||
247 | if (jump_label_enabled(key)) | ||
248 | __jump_label_update(key, iter, JUMP_LABEL_ENABLE); | ||
351 | } | 249 | } |
250 | |||
352 | return 0; | 251 | return 0; |
353 | } | 252 | } |
354 | 253 | ||
355 | static void remove_jump_label_module(struct module *mod) | 254 | static void jump_label_del_module(struct module *mod) |
356 | { | 255 | { |
357 | struct hlist_head *head; | 256 | struct jump_entry *iter_start = mod->jump_entries; |
358 | struct hlist_node *node, *node_next, *module_node, *module_node_next; | 257 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
359 | struct jump_label_entry *e; | 258 | struct jump_entry *iter; |
360 | struct jump_label_module_entry *e_module; | 259 | struct jump_label_key *key = NULL; |
361 | int i; | 260 | struct jump_label_mod *jlm, **prev; |
362 | 261 | ||
363 | /* if the module doesn't have jump label entries, just return */ | 262 | for (iter = iter_start; iter < iter_stop; iter++) { |
364 | if (!mod->num_jump_entries) | 263 | if (iter->key == (jump_label_t)(unsigned long)key) |
365 | return; | 264 | continue; |
265 | |||
266 | key = (struct jump_label_key *)(unsigned long)iter->key; | ||
267 | |||
268 | if (__module_address(iter->key) == mod) | ||
269 | continue; | ||
270 | |||
271 | prev = &key->next; | ||
272 | jlm = key->next; | ||
366 | 273 | ||
367 | for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { | 274 | while (jlm && jlm->mod != mod) { |
368 | head = &jump_label_table[i]; | 275 | prev = &jlm->next; |
369 | hlist_for_each_entry_safe(e, node, node_next, head, hlist) { | 276 | jlm = jlm->next; |
370 | hlist_for_each_entry_safe(e_module, module_node, | 277 | } |
371 | module_node_next, | 278 | |
372 | &(e->modules), hlist) { | 279 | if (jlm) { |
373 | if (e_module->mod == mod) { | 280 | *prev = jlm->next; |
374 | hlist_del(&e_module->hlist); | 281 | kfree(jlm); |
375 | kfree(e_module); | ||
376 | } | ||
377 | } | ||
378 | if (hlist_empty(&e->modules) && (e->nr_entries == 0)) { | ||
379 | hlist_del(&e->hlist); | ||
380 | kfree(e); | ||
381 | } | ||
382 | } | 282 | } |
383 | } | 283 | } |
384 | } | 284 | } |
385 | 285 | ||
386 | static void remove_jump_label_module_init(struct module *mod) | 286 | static void jump_label_invalidate_module_init(struct module *mod) |
387 | { | 287 | { |
388 | struct hlist_head *head; | 288 | struct jump_entry *iter_start = mod->jump_entries; |
389 | struct hlist_node *node, *node_next, *module_node, *module_node_next; | 289 | struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
390 | struct jump_label_entry *e; | ||
391 | struct jump_label_module_entry *e_module; | ||
392 | struct jump_entry *iter; | 290 | struct jump_entry *iter; |
393 | int i, count; | ||
394 | |||
395 | /* if the module doesn't have jump label entries, just return */ | ||
396 | if (!mod->num_jump_entries) | ||
397 | return; | ||
398 | 291 | ||
399 | for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { | 292 | for (iter = iter_start; iter < iter_stop; iter++) { |
400 | head = &jump_label_table[i]; | 293 | if (within_module_init(iter->code, mod)) |
401 | hlist_for_each_entry_safe(e, node, node_next, head, hlist) { | 294 | iter->code = 0; |
402 | hlist_for_each_entry_safe(e_module, module_node, | ||
403 | module_node_next, | ||
404 | &(e->modules), hlist) { | ||
405 | if (e_module->mod != mod) | ||
406 | continue; | ||
407 | count = e_module->nr_entries; | ||
408 | iter = e_module->table; | ||
409 | while (count--) { | ||
410 | if (within_module_init(iter->code, mod)) | ||
411 | iter->key = 0; | ||
412 | iter++; | ||
413 | } | ||
414 | } | ||
415 | } | ||
416 | } | 295 | } |
417 | } | 296 | } |
418 | 297 | ||
@@ -426,59 +305,77 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, | |||
426 | switch (val) { | 305 | switch (val) { |
427 | case MODULE_STATE_COMING: | 306 | case MODULE_STATE_COMING: |
428 | jump_label_lock(); | 307 | jump_label_lock(); |
429 | ret = add_jump_label_module(mod); | 308 | ret = jump_label_add_module(mod); |
430 | if (ret) | 309 | if (ret) |
431 | remove_jump_label_module(mod); | 310 | jump_label_del_module(mod); |
432 | jump_label_unlock(); | 311 | jump_label_unlock(); |
433 | break; | 312 | break; |
434 | case MODULE_STATE_GOING: | 313 | case MODULE_STATE_GOING: |
435 | jump_label_lock(); | 314 | jump_label_lock(); |
436 | remove_jump_label_module(mod); | 315 | jump_label_del_module(mod); |
437 | jump_label_unlock(); | 316 | jump_label_unlock(); |
438 | break; | 317 | break; |
439 | case MODULE_STATE_LIVE: | 318 | case MODULE_STATE_LIVE: |
440 | jump_label_lock(); | 319 | jump_label_lock(); |
441 | remove_jump_label_module_init(mod); | 320 | jump_label_invalidate_module_init(mod); |
442 | jump_label_unlock(); | 321 | jump_label_unlock(); |
443 | break; | 322 | break; |
444 | } | 323 | } |
445 | return ret; | ||
446 | } | ||
447 | 324 | ||
448 | /*** | 325 | return notifier_from_errno(ret); |
449 | * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() | ||
450 | * @mod: module to patch | ||
451 | * | ||
452 | * Allow for run-time selection of the optimal nops. Before the module | ||
453 | * loads patch these with arch_get_jump_label_nop(), which is specified by | ||
454 | * the arch specific jump label code. | ||
455 | */ | ||
456 | void jump_label_apply_nops(struct module *mod) | ||
457 | { | ||
458 | struct jump_entry *iter; | ||
459 | |||
460 | /* if the module doesn't have jump label entries, just return */ | ||
461 | if (!mod->num_jump_entries) | ||
462 | return; | ||
463 | |||
464 | iter = mod->jump_entries; | ||
465 | while (iter < mod->jump_entries + mod->num_jump_entries) { | ||
466 | arch_jump_label_text_poke_early(iter->code); | ||
467 | iter++; | ||
468 | } | ||
469 | } | 326 | } |
470 | 327 | ||
471 | struct notifier_block jump_label_module_nb = { | 328 | struct notifier_block jump_label_module_nb = { |
472 | .notifier_call = jump_label_module_notify, | 329 | .notifier_call = jump_label_module_notify, |
473 | .priority = 0, | 330 | .priority = 1, /* higher than tracepoints */ |
474 | }; | 331 | }; |
475 | 332 | ||
476 | static __init int init_jump_label_module(void) | 333 | static __init int jump_label_init_module(void) |
477 | { | 334 | { |
478 | return register_module_notifier(&jump_label_module_nb); | 335 | return register_module_notifier(&jump_label_module_nb); |
479 | } | 336 | } |
480 | early_initcall(init_jump_label_module); | 337 | early_initcall(jump_label_init_module); |
481 | 338 | ||
482 | #endif /* CONFIG_MODULES */ | 339 | #endif /* CONFIG_MODULES */ |
483 | 340 | ||
341 | /*** | ||
342 | * jump_label_text_reserved - check if addr range is reserved | ||
343 | * @start: start text addr | ||
344 | * @end: end text addr | ||
345 | * | ||
346 | * checks if the text addr located between @start and @end | ||
347 | * overlaps with any of the jump label patch addresses. Code | ||
348 | * that wants to modify kernel text should first verify that | ||
349 | * it does not overlap with any of the jump label addresses. | ||
350 | * Caller must hold jump_label_mutex. | ||
351 | * | ||
352 | * returns 1 if there is an overlap, 0 otherwise | ||
353 | */ | ||
354 | int jump_label_text_reserved(void *start, void *end) | ||
355 | { | ||
356 | int ret = __jump_label_text_reserved(__start___jump_table, | ||
357 | __stop___jump_table, start, end); | ||
358 | |||
359 | if (ret) | ||
360 | return ret; | ||
361 | |||
362 | #ifdef CONFIG_MODULES | ||
363 | ret = __jump_label_mod_text_reserved(start, end); | ||
364 | #endif | ||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | static void jump_label_update(struct jump_label_key *key, int enable) | ||
369 | { | ||
370 | struct jump_entry *entry = key->entries; | ||
371 | |||
372 | /* if there are no users, entry can be NULL */ | ||
373 | if (entry) | ||
374 | __jump_label_update(key, entry, enable); | ||
375 | |||
376 | #ifdef CONFIG_MODULES | ||
377 | __jump_label_mod_update(key, enable); | ||
378 | #endif | ||
379 | } | ||
380 | |||
484 | #endif | 381 | #endif |
diff --git a/kernel/kexec.c b/kernel/kexec.c index 87b77de03dd3..8d814cbc8109 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -1531,13 +1531,7 @@ int kernel_kexec(void) | |||
1531 | if (error) | 1531 | if (error) |
1532 | goto Enable_cpus; | 1532 | goto Enable_cpus; |
1533 | local_irq_disable(); | 1533 | local_irq_disable(); |
1534 | /* Suspend system devices */ | 1534 | error = syscore_suspend(); |
1535 | error = sysdev_suspend(PMSG_FREEZE); | ||
1536 | if (!error) { | ||
1537 | error = syscore_suspend(); | ||
1538 | if (error) | ||
1539 | sysdev_resume(); | ||
1540 | } | ||
1541 | if (error) | 1535 | if (error) |
1542 | goto Enable_irqs; | 1536 | goto Enable_irqs; |
1543 | } else | 1537 | } else |
@@ -1553,7 +1547,6 @@ int kernel_kexec(void) | |||
1553 | #ifdef CONFIG_KEXEC_JUMP | 1547 | #ifdef CONFIG_KEXEC_JUMP |
1554 | if (kexec_image->preserve_context) { | 1548 | if (kexec_image->preserve_context) { |
1555 | syscore_resume(); | 1549 | syscore_resume(); |
1556 | sysdev_resume(); | ||
1557 | Enable_irqs: | 1550 | Enable_irqs: |
1558 | local_irq_enable(); | 1551 | local_irq_enable(); |
1559 | Enable_cpus: | 1552 | Enable_cpus: |
diff --git a/kernel/kmod.c b/kernel/kmod.c index 9cd0591c96a2..5ae0ff38425f 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
@@ -245,7 +245,6 @@ static void __call_usermodehelper(struct work_struct *work) | |||
245 | } | 245 | } |
246 | } | 246 | } |
247 | 247 | ||
248 | #ifdef CONFIG_PM_SLEEP | ||
249 | /* | 248 | /* |
250 | * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY | 249 | * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY |
251 | * (used for preventing user land processes from being created after the user | 250 | * (used for preventing user land processes from being created after the user |
@@ -301,6 +300,15 @@ void usermodehelper_enable(void) | |||
301 | usermodehelper_disabled = 0; | 300 | usermodehelper_disabled = 0; |
302 | } | 301 | } |
303 | 302 | ||
303 | /** | ||
304 | * usermodehelper_is_disabled - check if new helpers are allowed to be started | ||
305 | */ | ||
306 | bool usermodehelper_is_disabled(void) | ||
307 | { | ||
308 | return usermodehelper_disabled; | ||
309 | } | ||
310 | EXPORT_SYMBOL_GPL(usermodehelper_is_disabled); | ||
311 | |||
304 | static void helper_lock(void) | 312 | static void helper_lock(void) |
305 | { | 313 | { |
306 | atomic_inc(&running_helpers); | 314 | atomic_inc(&running_helpers); |
@@ -312,12 +320,6 @@ static void helper_unlock(void) | |||
312 | if (atomic_dec_and_test(&running_helpers)) | 320 | if (atomic_dec_and_test(&running_helpers)) |
313 | wake_up(&running_helpers_waitq); | 321 | wake_up(&running_helpers_waitq); |
314 | } | 322 | } |
315 | #else /* CONFIG_PM_SLEEP */ | ||
316 | #define usermodehelper_disabled 0 | ||
317 | |||
318 | static inline void helper_lock(void) {} | ||
319 | static inline void helper_unlock(void) {} | ||
320 | #endif /* CONFIG_PM_SLEEP */ | ||
321 | 323 | ||
322 | /** | 324 | /** |
323 | * call_usermodehelper_setup - prepare to call a usermode helper | 325 | * call_usermodehelper_setup - prepare to call a usermode helper |
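With the PM_SLEEP-only stubs removed, usermodehelper_is_disabled() is always built and exported, so drivers can detect the window in which user-space helpers must not be spawned, typically to avoid firmware-loading deadlocks around suspend and resume. A hedged sketch of such a check; the firmware name and the programming helper are invented:

#include <linux/kmod.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/errno.h>

int my_dev_program(struct device *dev, const void *data, size_t size); /* hypothetical driver helper */

/* Illustrative: bail out instead of blocking on a helper that cannot run. */
static int my_dev_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	if (usermodehelper_is_disabled())
		return -EBUSY;		/* caller retries after resume completes */

	ret = request_firmware(&fw, "my_dev.bin", dev);
	if (ret)
		return ret;
	ret = my_dev_program(dev, fw->data, fw->size);
	release_firmware(fw);
	return ret;
}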
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 53a68956f131..63437d065ac8 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -490,6 +490,18 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) | |||
490 | usage[i] = '\0'; | 490 | usage[i] = '\0'; |
491 | } | 491 | } |
492 | 492 | ||
493 | static int __print_lock_name(struct lock_class *class) | ||
494 | { | ||
495 | char str[KSYM_NAME_LEN]; | ||
496 | const char *name; | ||
497 | |||
498 | name = class->name; | ||
499 | if (!name) | ||
500 | name = __get_key_name(class->key, str); | ||
501 | |||
502 | return printk("%s", name); | ||
503 | } | ||
504 | |||
493 | static void print_lock_name(struct lock_class *class) | 505 | static void print_lock_name(struct lock_class *class) |
494 | { | 506 | { |
495 | char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS]; | 507 | char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS]; |
@@ -1053,6 +1065,56 @@ print_circular_bug_entry(struct lock_list *target, int depth) | |||
1053 | return 0; | 1065 | return 0; |
1054 | } | 1066 | } |
1055 | 1067 | ||
1068 | static void | ||
1069 | print_circular_lock_scenario(struct held_lock *src, | ||
1070 | struct held_lock *tgt, | ||
1071 | struct lock_list *prt) | ||
1072 | { | ||
1073 | struct lock_class *source = hlock_class(src); | ||
1074 | struct lock_class *target = hlock_class(tgt); | ||
1075 | struct lock_class *parent = prt->class; | ||
1076 | |||
1077 | /* | ||
1078 | * A direct locking problem where unsafe_class lock is taken | ||
1079 | * directly by safe_class lock, then all we need to show | ||
1080 | * is the deadlock scenario, as it is obvious that the | ||
1081 | * unsafe lock is taken under the safe lock. | ||
1082 | * | ||
1083 | * But if there is a chain instead, where the safe lock takes | ||
1084 | * an intermediate lock (middle_class) where this lock is | ||
1085 | * not the same as the safe lock, then the lock chain is | ||
1086 | * used to describe the problem. Otherwise we would need | ||
1087 | * to show a different CPU case for each link in the chain | ||
1088 | * from the safe_class lock to the unsafe_class lock. | ||
1089 | */ | ||
1090 | if (parent != source) { | ||
1091 | printk("Chain exists of:\n "); | ||
1092 | __print_lock_name(source); | ||
1093 | printk(" --> "); | ||
1094 | __print_lock_name(parent); | ||
1095 | printk(" --> "); | ||
1096 | __print_lock_name(target); | ||
1097 | printk("\n\n"); | ||
1098 | } | ||
1099 | |||
1100 | printk(" Possible unsafe locking scenario:\n\n"); | ||
1101 | printk(" CPU0 CPU1\n"); | ||
1102 | printk(" ---- ----\n"); | ||
1103 | printk(" lock("); | ||
1104 | __print_lock_name(target); | ||
1105 | printk(");\n"); | ||
1106 | printk(" lock("); | ||
1107 | __print_lock_name(parent); | ||
1108 | printk(");\n"); | ||
1109 | printk(" lock("); | ||
1110 | __print_lock_name(target); | ||
1111 | printk(");\n"); | ||
1112 | printk(" lock("); | ||
1113 | __print_lock_name(source); | ||
1114 | printk(");\n"); | ||
1115 | printk("\n *** DEADLOCK ***\n\n"); | ||
1116 | } | ||
1117 | |||
1056 | /* | 1118 | /* |
1057 | * When a circular dependency is detected, print the | 1119 | * When a circular dependency is detected, print the |
1058 | * header first: | 1120 | * header first: |
@@ -1096,6 +1158,7 @@ static noinline int print_circular_bug(struct lock_list *this, | |||
1096 | { | 1158 | { |
1097 | struct task_struct *curr = current; | 1159 | struct task_struct *curr = current; |
1098 | struct lock_list *parent; | 1160 | struct lock_list *parent; |
1161 | struct lock_list *first_parent; | ||
1099 | int depth; | 1162 | int depth; |
1100 | 1163 | ||
1101 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) | 1164 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
@@ -1109,6 +1172,7 @@ static noinline int print_circular_bug(struct lock_list *this, | |||
1109 | print_circular_bug_header(target, depth, check_src, check_tgt); | 1172 | print_circular_bug_header(target, depth, check_src, check_tgt); |
1110 | 1173 | ||
1111 | parent = get_lock_parent(target); | 1174 | parent = get_lock_parent(target); |
1175 | first_parent = parent; | ||
1112 | 1176 | ||
1113 | while (parent) { | 1177 | while (parent) { |
1114 | print_circular_bug_entry(parent, --depth); | 1178 | print_circular_bug_entry(parent, --depth); |
@@ -1116,6 +1180,9 @@ static noinline int print_circular_bug(struct lock_list *this, | |||
1116 | } | 1180 | } |
1117 | 1181 | ||
1118 | printk("\nother info that might help us debug this:\n\n"); | 1182 | printk("\nother info that might help us debug this:\n\n"); |
1183 | print_circular_lock_scenario(check_src, check_tgt, | ||
1184 | first_parent); | ||
1185 | |||
1119 | lockdep_print_held_locks(curr); | 1186 | lockdep_print_held_locks(curr); |
1120 | 1187 | ||
1121 | printk("\nstack backtrace:\n"); | 1188 | printk("\nstack backtrace:\n"); |
@@ -1314,7 +1381,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf, | |||
1314 | printk("\n"); | 1381 | printk("\n"); |
1315 | 1382 | ||
1316 | if (depth == 0 && (entry != root)) { | 1383 | if (depth == 0 && (entry != root)) { |
1317 | printk("lockdep:%s bad BFS generated tree\n", __func__); | 1384 | printk("lockdep:%s bad path found in chain graph\n", __func__); |
1318 | break; | 1385 | break; |
1319 | } | 1386 | } |
1320 | 1387 | ||
@@ -1325,6 +1392,62 @@ print_shortest_lock_dependencies(struct lock_list *leaf, | |||
1325 | return; | 1392 | return; |
1326 | } | 1393 | } |
1327 | 1394 | ||
1395 | static void | ||
1396 | print_irq_lock_scenario(struct lock_list *safe_entry, | ||
1397 | struct lock_list *unsafe_entry, | ||
1398 | struct lock_class *prev_class, | ||
1399 | struct lock_class *next_class) | ||
1400 | { | ||
1401 | struct lock_class *safe_class = safe_entry->class; | ||
1402 | struct lock_class *unsafe_class = unsafe_entry->class; | ||
1403 | struct lock_class *middle_class = prev_class; | ||
1404 | |||
1405 | if (middle_class == safe_class) | ||
1406 | middle_class = next_class; | ||
1407 | |||
1408 | /* | ||
1409 | * A direct locking problem where unsafe_class lock is taken | ||
1410 | * directly by safe_class lock, then all we need to show | ||
1411 | * is the deadlock scenario, as it is obvious that the | ||
1412 | * unsafe lock is taken under the safe lock. | ||
1413 | * | ||
1414 | * But if there is a chain instead, where the safe lock takes | ||
1415 | * an intermediate lock (middle_class) where this lock is | ||
1416 | * not the same as the safe lock, then the lock chain is | ||
1417 | * used to describe the problem. Otherwise we would need | ||
1418 | * to show a different CPU case for each link in the chain | ||
1419 | * from the safe_class lock to the unsafe_class lock. | ||
1420 | */ | ||
1421 | if (middle_class != unsafe_class) { | ||
1422 | printk("Chain exists of:\n "); | ||
1423 | __print_lock_name(safe_class); | ||
1424 | printk(" --> "); | ||
1425 | __print_lock_name(middle_class); | ||
1426 | printk(" --> "); | ||
1427 | __print_lock_name(unsafe_class); | ||
1428 | printk("\n\n"); | ||
1429 | } | ||
1430 | |||
1431 | printk(" Possible interrupt unsafe locking scenario:\n\n"); | ||
1432 | printk(" CPU0 CPU1\n"); | ||
1433 | printk(" ---- ----\n"); | ||
1434 | printk(" lock("); | ||
1435 | __print_lock_name(unsafe_class); | ||
1436 | printk(");\n"); | ||
1437 | printk(" local_irq_disable();\n"); | ||
1438 | printk(" lock("); | ||
1439 | __print_lock_name(safe_class); | ||
1440 | printk(");\n"); | ||
1441 | printk(" lock("); | ||
1442 | __print_lock_name(middle_class); | ||
1443 | printk(");\n"); | ||
1444 | printk(" <Interrupt>\n"); | ||
1445 | printk(" lock("); | ||
1446 | __print_lock_name(safe_class); | ||
1447 | printk(");\n"); | ||
1448 | printk("\n *** DEADLOCK ***\n\n"); | ||
1449 | } | ||
1450 | |||
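
The interrupt scenario printed above boils down to an irq-safe lock (one also taken from hardirq context) depending, possibly through an intermediate lock, on an irq-unsafe one. The following is a hedged kernel-style sketch of the two sides; the lock and handler names are hypothetical and the fragment is illustrative only, not part of this patch.

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    static DEFINE_SPINLOCK(safe_lock);      /* also taken from the irq handler   */
    static DEFINE_SPINLOCK(unsafe_lock);    /* only ever taken with irqs enabled */

    /* Creates the safe -> unsafe dependency the report complains about. */
    static void setup_path(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&safe_lock, flags);
        spin_lock(&unsafe_lock);
        spin_unlock(&unsafe_lock);
        spin_unlock_irqrestore(&safe_lock, flags);
    }

    /* "CPU0" in the report: holds unsafe_lock with interrupts enabled. */
    static void process_path(void)
    {
        spin_lock(&unsafe_lock);
        /*
         * If dev_irq() fires here while another CPU is inside
         * setup_path() spinning on unsafe_lock, neither side can
         * make progress.
         */
        spin_unlock(&unsafe_lock);
    }

    static irqreturn_t dev_irq(int irq, void *dev_id)
    {
        spin_lock(&safe_lock);
        spin_unlock(&safe_lock);
        return IRQ_HANDLED;
    }
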
1328 | static int | 1451 | static int |
1329 | print_bad_irq_dependency(struct task_struct *curr, | 1452 | print_bad_irq_dependency(struct task_struct *curr, |
1330 | struct lock_list *prev_root, | 1453 | struct lock_list *prev_root, |
@@ -1376,6 +1499,9 @@ print_bad_irq_dependency(struct task_struct *curr, | |||
1376 | print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); | 1499 | print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); |
1377 | 1500 | ||
1378 | printk("\nother info that might help us debug this:\n\n"); | 1501 | printk("\nother info that might help us debug this:\n\n"); |
1502 | print_irq_lock_scenario(backwards_entry, forwards_entry, | ||
1503 | hlock_class(prev), hlock_class(next)); | ||
1504 | |||
1379 | lockdep_print_held_locks(curr); | 1505 | lockdep_print_held_locks(curr); |
1380 | 1506 | ||
1381 | printk("\nthe dependencies between %s-irq-safe lock", irqclass); | 1507 | printk("\nthe dependencies between %s-irq-safe lock", irqclass); |
@@ -1539,6 +1665,26 @@ static inline void inc_chains(void) | |||
1539 | 1665 | ||
1540 | #endif | 1666 | #endif |
1541 | 1667 | ||
1668 | static void | ||
1669 | print_deadlock_scenario(struct held_lock *nxt, | ||
1670 | struct held_lock *prv) | ||
1671 | { | ||
1672 | struct lock_class *next = hlock_class(nxt); | ||
1673 | struct lock_class *prev = hlock_class(prv); | ||
1674 | |||
1675 | printk(" Possible unsafe locking scenario:\n\n"); | ||
1676 | printk(" CPU0\n"); | ||
1677 | printk(" ----\n"); | ||
1678 | printk(" lock("); | ||
1679 | __print_lock_name(prev); | ||
1680 | printk(");\n"); | ||
1681 | printk(" lock("); | ||
1682 | __print_lock_name(next); | ||
1683 | printk(");\n"); | ||
1684 | printk("\n *** DEADLOCK ***\n\n"); | ||
1685 | printk(" May be due to missing lock nesting notation\n\n"); | ||
1686 | } | ||
1687 | |||
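
This report also covers the legitimate case of taking two locks of the same class on purpose, for instance a parent and a child object of the same type, which is why it hints at missing nesting notation. A hedged sketch with a hypothetical structure, not taken from this patch:

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    struct node {
        struct mutex lock;
        struct node *parent;
    };

    /*
     * Plain mutex_lock() twice on the same class triggers the scenario
     * above; mutex_lock_nested() tells lockdep the second acquisition
     * is a deliberate step down the hierarchy.
     */
    static void lock_parent_and_child(struct node *parent, struct node *child)
    {
        mutex_lock(&parent->lock);
        mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
    }

    static void unlock_parent_and_child(struct node *parent, struct node *child)
    {
        mutex_unlock(&child->lock);
        mutex_unlock(&parent->lock);
    }
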
1542 | static int | 1688 | static int |
1543 | print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, | 1689 | print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, |
1544 | struct held_lock *next) | 1690 | struct held_lock *next) |
@@ -1557,6 +1703,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, | |||
1557 | print_lock(prev); | 1703 | print_lock(prev); |
1558 | 1704 | ||
1559 | printk("\nother info that might help us debug this:\n"); | 1705 | printk("\nother info that might help us debug this:\n"); |
1706 | print_deadlock_scenario(next, prev); | ||
1560 | lockdep_print_held_locks(curr); | 1707 | lockdep_print_held_locks(curr); |
1561 | 1708 | ||
1562 | printk("\nstack backtrace:\n"); | 1709 | printk("\nstack backtrace:\n"); |
@@ -1826,7 +1973,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, | |||
1826 | struct list_head *hash_head = chainhashentry(chain_key); | 1973 | struct list_head *hash_head = chainhashentry(chain_key); |
1827 | struct lock_chain *chain; | 1974 | struct lock_chain *chain; |
1828 | struct held_lock *hlock_curr, *hlock_next; | 1975 | struct held_lock *hlock_curr, *hlock_next; |
1829 | int i, j, n, cn; | 1976 | int i, j; |
1830 | 1977 | ||
1831 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 1978 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
1832 | return 0; | 1979 | return 0; |
@@ -1886,15 +2033,9 @@ cache_hit: | |||
1886 | } | 2033 | } |
1887 | i++; | 2034 | i++; |
1888 | chain->depth = curr->lockdep_depth + 1 - i; | 2035 | chain->depth = curr->lockdep_depth + 1 - i; |
1889 | cn = nr_chain_hlocks; | 2036 | if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { |
1890 | while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) { | 2037 | chain->base = nr_chain_hlocks; |
1891 | n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth); | 2038 | nr_chain_hlocks += chain->depth; |
1892 | if (n == cn) | ||
1893 | break; | ||
1894 | cn = n; | ||
1895 | } | ||
1896 | if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { | ||
1897 | chain->base = cn; | ||
1898 | for (j = 0; j < chain->depth - 1; j++, i++) { | 2039 | for (j = 0; j < chain->depth - 1; j++, i++) { |
1899 | int lock_id = curr->held_locks[i].class_idx - 1; | 2040 | int lock_id = curr->held_locks[i].class_idx - 1; |
1900 | chain_hlocks[chain->base + j] = lock_id; | 2041 | chain_hlocks[chain->base + j] = lock_id; |
@@ -2011,6 +2152,24 @@ static void check_chain_key(struct task_struct *curr) | |||
2011 | #endif | 2152 | #endif |
2012 | } | 2153 | } |
2013 | 2154 | ||
2155 | static void | ||
2156 | print_usage_bug_scenario(struct held_lock *lock) | ||
2157 | { | ||
2158 | struct lock_class *class = hlock_class(lock); | ||
2159 | |||
2160 | printk(" Possible unsafe locking scenario:\n\n"); | ||
2161 | printk(" CPU0\n"); | ||
2162 | printk(" ----\n"); | ||
2163 | printk(" lock("); | ||
2164 | __print_lock_name(class); | ||
2165 | printk(");\n"); | ||
2166 | printk(" <Interrupt>\n"); | ||
2167 | printk(" lock("); | ||
2168 | __print_lock_name(class); | ||
2169 | printk(");\n"); | ||
2170 | printk("\n *** DEADLOCK ***\n\n"); | ||
2171 | } | ||
2172 | |||
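
The single-CPU scenario above is the self-deadlock case: a lock held in process context is taken again from an interrupt on the same CPU. The usual fix is to keep interrupts off for the process-side critical section, roughly as in this hedged sketch with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    static DEFINE_SPINLOCK(dev_lock);

    /* Buggy: an interrupt arriving here re-enters dev_lock and spins forever. */
    static void process_side_buggy(void)
    {
        spin_lock(&dev_lock);
        /* ... touch device state ... */
        spin_unlock(&dev_lock);
    }

    /* Fixed: the interrupt cannot preempt the critical section. */
    static void process_side_fixed(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&dev_lock, flags);
        /* ... touch device state ... */
        spin_unlock_irqrestore(&dev_lock, flags);
    }

    static irqreturn_t dev_irq(int irq, void *dev_id)
    {
        spin_lock(&dev_lock);
        /* ... acknowledge the device ... */
        spin_unlock(&dev_lock);
        return IRQ_HANDLED;
    }
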
2014 | static int | 2173 | static int |
2015 | print_usage_bug(struct task_struct *curr, struct held_lock *this, | 2174 | print_usage_bug(struct task_struct *curr, struct held_lock *this, |
2016 | enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) | 2175 | enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) |
@@ -2039,6 +2198,8 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, | |||
2039 | 2198 | ||
2040 | print_irqtrace_events(curr); | 2199 | print_irqtrace_events(curr); |
2041 | printk("\nother info that might help us debug this:\n"); | 2200 | printk("\nother info that might help us debug this:\n"); |
2201 | print_usage_bug_scenario(this); | ||
2202 | |||
2042 | lockdep_print_held_locks(curr); | 2203 | lockdep_print_held_locks(curr); |
2043 | 2204 | ||
2044 | printk("\nstack backtrace:\n"); | 2205 | printk("\nstack backtrace:\n"); |
@@ -2073,6 +2234,10 @@ print_irq_inversion_bug(struct task_struct *curr, | |||
2073 | struct held_lock *this, int forwards, | 2234 | struct held_lock *this, int forwards, |
2074 | const char *irqclass) | 2235 | const char *irqclass) |
2075 | { | 2236 | { |
2237 | struct lock_list *entry = other; | ||
2238 | struct lock_list *middle = NULL; | ||
2239 | int depth; | ||
2240 | |||
2076 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) | 2241 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
2077 | return 0; | 2242 | return 0; |
2078 | 2243 | ||
@@ -2091,6 +2256,25 @@ print_irq_inversion_bug(struct task_struct *curr, | |||
2091 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); | 2256 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); |
2092 | 2257 | ||
2093 | printk("\nother info that might help us debug this:\n"); | 2258 | printk("\nother info that might help us debug this:\n"); |
2259 | |||
2260 | /* Find a middle lock (if one exists) */ | ||
2261 | depth = get_lock_depth(other); | ||
2262 | do { | ||
2263 | if (depth == 0 && (entry != root)) { | ||
2264 | printk("lockdep:%s bad path found in chain graph\n", __func__); | ||
2265 | break; | ||
2266 | } | ||
2267 | middle = entry; | ||
2268 | entry = get_lock_parent(entry); | ||
2269 | depth--; | ||
2270 | } while (entry && entry != root && (depth >= 0)); | ||
2271 | if (forwards) | ||
2272 | print_irq_lock_scenario(root, other, | ||
2273 | middle ? middle->class : root->class, other->class); | ||
2274 | else | ||
2275 | print_irq_lock_scenario(other, root, | ||
2276 | middle ? middle->class : other->class, root->class); | ||
2277 | |||
2094 | lockdep_print_held_locks(curr); | 2278 | lockdep_print_held_locks(curr); |
2095 | 2279 | ||
2096 | printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); | 2280 | printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); |
diff --git a/kernel/module.c b/kernel/module.c index d5938a5c19c4..22879725678d 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/kmemleak.h> | 57 | #include <linux/kmemleak.h> |
58 | #include <linux/jump_label.h> | 58 | #include <linux/jump_label.h> |
59 | #include <linux/pfn.h> | 59 | #include <linux/pfn.h> |
60 | #include <linux/bsearch.h> | ||
60 | 61 | ||
61 | #define CREATE_TRACE_POINTS | 62 | #define CREATE_TRACE_POINTS |
62 | #include <trace/events/module.h> | 63 | #include <trace/events/module.h> |
@@ -240,23 +241,24 @@ static bool each_symbol_in_section(const struct symsearch *arr, | |||
240 | struct module *owner, | 241 | struct module *owner, |
241 | bool (*fn)(const struct symsearch *syms, | 242 | bool (*fn)(const struct symsearch *syms, |
242 | struct module *owner, | 243 | struct module *owner, |
243 | unsigned int symnum, void *data), | 244 | void *data), |
244 | void *data) | 245 | void *data) |
245 | { | 246 | { |
246 | unsigned int i, j; | 247 | unsigned int j; |
247 | 248 | ||
248 | for (j = 0; j < arrsize; j++) { | 249 | for (j = 0; j < arrsize; j++) { |
249 | for (i = 0; i < arr[j].stop - arr[j].start; i++) | 250 | if (fn(&arr[j], owner, data)) |
250 | if (fn(&arr[j], owner, i, data)) | 251 | return true; |
251 | return true; | ||
252 | } | 252 | } |
253 | 253 | ||
254 | return false; | 254 | return false; |
255 | } | 255 | } |
256 | 256 | ||
257 | /* Returns true as soon as fn returns true, otherwise false. */ | 257 | /* Returns true as soon as fn returns true, otherwise false. */ |
258 | bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, | 258 | bool each_symbol_section(bool (*fn)(const struct symsearch *arr, |
259 | unsigned int symnum, void *data), void *data) | 259 | struct module *owner, |
260 | void *data), | ||
261 | void *data) | ||
260 | { | 262 | { |
261 | struct module *mod; | 263 | struct module *mod; |
262 | static const struct symsearch arr[] = { | 264 | static const struct symsearch arr[] = { |
@@ -309,7 +311,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, | |||
309 | } | 311 | } |
310 | return false; | 312 | return false; |
311 | } | 313 | } |
312 | EXPORT_SYMBOL_GPL(each_symbol); | 314 | EXPORT_SYMBOL_GPL(each_symbol_section); |
313 | 315 | ||
314 | struct find_symbol_arg { | 316 | struct find_symbol_arg { |
315 | /* Input */ | 317 | /* Input */ |
@@ -323,15 +325,12 @@ struct find_symbol_arg { | |||
323 | const struct kernel_symbol *sym; | 325 | const struct kernel_symbol *sym; |
324 | }; | 326 | }; |
325 | 327 | ||
326 | static bool find_symbol_in_section(const struct symsearch *syms, | 328 | static bool check_symbol(const struct symsearch *syms, |
327 | struct module *owner, | 329 | struct module *owner, |
328 | unsigned int symnum, void *data) | 330 | unsigned int symnum, void *data) |
329 | { | 331 | { |
330 | struct find_symbol_arg *fsa = data; | 332 | struct find_symbol_arg *fsa = data; |
331 | 333 | ||
332 | if (strcmp(syms->start[symnum].name, fsa->name) != 0) | ||
333 | return false; | ||
334 | |||
335 | if (!fsa->gplok) { | 334 | if (!fsa->gplok) { |
336 | if (syms->licence == GPL_ONLY) | 335 | if (syms->licence == GPL_ONLY) |
337 | return false; | 336 | return false; |
@@ -365,6 +364,30 @@ static bool find_symbol_in_section(const struct symsearch *syms, | |||
365 | return true; | 364 | return true; |
366 | } | 365 | } |
367 | 366 | ||
367 | static int cmp_name(const void *va, const void *vb) | ||
368 | { | ||
369 | const char *a; | ||
370 | const struct kernel_symbol *b; | ||
371 | a = va; b = vb; | ||
372 | return strcmp(a, b->name); | ||
373 | } | ||
374 | |||
375 | static bool find_symbol_in_section(const struct symsearch *syms, | ||
376 | struct module *owner, | ||
377 | void *data) | ||
378 | { | ||
379 | struct find_symbol_arg *fsa = data; | ||
380 | struct kernel_symbol *sym; | ||
381 | |||
382 | sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, | ||
383 | sizeof(struct kernel_symbol), cmp_name); | ||
384 | |||
385 | if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data)) | ||
386 | return true; | ||
387 | |||
388 | return false; | ||
389 | } | ||
390 | |||
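
The new lookup assumes each exported-symbol table is sorted by name at build time, so a binary search with a key-versus-element comparator (the same shape as cmp_name() above) can replace the old linear scan. A self-contained userspace sketch of the idea; the struct layout and symbols below are made up for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct kernel_symbol {              /* simplified stand-in */
        unsigned long value;
        const char *name;
    };

    /* The table must be sorted by name for bsearch() to work. */
    static const struct kernel_symbol symtab[] = {
        { 0x1000, "alpha_init"  },
        { 0x2000, "beta_read"   },
        { 0x3000, "gamma_write" },
    };

    /* bsearch() passes the key first, an array element second. */
    static int cmp_name(const void *va, const void *vb)
    {
        const char *key = va;
        const struct kernel_symbol *sym = vb;

        return strcmp(key, sym->name);
    }

    int main(void)
    {
        const struct kernel_symbol *sym;

        sym = bsearch("beta_read", symtab,
                      sizeof(symtab) / sizeof(symtab[0]),
                      sizeof(symtab[0]), cmp_name);
        if (sym)
            printf("found %s at %#lx\n", sym->name, sym->value);
        return 0;
    }
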
368 | /* Find a symbol and return it, along with, (optional) crc and | 391 | /* Find a symbol and return it, along with, (optional) crc and |
369 | * (optional) module which owns it. Needs preempt disabled or module_mutex. */ | 392 | * (optional) module which owns it. Needs preempt disabled or module_mutex. */ |
370 | const struct kernel_symbol *find_symbol(const char *name, | 393 | const struct kernel_symbol *find_symbol(const char *name, |
@@ -379,7 +402,7 @@ const struct kernel_symbol *find_symbol(const char *name, | |||
379 | fsa.gplok = gplok; | 402 | fsa.gplok = gplok; |
380 | fsa.warn = warn; | 403 | fsa.warn = warn; |
381 | 404 | ||
382 | if (each_symbol(find_symbol_in_section, &fsa)) { | 405 | if (each_symbol_section(find_symbol_in_section, &fsa)) { |
383 | if (owner) | 406 | if (owner) |
384 | *owner = fsa.owner; | 407 | *owner = fsa.owner; |
385 | if (crc) | 408 | if (crc) |
@@ -1607,27 +1630,28 @@ static void set_section_ro_nx(void *base, | |||
1607 | } | 1630 | } |
1608 | } | 1631 | } |
1609 | 1632 | ||
1610 | /* Setting memory back to RW+NX before releasing it */ | 1633 | static void unset_module_core_ro_nx(struct module *mod) |
1611 | void unset_section_ro_nx(struct module *mod, void *module_region) | ||
1612 | { | 1634 | { |
1613 | unsigned long total_pages; | 1635 | set_page_attributes(mod->module_core + mod->core_text_size, |
1614 | 1636 | mod->module_core + mod->core_size, | |
1615 | if (mod->module_core == module_region) { | 1637 | set_memory_x); |
1616 | /* Set core as NX+RW */ | 1638 | set_page_attributes(mod->module_core, |
1617 | total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size); | 1639 | mod->module_core + mod->core_ro_size, |
1618 | set_memory_nx((unsigned long)mod->module_core, total_pages); | 1640 | set_memory_rw); |
1619 | set_memory_rw((unsigned long)mod->module_core, total_pages); | 1641 | } |
1620 | 1642 | ||
1621 | } else if (mod->module_init == module_region) { | 1643 | static void unset_module_init_ro_nx(struct module *mod) |
1622 | /* Set init as NX+RW */ | 1644 | { |
1623 | total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size); | 1645 | set_page_attributes(mod->module_init + mod->init_text_size, |
1624 | set_memory_nx((unsigned long)mod->module_init, total_pages); | 1646 | mod->module_init + mod->init_size, |
1625 | set_memory_rw((unsigned long)mod->module_init, total_pages); | 1647 | set_memory_x); |
1626 | } | 1648 | set_page_attributes(mod->module_init, |
1649 | mod->module_init + mod->init_ro_size, | ||
1650 | set_memory_rw); | ||
1627 | } | 1651 | } |
1628 | 1652 | ||
1629 | /* Iterate through all modules and set each module's text as RW */ | 1653 | /* Iterate through all modules and set each module's text as RW */ |
1630 | void set_all_modules_text_rw() | 1654 | void set_all_modules_text_rw(void) |
1631 | { | 1655 | { |
1632 | struct module *mod; | 1656 | struct module *mod; |
1633 | 1657 | ||
@@ -1648,7 +1672,7 @@ void set_all_modules_text_rw() | |||
1648 | } | 1672 | } |
1649 | 1673 | ||
1650 | /* Iterate through all modules and set each module's text as RO */ | 1674 | /* Iterate through all modules and set each module's text as RO */ |
1651 | void set_all_modules_text_ro() | 1675 | void set_all_modules_text_ro(void) |
1652 | { | 1676 | { |
1653 | struct module *mod; | 1677 | struct module *mod; |
1654 | 1678 | ||
@@ -1669,7 +1693,8 @@ void set_all_modules_text_ro() | |||
1669 | } | 1693 | } |
1670 | #else | 1694 | #else |
1671 | static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { } | 1695 | static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { } |
1672 | static inline void unset_section_ro_nx(struct module *mod, void *module_region) { } | 1696 | static void unset_module_core_ro_nx(struct module *mod) { } |
1697 | static void unset_module_init_ro_nx(struct module *mod) { } | ||
1673 | #endif | 1698 | #endif |
1674 | 1699 | ||
1675 | /* Free a module, remove from lists, etc. */ | 1700 | /* Free a module, remove from lists, etc. */ |
@@ -1696,7 +1721,7 @@ static void free_module(struct module *mod) | |||
1696 | destroy_params(mod->kp, mod->num_kp); | 1721 | destroy_params(mod->kp, mod->num_kp); |
1697 | 1722 | ||
1698 | /* This may be NULL, but that's OK */ | 1723 | /* This may be NULL, but that's OK */ |
1699 | unset_section_ro_nx(mod, mod->module_init); | 1724 | unset_module_init_ro_nx(mod); |
1700 | module_free(mod, mod->module_init); | 1725 | module_free(mod, mod->module_init); |
1701 | kfree(mod->args); | 1726 | kfree(mod->args); |
1702 | percpu_modfree(mod); | 1727 | percpu_modfree(mod); |
@@ -1705,7 +1730,7 @@ static void free_module(struct module *mod) | |||
1705 | lockdep_free_key_range(mod->module_core, mod->core_size); | 1730 | lockdep_free_key_range(mod->module_core, mod->core_size); |
1706 | 1731 | ||
1707 | /* Finally, free the core (containing the module structure) */ | 1732 | /* Finally, free the core (containing the module structure) */ |
1708 | unset_section_ro_nx(mod, mod->module_core); | 1733 | unset_module_core_ro_nx(mod); |
1709 | module_free(mod, mod->module_core); | 1734 | module_free(mod, mod->module_core); |
1710 | 1735 | ||
1711 | #ifdef CONFIG_MPU | 1736 | #ifdef CONFIG_MPU |
@@ -2030,11 +2055,8 @@ static const struct kernel_symbol *lookup_symbol(const char *name, | |||
2030 | const struct kernel_symbol *start, | 2055 | const struct kernel_symbol *start, |
2031 | const struct kernel_symbol *stop) | 2056 | const struct kernel_symbol *stop) |
2032 | { | 2057 | { |
2033 | const struct kernel_symbol *ks = start; | 2058 | return bsearch(name, start, stop - start, |
2034 | for (; ks < stop; ks++) | 2059 | sizeof(struct kernel_symbol), cmp_name); |
2035 | if (strcmp(ks->name, name) == 0) | ||
2036 | return ks; | ||
2037 | return NULL; | ||
2038 | } | 2060 | } |
2039 | 2061 | ||
2040 | static int is_exported(const char *name, unsigned long value, | 2062 | static int is_exported(const char *name, unsigned long value, |
@@ -2931,10 +2953,11 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, | |||
2931 | mod->symtab = mod->core_symtab; | 2953 | mod->symtab = mod->core_symtab; |
2932 | mod->strtab = mod->core_strtab; | 2954 | mod->strtab = mod->core_strtab; |
2933 | #endif | 2955 | #endif |
2934 | unset_section_ro_nx(mod, mod->module_init); | 2956 | unset_module_init_ro_nx(mod); |
2935 | module_free(mod, mod->module_init); | 2957 | module_free(mod, mod->module_init); |
2936 | mod->module_init = NULL; | 2958 | mod->module_init = NULL; |
2937 | mod->init_size = 0; | 2959 | mod->init_size = 0; |
2960 | mod->init_ro_size = 0; | ||
2938 | mod->init_text_size = 0; | 2961 | mod->init_text_size = 0; |
2939 | mutex_unlock(&module_mutex); | 2962 | mutex_unlock(&module_mutex); |
2940 | 2963 | ||
diff --git a/kernel/params.c b/kernel/params.c index 7ab388a48a2e..ed72e1330862 100644 --- a/kernel/params.c +++ b/kernel/params.c | |||
@@ -297,21 +297,15 @@ EXPORT_SYMBOL(param_ops_charp); | |||
297 | int param_set_bool(const char *val, const struct kernel_param *kp) | 297 | int param_set_bool(const char *val, const struct kernel_param *kp) |
298 | { | 298 | { |
299 | bool v; | 299 | bool v; |
300 | int ret; | ||
300 | 301 | ||
301 | /* No equals means "set"... */ | 302 | /* No equals means "set"... */ |
302 | if (!val) val = "1"; | 303 | if (!val) val = "1"; |
303 | 304 | ||
304 | /* One of =[yYnN01] */ | 305 | /* One of =[yYnN01] */ |
305 | switch (val[0]) { | 306 | ret = strtobool(val, &v); |
306 | case 'y': case 'Y': case '1': | 307 | if (ret) |
307 | v = true; | 308 | return ret; |
308 | break; | ||
309 | case 'n': case 'N': case '0': | ||
310 | v = false; | ||
311 | break; | ||
312 | default: | ||
313 | return -EINVAL; | ||
314 | } | ||
315 | 309 | ||
316 | if (kp->flags & KPARAM_ISBOOL) | 310 | if (kp->flags & KPARAM_ISBOOL) |
317 | *(bool *)kp->arg = v; | 311 | *(bool *)kp->arg = v; |
@@ -821,15 +815,18 @@ ssize_t __modver_version_show(struct module_attribute *mattr, | |||
821 | return sprintf(buf, "%s\n", vattr->version); | 815 | return sprintf(buf, "%s\n", vattr->version); |
822 | } | 816 | } |
823 | 817 | ||
824 | extern struct module_version_attribute __start___modver[], __stop___modver[]; | 818 | extern const struct module_version_attribute *__start___modver[]; |
819 | extern const struct module_version_attribute *__stop___modver[]; | ||
825 | 820 | ||
826 | static void __init version_sysfs_builtin(void) | 821 | static void __init version_sysfs_builtin(void) |
827 | { | 822 | { |
828 | const struct module_version_attribute *vattr; | 823 | const struct module_version_attribute **p; |
829 | struct module_kobject *mk; | 824 | struct module_kobject *mk; |
830 | int err; | 825 | int err; |
831 | 826 | ||
832 | for (vattr = __start___modver; vattr < __stop___modver; vattr++) { | 827 | for (p = __start___modver; p < __stop___modver; p++) { |
828 | const struct module_version_attribute *vattr = *p; | ||
829 | |||
833 | mk = locate_module_kobject(vattr->module_name); | 830 | mk = locate_module_kobject(vattr->module_name); |
834 | if (mk) { | 831 | if (mk) { |
835 | err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); | 832 | err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); |
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 6de9a8fc3417..87f4d24b55b0 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -125,12 +125,6 @@ config PM_DEBUG | |||
125 | code. This is helpful when debugging and reporting PM bugs, like | 125 | code. This is helpful when debugging and reporting PM bugs, like |
126 | suspend support. | 126 | suspend support. |
127 | 127 | ||
128 | config PM_VERBOSE | ||
129 | bool "Verbose Power Management debugging" | ||
130 | depends on PM_DEBUG | ||
131 | ---help--- | ||
132 | This option enables verbose messages from the Power Management code. | ||
133 | |||
134 | config PM_ADVANCED_DEBUG | 128 | config PM_ADVANCED_DEBUG |
135 | bool "Extra PM attributes in sysfs for low-level debugging/testing" | 129 | bool "Extra PM attributes in sysfs for low-level debugging/testing" |
136 | depends on PM_DEBUG | 130 | depends on PM_DEBUG |
@@ -229,3 +223,7 @@ config PM_OPP | |||
229 | representing individual voltage domains and provides SOC | 223 | representing individual voltage domains and provides SOC |
230 | implementations a ready to use framework to manage OPPs. | 224 | implementations a ready to use framework to manage OPPs. |
231 | For more information, read <file:Documentation/power/opp.txt> | 225 | For more information, read <file:Documentation/power/opp.txt> |
226 | |||
227 | config PM_RUNTIME_CLK | ||
228 | def_bool y | ||
229 | depends on PM_RUNTIME && HAVE_CLK | ||
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 50aae660174d..f9bec56d8825 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -272,12 +272,7 @@ static int create_image(int platform_mode) | |||
272 | 272 | ||
273 | local_irq_disable(); | 273 | local_irq_disable(); |
274 | 274 | ||
275 | error = sysdev_suspend(PMSG_FREEZE); | 275 | error = syscore_suspend(); |
276 | if (!error) { | ||
277 | error = syscore_suspend(); | ||
278 | if (error) | ||
279 | sysdev_resume(); | ||
280 | } | ||
281 | if (error) { | 276 | if (error) { |
282 | printk(KERN_ERR "PM: Some system devices failed to power down, " | 277 | printk(KERN_ERR "PM: Some system devices failed to power down, " |
283 | "aborting hibernation\n"); | 278 | "aborting hibernation\n"); |
@@ -302,7 +297,6 @@ static int create_image(int platform_mode) | |||
302 | 297 | ||
303 | Power_up: | 298 | Power_up: |
304 | syscore_resume(); | 299 | syscore_resume(); |
305 | sysdev_resume(); | ||
306 | /* NOTE: dpm_resume_noirq() is just a resume() for devices | 300 | /* NOTE: dpm_resume_noirq() is just a resume() for devices |
307 | * that suspended with irqs off ... no overall powerup. | 301 | * that suspended with irqs off ... no overall powerup. |
308 | */ | 302 | */ |
@@ -333,20 +327,25 @@ static int create_image(int platform_mode) | |||
333 | 327 | ||
334 | int hibernation_snapshot(int platform_mode) | 328 | int hibernation_snapshot(int platform_mode) |
335 | { | 329 | { |
330 | pm_message_t msg = PMSG_RECOVER; | ||
336 | int error; | 331 | int error; |
337 | 332 | ||
338 | error = platform_begin(platform_mode); | 333 | error = platform_begin(platform_mode); |
339 | if (error) | 334 | if (error) |
340 | goto Close; | 335 | goto Close; |
341 | 336 | ||
337 | error = dpm_prepare(PMSG_FREEZE); | ||
338 | if (error) | ||
339 | goto Complete_devices; | ||
340 | |||
342 | /* Preallocate image memory before shutting down devices. */ | 341 | /* Preallocate image memory before shutting down devices. */ |
343 | error = hibernate_preallocate_memory(); | 342 | error = hibernate_preallocate_memory(); |
344 | if (error) | 343 | if (error) |
345 | goto Close; | 344 | goto Complete_devices; |
346 | 345 | ||
347 | suspend_console(); | 346 | suspend_console(); |
348 | pm_restrict_gfp_mask(); | 347 | pm_restrict_gfp_mask(); |
349 | error = dpm_suspend_start(PMSG_FREEZE); | 348 | error = dpm_suspend(PMSG_FREEZE); |
350 | if (error) | 349 | if (error) |
351 | goto Recover_platform; | 350 | goto Recover_platform; |
352 | 351 | ||
@@ -364,13 +363,17 @@ int hibernation_snapshot(int platform_mode) | |||
364 | if (error || !in_suspend) | 363 | if (error || !in_suspend) |
365 | swsusp_free(); | 364 | swsusp_free(); |
366 | 365 | ||
367 | dpm_resume_end(in_suspend ? | 366 | msg = in_suspend ? (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE; |
368 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | 367 | dpm_resume(msg); |
369 | 368 | ||
370 | if (error || !in_suspend) | 369 | if (error || !in_suspend) |
371 | pm_restore_gfp_mask(); | 370 | pm_restore_gfp_mask(); |
372 | 371 | ||
373 | resume_console(); | 372 | resume_console(); |
373 | |||
374 | Complete_devices: | ||
375 | dpm_complete(msg); | ||
376 | |||
374 | Close: | 377 | Close: |
375 | platform_end(platform_mode); | 378 | platform_end(platform_mode); |
376 | return error; | 379 | return error; |
@@ -409,12 +412,7 @@ static int resume_target_kernel(bool platform_mode) | |||
409 | 412 | ||
410 | local_irq_disable(); | 413 | local_irq_disable(); |
411 | 414 | ||
412 | error = sysdev_suspend(PMSG_QUIESCE); | 415 | error = syscore_suspend(); |
413 | if (!error) { | ||
414 | error = syscore_suspend(); | ||
415 | if (error) | ||
416 | sysdev_resume(); | ||
417 | } | ||
418 | if (error) | 416 | if (error) |
419 | goto Enable_irqs; | 417 | goto Enable_irqs; |
420 | 418 | ||
@@ -442,7 +440,6 @@ static int resume_target_kernel(bool platform_mode) | |||
442 | touch_softlockup_watchdog(); | 440 | touch_softlockup_watchdog(); |
443 | 441 | ||
444 | syscore_resume(); | 442 | syscore_resume(); |
445 | sysdev_resume(); | ||
446 | 443 | ||
447 | Enable_irqs: | 444 | Enable_irqs: |
448 | local_irq_enable(); | 445 | local_irq_enable(); |
@@ -528,7 +525,6 @@ int hibernation_platform_enter(void) | |||
528 | goto Platform_finish; | 525 | goto Platform_finish; |
529 | 526 | ||
530 | local_irq_disable(); | 527 | local_irq_disable(); |
531 | sysdev_suspend(PMSG_HIBERNATE); | ||
532 | syscore_suspend(); | 528 | syscore_suspend(); |
533 | if (pm_wakeup_pending()) { | 529 | if (pm_wakeup_pending()) { |
534 | error = -EAGAIN; | 530 | error = -EAGAIN; |
@@ -541,7 +537,6 @@ int hibernation_platform_enter(void) | |||
541 | 537 | ||
542 | Power_up: | 538 | Power_up: |
543 | syscore_resume(); | 539 | syscore_resume(); |
544 | sysdev_resume(); | ||
545 | local_irq_enable(); | 540 | local_irq_enable(); |
546 | enable_nonboot_cpus(); | 541 | enable_nonboot_cpus(); |
547 | 542 | ||
@@ -982,10 +977,33 @@ static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *att | |||
982 | 977 | ||
983 | power_attr(image_size); | 978 | power_attr(image_size); |
984 | 979 | ||
980 | static ssize_t reserved_size_show(struct kobject *kobj, | ||
981 | struct kobj_attribute *attr, char *buf) | ||
982 | { | ||
983 | return sprintf(buf, "%lu\n", reserved_size); | ||
984 | } | ||
985 | |||
986 | static ssize_t reserved_size_store(struct kobject *kobj, | ||
987 | struct kobj_attribute *attr, | ||
988 | const char *buf, size_t n) | ||
989 | { | ||
990 | unsigned long size; | ||
991 | |||
992 | if (sscanf(buf, "%lu", &size) == 1) { | ||
993 | reserved_size = size; | ||
994 | return n; | ||
995 | } | ||
996 | |||
997 | return -EINVAL; | ||
998 | } | ||
999 | |||
1000 | power_attr(reserved_size); | ||
1001 | |||
985 | static struct attribute * g[] = { | 1002 | static struct attribute * g[] = { |
986 | &disk_attr.attr, | 1003 | &disk_attr.attr, |
987 | &resume_attr.attr, | 1004 | &resume_attr.attr, |
988 | &image_size_attr.attr, | 1005 | &image_size_attr.attr, |
1006 | &reserved_size_attr.attr, | ||
989 | NULL, | 1007 | NULL, |
990 | }; | 1008 | }; |
991 | 1009 | ||
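
reserved_size_store() accepts exactly one unsigned decimal and rejects anything else with -EINVAL, so the new knob is driven with a plain byte count that bounds allocations made from drivers' ->freeze() callbacks. A tiny userspace sketch of the same accept/reject behaviour, illustrative only:

    #include <stdio.h>

    /* Mirrors reserved_size_store(): one unsigned long, or an error. */
    static long parse_reserved_size(const char *buf)
    {
        unsigned long size;

        if (sscanf(buf, "%lu", &size) == 1)
            return (long)size;
        return -22;                     /* -EINVAL */
    }

    int main(void)
    {
        printf("%ld\n", parse_reserved_size("2097152\n"));      /* accepted */
        printf("%ld\n", parse_reserved_size("not-a-number\n")); /* rejected */
        return 0;
    }
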
diff --git a/kernel/power/main.c b/kernel/power/main.c index de9aef8742f4..2981af4ce7cb 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -337,6 +337,7 @@ static int __init pm_init(void) | |||
337 | if (error) | 337 | if (error) |
338 | return error; | 338 | return error; |
339 | hibernate_image_size_init(); | 339 | hibernate_image_size_init(); |
340 | hibernate_reserved_size_init(); | ||
340 | power_kobj = kobject_create_and_add("power", NULL); | 341 | power_kobj = kobject_create_and_add("power", NULL); |
341 | if (!power_kobj) | 342 | if (!power_kobj) |
342 | return -ENOMEM; | 343 | return -ENOMEM; |
diff --git a/kernel/power/power.h b/kernel/power/power.h index 03634be55f62..9a00a0a26280 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
@@ -15,6 +15,7 @@ struct swsusp_info { | |||
15 | 15 | ||
16 | #ifdef CONFIG_HIBERNATION | 16 | #ifdef CONFIG_HIBERNATION |
17 | /* kernel/power/snapshot.c */ | 17 | /* kernel/power/snapshot.c */ |
18 | extern void __init hibernate_reserved_size_init(void); | ||
18 | extern void __init hibernate_image_size_init(void); | 19 | extern void __init hibernate_image_size_init(void); |
19 | 20 | ||
20 | #ifdef CONFIG_ARCH_HIBERNATION_HEADER | 21 | #ifdef CONFIG_ARCH_HIBERNATION_HEADER |
@@ -55,6 +56,7 @@ extern int hibernation_platform_enter(void); | |||
55 | 56 | ||
56 | #else /* !CONFIG_HIBERNATION */ | 57 | #else /* !CONFIG_HIBERNATION */ |
57 | 58 | ||
59 | static inline void hibernate_reserved_size_init(void) {} | ||
58 | static inline void hibernate_image_size_init(void) {} | 60 | static inline void hibernate_image_size_init(void) {} |
59 | #endif /* !CONFIG_HIBERNATION */ | 61 | #endif /* !CONFIG_HIBERNATION */ |
60 | 62 | ||
@@ -72,6 +74,8 @@ static struct kobj_attribute _name##_attr = { \ | |||
72 | 74 | ||
73 | /* Preferred image size in bytes (default 500 MB) */ | 75 | /* Preferred image size in bytes (default 500 MB) */ |
74 | extern unsigned long image_size; | 76 | extern unsigned long image_size; |
77 | /* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */ | ||
78 | extern unsigned long reserved_size; | ||
75 | extern int in_suspend; | 79 | extern int in_suspend; |
76 | extern dev_t swsusp_resume_device; | 80 | extern dev_t swsusp_resume_device; |
77 | extern sector_t swsusp_resume_block; | 81 | extern sector_t swsusp_resume_block; |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index ca0aacc24874..ace55889f702 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -41,16 +41,28 @@ static void swsusp_set_page_forbidden(struct page *); | |||
41 | static void swsusp_unset_page_forbidden(struct page *); | 41 | static void swsusp_unset_page_forbidden(struct page *); |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * Number of bytes to reserve for memory allocations made by device drivers | ||
45 | * from their ->freeze() and ->freeze_noirq() callbacks so that they don't | ||
46 | * cause image creation to fail (tunable via /sys/power/reserved_size). | ||
47 | */ | ||
48 | unsigned long reserved_size; | ||
49 | |||
50 | void __init hibernate_reserved_size_init(void) | ||
51 | { | ||
52 | reserved_size = SPARE_PAGES * PAGE_SIZE; | ||
53 | } | ||
54 | |||
55 | /* | ||
44 | * Preferred image size in bytes (tunable via /sys/power/image_size). | 56 | * Preferred image size in bytes (tunable via /sys/power/image_size). |
45 | * When it is set to N, the image creating code will do its best to | 57 | * When it is set to N, swsusp will do its best to ensure the image |
46 | * ensure the image size will not exceed N bytes, but if that is | 58 | * size will not exceed N bytes, but if that is impossible, it will |
47 | * impossible, it will try to create the smallest image possible. | 59 | * try to create the smallest image possible. |
48 | */ | 60 | */ |
49 | unsigned long image_size; | 61 | unsigned long image_size; |
50 | 62 | ||
51 | void __init hibernate_image_size_init(void) | 63 | void __init hibernate_image_size_init(void) |
52 | { | 64 | { |
53 | image_size = (totalram_pages / 3) * PAGE_SIZE; | 65 | image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE; |
54 | } | 66 | } |
55 | 67 | ||
56 | /* List of PBEs needed for restoring the pages that were allocated before | 68 | /* List of PBEs needed for restoring the pages that were allocated before |
@@ -1263,11 +1275,13 @@ static unsigned long minimum_image_size(unsigned long saveable) | |||
1263 | * frame in use. We also need a number of page frames to be free during | 1275 | * frame in use. We also need a number of page frames to be free during |
1264 | * hibernation for allocations made while saving the image and for device | 1276 | * hibernation for allocations made while saving the image and for device |
1265 | * drivers, in case they need to allocate memory from their hibernation | 1277 | * drivers, in case they need to allocate memory from their hibernation |
1266 | * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES, | 1278 | * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough |
1267 | * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES, | 1279 | * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1268 | * compute the total number of available page frames and allocate at least | 1280 | * /sys/power/reserved_size, respectively). To make this happen, we compute the |
1281 | * total number of available page frames and allocate at least | ||
1269 | * | 1282 | * |
1270 | * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES | 1283 | * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 |
1284 | * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) | ||
1271 | * | 1285 | * |
1272 | * of them, which corresponds to the maximum size of a hibernation image. | 1286 | * of them, which corresponds to the maximum size of a hibernation image. |
1273 | * | 1287 | * |
@@ -1322,7 +1336,8 @@ int hibernate_preallocate_memory(void) | |||
1322 | count -= totalreserve_pages; | 1336 | count -= totalreserve_pages; |
1323 | 1337 | ||
1324 | /* Compute the maximum number of saveable pages to leave in memory. */ | 1338 | /* Compute the maximum number of saveable pages to leave in memory. */ |
1325 | max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES; | 1339 | max_size = (count - (size + PAGES_FOR_IO)) / 2 |
1340 | - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); | ||
1326 | /* Compute the desired number of image pages specified by image_size. */ | 1341 | /* Compute the desired number of image pages specified by image_size. */ |
1327 | size = DIV_ROUND_UP(image_size, PAGE_SIZE); | 1342 | size = DIV_ROUND_UP(image_size, PAGE_SIZE); |
1328 | if (size > max_size) | 1343 | if (size > max_size) |
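
A worked instance of the new max_size expression may help; every number below is an assumed sample value (4 KiB pages, a stand-in for PAGES_FOR_IO, a 1 MiB reserved_size), not a kernel constant:

    #include <stdio.h>

    #define PAGE_SIZE           4096UL
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Assumed sample inputs. */
        unsigned long count = 524288;               /* ~2 GiB of 4 KiB frames   */
        unsigned long meta = 300;                   /* [metadata pages]         */
        unsigned long pages_for_io = 1024;          /* stand-in for PAGES_FOR_IO */
        unsigned long reserved_size = 1024 * 1024;  /* 1 MiB                    */

        unsigned long max_size = (count - (meta + pages_for_io)) / 2
                - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);

        /* (524288 - 1324) / 2 - 2 * 256 = 261482 - 512 = 260970 pages */
        printf("max_size = %lu pages (%lu MiB)\n",
               max_size, max_size * PAGE_SIZE >> 20);
        return 0;
    }
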
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 8935369d503a..1c41ba215419 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -163,19 +163,13 @@ static int suspend_enter(suspend_state_t state) | |||
163 | arch_suspend_disable_irqs(); | 163 | arch_suspend_disable_irqs(); |
164 | BUG_ON(!irqs_disabled()); | 164 | BUG_ON(!irqs_disabled()); |
165 | 165 | ||
166 | error = sysdev_suspend(PMSG_SUSPEND); | 166 | error = syscore_suspend(); |
167 | if (!error) { | ||
168 | error = syscore_suspend(); | ||
169 | if (error) | ||
170 | sysdev_resume(); | ||
171 | } | ||
172 | if (!error) { | 167 | if (!error) { |
173 | if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { | 168 | if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { |
174 | error = suspend_ops->enter(state); | 169 | error = suspend_ops->enter(state); |
175 | events_check_enabled = false; | 170 | events_check_enabled = false; |
176 | } | 171 | } |
177 | syscore_resume(); | 172 | syscore_resume(); |
178 | sysdev_resume(); | ||
179 | } | 173 | } |
180 | 174 | ||
181 | arch_suspend_enable_irqs(); | 175 | arch_suspend_enable_irqs(); |
@@ -216,7 +210,6 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
216 | goto Close; | 210 | goto Close; |
217 | } | 211 | } |
218 | suspend_console(); | 212 | suspend_console(); |
219 | pm_restrict_gfp_mask(); | ||
220 | suspend_test_start(); | 213 | suspend_test_start(); |
221 | error = dpm_suspend_start(PMSG_SUSPEND); | 214 | error = dpm_suspend_start(PMSG_SUSPEND); |
222 | if (error) { | 215 | if (error) { |
@@ -227,13 +220,12 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
227 | if (suspend_test(TEST_DEVICES)) | 220 | if (suspend_test(TEST_DEVICES)) |
228 | goto Recover_platform; | 221 | goto Recover_platform; |
229 | 222 | ||
230 | suspend_enter(state); | 223 | error = suspend_enter(state); |
231 | 224 | ||
232 | Resume_devices: | 225 | Resume_devices: |
233 | suspend_test_start(); | 226 | suspend_test_start(); |
234 | dpm_resume_end(PMSG_RESUME); | 227 | dpm_resume_end(PMSG_RESUME); |
235 | suspend_test_finish("resume devices"); | 228 | suspend_test_finish("resume devices"); |
236 | pm_restore_gfp_mask(); | ||
237 | resume_console(); | 229 | resume_console(); |
238 | Close: | 230 | Close: |
239 | if (suspend_ops->end) | 231 | if (suspend_ops->end) |
@@ -294,7 +286,9 @@ int enter_state(suspend_state_t state) | |||
294 | goto Finish; | 286 | goto Finish; |
295 | 287 | ||
296 | pr_debug("PM: Entering %s sleep\n", pm_states[state]); | 288 | pr_debug("PM: Entering %s sleep\n", pm_states[state]); |
289 | pm_restrict_gfp_mask(); | ||
297 | error = suspend_devices_and_enter(state); | 290 | error = suspend_devices_and_enter(state); |
291 | pm_restore_gfp_mask(); | ||
298 | 292 | ||
299 | Finish: | 293 | Finish: |
300 | pr_debug("PM: Finishing wakeup.\n"); | 294 | pr_debug("PM: Finishing wakeup.\n"); |
diff --git a/kernel/power/user.c b/kernel/power/user.c index c36c3b9e8a84..7d02d33be699 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -135,8 +135,10 @@ static int snapshot_release(struct inode *inode, struct file *filp) | |||
135 | free_basic_memory_bitmaps(); | 135 | free_basic_memory_bitmaps(); |
136 | data = filp->private_data; | 136 | data = filp->private_data; |
137 | free_all_swap_pages(data->swap); | 137 | free_all_swap_pages(data->swap); |
138 | if (data->frozen) | 138 | if (data->frozen) { |
139 | pm_restore_gfp_mask(); | ||
139 | thaw_processes(); | 140 | thaw_processes(); |
141 | } | ||
140 | pm_notifier_call_chain(data->mode == O_RDONLY ? | 142 | pm_notifier_call_chain(data->mode == O_RDONLY ? |
141 | PM_POST_HIBERNATION : PM_POST_RESTORE); | 143 | PM_POST_HIBERNATION : PM_POST_RESTORE); |
142 | atomic_inc(&snapshot_device_available); | 144 | atomic_inc(&snapshot_device_available); |
@@ -379,6 +381,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
379 | * PM_HIBERNATION_PREPARE | 381 | * PM_HIBERNATION_PREPARE |
380 | */ | 382 | */ |
381 | error = suspend_devices_and_enter(PM_SUSPEND_MEM); | 383 | error = suspend_devices_and_enter(PM_SUSPEND_MEM); |
384 | data->ready = 0; | ||
382 | break; | 385 | break; |
383 | 386 | ||
384 | case SNAPSHOT_PLATFORM_SUPPORT: | 387 | case SNAPSHOT_PLATFORM_SUPPORT: |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 0943ed7a4038..64b2a37c07d0 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -183,6 +183,14 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq) | |||
183 | return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); | 183 | return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); |
184 | } | 184 | } |
185 | 185 | ||
186 | typedef struct task_group *rt_rq_iter_t; | ||
187 | |||
188 | #define for_each_rt_rq(rt_rq, iter, rq) \ | ||
189 | for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \ | ||
190 | (&iter->list != &task_groups) && \ | ||
191 | (rt_rq = iter->rt_rq[cpu_of(rq)]); \ | ||
192 | iter = list_entry_rcu(iter->list.next, typeof(*iter), list)) | ||
193 | |||
186 | static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) | 194 | static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) |
187 | { | 195 | { |
188 | list_add_rcu(&rt_rq->leaf_rt_rq_list, | 196 | list_add_rcu(&rt_rq->leaf_rt_rq_list, |
@@ -288,6 +296,11 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq) | |||
288 | return ktime_to_ns(def_rt_bandwidth.rt_period); | 296 | return ktime_to_ns(def_rt_bandwidth.rt_period); |
289 | } | 297 | } |
290 | 298 | ||
299 | typedef struct rt_rq *rt_rq_iter_t; | ||
300 | |||
301 | #define for_each_rt_rq(rt_rq, iter, rq) \ | ||
302 | for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) | ||
303 | |||
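
The !RT_GROUP_SCHED variant above uses the one-pass for-loop idiom: the comma expression evaluates and discards iter so callers can declare it unconditionally in both configurations, and the rt_rq = NULL step ends the loop after a single element. A small userspace sketch of the same trick with hypothetical types:

    #include <stdio.h>

    struct rt_rq { int id; };
    struct rq { struct rt_rq rt; };

    typedef struct rt_rq *rt_rq_iter_t;

    /*
     * Visits exactly one element, rq->rt. iter exists only so callers
     * can declare it, matching the group-scheduling variant of the macro.
     */
    #define for_each_rt_rq(rt_rq, iter, rq) \
        for ((void) iter, rt_rq = &(rq)->rt; rt_rq; rt_rq = NULL)

    int main(void)
    {
        struct rq rq = { .rt = { .id = 7 } };
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        for_each_rt_rq(rt_rq, iter, &rq)
            printf("rt_rq %d\n", rt_rq->id);
        return 0;
    }
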
291 | static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) | 304 | static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) |
292 | { | 305 | { |
293 | } | 306 | } |
@@ -402,12 +415,13 @@ next: | |||
402 | static void __disable_runtime(struct rq *rq) | 415 | static void __disable_runtime(struct rq *rq) |
403 | { | 416 | { |
404 | struct root_domain *rd = rq->rd; | 417 | struct root_domain *rd = rq->rd; |
418 | rt_rq_iter_t iter; | ||
405 | struct rt_rq *rt_rq; | 419 | struct rt_rq *rt_rq; |
406 | 420 | ||
407 | if (unlikely(!scheduler_running)) | 421 | if (unlikely(!scheduler_running)) |
408 | return; | 422 | return; |
409 | 423 | ||
410 | for_each_leaf_rt_rq(rt_rq, rq) { | 424 | for_each_rt_rq(rt_rq, iter, rq) { |
411 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | 425 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); |
412 | s64 want; | 426 | s64 want; |
413 | int i; | 427 | int i; |
@@ -487,6 +501,7 @@ static void disable_runtime(struct rq *rq) | |||
487 | 501 | ||
488 | static void __enable_runtime(struct rq *rq) | 502 | static void __enable_runtime(struct rq *rq) |
489 | { | 503 | { |
504 | rt_rq_iter_t iter; | ||
490 | struct rt_rq *rt_rq; | 505 | struct rt_rq *rt_rq; |
491 | 506 | ||
492 | if (unlikely(!scheduler_running)) | 507 | if (unlikely(!scheduler_running)) |
@@ -495,7 +510,7 @@ static void __enable_runtime(struct rq *rq) | |||
495 | /* | 510 | /* |
496 | * Reset each runqueue's bandwidth settings | 511 | * Reset each runqueue's bandwidth settings |
497 | */ | 512 | */ |
498 | for_each_leaf_rt_rq(rt_rq, rq) { | 513 | for_each_rt_rq(rt_rq, iter, rq) { |
499 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | 514 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); |
500 | 515 | ||
501 | raw_spin_lock(&rt_b->rt_runtime_lock); | 516 | raw_spin_lock(&rt_b->rt_runtime_lock); |
@@ -1817,10 +1832,11 @@ extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); | |||
1817 | 1832 | ||
1818 | static void print_rt_stats(struct seq_file *m, int cpu) | 1833 | static void print_rt_stats(struct seq_file *m, int cpu) |
1819 | { | 1834 | { |
1835 | rt_rq_iter_t iter; | ||
1820 | struct rt_rq *rt_rq; | 1836 | struct rt_rq *rt_rq; |
1821 | 1837 | ||
1822 | rcu_read_lock(); | 1838 | rcu_read_lock(); |
1823 | for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu)) | 1839 | for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) |
1824 | print_rt_rq(m, cpu, rt_rq); | 1840 | print_rt_rq(m, cpu, rt_rq); |
1825 | rcu_read_unlock(); | 1841 | rcu_read_unlock(); |
1826 | } | 1842 | } |
diff --git a/kernel/sys.c b/kernel/sys.c index af468edf096a..f0c10385f30c 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -315,7 +315,6 @@ void kernel_restart_prepare(char *cmd) | |||
315 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); | 315 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); |
316 | system_state = SYSTEM_RESTART; | 316 | system_state = SYSTEM_RESTART; |
317 | device_shutdown(); | 317 | device_shutdown(); |
318 | sysdev_shutdown(); | ||
319 | syscore_shutdown(); | 318 | syscore_shutdown(); |
320 | } | 319 | } |
321 | 320 | ||
@@ -354,7 +353,6 @@ static void kernel_shutdown_prepare(enum system_states state) | |||
354 | void kernel_halt(void) | 353 | void kernel_halt(void) |
355 | { | 354 | { |
356 | kernel_shutdown_prepare(SYSTEM_HALT); | 355 | kernel_shutdown_prepare(SYSTEM_HALT); |
357 | sysdev_shutdown(); | ||
358 | syscore_shutdown(); | 356 | syscore_shutdown(); |
359 | printk(KERN_EMERG "System halted.\n"); | 357 | printk(KERN_EMERG "System halted.\n"); |
360 | kmsg_dump(KMSG_DUMP_HALT); | 358 | kmsg_dump(KMSG_DUMP_HALT); |
@@ -374,7 +372,6 @@ void kernel_power_off(void) | |||
374 | if (pm_power_off_prepare) | 372 | if (pm_power_off_prepare) |
375 | pm_power_off_prepare(); | 373 | pm_power_off_prepare(); |
376 | disable_nonboot_cpus(); | 374 | disable_nonboot_cpus(); |
377 | sysdev_shutdown(); | ||
378 | syscore_shutdown(); | 375 | syscore_shutdown(); |
379 | printk(KERN_EMERG "Power down.\n"); | 376 | printk(KERN_EMERG "Power down.\n"); |
380 | kmsg_dump(KMSG_DUMP_POWEROFF); | 377 | kmsg_dump(KMSG_DUMP_POWEROFF); |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 6519cf62d9cd..0e17c10f8a9d 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -685,8 +685,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) | |||
685 | /* Add clocksource to the clocksource list */ | 685 | /* Add clocksource to the clocksource list */
686 | mutex_lock(&clocksource_mutex); | 686 | mutex_lock(&clocksource_mutex); |
687 | clocksource_enqueue(cs); | 687 | clocksource_enqueue(cs); |
688 | clocksource_select(); | ||
689 | clocksource_enqueue_watchdog(cs); | 688 | clocksource_enqueue_watchdog(cs); |
689 | clocksource_select(); | ||
690 | mutex_unlock(&clocksource_mutex); | 690 | mutex_unlock(&clocksource_mutex); |
691 | return 0; | 691 | return 0; |
692 | } | 692 | } |
@@ -706,8 +706,8 @@ int clocksource_register(struct clocksource *cs) | |||
706 | 706 | ||
707 | mutex_lock(&clocksource_mutex); | 707 | mutex_lock(&clocksource_mutex); |
708 | clocksource_enqueue(cs); | 708 | clocksource_enqueue(cs); |
709 | clocksource_select(); | ||
710 | clocksource_enqueue_watchdog(cs); | 709 | clocksource_enqueue_watchdog(cs); |
710 | clocksource_select(); | ||
711 | mutex_unlock(&clocksource_mutex); | 711 | mutex_unlock(&clocksource_mutex); |
712 | return 0; | 712 | return 0; |
713 | } | 713 | } |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index da800ffa810c..723c7637e55a 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -522,10 +522,11 @@ static void tick_broadcast_init_next_event(struct cpumask *mask, | |||
522 | */ | 522 | */ |
523 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 523 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
524 | { | 524 | { |
525 | int cpu = smp_processor_id(); | ||
526 | |||
525 | /* Set it up only once ! */ | 527 | /* Set it up only once ! */ |
526 | if (bc->event_handler != tick_handle_oneshot_broadcast) { | 528 | if (bc->event_handler != tick_handle_oneshot_broadcast) { |
527 | int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; | 529 | int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; |
528 | int cpu = smp_processor_id(); | ||
529 | 530 | ||
530 | bc->event_handler = tick_handle_oneshot_broadcast; | 531 | bc->event_handler = tick_handle_oneshot_broadcast; |
531 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); | 532 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); |
@@ -551,6 +552,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
551 | tick_broadcast_set_event(tick_next_period, 1); | 552 | tick_broadcast_set_event(tick_next_period, 1); |
552 | } else | 553 | } else |
553 | bc->next_event.tv64 = KTIME_MAX; | 554 | bc->next_event.tv64 = KTIME_MAX; |
555 | } else { | ||
556 | /* | ||
557 | * The first cpu which switches to oneshot mode sets | ||
558 | * the bit for all other cpus which are in the general | ||
559 | * (periodic) broadcast mask. So the bit is set and | ||
560 | * would prevent the first broadcast enter after this | ||
561 | * to program the bc device. | ||
562 | */ | ||
563 | tick_broadcast_clear_oneshot(cpu); | ||
554 | } | 564 | } |
555 | } | 565 | } |
556 | 566 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ee24fa1935ac..d017c2c82c44 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -39,20 +39,26 @@ | |||
39 | #include "trace_stat.h" | 39 | #include "trace_stat.h" |
40 | 40 | ||
41 | #define FTRACE_WARN_ON(cond) \ | 41 | #define FTRACE_WARN_ON(cond) \ |
42 | do { \ | 42 | ({ \ |
43 | if (WARN_ON(cond)) \ | 43 | int ___r = cond; \ |
44 | if (WARN_ON(___r)) \ | ||
44 | ftrace_kill(); \ | 45 | ftrace_kill(); \ |
45 | } while (0) | 46 | ___r; \ |
47 | }) | ||
46 | 48 | ||
47 | #define FTRACE_WARN_ON_ONCE(cond) \ | 49 | #define FTRACE_WARN_ON_ONCE(cond) \ |
48 | do { \ | 50 | ({ \ |
49 | if (WARN_ON_ONCE(cond)) \ | 51 | int ___r = cond; \ |
52 | if (WARN_ON_ONCE(___r)) \ | ||
50 | ftrace_kill(); \ | 53 | ftrace_kill(); \ |
51 | } while (0) | 54 | ___r; \ |
55 | }) | ||
52 | 56 | ||
53 | /* hash bits for specific function selection */ | 57 | /* hash bits for specific function selection */ |
54 | #define FTRACE_HASH_BITS 7 | 58 | #define FTRACE_HASH_BITS 7 |
55 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) | 59 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) |
60 | #define FTRACE_HASH_DEFAULT_BITS 10 | ||
61 | #define FTRACE_HASH_MAX_BITS 12 | ||
56 | 62 | ||
57 | /* ftrace_enabled is a method to turn ftrace on or off */ | 63 | /* ftrace_enabled is a method to turn ftrace on or off */ |
58 | int ftrace_enabled __read_mostly; | 64 | int ftrace_enabled __read_mostly; |
@@ -81,23 +87,29 @@ static struct ftrace_ops ftrace_list_end __read_mostly = | |||
81 | .func = ftrace_stub, | 87 | .func = ftrace_stub, |
82 | }; | 88 | }; |
83 | 89 | ||
84 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; | 90 | static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end; |
91 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; | ||
85 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 92 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
86 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; | 93 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; |
87 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | 94 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; |
95 | static struct ftrace_ops global_ops; | ||
96 | |||
97 | static void | ||
98 | ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); | ||
88 | 99 | ||
89 | /* | 100 | /* |
90 | * Traverse the ftrace_list, invoking all entries. The reason that we | 101 | * Traverse the ftrace_global_list, invoking all entries. The reason that we |
91 | * can use rcu_dereference_raw() is that elements removed from this list | 102 | * can use rcu_dereference_raw() is that elements removed from this list |
92 | * are simply leaked, so there is no need to interact with a grace-period | 103 | * are simply leaked, so there is no need to interact with a grace-period |
93 | * mechanism. The rcu_dereference_raw() calls are needed to handle | 104 | * mechanism. The rcu_dereference_raw() calls are needed to handle |
94 | * concurrent insertions into the ftrace_list. | 105 | * concurrent insertions into the ftrace_global_list. |
95 | * | 106 | * |
96 | * Silly Alpha and silly pointer-speculation compiler optimizations! | 107 | * Silly Alpha and silly pointer-speculation compiler optimizations! |
97 | */ | 108 | */ |
98 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | 109 | static void ftrace_global_list_func(unsigned long ip, |
110 | unsigned long parent_ip) | ||
99 | { | 111 | { |
100 | struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/ | 112 | struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/ |
101 | 113 | ||
102 | while (op != &ftrace_list_end) { | 114 | while (op != &ftrace_list_end) { |
103 | op->func(ip, parent_ip); | 115 | op->func(ip, parent_ip); |
@@ -147,46 +159,69 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) | |||
147 | } | 159 | } |
148 | #endif | 160 | #endif |
149 | 161 | ||
150 | static int __register_ftrace_function(struct ftrace_ops *ops) | 162 | static void update_global_ops(void) |
151 | { | 163 | { |
152 | ops->next = ftrace_list; | 164 | ftrace_func_t func; |
165 | |||
153 | /* | 166 | /* |
154 | * We are entering ops into the ftrace_list but another | 167 | * If there's only one function registered, then call that |
155 | * CPU might be walking that list. We need to make sure | 168 | * function directly. Otherwise, we need to iterate over the |
156 | * the ops->next pointer is valid before another CPU sees | 169 | * registered callers. |
157 | * the ops pointer included into the ftrace_list. | ||
158 | */ | 170 | */ |
159 | rcu_assign_pointer(ftrace_list, ops); | 171 | if (ftrace_global_list == &ftrace_list_end || |
172 | ftrace_global_list->next == &ftrace_list_end) | ||
173 | func = ftrace_global_list->func; | ||
174 | else | ||
175 | func = ftrace_global_list_func; | ||
160 | 176 | ||
161 | if (ftrace_enabled) { | 177 | /* If we filter on pids, update to use the pid function */ |
162 | ftrace_func_t func; | 178 | if (!list_empty(&ftrace_pids)) { |
179 | set_ftrace_pid_function(func); | ||
180 | func = ftrace_pid_func; | ||
181 | } | ||
163 | 182 | ||
164 | if (ops->next == &ftrace_list_end) | 183 | global_ops.func = func; |
165 | func = ops->func; | 184 | } |
166 | else | ||
167 | func = ftrace_list_func; | ||
168 | 185 | ||
169 | if (!list_empty(&ftrace_pids)) { | 186 | static void update_ftrace_function(void) |
170 | set_ftrace_pid_function(func); | 187 | { |
171 | func = ftrace_pid_func; | 188 | ftrace_func_t func; |
172 | } | 189 | |
190 | update_global_ops(); | ||
191 | |||
192 | /* | ||
193 | * If we are at the end of the list and this ops is | ||
194 | * not dynamic, then have the mcount trampoline call | ||
195 | * the function directly | ||
196 | */ | ||
197 | if (ftrace_ops_list == &ftrace_list_end || | ||
198 | (ftrace_ops_list->next == &ftrace_list_end && | ||
199 | !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC))) | ||
200 | func = ftrace_ops_list->func; | ||
201 | else | ||
202 | func = ftrace_ops_list_func; | ||
173 | 203 | ||
174 | /* | ||
175 | * For one func, simply call it directly. | ||
176 | * For more than one func, call the chain. | ||
177 | */ | ||
178 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 204 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
179 | ftrace_trace_function = func; | 205 | ftrace_trace_function = func; |
180 | #else | 206 | #else |
181 | __ftrace_trace_function = func; | 207 | __ftrace_trace_function = func; |
182 | ftrace_trace_function = ftrace_test_stop_func; | 208 | ftrace_trace_function = ftrace_test_stop_func; |
183 | #endif | 209 | #endif |
184 | } | 210 | } |
185 | 211 | ||
186 | return 0; | 212 | static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) |
213 | { | ||
214 | ops->next = *list; | ||
215 | /* | ||
216 | * We are entering ops into the list but another | ||
217 | * CPU might be walking that list. We need to make sure | ||
218 | * the ops->next pointer is valid before another CPU sees | ||
219 | * the ops pointer included into the list. | ||
220 | */ | ||
221 | rcu_assign_pointer(*list, ops); | ||
187 | } | 222 | } |
188 | 223 | ||
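add_ftrace_ops() above depends on ordering: ops->next is filled in while the node is still private, and only then does rcu_assign_pointer() publish it as the new list head, so a lockless reader never walks through a half-initialized entry. Below is a minimal userspace sketch of the same publish pattern, with C11 release/acquire atomics standing in for rcu_assign_pointer()/rcu_dereference_raw(); all demo_* names are invented for illustration.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct demo_ops {
	void (*func)(unsigned long ip);
	struct demo_ops *next;
};

/* Lockless readers walk this list; writers only ever prepend. */
static _Atomic(struct demo_ops *) demo_list = NULL;

static void demo_add_ops(struct demo_ops *ops)
{
	/* Fill in ->next first, while the node is still private. */
	ops->next = atomic_load_explicit(&demo_list, memory_order_relaxed);
	/*
	 * Release store: everything written to *ops above becomes visible
	 * before any reader can observe the new list head (the role
	 * rcu_assign_pointer() plays in the patch).
	 */
	atomic_store_explicit(&demo_list, ops, memory_order_release);
}

static void demo_call_all(unsigned long ip)
{
	struct demo_ops *op =
		atomic_load_explicit(&demo_list, memory_order_acquire);

	for (; op; op = op->next)
		op->func(ip);
}

static void hello(unsigned long ip)
{
	printf("traced %#lx\n", ip);
}

int main(void)
{
	struct demo_ops a = { .func = hello };

	demo_add_ops(&a);
	demo_call_all(0x81000000UL);
	return 0;
}

Removal is the other half of the story: as the comment near ftrace_global_list_func() notes, removed entries are simply leaked, and dynamic ops are only freed after synchronize_sched(), so readers never follow a dangling ->next.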
189 | static int __unregister_ftrace_function(struct ftrace_ops *ops) | 224 | static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) |
190 | { | 225 | { |
191 | struct ftrace_ops **p; | 226 | struct ftrace_ops **p; |
192 | 227 | ||
@@ -194,13 +229,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
194 | * If we are removing the last function, then simply point | 229 | * If we are removing the last function, then simply point |
195 | * to the ftrace_stub. | 230 | * to the ftrace_stub. |
196 | */ | 231 | */ |
197 | if (ftrace_list == ops && ops->next == &ftrace_list_end) { | 232 | if (*list == ops && ops->next == &ftrace_list_end) { |
198 | ftrace_trace_function = ftrace_stub; | 233 | *list = &ftrace_list_end; |
199 | ftrace_list = &ftrace_list_end; | ||
200 | return 0; | 234 | return 0; |
201 | } | 235 | } |
202 | 236 | ||
203 | for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) | 237 | for (p = list; *p != &ftrace_list_end; p = &(*p)->next) |
204 | if (*p == ops) | 238 | if (*p == ops) |
205 | break; | 239 | break; |
206 | 240 | ||
@@ -208,53 +242,83 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
208 | return -1; | 242 | return -1; |
209 | 243 | ||
210 | *p = (*p)->next; | 244 | *p = (*p)->next; |
245 | return 0; | ||
246 | } | ||
211 | 247 | ||
212 | if (ftrace_enabled) { | 248 | static int __register_ftrace_function(struct ftrace_ops *ops) |
213 | /* If we only have one func left, then call that directly */ | 249 | { |
214 | if (ftrace_list->next == &ftrace_list_end) { | 250 | if (ftrace_disabled) |
215 | ftrace_func_t func = ftrace_list->func; | 251 | return -ENODEV; |
216 | 252 | ||
217 | if (!list_empty(&ftrace_pids)) { | 253 | if (FTRACE_WARN_ON(ops == &global_ops)) |
218 | set_ftrace_pid_function(func); | 254 | return -EINVAL; |
219 | func = ftrace_pid_func; | 255 | |
220 | } | 256 | if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) |
221 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 257 | return -EBUSY; |
222 | ftrace_trace_function = func; | 258 | |
223 | #else | 259 | if (!core_kernel_data((unsigned long)ops)) |
224 | __ftrace_trace_function = func; | 260 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; |
225 | #endif | 261 | |
226 | } | 262 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { |
227 | } | 263 | int first = ftrace_global_list == &ftrace_list_end; |
264 | add_ftrace_ops(&ftrace_global_list, ops); | ||
265 | ops->flags |= FTRACE_OPS_FL_ENABLED; | ||
266 | if (first) | ||
267 | add_ftrace_ops(&ftrace_ops_list, &global_ops); | ||
268 | } else | ||
269 | add_ftrace_ops(&ftrace_ops_list, ops); | ||
270 | |||
271 | if (ftrace_enabled) | ||
272 | update_ftrace_function(); | ||
228 | 273 | ||
229 | return 0; | 274 | return 0; |
230 | } | 275 | } |
231 | 276 | ||
232 | static void ftrace_update_pid_func(void) | 277 | static int __unregister_ftrace_function(struct ftrace_ops *ops) |
233 | { | 278 | { |
234 | ftrace_func_t func; | 279 | int ret; |
235 | 280 | ||
236 | if (ftrace_trace_function == ftrace_stub) | 281 | if (ftrace_disabled) |
237 | return; | 282 | return -ENODEV; |
238 | 283 | ||
239 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 284 | if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) |
240 | func = ftrace_trace_function; | 285 | return -EBUSY; |
241 | #else | ||
242 | func = __ftrace_trace_function; | ||
243 | #endif | ||
244 | 286 | ||
245 | if (!list_empty(&ftrace_pids)) { | 287 | if (FTRACE_WARN_ON(ops == &global_ops)) |
246 | set_ftrace_pid_function(func); | 288 | return -EINVAL; |
247 | func = ftrace_pid_func; | ||
248 | } else { | ||
249 | if (func == ftrace_pid_func) | ||
250 | func = ftrace_pid_function; | ||
251 | } | ||
252 | 289 | ||
253 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | 290 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { |
254 | ftrace_trace_function = func; | 291 | ret = remove_ftrace_ops(&ftrace_global_list, ops); |
255 | #else | 292 | if (!ret && ftrace_global_list == &ftrace_list_end) |
256 | __ftrace_trace_function = func; | 293 | ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops); |
257 | #endif | 294 | if (!ret) |
295 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | ||
296 | } else | ||
297 | ret = remove_ftrace_ops(&ftrace_ops_list, ops); | ||
298 | |||
299 | if (ret < 0) | ||
300 | return ret; | ||
301 | |||
302 | if (ftrace_enabled) | ||
303 | update_ftrace_function(); | ||
304 | |||
305 | /* | ||
306 | * Dynamic ops may be freed, we must make sure that all | ||
307 | * callers are done before leaving this function. | ||
308 | */ | ||
309 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) | ||
310 | synchronize_sched(); | ||
311 | |||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static void ftrace_update_pid_func(void) | ||
316 | { | ||
317 | /* Only do something if we are tracing something */ | ||
318 | if (ftrace_trace_function == ftrace_stub) | ||
319 | return; | ||
320 | |||
321 | update_ftrace_function(); | ||
258 | } | 322 | } |
259 | 323 | ||
260 | #ifdef CONFIG_FUNCTION_PROFILER | 324 | #ifdef CONFIG_FUNCTION_PROFILER |
@@ -888,8 +952,35 @@ enum { | |||
888 | FTRACE_START_FUNC_RET = (1 << 3), | 952 | FTRACE_START_FUNC_RET = (1 << 3), |
889 | FTRACE_STOP_FUNC_RET = (1 << 4), | 953 | FTRACE_STOP_FUNC_RET = (1 << 4), |
890 | }; | 954 | }; |
955 | struct ftrace_func_entry { | ||
956 | struct hlist_node hlist; | ||
957 | unsigned long ip; | ||
958 | }; | ||
891 | 959 | ||
892 | static int ftrace_filtered; | 960 | struct ftrace_hash { |
961 | unsigned long size_bits; | ||
962 | struct hlist_head *buckets; | ||
963 | unsigned long count; | ||
964 | struct rcu_head rcu; | ||
965 | }; | ||
966 | |||
967 | /* | ||
968 | * We make these constant because no one should touch them, | ||
969 | * but they are used as the default "empty hash" to avoid allocating | ||
970 | * it all the time. They are placed in a read-only section so that | ||
971 | * any attempt to modify them will cause an exception. | ||
972 | */ | ||
973 | static const struct hlist_head empty_buckets[1]; | ||
974 | static const struct ftrace_hash empty_hash = { | ||
975 | .buckets = (struct hlist_head *)empty_buckets, | ||
976 | }; | ||
977 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) | ||
978 | |||
979 | static struct ftrace_ops global_ops = { | ||
980 | .func = ftrace_stub, | ||
981 | .notrace_hash = EMPTY_HASH, | ||
982 | .filter_hash = EMPTY_HASH, | ||
983 | }; | ||
893 | 984 | ||
894 | static struct dyn_ftrace *ftrace_new_addrs; | 985 | static struct dyn_ftrace *ftrace_new_addrs; |
895 | 986 | ||
@@ -912,6 +1003,269 @@ static struct ftrace_page *ftrace_pages; | |||
912 | 1003 | ||
913 | static struct dyn_ftrace *ftrace_free_records; | 1004 | static struct dyn_ftrace *ftrace_free_records; |
914 | 1005 | ||
1006 | static struct ftrace_func_entry * | ||
1007 | ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) | ||
1008 | { | ||
1009 | unsigned long key; | ||
1010 | struct ftrace_func_entry *entry; | ||
1011 | struct hlist_head *hhd; | ||
1012 | struct hlist_node *n; | ||
1013 | |||
1014 | if (!hash->count) | ||
1015 | return NULL; | ||
1016 | |||
1017 | if (hash->size_bits > 0) | ||
1018 | key = hash_long(ip, hash->size_bits); | ||
1019 | else | ||
1020 | key = 0; | ||
1021 | |||
1022 | hhd = &hash->buckets[key]; | ||
1023 | |||
1024 | hlist_for_each_entry_rcu(entry, n, hhd, hlist) { | ||
1025 | if (entry->ip == ip) | ||
1026 | return entry; | ||
1027 | } | ||
1028 | return NULL; | ||
1029 | } | ||
1030 | |||
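ftrace_lookup_ip() above is a plain chained hash lookup: hash the instruction pointer into one of 1 << size_bits buckets and scan the short chain. Here is a self-contained userspace equivalent, with a simple multiplicative hash standing in for the kernel's hash_long(); the names and the hash constant are illustrative only.

#include <stddef.h>
#include <stdio.h>

#define DEMO_BITS 4
#define DEMO_SIZE (1u << DEMO_BITS)

struct demo_entry {
	unsigned long ip;
	struct demo_entry *next;
};

static struct demo_entry *demo_buckets[DEMO_SIZE];

/* Cheap stand-in for hash_long(): multiply and keep the top bits. */
static unsigned int demo_hash(unsigned long ip)
{
	return (unsigned int)((ip * 0x61C8864680B583EBull) >> (64 - DEMO_BITS));
}

static struct demo_entry *demo_lookup(unsigned long ip)
{
	struct demo_entry *e = demo_buckets[demo_hash(ip)];

	for (; e; e = e->next)
		if (e->ip == ip)
			return e;
	return NULL;
}

static void demo_add(struct demo_entry *e)
{
	unsigned int key = demo_hash(e->ip);

	e->next = demo_buckets[key];
	demo_buckets[key] = e;
}

int main(void)
{
	struct demo_entry e = { .ip = 0x81000000ul };

	demo_add(&e);
	printf("found: %d\n", demo_lookup(0x81000000ul) != NULL);
	printf("found: %d\n", demo_lookup(0x1234ul) != NULL);
	return 0;
}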
1031 | static void __add_hash_entry(struct ftrace_hash *hash, | ||
1032 | struct ftrace_func_entry *entry) | ||
1033 | { | ||
1034 | struct hlist_head *hhd; | ||
1035 | unsigned long key; | ||
1036 | |||
1037 | if (hash->size_bits) | ||
1038 | key = hash_long(entry->ip, hash->size_bits); | ||
1039 | else | ||
1040 | key = 0; | ||
1041 | |||
1042 | hhd = &hash->buckets[key]; | ||
1043 | hlist_add_head(&entry->hlist, hhd); | ||
1044 | hash->count++; | ||
1045 | } | ||
1046 | |||
1047 | static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) | ||
1048 | { | ||
1049 | struct ftrace_func_entry *entry; | ||
1050 | |||
1051 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | ||
1052 | if (!entry) | ||
1053 | return -ENOMEM; | ||
1054 | |||
1055 | entry->ip = ip; | ||
1056 | __add_hash_entry(hash, entry); | ||
1057 | |||
1058 | return 0; | ||
1059 | } | ||
1060 | |||
1061 | static void | ||
1062 | free_hash_entry(struct ftrace_hash *hash, | ||
1063 | struct ftrace_func_entry *entry) | ||
1064 | { | ||
1065 | hlist_del(&entry->hlist); | ||
1066 | kfree(entry); | ||
1067 | hash->count--; | ||
1068 | } | ||
1069 | |||
1070 | static void | ||
1071 | remove_hash_entry(struct ftrace_hash *hash, | ||
1072 | struct ftrace_func_entry *entry) | ||
1073 | { | ||
1074 | hlist_del(&entry->hlist); | ||
1075 | hash->count--; | ||
1076 | } | ||
1077 | |||
1078 | static void ftrace_hash_clear(struct ftrace_hash *hash) | ||
1079 | { | ||
1080 | struct hlist_head *hhd; | ||
1081 | struct hlist_node *tp, *tn; | ||
1082 | struct ftrace_func_entry *entry; | ||
1083 | int size = 1 << hash->size_bits; | ||
1084 | int i; | ||
1085 | |||
1086 | if (!hash->count) | ||
1087 | return; | ||
1088 | |||
1089 | for (i = 0; i < size; i++) { | ||
1090 | hhd = &hash->buckets[i]; | ||
1091 | hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) | ||
1092 | free_hash_entry(hash, entry); | ||
1093 | } | ||
1094 | FTRACE_WARN_ON(hash->count); | ||
1095 | } | ||
1096 | |||
1097 | static void free_ftrace_hash(struct ftrace_hash *hash) | ||
1098 | { | ||
1099 | if (!hash || hash == EMPTY_HASH) | ||
1100 | return; | ||
1101 | ftrace_hash_clear(hash); | ||
1102 | kfree(hash->buckets); | ||
1103 | kfree(hash); | ||
1104 | } | ||
1105 | |||
1106 | static void __free_ftrace_hash_rcu(struct rcu_head *rcu) | ||
1107 | { | ||
1108 | struct ftrace_hash *hash; | ||
1109 | |||
1110 | hash = container_of(rcu, struct ftrace_hash, rcu); | ||
1111 | free_ftrace_hash(hash); | ||
1112 | } | ||
1113 | |||
1114 | static void free_ftrace_hash_rcu(struct ftrace_hash *hash) | ||
1115 | { | ||
1116 | if (!hash || hash == EMPTY_HASH) | ||
1117 | return; | ||
1118 | call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); | ||
1119 | } | ||
1120 | |||
1121 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) | ||
1122 | { | ||
1123 | struct ftrace_hash *hash; | ||
1124 | int size; | ||
1125 | |||
1126 | hash = kzalloc(sizeof(*hash), GFP_KERNEL); | ||
1127 | if (!hash) | ||
1128 | return NULL; | ||
1129 | |||
1130 | size = 1 << size_bits; | ||
1131 | hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL); | ||
1132 | |||
1133 | if (!hash->buckets) { | ||
1134 | kfree(hash); | ||
1135 | return NULL; | ||
1136 | } | ||
1137 | |||
1138 | hash->size_bits = size_bits; | ||
1139 | |||
1140 | return hash; | ||
1141 | } | ||
1142 | |||
1143 | static struct ftrace_hash * | ||
1144 | alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) | ||
1145 | { | ||
1146 | struct ftrace_func_entry *entry; | ||
1147 | struct ftrace_hash *new_hash; | ||
1148 | struct hlist_node *tp; | ||
1149 | int size; | ||
1150 | int ret; | ||
1151 | int i; | ||
1152 | |||
1153 | new_hash = alloc_ftrace_hash(size_bits); | ||
1154 | if (!new_hash) | ||
1155 | return NULL; | ||
1156 | |||
1157 | /* Empty hash? */ | ||
1158 | if (!hash || !hash->count) | ||
1159 | return new_hash; | ||
1160 | |||
1161 | size = 1 << hash->size_bits; | ||
1162 | for (i = 0; i < size; i++) { | ||
1163 | hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) { | ||
1164 | ret = add_hash_entry(new_hash, entry->ip); | ||
1165 | if (ret < 0) | ||
1166 | goto free_hash; | ||
1167 | } | ||
1168 | } | ||
1169 | |||
1170 | FTRACE_WARN_ON(new_hash->count != hash->count); | ||
1171 | |||
1172 | return new_hash; | ||
1173 | |||
1174 | free_hash: | ||
1175 | free_ftrace_hash(new_hash); | ||
1176 | return NULL; | ||
1177 | } | ||
1178 | |||
1179 | static int | ||
1180 | ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) | ||
1181 | { | ||
1182 | struct ftrace_func_entry *entry; | ||
1183 | struct hlist_node *tp, *tn; | ||
1184 | struct hlist_head *hhd; | ||
1185 | struct ftrace_hash *old_hash; | ||
1186 | struct ftrace_hash *new_hash; | ||
1187 | unsigned long key; | ||
1188 | int size = src->count; | ||
1189 | int bits = 0; | ||
1190 | int i; | ||
1191 | |||
1192 | /* | ||
1193 | * If the new source is empty, just free dst and assign it | ||
1194 | * the empty_hash. | ||
1195 | */ | ||
1196 | if (!src->count) { | ||
1197 | free_ftrace_hash_rcu(*dst); | ||
1198 | rcu_assign_pointer(*dst, EMPTY_HASH); | ||
1199 | return 0; | ||
1200 | } | ||
1201 | |||
1202 | /* | ||
1203 | * Make the hash size about 1/2 the # found | ||
1204 | */ | ||
1205 | for (size /= 2; size; size >>= 1) | ||
1206 | bits++; | ||
1207 | |||
1208 | /* Don't allocate too much */ | ||
1209 | if (bits > FTRACE_HASH_MAX_BITS) | ||
1210 | bits = FTRACE_HASH_MAX_BITS; | ||
1211 | |||
1212 | new_hash = alloc_ftrace_hash(bits); | ||
1213 | if (!new_hash) | ||
1214 | return -ENOMEM; | ||
1215 | |||
1216 | size = 1 << src->size_bits; | ||
1217 | for (i = 0; i < size; i++) { | ||
1218 | hhd = &src->buckets[i]; | ||
1219 | hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) { | ||
1220 | if (bits > 0) | ||
1221 | key = hash_long(entry->ip, bits); | ||
1222 | else | ||
1223 | key = 0; | ||
1224 | remove_hash_entry(src, entry); | ||
1225 | __add_hash_entry(new_hash, entry); | ||
1226 | } | ||
1227 | } | ||
1228 | |||
1229 | old_hash = *dst; | ||
1230 | rcu_assign_pointer(*dst, new_hash); | ||
1231 | free_ftrace_hash_rcu(old_hash); | ||
1232 | |||
1233 | return 0; | ||
1234 | } | ||
1235 | |||
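The sizing loop in ftrace_hash_move() above derives the bucket-count exponent from the number of entries ("about 1/2 the # found") and caps it at FTRACE_HASH_MAX_BITS. The standalone snippet below reproduces that calculation so you can see which bucket counts fall out of it; DEMO_HASH_MAX_BITS is a placeholder for the real cap.

#include <stdio.h>

/* Hypothetical cap, standing in for FTRACE_HASH_MAX_BITS. */
#define DEMO_HASH_MAX_BITS 10

/* Pick a bucket-count exponent from the entry count, as the patch does. */
static int demo_hash_bits(int count)
{
	int size = count;
	int bits = 0;

	for (size /= 2; size; size >>= 1)
		bits++;

	if (bits > DEMO_HASH_MAX_BITS)
		bits = DEMO_HASH_MAX_BITS;

	return bits;
}

int main(void)
{
	int counts[] = { 1, 2, 3, 8, 100, 5000 };
	int i;

	for (i = 0; i < (int)(sizeof(counts) / sizeof(counts[0])); i++)
		printf("count=%4d -> bits=%d (buckets=%d)\n",
		       counts[i], demo_hash_bits(counts[i]),
		       1 << demo_hash_bits(counts[i]));
	return 0;
}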
1236 | /* | ||
1237 | * Test the hashes for this ops to see if we want to call | ||
1238 | * the ops->func or not. | ||
1239 | * | ||
1240 | * It's a match if the ip is in the ops->filter_hash or | ||
1241 | * the filter_hash does not exist or is empty, | ||
1242 | * AND | ||
1243 | * the ip is not in the ops->notrace_hash. | ||
1244 | * | ||
1245 | * This needs to be called with preemption disabled as | ||
1246 | * the hashes are freed with call_rcu_sched(). | ||
1247 | */ | ||
1248 | static int | ||
1249 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | ||
1250 | { | ||
1251 | struct ftrace_hash *filter_hash; | ||
1252 | struct ftrace_hash *notrace_hash; | ||
1253 | int ret; | ||
1254 | |||
1255 | filter_hash = rcu_dereference_raw(ops->filter_hash); | ||
1256 | notrace_hash = rcu_dereference_raw(ops->notrace_hash); | ||
1257 | |||
1258 | if ((!filter_hash || !filter_hash->count || | ||
1259 | ftrace_lookup_ip(filter_hash, ip)) && | ||
1260 | (!notrace_hash || !notrace_hash->count || | ||
1261 | !ftrace_lookup_ip(notrace_hash, ip))) | ||
1262 | ret = 1; | ||
1263 | else | ||
1264 | ret = 0; | ||
1265 | |||
1266 | return ret; | ||
1267 | } | ||
1268 | |||
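The comment above ftrace_ops_test() states the rule: trace an ip if it is in the ops' filter hash (or the filter hash is empty) and it is not in the notrace hash. Below is a small userspace sketch of that predicate over two plain arrays, ignoring the hashing and RCU details; the names are made up.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool in_set(const unsigned long *set, size_t n, unsigned long ip)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (set[i] == ip)
			return true;
	return false;
}

/*
 * Match if (filter is empty OR ip is in filter)
 * AND ip is not in notrace.
 */
static bool demo_ops_test(const unsigned long *filter, size_t nfilter,
			  const unsigned long *notrace, size_t nnotrace,
			  unsigned long ip)
{
	return (nfilter == 0 || in_set(filter, nfilter, ip)) &&
	       !in_set(notrace, nnotrace, ip);
}

int main(void)
{
	unsigned long filter[] = { 0x100, 0x200 };
	unsigned long notrace[] = { 0x200 };

	printf("%d\n", demo_ops_test(filter, 2, notrace, 1, 0x100)); /* 1 */
	printf("%d\n", demo_ops_test(filter, 2, notrace, 1, 0x200)); /* 0 */
	printf("%d\n", demo_ops_test(NULL, 0, notrace, 1, 0x300));   /* 1 */
	return 0;
}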
915 | /* | 1269 | /* |
916 | * This is a double for. Do not use 'break' to break out of the loop, | 1270 | * This is a double for. Do not use 'break' to break out of the loop, |
917 | * you must use a goto. | 1271 | * you must use a goto. |
@@ -926,6 +1280,105 @@ static struct dyn_ftrace *ftrace_free_records; | |||
926 | } \ | 1280 | } \ |
927 | } | 1281 | } |
928 | 1282 | ||
1283 | static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | ||
1284 | int filter_hash, | ||
1285 | bool inc) | ||
1286 | { | ||
1287 | struct ftrace_hash *hash; | ||
1288 | struct ftrace_hash *other_hash; | ||
1289 | struct ftrace_page *pg; | ||
1290 | struct dyn_ftrace *rec; | ||
1291 | int count = 0; | ||
1292 | int all = 0; | ||
1293 | |||
1294 | /* Only update if the ops has been registered */ | ||
1295 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | ||
1296 | return; | ||
1297 | |||
1298 | /* | ||
1299 | * In the filter_hash case: | ||
1300 | * If the count is zero, we update all records. | ||
1301 | * Otherwise we just update the items in the hash. | ||
1302 | * | ||
1303 | * In the notrace_hash case: | ||
1304 | * We only update the items in the hash. | ||
1305 | * Since disabling notrace means enabling tracing, | ||
1306 | * and enabling notrace means disabling it, the inc | ||
1307 | * variable gets inverted. | ||
1308 | */ | ||
1309 | if (filter_hash) { | ||
1310 | hash = ops->filter_hash; | ||
1311 | other_hash = ops->notrace_hash; | ||
1312 | if (!hash || !hash->count) | ||
1313 | all = 1; | ||
1314 | } else { | ||
1315 | inc = !inc; | ||
1316 | hash = ops->notrace_hash; | ||
1317 | other_hash = ops->filter_hash; | ||
1318 | /* | ||
1319 | * If the notrace hash has no items, | ||
1320 | * then there's nothing to do. | ||
1321 | */ | ||
1322 | if (hash && !hash->count) | ||
1323 | return; | ||
1324 | } | ||
1325 | |||
1326 | do_for_each_ftrace_rec(pg, rec) { | ||
1327 | int in_other_hash = 0; | ||
1328 | int in_hash = 0; | ||
1329 | int match = 0; | ||
1330 | |||
1331 | if (all) { | ||
1332 | /* | ||
1333 | * Only the filter_hash affects all records. | ||
1334 | * Update if the record is not in the notrace hash. | ||
1335 | */ | ||
1336 | if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) | ||
1337 | match = 1; | ||
1338 | } else { | ||
1339 | in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip); | ||
1340 | in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip); | ||
1341 | |||
1342 | /* | ||
1343 | * The record must be in this hash; the other hash decides if it still counts. | ||
1344 | */ | ||
1345 | if (filter_hash && in_hash && !in_other_hash) | ||
1346 | match = 1; | ||
1347 | else if (!filter_hash && in_hash && | ||
1348 | (in_other_hash || !other_hash->count)) | ||
1349 | match = 1; | ||
1350 | } | ||
1351 | if (!match) | ||
1352 | continue; | ||
1353 | |||
1354 | if (inc) { | ||
1355 | rec->flags++; | ||
1356 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) | ||
1357 | return; | ||
1358 | } else { | ||
1359 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) | ||
1360 | return; | ||
1361 | rec->flags--; | ||
1362 | } | ||
1363 | count++; | ||
1364 | /* Shortcut, if we handled all records, we are done. */ | ||
1365 | if (!all && count == hash->count) | ||
1366 | return; | ||
1367 | } while_for_each_ftrace_rec(); | ||
1368 | } | ||
1369 | |||
1370 | static void ftrace_hash_rec_disable(struct ftrace_ops *ops, | ||
1371 | int filter_hash) | ||
1372 | { | ||
1373 | __ftrace_hash_rec_update(ops, filter_hash, 0); | ||
1374 | } | ||
1375 | |||
1376 | static void ftrace_hash_rec_enable(struct ftrace_ops *ops, | ||
1377 | int filter_hash) | ||
1378 | { | ||
1379 | __ftrace_hash_rec_update(ops, filter_hash, 1); | ||
1380 | } | ||
1381 | |||
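__ftrace_hash_rec_update() above keeps a reference count in each record's flags: filter entries add references, while notrace entries invert the inc flag because adding a notrace entry removes a reason to trace. The sketch below shows only that counting rule, leaving out the cross-hash checks and the FTRACE_REF_MAX overflow guard; the demo_* names are invented.

#include <stdbool.h>
#include <stdio.h>

struct demo_rec {
	unsigned long ip;
	unsigned int refs;	/* like rec->flags & ~FTRACE_FL_MASK */
};

/*
 * Apply one hash update to one record:
 *   - filter hash, inc=true  -> the record gains a tracer reference
 *   - notrace hash           -> the meaning flips, so invert inc
 * A record is patched to call the tracer only while refs > 0.
 */
static void demo_rec_update(struct demo_rec *rec, bool filter_hash, bool inc)
{
	if (!filter_hash)
		inc = !inc;	/* enabling notrace == disabling tracing */

	if (inc)
		rec->refs++;
	else if (rec->refs)
		rec->refs--;
}

int main(void)
{
	struct demo_rec rec = { .ip = 0xc0ffee, .refs = 0 };

	demo_rec_update(&rec, true, true);	/* added to a filter hash */
	printf("after filter add:  refs=%u\n", rec.refs);	/* 1 */

	demo_rec_update(&rec, false, true);	/* added to a notrace hash */
	printf("after notrace add: refs=%u\n", rec.refs);	/* 0 */

	demo_rec_update(&rec, false, false);	/* notrace entry removed */
	printf("after notrace del: refs=%u\n", rec.refs);	/* 1 */
	return 0;
}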
929 | static void ftrace_free_rec(struct dyn_ftrace *rec) | 1382 | static void ftrace_free_rec(struct dyn_ftrace *rec) |
930 | { | 1383 | { |
931 | rec->freelist = ftrace_free_records; | 1384 | rec->freelist = ftrace_free_records; |
@@ -1047,18 +1500,18 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
1047 | ftrace_addr = (unsigned long)FTRACE_ADDR; | 1500 | ftrace_addr = (unsigned long)FTRACE_ADDR; |
1048 | 1501 | ||
1049 | /* | 1502 | /* |
1050 | * If this record is not to be traced or we want to disable it, | 1503 | * If we are enabling tracing: |
1051 | * then disable it. | 1504 | * |
1505 | * If the record has a ref count, then we need to enable it | ||
1506 | * because someone is using it. | ||
1052 | * | 1507 | * |
1053 | * If we want to enable it and filtering is off, then enable it. | 1508 | * Otherwise we make sure it's disabled. |
1054 | * | 1509 | * |
1055 | * If we want to enable it and filtering is on, enable it only if | 1510 | * If we are disabling tracing, then disable all records that |
1056 | * it's filtered | 1511 | * are enabled. |
1057 | */ | 1512 | */ |
1058 | if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) { | 1513 | if (enable && (rec->flags & ~FTRACE_FL_MASK)) |
1059 | if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER)) | 1514 | flag = FTRACE_FL_ENABLED; |
1060 | flag = FTRACE_FL_ENABLED; | ||
1061 | } | ||
1062 | 1515 | ||
1063 | /* If the state of this record hasn't changed, then do nothing */ | 1516 | /* If the state of this record hasn't changed, then do nothing */ |
1064 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) | 1517 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) |
@@ -1079,19 +1532,16 @@ static void ftrace_replace_code(int enable) | |||
1079 | struct ftrace_page *pg; | 1532 | struct ftrace_page *pg; |
1080 | int failed; | 1533 | int failed; |
1081 | 1534 | ||
1535 | if (unlikely(ftrace_disabled)) | ||
1536 | return; | ||
1537 | |||
1082 | do_for_each_ftrace_rec(pg, rec) { | 1538 | do_for_each_ftrace_rec(pg, rec) { |
1083 | /* | 1539 | /* Skip over free records */ |
1084 | * Skip over free records, records that have | 1540 | if (rec->flags & FTRACE_FL_FREE) |
1085 | * failed and not converted. | ||
1086 | */ | ||
1087 | if (rec->flags & FTRACE_FL_FREE || | ||
1088 | rec->flags & FTRACE_FL_FAILED || | ||
1089 | !(rec->flags & FTRACE_FL_CONVERTED)) | ||
1090 | continue; | 1541 | continue; |
1091 | 1542 | ||
1092 | failed = __ftrace_replace_code(rec, enable); | 1543 | failed = __ftrace_replace_code(rec, enable); |
1093 | if (failed) { | 1544 | if (failed) { |
1094 | rec->flags |= FTRACE_FL_FAILED; | ||
1095 | ftrace_bug(failed, rec->ip); | 1545 | ftrace_bug(failed, rec->ip); |
1096 | /* Stop processing */ | 1546 | /* Stop processing */ |
1097 | return; | 1547 | return; |
@@ -1107,10 +1557,12 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | |||
1107 | 1557 | ||
1108 | ip = rec->ip; | 1558 | ip = rec->ip; |
1109 | 1559 | ||
1560 | if (unlikely(ftrace_disabled)) | ||
1561 | return 0; | ||
1562 | |||
1110 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); | 1563 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
1111 | if (ret) { | 1564 | if (ret) { |
1112 | ftrace_bug(ret, ip); | 1565 | ftrace_bug(ret, ip); |
1113 | rec->flags |= FTRACE_FL_FAILED; | ||
1114 | return 0; | 1566 | return 0; |
1115 | } | 1567 | } |
1116 | return 1; | 1568 | return 1; |
@@ -1171,6 +1623,7 @@ static void ftrace_run_update_code(int command) | |||
1171 | 1623 | ||
1172 | static ftrace_func_t saved_ftrace_func; | 1624 | static ftrace_func_t saved_ftrace_func; |
1173 | static int ftrace_start_up; | 1625 | static int ftrace_start_up; |
1626 | static int global_start_up; | ||
1174 | 1627 | ||
1175 | static void ftrace_startup_enable(int command) | 1628 | static void ftrace_startup_enable(int command) |
1176 | { | 1629 | { |
@@ -1185,19 +1638,36 @@ static void ftrace_startup_enable(int command) | |||
1185 | ftrace_run_update_code(command); | 1638 | ftrace_run_update_code(command); |
1186 | } | 1639 | } |
1187 | 1640 | ||
1188 | static void ftrace_startup(int command) | 1641 | static void ftrace_startup(struct ftrace_ops *ops, int command) |
1189 | { | 1642 | { |
1643 | bool hash_enable = true; | ||
1644 | |||
1190 | if (unlikely(ftrace_disabled)) | 1645 | if (unlikely(ftrace_disabled)) |
1191 | return; | 1646 | return; |
1192 | 1647 | ||
1193 | ftrace_start_up++; | 1648 | ftrace_start_up++; |
1194 | command |= FTRACE_ENABLE_CALLS; | 1649 | command |= FTRACE_ENABLE_CALLS; |
1195 | 1650 | ||
1651 | /* ops marked global share the filter hashes */ | ||
1652 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { | ||
1653 | ops = &global_ops; | ||
1654 | /* Don't update hash if global is already set */ | ||
1655 | if (global_start_up) | ||
1656 | hash_enable = false; | ||
1657 | global_start_up++; | ||
1658 | } | ||
1659 | |||
1660 | ops->flags |= FTRACE_OPS_FL_ENABLED; | ||
1661 | if (hash_enable) | ||
1662 | ftrace_hash_rec_enable(ops, 1); | ||
1663 | |||
1196 | ftrace_startup_enable(command); | 1664 | ftrace_startup_enable(command); |
1197 | } | 1665 | } |
1198 | 1666 | ||
1199 | static void ftrace_shutdown(int command) | 1667 | static void ftrace_shutdown(struct ftrace_ops *ops, int command) |
1200 | { | 1668 | { |
1669 | bool hash_disable = true; | ||
1670 | |||
1201 | if (unlikely(ftrace_disabled)) | 1671 | if (unlikely(ftrace_disabled)) |
1202 | return; | 1672 | return; |
1203 | 1673 | ||
@@ -1209,6 +1679,23 @@ static void ftrace_shutdown(int command) | |||
1209 | */ | 1679 | */ |
1210 | WARN_ON_ONCE(ftrace_start_up < 0); | 1680 | WARN_ON_ONCE(ftrace_start_up < 0); |
1211 | 1681 | ||
1682 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) { | ||
1683 | ops = &global_ops; | ||
1684 | global_start_up--; | ||
1685 | WARN_ON_ONCE(global_start_up < 0); | ||
1686 | /* Don't update hash if global still has users */ | ||
1687 | if (global_start_up) { | ||
1688 | WARN_ON_ONCE(!ftrace_start_up); | ||
1689 | hash_disable = false; | ||
1690 | } | ||
1691 | } | ||
1692 | |||
1693 | if (hash_disable) | ||
1694 | ftrace_hash_rec_disable(ops, 1); | ||
1695 | |||
1696 | if (ops != &global_ops || !global_start_up) | ||
1697 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | ||
1698 | |||
1212 | if (!ftrace_start_up) | 1699 | if (!ftrace_start_up) |
1213 | command |= FTRACE_DISABLE_CALLS; | 1700 | command |= FTRACE_DISABLE_CALLS; |
1214 | 1701 | ||
@@ -1273,10 +1760,10 @@ static int ftrace_update_code(struct module *mod) | |||
1273 | */ | 1760 | */ |
1274 | if (!ftrace_code_disable(mod, p)) { | 1761 | if (!ftrace_code_disable(mod, p)) { |
1275 | ftrace_free_rec(p); | 1762 | ftrace_free_rec(p); |
1276 | continue; | 1763 | /* Game over */ |
1764 | break; | ||
1277 | } | 1765 | } |
1278 | 1766 | ||
1279 | p->flags |= FTRACE_FL_CONVERTED; | ||
1280 | ftrace_update_cnt++; | 1767 | ftrace_update_cnt++; |
1281 | 1768 | ||
1282 | /* | 1769 | /* |
@@ -1351,9 +1838,9 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) | |||
1351 | enum { | 1838 | enum { |
1352 | FTRACE_ITER_FILTER = (1 << 0), | 1839 | FTRACE_ITER_FILTER = (1 << 0), |
1353 | FTRACE_ITER_NOTRACE = (1 << 1), | 1840 | FTRACE_ITER_NOTRACE = (1 << 1), |
1354 | FTRACE_ITER_FAILURES = (1 << 2), | 1841 | FTRACE_ITER_PRINTALL = (1 << 2), |
1355 | FTRACE_ITER_PRINTALL = (1 << 3), | 1842 | FTRACE_ITER_HASH = (1 << 3), |
1356 | FTRACE_ITER_HASH = (1 << 4), | 1843 | FTRACE_ITER_ENABLED = (1 << 4), |
1357 | }; | 1844 | }; |
1358 | 1845 | ||
1359 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 1846 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
@@ -1365,6 +1852,8 @@ struct ftrace_iterator { | |||
1365 | struct dyn_ftrace *func; | 1852 | struct dyn_ftrace *func; |
1366 | struct ftrace_func_probe *probe; | 1853 | struct ftrace_func_probe *probe; |
1367 | struct trace_parser parser; | 1854 | struct trace_parser parser; |
1855 | struct ftrace_hash *hash; | ||
1856 | struct ftrace_ops *ops; | ||
1368 | int hidx; | 1857 | int hidx; |
1369 | int idx; | 1858 | int idx; |
1370 | unsigned flags; | 1859 | unsigned flags; |
@@ -1461,8 +1950,12 @@ static void * | |||
1461 | t_next(struct seq_file *m, void *v, loff_t *pos) | 1950 | t_next(struct seq_file *m, void *v, loff_t *pos) |
1462 | { | 1951 | { |
1463 | struct ftrace_iterator *iter = m->private; | 1952 | struct ftrace_iterator *iter = m->private; |
1953 | struct ftrace_ops *ops = &global_ops; | ||
1464 | struct dyn_ftrace *rec = NULL; | 1954 | struct dyn_ftrace *rec = NULL; |
1465 | 1955 | ||
1956 | if (unlikely(ftrace_disabled)) | ||
1957 | return NULL; | ||
1958 | |||
1466 | if (iter->flags & FTRACE_ITER_HASH) | 1959 | if (iter->flags & FTRACE_ITER_HASH) |
1467 | return t_hash_next(m, pos); | 1960 | return t_hash_next(m, pos); |
1468 | 1961 | ||
@@ -1483,17 +1976,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
1483 | rec = &iter->pg->records[iter->idx++]; | 1976 | rec = &iter->pg->records[iter->idx++]; |
1484 | if ((rec->flags & FTRACE_FL_FREE) || | 1977 | if ((rec->flags & FTRACE_FL_FREE) || |
1485 | 1978 | ||
1486 | (!(iter->flags & FTRACE_ITER_FAILURES) && | ||
1487 | (rec->flags & FTRACE_FL_FAILED)) || | ||
1488 | |||
1489 | ((iter->flags & FTRACE_ITER_FAILURES) && | ||
1490 | !(rec->flags & FTRACE_FL_FAILED)) || | ||
1491 | |||
1492 | ((iter->flags & FTRACE_ITER_FILTER) && | 1979 | ((iter->flags & FTRACE_ITER_FILTER) && |
1493 | !(rec->flags & FTRACE_FL_FILTER)) || | 1980 | !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || |
1494 | 1981 | ||
1495 | ((iter->flags & FTRACE_ITER_NOTRACE) && | 1982 | ((iter->flags & FTRACE_ITER_NOTRACE) && |
1496 | !(rec->flags & FTRACE_FL_NOTRACE))) { | 1983 | !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || |
1984 | |||
1985 | ((iter->flags & FTRACE_ITER_ENABLED) && | ||
1986 | !(rec->flags & ~FTRACE_FL_MASK))) { | ||
1987 | |||
1497 | rec = NULL; | 1988 | rec = NULL; |
1498 | goto retry; | 1989 | goto retry; |
1499 | } | 1990 | } |
@@ -1517,10 +2008,15 @@ static void reset_iter_read(struct ftrace_iterator *iter) | |||
1517 | static void *t_start(struct seq_file *m, loff_t *pos) | 2008 | static void *t_start(struct seq_file *m, loff_t *pos) |
1518 | { | 2009 | { |
1519 | struct ftrace_iterator *iter = m->private; | 2010 | struct ftrace_iterator *iter = m->private; |
2011 | struct ftrace_ops *ops = &global_ops; | ||
1520 | void *p = NULL; | 2012 | void *p = NULL; |
1521 | loff_t l; | 2013 | loff_t l; |
1522 | 2014 | ||
1523 | mutex_lock(&ftrace_lock); | 2015 | mutex_lock(&ftrace_lock); |
2016 | |||
2017 | if (unlikely(ftrace_disabled)) | ||
2018 | return NULL; | ||
2019 | |||
1524 | /* | 2020 | /* |
1525 | * If an lseek was done, then reset and start from beginning. | 2021 | * If an lseek was done, then reset and start from beginning. |
1526 | */ | 2022 | */ |
@@ -1532,7 +2028,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
1532 | * off, we can short cut and just print out that all | 2028 | * off, we can short cut and just print out that all |
1533 | * functions are enabled. | 2029 | * functions are enabled. |
1534 | */ | 2030 | */ |
1535 | if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) { | 2031 | if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) { |
1536 | if (*pos > 0) | 2032 | if (*pos > 0) |
1537 | return t_hash_start(m, pos); | 2033 | return t_hash_start(m, pos); |
1538 | iter->flags |= FTRACE_ITER_PRINTALL; | 2034 | iter->flags |= FTRACE_ITER_PRINTALL; |
@@ -1590,7 +2086,11 @@ static int t_show(struct seq_file *m, void *v) | |||
1590 | if (!rec) | 2086 | if (!rec) |
1591 | return 0; | 2087 | return 0; |
1592 | 2088 | ||
1593 | seq_printf(m, "%ps\n", (void *)rec->ip); | 2089 | seq_printf(m, "%ps", (void *)rec->ip); |
2090 | if (iter->flags & FTRACE_ITER_ENABLED) | ||
2091 | seq_printf(m, " (%ld)", | ||
2092 | rec->flags & ~FTRACE_FL_MASK); | ||
2093 | seq_printf(m, "\n"); | ||
1594 | 2094 | ||
1595 | return 0; | 2095 | return 0; |
1596 | } | 2096 | } |
@@ -1630,44 +2130,46 @@ ftrace_avail_open(struct inode *inode, struct file *file) | |||
1630 | } | 2130 | } |
1631 | 2131 | ||
1632 | static int | 2132 | static int |
1633 | ftrace_failures_open(struct inode *inode, struct file *file) | 2133 | ftrace_enabled_open(struct inode *inode, struct file *file) |
1634 | { | 2134 | { |
1635 | int ret; | ||
1636 | struct seq_file *m; | ||
1637 | struct ftrace_iterator *iter; | 2135 | struct ftrace_iterator *iter; |
2136 | int ret; | ||
2137 | |||
2138 | if (unlikely(ftrace_disabled)) | ||
2139 | return -ENODEV; | ||
2140 | |||
2141 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | ||
2142 | if (!iter) | ||
2143 | return -ENOMEM; | ||
2144 | |||
2145 | iter->pg = ftrace_pages_start; | ||
2146 | iter->flags = FTRACE_ITER_ENABLED; | ||
1638 | 2147 | ||
1639 | ret = ftrace_avail_open(inode, file); | 2148 | ret = seq_open(file, &show_ftrace_seq_ops); |
1640 | if (!ret) { | 2149 | if (!ret) { |
1641 | m = file->private_data; | 2150 | struct seq_file *m = file->private_data; |
1642 | iter = m->private; | 2151 | |
1643 | iter->flags = FTRACE_ITER_FAILURES; | 2152 | m->private = iter; |
2153 | } else { | ||
2154 | kfree(iter); | ||
1644 | } | 2155 | } |
1645 | 2156 | ||
1646 | return ret; | 2157 | return ret; |
1647 | } | 2158 | } |
1648 | 2159 | ||
1649 | 2160 | static void ftrace_filter_reset(struct ftrace_hash *hash) | |
1650 | static void ftrace_filter_reset(int enable) | ||
1651 | { | 2161 | { |
1652 | struct ftrace_page *pg; | ||
1653 | struct dyn_ftrace *rec; | ||
1654 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | ||
1655 | |||
1656 | mutex_lock(&ftrace_lock); | 2162 | mutex_lock(&ftrace_lock); |
1657 | if (enable) | 2163 | ftrace_hash_clear(hash); |
1658 | ftrace_filtered = 0; | ||
1659 | do_for_each_ftrace_rec(pg, rec) { | ||
1660 | if (rec->flags & FTRACE_FL_FAILED) | ||
1661 | continue; | ||
1662 | rec->flags &= ~type; | ||
1663 | } while_for_each_ftrace_rec(); | ||
1664 | mutex_unlock(&ftrace_lock); | 2164 | mutex_unlock(&ftrace_lock); |
1665 | } | 2165 | } |
1666 | 2166 | ||
1667 | static int | 2167 | static int |
1668 | ftrace_regex_open(struct inode *inode, struct file *file, int enable) | 2168 | ftrace_regex_open(struct ftrace_ops *ops, int flag, |
2169 | struct inode *inode, struct file *file) | ||
1669 | { | 2170 | { |
1670 | struct ftrace_iterator *iter; | 2171 | struct ftrace_iterator *iter; |
2172 | struct ftrace_hash *hash; | ||
1671 | int ret = 0; | 2173 | int ret = 0; |
1672 | 2174 | ||
1673 | if (unlikely(ftrace_disabled)) | 2175 | if (unlikely(ftrace_disabled)) |
@@ -1682,21 +2184,42 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
1682 | return -ENOMEM; | 2184 | return -ENOMEM; |
1683 | } | 2185 | } |
1684 | 2186 | ||
2187 | if (flag & FTRACE_ITER_NOTRACE) | ||
2188 | hash = ops->notrace_hash; | ||
2189 | else | ||
2190 | hash = ops->filter_hash; | ||
2191 | |||
2192 | iter->ops = ops; | ||
2193 | iter->flags = flag; | ||
2194 | |||
2195 | if (file->f_mode & FMODE_WRITE) { | ||
2196 | mutex_lock(&ftrace_lock); | ||
2197 | iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash); | ||
2198 | mutex_unlock(&ftrace_lock); | ||
2199 | |||
2200 | if (!iter->hash) { | ||
2201 | trace_parser_put(&iter->parser); | ||
2202 | kfree(iter); | ||
2203 | return -ENOMEM; | ||
2204 | } | ||
2205 | } | ||
2206 | |||
1685 | mutex_lock(&ftrace_regex_lock); | 2207 | mutex_lock(&ftrace_regex_lock); |
2208 | |||
1686 | if ((file->f_mode & FMODE_WRITE) && | 2209 | if ((file->f_mode & FMODE_WRITE) && |
1687 | (file->f_flags & O_TRUNC)) | 2210 | (file->f_flags & O_TRUNC)) |
1688 | ftrace_filter_reset(enable); | 2211 | ftrace_filter_reset(iter->hash); |
1689 | 2212 | ||
1690 | if (file->f_mode & FMODE_READ) { | 2213 | if (file->f_mode & FMODE_READ) { |
1691 | iter->pg = ftrace_pages_start; | 2214 | iter->pg = ftrace_pages_start; |
1692 | iter->flags = enable ? FTRACE_ITER_FILTER : | ||
1693 | FTRACE_ITER_NOTRACE; | ||
1694 | 2215 | ||
1695 | ret = seq_open(file, &show_ftrace_seq_ops); | 2216 | ret = seq_open(file, &show_ftrace_seq_ops); |
1696 | if (!ret) { | 2217 | if (!ret) { |
1697 | struct seq_file *m = file->private_data; | 2218 | struct seq_file *m = file->private_data; |
1698 | m->private = iter; | 2219 | m->private = iter; |
1699 | } else { | 2220 | } else { |
2221 | /* Failed */ | ||
2222 | free_ftrace_hash(iter->hash); | ||
1700 | trace_parser_put(&iter->parser); | 2223 | trace_parser_put(&iter->parser); |
1701 | kfree(iter); | 2224 | kfree(iter); |
1702 | } | 2225 | } |
@@ -1710,13 +2233,15 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
1710 | static int | 2233 | static int |
1711 | ftrace_filter_open(struct inode *inode, struct file *file) | 2234 | ftrace_filter_open(struct inode *inode, struct file *file) |
1712 | { | 2235 | { |
1713 | return ftrace_regex_open(inode, file, 1); | 2236 | return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER, |
2237 | inode, file); | ||
1714 | } | 2238 | } |
1715 | 2239 | ||
1716 | static int | 2240 | static int |
1717 | ftrace_notrace_open(struct inode *inode, struct file *file) | 2241 | ftrace_notrace_open(struct inode *inode, struct file *file) |
1718 | { | 2242 | { |
1719 | return ftrace_regex_open(inode, file, 0); | 2243 | return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE, |
2244 | inode, file); | ||
1720 | } | 2245 | } |
1721 | 2246 | ||
1722 | static loff_t | 2247 | static loff_t |
@@ -1761,86 +2286,99 @@ static int ftrace_match(char *str, char *regex, int len, int type) | |||
1761 | } | 2286 | } |
1762 | 2287 | ||
1763 | static int | 2288 | static int |
1764 | ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) | 2289 | enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) |
2290 | { | ||
2291 | struct ftrace_func_entry *entry; | ||
2292 | int ret = 0; | ||
2293 | |||
2294 | entry = ftrace_lookup_ip(hash, rec->ip); | ||
2295 | if (not) { | ||
2296 | /* Do nothing if it doesn't exist */ | ||
2297 | if (!entry) | ||
2298 | return 0; | ||
2299 | |||
2300 | free_hash_entry(hash, entry); | ||
2301 | } else { | ||
2302 | /* Do nothing if it exists */ | ||
2303 | if (entry) | ||
2304 | return 0; | ||
2305 | |||
2306 | ret = add_hash_entry(hash, rec->ip); | ||
2307 | } | ||
2308 | return ret; | ||
2309 | } | ||
2310 | |||
2311 | static int | ||
2312 | ftrace_match_record(struct dyn_ftrace *rec, char *mod, | ||
2313 | char *regex, int len, int type) | ||
1765 | { | 2314 | { |
1766 | char str[KSYM_SYMBOL_LEN]; | 2315 | char str[KSYM_SYMBOL_LEN]; |
2316 | char *modname; | ||
2317 | |||
2318 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); | ||
2319 | |||
2320 | if (mod) { | ||
2321 | /* module lookup requires matching the module */ | ||
2322 | if (!modname || strcmp(modname, mod)) | ||
2323 | return 0; | ||
2324 | |||
2325 | /* blank search means to match all funcs in the mod */ | ||
2326 | if (!len) | ||
2327 | return 1; | ||
2328 | } | ||
1767 | 2329 | ||
1768 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
1769 | return ftrace_match(str, regex, len, type); | 2330 | return ftrace_match(str, regex, len, type); |
1770 | } | 2331 | } |
1771 | 2332 | ||
1772 | static int ftrace_match_records(char *buff, int len, int enable) | 2333 | static int |
2334 | match_records(struct ftrace_hash *hash, char *buff, | ||
2335 | int len, char *mod, int not) | ||
1773 | { | 2336 | { |
1774 | unsigned int search_len; | 2337 | unsigned search_len = 0; |
1775 | struct ftrace_page *pg; | 2338 | struct ftrace_page *pg; |
1776 | struct dyn_ftrace *rec; | 2339 | struct dyn_ftrace *rec; |
1777 | unsigned long flag; | 2340 | int type = MATCH_FULL; |
1778 | char *search; | 2341 | char *search = buff; |
1779 | int type; | ||
1780 | int not; | ||
1781 | int found = 0; | 2342 | int found = 0; |
2343 | int ret; | ||
1782 | 2344 | ||
1783 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 2345 | if (len) { |
1784 | type = filter_parse_regex(buff, len, &search, ¬); | 2346 | type = filter_parse_regex(buff, len, &search, ¬); |
1785 | 2347 | search_len = strlen(search); | |
1786 | search_len = strlen(search); | 2348 | } |
1787 | 2349 | ||
1788 | mutex_lock(&ftrace_lock); | 2350 | mutex_lock(&ftrace_lock); |
1789 | do_for_each_ftrace_rec(pg, rec) { | ||
1790 | 2351 | ||
1791 | if (rec->flags & FTRACE_FL_FAILED) | 2352 | if (unlikely(ftrace_disabled)) |
1792 | continue; | 2353 | goto out_unlock; |
1793 | 2354 | ||
1794 | if (ftrace_match_record(rec, search, search_len, type)) { | 2355 | do_for_each_ftrace_rec(pg, rec) { |
1795 | if (not) | 2356 | |
1796 | rec->flags &= ~flag; | 2357 | if (ftrace_match_record(rec, mod, search, search_len, type)) { |
1797 | else | 2358 | ret = enter_record(hash, rec, not); |
1798 | rec->flags |= flag; | 2359 | if (ret < 0) { |
2360 | found = ret; | ||
2361 | goto out_unlock; | ||
2362 | } | ||
1799 | found = 1; | 2363 | found = 1; |
1800 | } | 2364 | } |
1801 | /* | ||
1802 | * Only enable filtering if we have a function that | ||
1803 | * is filtered on. | ||
1804 | */ | ||
1805 | if (enable && (rec->flags & FTRACE_FL_FILTER)) | ||
1806 | ftrace_filtered = 1; | ||
1807 | } while_for_each_ftrace_rec(); | 2365 | } while_for_each_ftrace_rec(); |
2366 | out_unlock: | ||
1808 | mutex_unlock(&ftrace_lock); | 2367 | mutex_unlock(&ftrace_lock); |
1809 | 2368 | ||
1810 | return found; | 2369 | return found; |
1811 | } | 2370 | } |
1812 | 2371 | ||
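match_records() above leans on filter_parse_regex() and ftrace_match() to turn a user pattern into a match type and compare it against each symbol name. The userspace sketch below approximates that behaviour for the common cases (exact, "prefix*", "*suffix", "*substring*", and a leading '!' for negation); it is a simplification, not the kernel's parser.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Simplified stand-in for filter_parse_regex() + ftrace_match().
 * The real code classifies the pattern into match types and handles
 * them separately; this demo just answers "does name match pattern?"
 */
static bool demo_glob_match(const char *pattern, const char *name, bool *not)
{
	size_t plen;
	bool front, back;

	*not = false;
	if (pattern[0] == '!') {
		*not = true;
		pattern++;
	}

	plen = strlen(pattern);
	front = plen && pattern[0] == '*';
	back = plen && pattern[plen - 1] == '*';

	if (front && back && plen >= 2) {
		char body[128];

		if (plen - 2 >= sizeof(body))
			return false;
		memcpy(body, pattern + 1, plen - 2);
		body[plen - 2] = '\0';
		return plen == 2 || strstr(name, body) != NULL;
	}
	if (back)	/* "prefix*" */
		return strncmp(name, pattern, plen - 1) == 0;
	if (front) {	/* "*suffix" */
		size_t nlen = strlen(name);

		return nlen >= plen - 1 &&
		       strcmp(name + nlen - (plen - 1), pattern + 1) == 0;
	}
	return strcmp(name, pattern) == 0;	/* exact */
}

int main(void)
{
	bool not;

	printf("%d\n", demo_glob_match("sched_*", "sched_switch", &not)); /* 1 */
	printf("%d\n", demo_glob_match("*_lock", "spin_lock", &not));     /* 1 */
	printf("%d\n", demo_glob_match("!kmalloc", "kmalloc", &not));     /* 1 */
	printf("not=%d\n", not);                                          /* 1 */
	return 0;
}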
1813 | static int | 2372 | static int |
1814 | ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, | 2373 | ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) |
1815 | char *regex, int len, int type) | ||
1816 | { | 2374 | { |
1817 | char str[KSYM_SYMBOL_LEN]; | 2375 | return match_records(hash, buff, len, NULL, 0); |
1818 | char *modname; | ||
1819 | |||
1820 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); | ||
1821 | |||
1822 | if (!modname || strcmp(modname, mod)) | ||
1823 | return 0; | ||
1824 | |||
1825 | /* blank search means to match all funcs in the mod */ | ||
1826 | if (len) | ||
1827 | return ftrace_match(str, regex, len, type); | ||
1828 | else | ||
1829 | return 1; | ||
1830 | } | 2376 | } |
1831 | 2377 | ||
1832 | static int ftrace_match_module_records(char *buff, char *mod, int enable) | 2378 | static int |
2379 | ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) | ||
1833 | { | 2380 | { |
1834 | unsigned search_len = 0; | ||
1835 | struct ftrace_page *pg; | ||
1836 | struct dyn_ftrace *rec; | ||
1837 | int type = MATCH_FULL; | ||
1838 | char *search = buff; | ||
1839 | unsigned long flag; | ||
1840 | int not = 0; | 2381 | int not = 0; |
1841 | int found = 0; | ||
1842 | |||
1843 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | ||
1844 | 2382 | ||
1845 | /* blank or '*' mean the same */ | 2383 | /* blank or '*' mean the same */ |
1846 | if (strcmp(buff, "*") == 0) | 2384 | if (strcmp(buff, "*") == 0) |
@@ -1852,32 +2390,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) | |||
1852 | not = 1; | 2390 | not = 1; |
1853 | } | 2391 | } |
1854 | 2392 | ||
1855 | if (strlen(buff)) { | 2393 | return match_records(hash, buff, strlen(buff), mod, not); |
1856 | type = filter_parse_regex(buff, strlen(buff), &search, ¬); | ||
1857 | search_len = strlen(search); | ||
1858 | } | ||
1859 | |||
1860 | mutex_lock(&ftrace_lock); | ||
1861 | do_for_each_ftrace_rec(pg, rec) { | ||
1862 | |||
1863 | if (rec->flags & FTRACE_FL_FAILED) | ||
1864 | continue; | ||
1865 | |||
1866 | if (ftrace_match_module_record(rec, mod, | ||
1867 | search, search_len, type)) { | ||
1868 | if (not) | ||
1869 | rec->flags &= ~flag; | ||
1870 | else | ||
1871 | rec->flags |= flag; | ||
1872 | found = 1; | ||
1873 | } | ||
1874 | if (enable && (rec->flags & FTRACE_FL_FILTER)) | ||
1875 | ftrace_filtered = 1; | ||
1876 | |||
1877 | } while_for_each_ftrace_rec(); | ||
1878 | mutex_unlock(&ftrace_lock); | ||
1879 | |||
1880 | return found; | ||
1881 | } | 2394 | } |
1882 | 2395 | ||
1883 | /* | 2396 | /* |
@@ -1888,7 +2401,10 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) | |||
1888 | static int | 2401 | static int |
1889 | ftrace_mod_callback(char *func, char *cmd, char *param, int enable) | 2402 | ftrace_mod_callback(char *func, char *cmd, char *param, int enable) |
1890 | { | 2403 | { |
2404 | struct ftrace_ops *ops = &global_ops; | ||
2405 | struct ftrace_hash *hash; | ||
1891 | char *mod; | 2406 | char *mod; |
2407 | int ret = -EINVAL; | ||
1892 | 2408 | ||
1893 | /* | 2409 | /* |
1894 | * cmd == 'mod' because we only registered this func | 2410 | * cmd == 'mod' because we only registered this func |
@@ -1900,15 +2416,24 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable) | |||
1900 | 2416 | ||
1901 | /* we must have a module name */ | 2417 | /* we must have a module name */ |
1902 | if (!param) | 2418 | if (!param) |
1903 | return -EINVAL; | 2419 | return ret; |
1904 | 2420 | ||
1905 | mod = strsep(¶m, ":"); | 2421 | mod = strsep(¶m, ":"); |
1906 | if (!strlen(mod)) | 2422 | if (!strlen(mod)) |
1907 | return -EINVAL; | 2423 | return ret; |
1908 | 2424 | ||
1909 | if (ftrace_match_module_records(func, mod, enable)) | 2425 | if (enable) |
1910 | return 0; | 2426 | hash = ops->filter_hash; |
1911 | return -EINVAL; | 2427 | else |
2428 | hash = ops->notrace_hash; | ||
2429 | |||
2430 | ret = ftrace_match_module_records(hash, func, mod); | ||
2431 | if (!ret) | ||
2432 | ret = -EINVAL; | ||
2433 | if (ret < 0) | ||
2434 | return ret; | ||
2435 | |||
2436 | return 0; | ||
1912 | } | 2437 | } |
1913 | 2438 | ||
1914 | static struct ftrace_func_command ftrace_mod_cmd = { | 2439 | static struct ftrace_func_command ftrace_mod_cmd = { |
@@ -1959,6 +2484,7 @@ static int ftrace_probe_registered; | |||
1959 | 2484 | ||
1960 | static void __enable_ftrace_function_probe(void) | 2485 | static void __enable_ftrace_function_probe(void) |
1961 | { | 2486 | { |
2487 | int ret; | ||
1962 | int i; | 2488 | int i; |
1963 | 2489 | ||
1964 | if (ftrace_probe_registered) | 2490 | if (ftrace_probe_registered) |
@@ -1973,13 +2499,16 @@ static void __enable_ftrace_function_probe(void) | |||
1973 | if (i == FTRACE_FUNC_HASHSIZE) | 2499 | if (i == FTRACE_FUNC_HASHSIZE) |
1974 | return; | 2500 | return; |
1975 | 2501 | ||
1976 | __register_ftrace_function(&trace_probe_ops); | 2502 | ret = __register_ftrace_function(&trace_probe_ops); |
1977 | ftrace_startup(0); | 2503 | if (!ret) |
2504 | ftrace_startup(&trace_probe_ops, 0); | ||
2505 | |||
1978 | ftrace_probe_registered = 1; | 2506 | ftrace_probe_registered = 1; |
1979 | } | 2507 | } |
1980 | 2508 | ||
1981 | static void __disable_ftrace_function_probe(void) | 2509 | static void __disable_ftrace_function_probe(void) |
1982 | { | 2510 | { |
2511 | int ret; | ||
1983 | int i; | 2512 | int i; |
1984 | 2513 | ||
1985 | if (!ftrace_probe_registered) | 2514 | if (!ftrace_probe_registered) |
@@ -1992,8 +2521,10 @@ static void __disable_ftrace_function_probe(void) | |||
1992 | } | 2521 | } |
1993 | 2522 | ||
1994 | /* no more funcs left */ | 2523 | /* no more funcs left */ |
1995 | __unregister_ftrace_function(&trace_probe_ops); | 2524 | ret = __unregister_ftrace_function(&trace_probe_ops); |
1996 | ftrace_shutdown(0); | 2525 | if (!ret) |
2526 | ftrace_shutdown(&trace_probe_ops, 0); | ||
2527 | |||
1997 | ftrace_probe_registered = 0; | 2528 | ftrace_probe_registered = 0; |
1998 | } | 2529 | } |
1999 | 2530 | ||
@@ -2029,12 +2560,13 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
2029 | return -EINVAL; | 2560 | return -EINVAL; |
2030 | 2561 | ||
2031 | mutex_lock(&ftrace_lock); | 2562 | mutex_lock(&ftrace_lock); |
2032 | do_for_each_ftrace_rec(pg, rec) { | ||
2033 | 2563 | ||
2034 | if (rec->flags & FTRACE_FL_FAILED) | 2564 | if (unlikely(ftrace_disabled)) |
2035 | continue; | 2565 | goto out_unlock; |
2566 | |||
2567 | do_for_each_ftrace_rec(pg, rec) { | ||
2036 | 2568 | ||
2037 | if (!ftrace_match_record(rec, search, len, type)) | 2569 | if (!ftrace_match_record(rec, NULL, search, len, type)) |
2038 | continue; | 2570 | continue; |
2039 | 2571 | ||
2040 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | 2572 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
@@ -2195,18 +2727,22 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd) | |||
2195 | return ret; | 2727 | return ret; |
2196 | } | 2728 | } |
2197 | 2729 | ||
2198 | static int ftrace_process_regex(char *buff, int len, int enable) | 2730 | static int ftrace_process_regex(struct ftrace_hash *hash, |
2731 | char *buff, int len, int enable) | ||
2199 | { | 2732 | { |
2200 | char *func, *command, *next = buff; | 2733 | char *func, *command, *next = buff; |
2201 | struct ftrace_func_command *p; | 2734 | struct ftrace_func_command *p; |
2202 | int ret = -EINVAL; | 2735 | int ret; |
2203 | 2736 | ||
2204 | func = strsep(&next, ":"); | 2737 | func = strsep(&next, ":"); |
2205 | 2738 | ||
2206 | if (!next) { | 2739 | if (!next) { |
2207 | if (ftrace_match_records(func, len, enable)) | 2740 | ret = ftrace_match_records(hash, func, len); |
2208 | return 0; | 2741 | if (!ret) |
2209 | return ret; | 2742 | ret = -EINVAL; |
2743 | if (ret < 0) | ||
2744 | return ret; | ||
2745 | return 0; | ||
2210 | } | 2746 | } |
2211 | 2747 | ||
2212 | /* command found */ | 2748 | /* command found */ |
@@ -2239,6 +2775,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
2239 | 2775 | ||
2240 | mutex_lock(&ftrace_regex_lock); | 2776 | mutex_lock(&ftrace_regex_lock); |
2241 | 2777 | ||
2778 | ret = -ENODEV; | ||
2779 | if (unlikely(ftrace_disabled)) | ||
2780 | goto out_unlock; | ||
2781 | |||
2242 | if (file->f_mode & FMODE_READ) { | 2782 | if (file->f_mode & FMODE_READ) { |
2243 | struct seq_file *m = file->private_data; | 2783 | struct seq_file *m = file->private_data; |
2244 | iter = m->private; | 2784 | iter = m->private; |
@@ -2250,7 +2790,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
2250 | 2790 | ||
2251 | if (read >= 0 && trace_parser_loaded(parser) && | 2791 | if (read >= 0 && trace_parser_loaded(parser) && |
2252 | !trace_parser_cont(parser)) { | 2792 | !trace_parser_cont(parser)) { |
2253 | ret = ftrace_process_regex(parser->buffer, | 2793 | ret = ftrace_process_regex(iter->hash, parser->buffer, |
2254 | parser->idx, enable); | 2794 | parser->idx, enable); |
2255 | trace_parser_clear(parser); | 2795 | trace_parser_clear(parser); |
2256 | if (ret) | 2796 | if (ret) |
@@ -2278,22 +2818,49 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, | |||
2278 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); | 2818 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); |
2279 | } | 2819 | } |
2280 | 2820 | ||
2281 | static void | 2821 | static int |
2282 | ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) | 2822 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, |
2823 | int reset, int enable) | ||
2283 | { | 2824 | { |
2825 | struct ftrace_hash **orig_hash; | ||
2826 | struct ftrace_hash *hash; | ||
2827 | int ret; | ||
2828 | |||
2829 | /* All global ops uses the global ops filters */ | ||
2830 | if (ops->flags & FTRACE_OPS_FL_GLOBAL) | ||
2831 | ops = &global_ops; | ||
2832 | |||
2284 | if (unlikely(ftrace_disabled)) | 2833 | if (unlikely(ftrace_disabled)) |
2285 | return; | 2834 | return -ENODEV; |
2835 | |||
2836 | if (enable) | ||
2837 | orig_hash = &ops->filter_hash; | ||
2838 | else | ||
2839 | orig_hash = &ops->notrace_hash; | ||
2840 | |||
2841 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | ||
2842 | if (!hash) | ||
2843 | return -ENOMEM; | ||
2286 | 2844 | ||
2287 | mutex_lock(&ftrace_regex_lock); | 2845 | mutex_lock(&ftrace_regex_lock); |
2288 | if (reset) | 2846 | if (reset) |
2289 | ftrace_filter_reset(enable); | 2847 | ftrace_filter_reset(hash); |
2290 | if (buf) | 2848 | if (buf) |
2291 | ftrace_match_records(buf, len, enable); | 2849 | ftrace_match_records(hash, buf, len); |
2850 | |||
2851 | mutex_lock(&ftrace_lock); | ||
2852 | ret = ftrace_hash_move(orig_hash, hash); | ||
2853 | mutex_unlock(&ftrace_lock); | ||
2854 | |||
2292 | mutex_unlock(&ftrace_regex_lock); | 2855 | mutex_unlock(&ftrace_regex_lock); |
2856 | |||
2857 | free_ftrace_hash(hash); | ||
2858 | return ret; | ||
2293 | } | 2859 | } |
2294 | 2860 | ||
2295 | /** | 2861 | /** |
2296 | * ftrace_set_filter - set a function to filter on in ftrace | 2862 | * ftrace_set_filter - set a function to filter on in ftrace |
2863 | * @ops - the ops to set the filter with | ||
2297 | * @buf - the string that holds the function filter text. | 2864 | * @buf - the string that holds the function filter text. |
2298 | * @len - the length of the string. | 2865 | * @len - the length of the string. |
2299 | * @reset - non zero to reset all filters before applying this filter. | 2866 | * @reset - non zero to reset all filters before applying this filter. |
@@ -2301,13 +2868,16 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) | |||
2301 | * Filters denote which functions should be enabled when tracing is enabled. | 2868 | * Filters denote which functions should be enabled when tracing is enabled. |
2302 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | 2869 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. |
2303 | */ | 2870 | */ |
2304 | void ftrace_set_filter(unsigned char *buf, int len, int reset) | 2871 | void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
2872 | int len, int reset) | ||
2305 | { | 2873 | { |
2306 | ftrace_set_regex(buf, len, reset, 1); | 2874 | ftrace_set_regex(ops, buf, len, reset, 1); |
2307 | } | 2875 | } |
2876 | EXPORT_SYMBOL_GPL(ftrace_set_filter); | ||
2308 | 2877 | ||
2309 | /** | 2878 | /** |
2310 | * ftrace_set_notrace - set a function to not trace in ftrace | 2879 | * ftrace_set_notrace - set a function to not trace in ftrace |
2880 | * @ops - the ops to set the notrace filter with | ||
2311 | * @buf - the string that holds the function notrace text. | 2881 | * @buf - the string that holds the function notrace text. |
2312 | * @len - the length of the string. | 2882 | * @len - the length of the string. |
2313 | * @reset - non zero to reset all filters before applying this filter. | 2883 | * @reset - non zero to reset all filters before applying this filter. |
@@ -2316,10 +2886,44 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset) | |||
2316 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | 2886 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled |
2317 | * for tracing. | 2887 | * for tracing. |
2318 | */ | 2888 | */ |
2319 | void ftrace_set_notrace(unsigned char *buf, int len, int reset) | 2889 | void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
2890 | int len, int reset) | ||
2320 | { | 2891 | { |
2321 | ftrace_set_regex(buf, len, reset, 0); | 2892 | ftrace_set_regex(ops, buf, len, reset, 0); |
2322 | } | 2893 | } |
2894 | EXPORT_SYMBOL_GPL(ftrace_set_notrace); | ||
2895 | /** | ||
2896 | * ftrace_set_global_filter - set a function to filter on with all global tracers | ||
2897 | * | ||
2898 | * @buf - the string that holds the function filter text. | ||
2899 | * @len - the length of the string. | ||
2900 | * @reset - non zero to reset all filters before applying this filter. | ||
2901 | * | ||
2902 | * Filters denote which functions should be enabled when tracing is enabled. | ||
2903 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | ||
2904 | */ | ||
2905 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset) | ||
2906 | { | ||
2907 | ftrace_set_regex(&global_ops, buf, len, reset, 1); | ||
2908 | } | ||
2909 | EXPORT_SYMBOL_GPL(ftrace_set_global_filter); | ||
2910 | |||
2911 | /** | ||
2912 | * ftrace_set_global_notrace - set a function to not trace with all global tracers | ||
2913 | * | ||
2914 | * @buf - the string that holds the function notrace text. | ||
2915 | * @len - the length of the string. | ||
2916 | * @reset - non zero to reset all filters before applying this filter. | ||
2917 | * | ||
2918 | * Notrace Filters denote which functions should not be enabled when tracing | ||
2919 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | ||
2920 | * for tracing. | ||
2921 | */ | ||
2922 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) | ||
2923 | { | ||
2924 | ftrace_set_regex(&global_ops, buf, len, reset, 0); | ||
2925 | } | ||
2926 | EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); | ||
2323 | 2927 | ||
2324 | /* | 2928 | /* |
2325 | * command line interface to allow users to set filters on boot up. | 2929 | * command line interface to allow users to set filters on boot up. |
@@ -2370,22 +2974,23 @@ static void __init set_ftrace_early_graph(char *buf) | |||
2370 | } | 2974 | } |
2371 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2975 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2372 | 2976 | ||
2373 | static void __init set_ftrace_early_filter(char *buf, int enable) | 2977 | static void __init |
2978 | set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable) | ||
2374 | { | 2979 | { |
2375 | char *func; | 2980 | char *func; |
2376 | 2981 | ||
2377 | while (buf) { | 2982 | while (buf) { |
2378 | func = strsep(&buf, ","); | 2983 | func = strsep(&buf, ","); |
2379 | ftrace_set_regex(func, strlen(func), 0, enable); | 2984 | ftrace_set_regex(ops, func, strlen(func), 0, enable); |
2380 | } | 2985 | } |
2381 | } | 2986 | } |
2382 | 2987 | ||
2383 | static void __init set_ftrace_early_filters(void) | 2988 | static void __init set_ftrace_early_filters(void) |
2384 | { | 2989 | { |
2385 | if (ftrace_filter_buf[0]) | 2990 | if (ftrace_filter_buf[0]) |
2386 | set_ftrace_early_filter(ftrace_filter_buf, 1); | 2991 | set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1); |
2387 | if (ftrace_notrace_buf[0]) | 2992 | if (ftrace_notrace_buf[0]) |
2388 | set_ftrace_early_filter(ftrace_notrace_buf, 0); | 2993 | set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0); |
2389 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 2994 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2390 | if (ftrace_graph_buf[0]) | 2995 | if (ftrace_graph_buf[0]) |
2391 | set_ftrace_early_graph(ftrace_graph_buf); | 2996 | set_ftrace_early_graph(ftrace_graph_buf); |
@@ -2393,11 +2998,14 @@ static void __init set_ftrace_early_filters(void) | |||
2393 | } | 2998 | } |
2394 | 2999 | ||
2395 | static int | 3000 | static int |
2396 | ftrace_regex_release(struct inode *inode, struct file *file, int enable) | 3001 | ftrace_regex_release(struct inode *inode, struct file *file) |
2397 | { | 3002 | { |
2398 | struct seq_file *m = (struct seq_file *)file->private_data; | 3003 | struct seq_file *m = (struct seq_file *)file->private_data; |
2399 | struct ftrace_iterator *iter; | 3004 | struct ftrace_iterator *iter; |
3005 | struct ftrace_hash **orig_hash; | ||
2400 | struct trace_parser *parser; | 3006 | struct trace_parser *parser; |
3007 | int filter_hash; | ||
3008 | int ret; | ||
2401 | 3009 | ||
2402 | mutex_lock(&ftrace_regex_lock); | 3010 | mutex_lock(&ftrace_regex_lock); |
2403 | if (file->f_mode & FMODE_READ) { | 3011 | if (file->f_mode & FMODE_READ) { |
@@ -2410,33 +3018,41 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) | |||
2410 | parser = &iter->parser; | 3018 | parser = &iter->parser; |
2411 | if (trace_parser_loaded(parser)) { | 3019 | if (trace_parser_loaded(parser)) { |
2412 | parser->buffer[parser->idx] = 0; | 3020 | parser->buffer[parser->idx] = 0; |
2413 | ftrace_match_records(parser->buffer, parser->idx, enable); | 3021 | ftrace_match_records(iter->hash, parser->buffer, parser->idx); |
2414 | } | 3022 | } |
2415 | 3023 | ||
2416 | mutex_lock(&ftrace_lock); | ||
2417 | if (ftrace_start_up && ftrace_enabled) | ||
2418 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | ||
2419 | mutex_unlock(&ftrace_lock); | ||
2420 | |||
2421 | trace_parser_put(parser); | 3024 | trace_parser_put(parser); |
3025 | |||
3026 | if (file->f_mode & FMODE_WRITE) { | ||
3027 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); | ||
3028 | |||
3029 | if (filter_hash) | ||
3030 | orig_hash = &iter->ops->filter_hash; | ||
3031 | else | ||
3032 | orig_hash = &iter->ops->notrace_hash; | ||
3033 | |||
3034 | mutex_lock(&ftrace_lock); | ||
3035 | /* | ||
3036 | * Remove the current set, update the hash and add | ||
3037 | * them back. | ||
3038 | */ | ||
3039 | ftrace_hash_rec_disable(iter->ops, filter_hash); | ||
3040 | ret = ftrace_hash_move(orig_hash, iter->hash); | ||
3041 | if (!ret) { | ||
3042 | ftrace_hash_rec_enable(iter->ops, filter_hash); | ||
3043 | if (iter->ops->flags & FTRACE_OPS_FL_ENABLED | ||
3044 | && ftrace_enabled) | ||
3045 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | ||
3046 | } | ||
3047 | mutex_unlock(&ftrace_lock); | ||
3048 | } | ||
3049 | free_ftrace_hash(iter->hash); | ||
2422 | kfree(iter); | 3050 | kfree(iter); |
2423 | 3051 | ||
2424 | mutex_unlock(&ftrace_regex_lock); | 3052 | mutex_unlock(&ftrace_regex_lock); |
2425 | return 0; | 3053 | return 0; |
2426 | } | 3054 | } |
2427 | 3055 | ||
2428 | static int | ||
2429 | ftrace_filter_release(struct inode *inode, struct file *file) | ||
2430 | { | ||
2431 | return ftrace_regex_release(inode, file, 1); | ||
2432 | } | ||
2433 | |||
2434 | static int | ||
2435 | ftrace_notrace_release(struct inode *inode, struct file *file) | ||
2436 | { | ||
2437 | return ftrace_regex_release(inode, file, 0); | ||
2438 | } | ||
2439 | |||
2440 | static const struct file_operations ftrace_avail_fops = { | 3056 | static const struct file_operations ftrace_avail_fops = { |
2441 | .open = ftrace_avail_open, | 3057 | .open = ftrace_avail_open, |
2442 | .read = seq_read, | 3058 | .read = seq_read, |
@@ -2444,8 +3060,8 @@ static const struct file_operations ftrace_avail_fops = { | |||
2444 | .release = seq_release_private, | 3060 | .release = seq_release_private, |
2445 | }; | 3061 | }; |
2446 | 3062 | ||
2447 | static const struct file_operations ftrace_failures_fops = { | 3063 | static const struct file_operations ftrace_enabled_fops = { |
2448 | .open = ftrace_failures_open, | 3064 | .open = ftrace_enabled_open, |
2449 | .read = seq_read, | 3065 | .read = seq_read, |
2450 | .llseek = seq_lseek, | 3066 | .llseek = seq_lseek, |
2451 | .release = seq_release_private, | 3067 | .release = seq_release_private, |
@@ -2456,7 +3072,7 @@ static const struct file_operations ftrace_filter_fops = { | |||
2456 | .read = seq_read, | 3072 | .read = seq_read, |
2457 | .write = ftrace_filter_write, | 3073 | .write = ftrace_filter_write, |
2458 | .llseek = ftrace_regex_lseek, | 3074 | .llseek = ftrace_regex_lseek, |
2459 | .release = ftrace_filter_release, | 3075 | .release = ftrace_regex_release, |
2460 | }; | 3076 | }; |
2461 | 3077 | ||
2462 | static const struct file_operations ftrace_notrace_fops = { | 3078 | static const struct file_operations ftrace_notrace_fops = { |
@@ -2464,7 +3080,7 @@ static const struct file_operations ftrace_notrace_fops = { | |||
2464 | .read = seq_read, | 3080 | .read = seq_read, |
2465 | .write = ftrace_notrace_write, | 3081 | .write = ftrace_notrace_write, |
2466 | .llseek = ftrace_regex_lseek, | 3082 | .llseek = ftrace_regex_lseek, |
2467 | .release = ftrace_notrace_release, | 3083 | .release = ftrace_regex_release, |
2468 | }; | 3084 | }; |
2469 | 3085 | ||
2470 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3086 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
@@ -2573,9 +3189,6 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) | |||
2573 | bool exists; | 3189 | bool exists; |
2574 | int i; | 3190 | int i; |
2575 | 3191 | ||
2576 | if (ftrace_disabled) | ||
2577 | return -ENODEV; | ||
2578 | |||
2579 | /* decode regex */ | 3192 | /* decode regex */ |
2580 | type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); | 3193 | type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); |
2581 | if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) | 3194 | if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) |
@@ -2584,12 +3197,18 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) | |||
2584 | search_len = strlen(search); | 3197 | search_len = strlen(search); |
2585 | 3198 | ||
2586 | mutex_lock(&ftrace_lock); | 3199 | mutex_lock(&ftrace_lock); |
3200 | |||
3201 | if (unlikely(ftrace_disabled)) { | ||
3202 | mutex_unlock(&ftrace_lock); | ||
3203 | return -ENODEV; | ||
3204 | } | ||
3205 | |||
2587 | do_for_each_ftrace_rec(pg, rec) { | 3206 | do_for_each_ftrace_rec(pg, rec) { |
2588 | 3207 | ||
2589 | if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) | 3208 | if (rec->flags & FTRACE_FL_FREE) |
2590 | continue; | 3209 | continue; |
2591 | 3210 | ||
2592 | if (ftrace_match_record(rec, search, search_len, type)) { | 3211 | if (ftrace_match_record(rec, NULL, search, search_len, type)) { |
2593 | /* if it is in the array */ | 3212 | /* if it is in the array */ |
2594 | exists = false; | 3213 | exists = false; |
2595 | for (i = 0; i < *idx; i++) { | 3214 | for (i = 0; i < *idx; i++) { |
@@ -2679,8 +3298,8 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | |||
2679 | trace_create_file("available_filter_functions", 0444, | 3298 | trace_create_file("available_filter_functions", 0444, |
2680 | d_tracer, NULL, &ftrace_avail_fops); | 3299 | d_tracer, NULL, &ftrace_avail_fops); |
2681 | 3300 | ||
2682 | trace_create_file("failures", 0444, | 3301 | trace_create_file("enabled_functions", 0444, |
2683 | d_tracer, NULL, &ftrace_failures_fops); | 3302 | d_tracer, NULL, &ftrace_enabled_fops); |
2684 | 3303 | ||
2685 | trace_create_file("set_ftrace_filter", 0644, d_tracer, | 3304 | trace_create_file("set_ftrace_filter", 0644, d_tracer, |
2686 | NULL, &ftrace_filter_fops); | 3305 | NULL, &ftrace_filter_fops); |
@@ -2703,7 +3322,6 @@ static int ftrace_process_locs(struct module *mod, | |||
2703 | { | 3322 | { |
2704 | unsigned long *p; | 3323 | unsigned long *p; |
2705 | unsigned long addr; | 3324 | unsigned long addr; |
2706 | unsigned long flags; | ||
2707 | 3325 | ||
2708 | mutex_lock(&ftrace_lock); | 3326 | mutex_lock(&ftrace_lock); |
2709 | p = start; | 3327 | p = start; |
@@ -2720,10 +3338,7 @@ static int ftrace_process_locs(struct module *mod, | |||
2720 | ftrace_record_ip(addr); | 3338 | ftrace_record_ip(addr); |
2721 | } | 3339 | } |
2722 | 3340 | ||
2723 | /* disable interrupts to prevent kstop machine */ | ||
2724 | local_irq_save(flags); | ||
2725 | ftrace_update_code(mod); | 3341 | ftrace_update_code(mod); |
2726 | local_irq_restore(flags); | ||
2727 | mutex_unlock(&ftrace_lock); | 3342 | mutex_unlock(&ftrace_lock); |
2728 | 3343 | ||
2729 | return 0; | 3344 | return 0; |
@@ -2735,10 +3350,11 @@ void ftrace_release_mod(struct module *mod) | |||
2735 | struct dyn_ftrace *rec; | 3350 | struct dyn_ftrace *rec; |
2736 | struct ftrace_page *pg; | 3351 | struct ftrace_page *pg; |
2737 | 3352 | ||
3353 | mutex_lock(&ftrace_lock); | ||
3354 | |||
2738 | if (ftrace_disabled) | 3355 | if (ftrace_disabled) |
2739 | return; | 3356 | goto out_unlock; |
2740 | 3357 | ||
2741 | mutex_lock(&ftrace_lock); | ||
2742 | do_for_each_ftrace_rec(pg, rec) { | 3358 | do_for_each_ftrace_rec(pg, rec) { |
2743 | if (within_module_core(rec->ip, mod)) { | 3359 | if (within_module_core(rec->ip, mod)) { |
2744 | /* | 3360 | /* |
@@ -2749,6 +3365,7 @@ void ftrace_release_mod(struct module *mod) | |||
2749 | ftrace_free_rec(rec); | 3365 | ftrace_free_rec(rec); |
2750 | } | 3366 | } |
2751 | } while_for_each_ftrace_rec(); | 3367 | } while_for_each_ftrace_rec(); |
3368 | out_unlock: | ||
2752 | mutex_unlock(&ftrace_lock); | 3369 | mutex_unlock(&ftrace_lock); |
2753 | } | 3370 | } |
2754 | 3371 | ||
@@ -2835,6 +3452,10 @@ void __init ftrace_init(void) | |||
2835 | 3452 | ||
2836 | #else | 3453 | #else |
2837 | 3454 | ||
3455 | static struct ftrace_ops global_ops = { | ||
3456 | .func = ftrace_stub, | ||
3457 | }; | ||
3458 | |||
2838 | static int __init ftrace_nodyn_init(void) | 3459 | static int __init ftrace_nodyn_init(void) |
2839 | { | 3460 | { |
2840 | ftrace_enabled = 1; | 3461 | ftrace_enabled = 1; |
@@ -2845,12 +3466,38 @@ device_initcall(ftrace_nodyn_init); | |||
2845 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } | 3466 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } |
2846 | static inline void ftrace_startup_enable(int command) { } | 3467 | static inline void ftrace_startup_enable(int command) { } |
2847 | /* Keep as macros so we do not need to define the commands */ | 3468 | /* Keep as macros so we do not need to define the commands */ |
2848 | # define ftrace_startup(command) do { } while (0) | 3469 | # define ftrace_startup(ops, command) do { } while (0) |
2849 | # define ftrace_shutdown(command) do { } while (0) | 3470 | # define ftrace_shutdown(ops, command) do { } while (0) |
2850 | # define ftrace_startup_sysctl() do { } while (0) | 3471 | # define ftrace_startup_sysctl() do { } while (0) |
2851 | # define ftrace_shutdown_sysctl() do { } while (0) | 3472 | # define ftrace_shutdown_sysctl() do { } while (0) |
3473 | |||
3474 | static inline int | ||
3475 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | ||
3476 | { | ||
3477 | return 1; | ||
3478 | } | ||
3479 | |||
2852 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 3480 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
2853 | 3481 | ||
3482 | static void | ||
3483 | ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) | ||
3484 | { | ||
3485 | struct ftrace_ops *op; | ||
3486 | |||
3487 | /* | ||
3488 | * Some of the ops may be dynamically allocated, | ||
3489 | * they must be freed after a synchronize_sched(). | ||
3490 | */ | ||
3491 | preempt_disable_notrace(); | ||
3492 | op = rcu_dereference_raw(ftrace_ops_list); | ||
3493 | while (op != &ftrace_list_end) { | ||
3494 | if (ftrace_ops_test(op, ip)) | ||
3495 | op->func(ip, parent_ip); | ||
3496 | op = rcu_dereference_raw(op->next); | ||
3497 | }; | ||
3498 | preempt_enable_notrace(); | ||
3499 | } | ||
3500 | |||
2854 | static void clear_ftrace_swapper(void) | 3501 | static void clear_ftrace_swapper(void) |
2855 | { | 3502 | { |
2856 | struct task_struct *p; | 3503 | struct task_struct *p; |
@@ -3143,19 +3790,23 @@ void ftrace_kill(void) | |||
3143 | */ | 3790 | */ |
3144 | int register_ftrace_function(struct ftrace_ops *ops) | 3791 | int register_ftrace_function(struct ftrace_ops *ops) |
3145 | { | 3792 | { |
3146 | int ret; | 3793 | int ret = -1; |
3147 | |||
3148 | if (unlikely(ftrace_disabled)) | ||
3149 | return -1; | ||
3150 | 3794 | ||
3151 | mutex_lock(&ftrace_lock); | 3795 | mutex_lock(&ftrace_lock); |
3152 | 3796 | ||
3797 | if (unlikely(ftrace_disabled)) | ||
3798 | goto out_unlock; | ||
3799 | |||
3153 | ret = __register_ftrace_function(ops); | 3800 | ret = __register_ftrace_function(ops); |
3154 | ftrace_startup(0); | 3801 | if (!ret) |
3802 | ftrace_startup(ops, 0); | ||
3155 | 3803 | ||
3804 | |||
3805 | out_unlock: | ||
3156 | mutex_unlock(&ftrace_lock); | 3806 | mutex_unlock(&ftrace_lock); |
3157 | return ret; | 3807 | return ret; |
3158 | } | 3808 | } |
3809 | EXPORT_SYMBOL_GPL(register_ftrace_function); | ||
3159 | 3810 | ||
3160 | /** | 3811 | /** |
3161 | * unregister_ftrace_function - unregister a function for profiling. | 3812 | * unregister_ftrace_function - unregister a function for profiling. |
@@ -3169,25 +3820,27 @@ int unregister_ftrace_function(struct ftrace_ops *ops) | |||
3169 | 3820 | ||
3170 | mutex_lock(&ftrace_lock); | 3821 | mutex_lock(&ftrace_lock); |
3171 | ret = __unregister_ftrace_function(ops); | 3822 | ret = __unregister_ftrace_function(ops); |
3172 | ftrace_shutdown(0); | 3823 | if (!ret) |
3824 | ftrace_shutdown(ops, 0); | ||
3173 | mutex_unlock(&ftrace_lock); | 3825 | mutex_unlock(&ftrace_lock); |
3174 | 3826 | ||
3175 | return ret; | 3827 | return ret; |
3176 | } | 3828 | } |
3829 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); | ||
3177 | 3830 | ||
3178 | int | 3831 | int |
3179 | ftrace_enable_sysctl(struct ctl_table *table, int write, | 3832 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
3180 | void __user *buffer, size_t *lenp, | 3833 | void __user *buffer, size_t *lenp, |
3181 | loff_t *ppos) | 3834 | loff_t *ppos) |
3182 | { | 3835 | { |
3183 | int ret; | 3836 | int ret = -ENODEV; |
3184 | |||
3185 | if (unlikely(ftrace_disabled)) | ||
3186 | return -ENODEV; | ||
3187 | 3837 | ||
3188 | mutex_lock(&ftrace_lock); | 3838 | mutex_lock(&ftrace_lock); |
3189 | 3839 | ||
3190 | ret = proc_dointvec(table, write, buffer, lenp, ppos); | 3840 | if (unlikely(ftrace_disabled)) |
3841 | goto out; | ||
3842 | |||
3843 | ret = proc_dointvec(table, write, buffer, lenp, ppos); | ||
3191 | 3844 | ||
3192 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) | 3845 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) |
3193 | goto out; | 3846 | goto out; |
@@ -3199,11 +3852,11 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
3199 | ftrace_startup_sysctl(); | 3852 | ftrace_startup_sysctl(); |
3200 | 3853 | ||
3201 | /* we are starting ftrace again */ | 3854 | /* we are starting ftrace again */ |
3202 | if (ftrace_list != &ftrace_list_end) { | 3855 | if (ftrace_ops_list != &ftrace_list_end) { |
3203 | if (ftrace_list->next == &ftrace_list_end) | 3856 | if (ftrace_ops_list->next == &ftrace_list_end) |
3204 | ftrace_trace_function = ftrace_list->func; | 3857 | ftrace_trace_function = ftrace_ops_list->func; |
3205 | else | 3858 | else |
3206 | ftrace_trace_function = ftrace_list_func; | 3859 | ftrace_trace_function = ftrace_ops_list_func; |
3207 | } | 3860 | } |
3208 | 3861 | ||
3209 | } else { | 3862 | } else { |
@@ -3392,7 +4045,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
3392 | ftrace_graph_return = retfunc; | 4045 | ftrace_graph_return = retfunc; |
3393 | ftrace_graph_entry = entryfunc; | 4046 | ftrace_graph_entry = entryfunc; |
3394 | 4047 | ||
3395 | ftrace_startup(FTRACE_START_FUNC_RET); | 4048 | ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); |
3396 | 4049 | ||
3397 | out: | 4050 | out: |
3398 | mutex_unlock(&ftrace_lock); | 4051 | mutex_unlock(&ftrace_lock); |
@@ -3409,7 +4062,7 @@ void unregister_ftrace_graph(void) | |||
3409 | ftrace_graph_active--; | 4062 | ftrace_graph_active--; |
3410 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 4063 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
3411 | ftrace_graph_entry = ftrace_graph_entry_stub; | 4064 | ftrace_graph_entry = ftrace_graph_entry_stub; |
3412 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); | 4065 | ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); |
3413 | unregister_pm_notifier(&ftrace_suspend_notifier); | 4066 | unregister_pm_notifier(&ftrace_suspend_notifier); |
3414 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | 4067 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
3415 | 4068 | ||
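The ftrace.c changes above replace the single global filter with per-ops filter/notrace hashes: ftrace_set_filter() and ftrace_set_notrace() now take the ftrace_ops they act on, the old behaviour survives as ftrace_set_global_filter()/ftrace_set_global_notrace(), and callers that still want the shared filter mark their ops with FTRACE_OPS_FL_GLOBAL. Below is a minimal sketch of the new per-ops interface, assuming a kernel with this series applied; the callback, counter and filter target ("do_sys_open") are illustrative only and mirror the selftest probes added later in this patch.

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/string.h>

/* Hypothetical callback: count hits on the filtered function only. */
static unsigned long my_hits;

static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	my_hits++;
}

/* No FTRACE_OPS_FL_GLOBAL, so this ops carries its own filter hash. */
static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static int __init my_ftrace_example_init(void)
{
	char func[] = "do_sys_open";	/* assumed to be a traceable function */

	/* The filter applies to my_ops only, not to the global tracers. */
	ftrace_set_filter(&my_ops, func, strlen(func), 1);
	return register_ftrace_function(&my_ops);
}

static void __exit my_ftrace_example_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_ftrace_example_init);
module_exit(my_ftrace_example_exit);
MODULE_LICENSE("GPL");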
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1cb49be7c7fb..ee9c921d7f21 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -2014,9 +2014,10 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
2014 | { | 2014 | { |
2015 | enum print_line_t ret; | 2015 | enum print_line_t ret; |
2016 | 2016 | ||
2017 | if (iter->lost_events) | 2017 | if (iter->lost_events && |
2018 | trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", | 2018 | !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", |
2019 | iter->cpu, iter->lost_events); | 2019 | iter->cpu, iter->lost_events)) |
2020 | return TRACE_TYPE_PARTIAL_LINE; | ||
2020 | 2021 | ||
2021 | if (iter->trace && iter->trace->print_line) { | 2022 | if (iter->trace && iter->trace->print_line) { |
2022 | ret = iter->trace->print_line(iter); | 2023 | ret = iter->trace->print_line(iter); |
@@ -3230,6 +3231,14 @@ waitagain: | |||
3230 | 3231 | ||
3231 | if (iter->seq.len >= cnt) | 3232 | if (iter->seq.len >= cnt) |
3232 | break; | 3233 | break; |
3234 | |||
3235 | /* | ||
3236 | * Setting the full flag means we reached the trace_seq buffer | ||
3237 | * size and we should leave by partial output condition above. | ||
3238 | * One of the trace_seq_* functions is not used properly. | ||
3239 | */ | ||
3240 | WARN_ONCE(iter->seq.full, "full flag set for trace type %d", | ||
3241 | iter->ent->type); | ||
3233 | } | 3242 | } |
3234 | trace_access_unlock(iter->cpu_file); | 3243 | trace_access_unlock(iter->cpu_file); |
3235 | trace_event_read_unlock(); | 3244 | trace_event_read_unlock(); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 5e9dfc6286dd..6b69c4bd306f 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -419,6 +419,8 @@ extern void trace_find_cmdline(int pid, char comm[]); | |||
419 | extern unsigned long ftrace_update_tot_cnt; | 419 | extern unsigned long ftrace_update_tot_cnt; |
420 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func | 420 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func |
421 | extern int DYN_FTRACE_TEST_NAME(void); | 421 | extern int DYN_FTRACE_TEST_NAME(void); |
422 | #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 | ||
423 | extern int DYN_FTRACE_TEST_NAME2(void); | ||
422 | #endif | 424 | #endif |
423 | 425 | ||
424 | extern int ring_buffer_expanded; | 426 | extern int ring_buffer_expanded; |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 16aee4d44e8f..8d0e1cc4e974 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -149,11 +149,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
149 | static struct ftrace_ops trace_ops __read_mostly = | 149 | static struct ftrace_ops trace_ops __read_mostly = |
150 | { | 150 | { |
151 | .func = function_trace_call, | 151 | .func = function_trace_call, |
152 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
152 | }; | 153 | }; |
153 | 154 | ||
154 | static struct ftrace_ops trace_stack_ops __read_mostly = | 155 | static struct ftrace_ops trace_stack_ops __read_mostly = |
155 | { | 156 | { |
156 | .func = function_stack_trace_call, | 157 | .func = function_stack_trace_call, |
158 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
157 | }; | 159 | }; |
158 | 160 | ||
159 | /* Our two options */ | 161 | /* Our two options */ |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index a4969b47afc1..c77424be284d 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -153,6 +153,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
153 | static struct ftrace_ops trace_ops __read_mostly = | 153 | static struct ftrace_ops trace_ops __read_mostly = |
154 | { | 154 | { |
155 | .func = irqsoff_tracer_call, | 155 | .func = irqsoff_tracer_call, |
156 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
156 | }; | 157 | }; |
157 | #endif /* CONFIG_FUNCTION_TRACER */ | 158 | #endif /* CONFIG_FUNCTION_TRACER */ |
158 | 159 | ||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 456be9063c2d..cf535ccedc86 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -830,6 +830,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event); | |||
830 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, | 830 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, |
831 | struct trace_event *event) | 831 | struct trace_event *event) |
832 | { | 832 | { |
833 | if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type)) | ||
834 | return TRACE_TYPE_PARTIAL_LINE; | ||
835 | |||
833 | return TRACE_TYPE_HANDLED; | 836 | return TRACE_TYPE_HANDLED; |
834 | } | 837 | } |
835 | 838 | ||
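Both the trace.c hunk above and trace_nop_print() now check the return value of trace_seq_printf() and report TRACE_TYPE_PARTIAL_LINE once the trace_seq buffer fills, instead of silently truncating output (the new WARN_ONCE in tracing_read_pipe() catches handlers that forget). A sketch of a custom output callback following the same convention is shown below; the event name and fields are hypothetical.

#include <linux/ftrace_event.h>
#include <linux/trace_seq.h>

/* Propagate a full trace_seq as a partial line so the core can retry it. */
static enum print_line_t my_event_print(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	if (!trace_seq_printf(&iter->seq, "my_event: cpu=%d type=%d\n",
			      iter->cpu, iter->ent->type))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions my_event_funcs = {
	.trace	= my_event_print,
};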
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 2547d8813cf0..dff763b7baf1 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
@@ -32,7 +32,7 @@ static DEFINE_MUTEX(btrace_mutex); | |||
32 | 32 | ||
33 | struct trace_bprintk_fmt { | 33 | struct trace_bprintk_fmt { |
34 | struct list_head list; | 34 | struct list_head list; |
35 | char fmt[0]; | 35 | const char *fmt; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) | 38 | static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) |
@@ -49,6 +49,7 @@ static | |||
49 | void hold_module_trace_bprintk_format(const char **start, const char **end) | 49 | void hold_module_trace_bprintk_format(const char **start, const char **end) |
50 | { | 50 | { |
51 | const char **iter; | 51 | const char **iter; |
52 | char *fmt; | ||
52 | 53 | ||
53 | mutex_lock(&btrace_mutex); | 54 | mutex_lock(&btrace_mutex); |
54 | for (iter = start; iter < end; iter++) { | 55 | for (iter = start; iter < end; iter++) { |
@@ -58,14 +59,18 @@ void hold_module_trace_bprintk_format(const char **start, const char **end) | |||
58 | continue; | 59 | continue; |
59 | } | 60 | } |
60 | 61 | ||
61 | tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt) | 62 | tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL); |
62 | + strlen(*iter) + 1, GFP_KERNEL); | 63 | if (tb_fmt) |
63 | if (tb_fmt) { | 64 | fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL); |
65 | if (tb_fmt && fmt) { | ||
64 | list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list); | 66 | list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list); |
65 | strcpy(tb_fmt->fmt, *iter); | 67 | strcpy(fmt, *iter); |
68 | tb_fmt->fmt = fmt; | ||
66 | *iter = tb_fmt->fmt; | 69 | *iter = tb_fmt->fmt; |
67 | } else | 70 | } else { |
71 | kfree(tb_fmt); | ||
68 | *iter = NULL; | 72 | *iter = NULL; |
73 | } | ||
69 | } | 74 | } |
70 | mutex_unlock(&btrace_mutex); | 75 | mutex_unlock(&btrace_mutex); |
71 | } | 76 | } |
@@ -84,6 +89,76 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self, | |||
84 | return 0; | 89 | return 0; |
85 | } | 90 | } |
86 | 91 | ||
92 | /* | ||
93 | * The debugfs/tracing/printk_formats file maps the addresses with | ||
94 | * the ASCII formats that are used in the bprintk events in the | ||
95 | * buffer. For userspace tools to be able to decode the events from | ||
96 | * the buffer, they need to be able to map the address with the format. | ||
97 | * | ||
98 | * The addresses of the bprintk formats are in their own section | ||
99 | * __trace_printk_fmt. But for modules we copy them into a linked list. | ||

100 | * The code to print the formats and their addresses passes around the | ||
101 | * address of the fmt string. If the fmt address passed into the seq | ||
102 | * functions is within the kernel core __trace_printk_fmt section, then | ||
103 | * it simply uses the next pointer in the list. | ||
104 | * | ||
105 | * When the fmt pointer is outside the kernel core __trace_printk_fmt | ||
106 | * section, then we need to read the linked list pointers. The trick is | ||
107 | * we pass the address of the string to the seq function just like | ||
108 | * we do for the kernel core formats. To get back the structure that | ||
109 | * holds the format, we simply use container_of() and then go to the | ||
110 | * next format in the list. | ||
111 | */ | ||
112 | static const char ** | ||
113 | find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) | ||
114 | { | ||
115 | struct trace_bprintk_fmt *mod_fmt; | ||
116 | |||
117 | if (list_empty(&trace_bprintk_fmt_list)) | ||
118 | return NULL; | ||
119 | |||
120 | /* | ||
121 | * v will point to the address of the fmt record from t_next | ||
122 | * v will be NULL from t_start. | ||
123 | * If this is the first pointer or called from start | ||
124 | * then we need to walk the list. | ||
125 | */ | ||
126 | if (!v || start_index == *pos) { | ||
127 | struct trace_bprintk_fmt *p; | ||
128 | |||
129 | /* search the module list */ | ||
130 | list_for_each_entry(p, &trace_bprintk_fmt_list, list) { | ||
131 | if (start_index == *pos) | ||
132 | return &p->fmt; | ||
133 | start_index++; | ||
134 | } | ||
135 | /* pos > index */ | ||
136 | return NULL; | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * v points to the address of the fmt field in the mod list | ||
141 | * structure that holds the module print format. | ||
142 | */ | ||
143 | mod_fmt = container_of(v, typeof(*mod_fmt), fmt); | ||
144 | if (mod_fmt->list.next == &trace_bprintk_fmt_list) | ||
145 | return NULL; | ||
146 | |||
147 | mod_fmt = container_of(mod_fmt->list.next, typeof(*mod_fmt), list); | ||
148 | |||
149 | return &mod_fmt->fmt; | ||
150 | } | ||
151 | |||
152 | static void format_mod_start(void) | ||
153 | { | ||
154 | mutex_lock(&btrace_mutex); | ||
155 | } | ||
156 | |||
157 | static void format_mod_stop(void) | ||
158 | { | ||
159 | mutex_unlock(&btrace_mutex); | ||
160 | } | ||
161 | |||
87 | #else /* !CONFIG_MODULES */ | 162 | #else /* !CONFIG_MODULES */ |
88 | __init static int | 163 | __init static int |
89 | module_trace_bprintk_format_notify(struct notifier_block *self, | 164 | module_trace_bprintk_format_notify(struct notifier_block *self, |
@@ -91,6 +166,13 @@ module_trace_bprintk_format_notify(struct notifier_block *self, | |||
91 | { | 166 | { |
92 | return 0; | 167 | return 0; |
93 | } | 168 | } |
169 | static inline const char ** | ||
170 | find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) | ||
171 | { | ||
172 | return NULL; | ||
173 | } | ||
174 | static inline void format_mod_start(void) { } | ||
175 | static inline void format_mod_stop(void) { } | ||
94 | #endif /* CONFIG_MODULES */ | 176 | #endif /* CONFIG_MODULES */ |
95 | 177 | ||
96 | 178 | ||
@@ -153,20 +235,33 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) | |||
153 | } | 235 | } |
154 | EXPORT_SYMBOL_GPL(__ftrace_vprintk); | 236 | EXPORT_SYMBOL_GPL(__ftrace_vprintk); |
155 | 237 | ||
238 | static const char **find_next(void *v, loff_t *pos) | ||
239 | { | ||
240 | const char **fmt = v; | ||
241 | int start_index; | ||
242 | |||
243 | if (!fmt) | ||
244 | fmt = __start___trace_bprintk_fmt + *pos; | ||
245 | |||
246 | start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; | ||
247 | |||
248 | if (*pos < start_index) | ||
249 | return fmt; | ||
250 | |||
251 | return find_next_mod_format(start_index, v, fmt, pos); | ||
252 | } | ||
253 | |||
156 | static void * | 254 | static void * |
157 | t_start(struct seq_file *m, loff_t *pos) | 255 | t_start(struct seq_file *m, loff_t *pos) |
158 | { | 256 | { |
159 | const char **fmt = __start___trace_bprintk_fmt + *pos; | 257 | format_mod_start(); |
160 | 258 | return find_next(NULL, pos); | |
161 | if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt) | ||
162 | return NULL; | ||
163 | return fmt; | ||
164 | } | 259 | } |
165 | 260 | ||
166 | static void *t_next(struct seq_file *m, void * v, loff_t *pos) | 261 | static void *t_next(struct seq_file *m, void * v, loff_t *pos) |
167 | { | 262 | { |
168 | (*pos)++; | 263 | (*pos)++; |
169 | return t_start(m, pos); | 264 | return find_next(v, pos); |
170 | } | 265 | } |
171 | 266 | ||
172 | static int t_show(struct seq_file *m, void *v) | 267 | static int t_show(struct seq_file *m, void *v) |
@@ -205,6 +300,7 @@ static int t_show(struct seq_file *m, void *v) | |||
205 | 300 | ||
206 | static void t_stop(struct seq_file *m, void *p) | 301 | static void t_stop(struct seq_file *m, void *p) |
207 | { | 302 | { |
303 | format_mod_stop(); | ||
208 | } | 304 | } |
209 | 305 | ||
210 | static const struct seq_operations show_format_seq_ops = { | 306 | static const struct seq_operations show_format_seq_ops = { |
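The printk_formats iterator above now takes btrace_mutex in ->start() and drops it in ->stop() (format_mod_start()/format_mod_stop()), so ->next() can walk the module format list safely while the file is being read. Reduced to a generic list-backed seq_file, the same shape looks roughly like the sketch below; the item type, list and lock names are hypothetical.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

struct my_item {
	struct list_head list;
	const char *name;
};

static LIST_HEAD(my_items);
static DEFINE_MUTEX(my_items_lock);

static void *my_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&my_items_lock);		/* held across the whole walk */
	return seq_list_start(&my_items, *pos);
}

static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &my_items, pos);
}

static void my_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&my_items_lock);		/* matches my_seq_start() */
}

static int my_seq_show(struct seq_file *m, void *v)
{
	struct my_item *it = list_entry(v, struct my_item, list);

	seq_printf(m, "%s\n", it->name);
	return 0;
}

static const struct seq_operations my_seq_ops = {
	.start	= my_seq_start,
	.next	= my_seq_next,
	.stop	= my_seq_stop,
	.show	= my_seq_show,
};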
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 7319559ed59f..f029dd4fd2ca 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -129,6 +129,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
129 | static struct ftrace_ops trace_ops __read_mostly = | 129 | static struct ftrace_ops trace_ops __read_mostly = |
130 | { | 130 | { |
131 | .func = wakeup_tracer_call, | 131 | .func = wakeup_tracer_call, |
132 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
132 | }; | 133 | }; |
133 | #endif /* CONFIG_FUNCTION_TRACER */ | 134 | #endif /* CONFIG_FUNCTION_TRACER */ |
134 | 135 | ||
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 659732eba07c..288541f977fb 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -101,6 +101,206 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) | |||
101 | 101 | ||
102 | #ifdef CONFIG_DYNAMIC_FTRACE | 102 | #ifdef CONFIG_DYNAMIC_FTRACE |
103 | 103 | ||
104 | static int trace_selftest_test_probe1_cnt; | ||
105 | static void trace_selftest_test_probe1_func(unsigned long ip, | ||
106 | unsigned long pip) | ||
107 | { | ||
108 | trace_selftest_test_probe1_cnt++; | ||
109 | } | ||
110 | |||
111 | static int trace_selftest_test_probe2_cnt; | ||
112 | static void trace_selftest_test_probe2_func(unsigned long ip, | ||
113 | unsigned long pip) | ||
114 | { | ||
115 | trace_selftest_test_probe2_cnt++; | ||
116 | } | ||
117 | |||
118 | static int trace_selftest_test_probe3_cnt; | ||
119 | static void trace_selftest_test_probe3_func(unsigned long ip, | ||
120 | unsigned long pip) | ||
121 | { | ||
122 | trace_selftest_test_probe3_cnt++; | ||
123 | } | ||
124 | |||
125 | static int trace_selftest_test_global_cnt; | ||
126 | static void trace_selftest_test_global_func(unsigned long ip, | ||
127 | unsigned long pip) | ||
128 | { | ||
129 | trace_selftest_test_global_cnt++; | ||
130 | } | ||
131 | |||
132 | static int trace_selftest_test_dyn_cnt; | ||
133 | static void trace_selftest_test_dyn_func(unsigned long ip, | ||
134 | unsigned long pip) | ||
135 | { | ||
136 | trace_selftest_test_dyn_cnt++; | ||
137 | } | ||
138 | |||
139 | static struct ftrace_ops test_probe1 = { | ||
140 | .func = trace_selftest_test_probe1_func, | ||
141 | }; | ||
142 | |||
143 | static struct ftrace_ops test_probe2 = { | ||
144 | .func = trace_selftest_test_probe2_func, | ||
145 | }; | ||
146 | |||
147 | static struct ftrace_ops test_probe3 = { | ||
148 | .func = trace_selftest_test_probe3_func, | ||
149 | }; | ||
150 | |||
151 | static struct ftrace_ops test_global = { | ||
152 | .func = trace_selftest_test_global_func, | ||
153 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
154 | }; | ||
155 | |||
156 | static void print_counts(void) | ||
157 | { | ||
158 | printk("(%d %d %d %d %d) ", | ||
159 | trace_selftest_test_probe1_cnt, | ||
160 | trace_selftest_test_probe2_cnt, | ||
161 | trace_selftest_test_probe3_cnt, | ||
162 | trace_selftest_test_global_cnt, | ||
163 | trace_selftest_test_dyn_cnt); | ||
164 | } | ||
165 | |||
166 | static void reset_counts(void) | ||
167 | { | ||
168 | trace_selftest_test_probe1_cnt = 0; | ||
169 | trace_selftest_test_probe2_cnt = 0; | ||
170 | trace_selftest_test_probe3_cnt = 0; | ||
171 | trace_selftest_test_global_cnt = 0; | ||
172 | trace_selftest_test_dyn_cnt = 0; | ||
173 | } | ||
174 | |||
175 | static int trace_selftest_ops(int cnt) | ||
176 | { | ||
177 | int save_ftrace_enabled = ftrace_enabled; | ||
178 | struct ftrace_ops *dyn_ops; | ||
179 | char *func1_name; | ||
180 | char *func2_name; | ||
181 | int len1; | ||
182 | int len2; | ||
183 | int ret = -1; | ||
184 | |||
185 | printk(KERN_CONT "PASSED\n"); | ||
186 | pr_info("Testing dynamic ftrace ops #%d: ", cnt); | ||
187 | |||
188 | ftrace_enabled = 1; | ||
189 | reset_counts(); | ||
190 | |||
191 | /* Handle PPC64 '.' name */ | ||
192 | func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME); | ||
193 | func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2); | ||
194 | len1 = strlen(func1_name); | ||
195 | len2 = strlen(func2_name); | ||
196 | |||
197 | /* | ||
198 | * Probe 1 will trace function 1. | ||
199 | * Probe 2 will trace function 2. | ||
200 | * Probe 3 will trace functions 1 and 2. | ||
201 | */ | ||
202 | ftrace_set_filter(&test_probe1, func1_name, len1, 1); | ||
203 | ftrace_set_filter(&test_probe2, func2_name, len2, 1); | ||
204 | ftrace_set_filter(&test_probe3, func1_name, len1, 1); | ||
205 | ftrace_set_filter(&test_probe3, func2_name, len2, 0); | ||
206 | |||
207 | register_ftrace_function(&test_probe1); | ||
208 | register_ftrace_function(&test_probe2); | ||
209 | register_ftrace_function(&test_probe3); | ||
210 | register_ftrace_function(&test_global); | ||
211 | |||
212 | DYN_FTRACE_TEST_NAME(); | ||
213 | |||
214 | print_counts(); | ||
215 | |||
216 | if (trace_selftest_test_probe1_cnt != 1) | ||
217 | goto out; | ||
218 | if (trace_selftest_test_probe2_cnt != 0) | ||
219 | goto out; | ||
220 | if (trace_selftest_test_probe3_cnt != 1) | ||
221 | goto out; | ||
222 | if (trace_selftest_test_global_cnt == 0) | ||
223 | goto out; | ||
224 | |||
225 | DYN_FTRACE_TEST_NAME2(); | ||
226 | |||
227 | print_counts(); | ||
228 | |||
229 | if (trace_selftest_test_probe1_cnt != 1) | ||
230 | goto out; | ||
231 | if (trace_selftest_test_probe2_cnt != 1) | ||
232 | goto out; | ||
233 | if (trace_selftest_test_probe3_cnt != 2) | ||
234 | goto out; | ||
235 | |||
236 | /* Add a dynamic probe */ | ||
237 | dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL); | ||
238 | if (!dyn_ops) { | ||
239 | printk("MEMORY ERROR "); | ||
240 | goto out; | ||
241 | } | ||
242 | |||
243 | dyn_ops->func = trace_selftest_test_dyn_func; | ||
244 | |||
245 | register_ftrace_function(dyn_ops); | ||
246 | |||
247 | trace_selftest_test_global_cnt = 0; | ||
248 | |||
249 | DYN_FTRACE_TEST_NAME(); | ||
250 | |||
251 | print_counts(); | ||
252 | |||
253 | if (trace_selftest_test_probe1_cnt != 2) | ||
254 | goto out_free; | ||
255 | if (trace_selftest_test_probe2_cnt != 1) | ||
256 | goto out_free; | ||
257 | if (trace_selftest_test_probe3_cnt != 3) | ||
258 | goto out_free; | ||
259 | if (trace_selftest_test_global_cnt == 0) | ||
260 | goto out; | ||
261 | if (trace_selftest_test_dyn_cnt == 0) | ||
262 | goto out_free; | ||
263 | |||
264 | DYN_FTRACE_TEST_NAME2(); | ||
265 | |||
266 | print_counts(); | ||
267 | |||
268 | if (trace_selftest_test_probe1_cnt != 2) | ||
269 | goto out_free; | ||
270 | if (trace_selftest_test_probe2_cnt != 2) | ||
271 | goto out_free; | ||
272 | if (trace_selftest_test_probe3_cnt != 4) | ||
273 | goto out_free; | ||
274 | |||
275 | ret = 0; | ||
276 | out_free: | ||
277 | unregister_ftrace_function(dyn_ops); | ||
278 | kfree(dyn_ops); | ||
279 | |||
280 | out: | ||
281 | /* Purposely unregister in the same order */ | ||
282 | unregister_ftrace_function(&test_probe1); | ||
283 | unregister_ftrace_function(&test_probe2); | ||
284 | unregister_ftrace_function(&test_probe3); | ||
285 | unregister_ftrace_function(&test_global); | ||
286 | |||
287 | /* Make sure everything is off */ | ||
288 | reset_counts(); | ||
289 | DYN_FTRACE_TEST_NAME(); | ||
290 | DYN_FTRACE_TEST_NAME(); | ||
291 | |||
292 | if (trace_selftest_test_probe1_cnt || | ||
293 | trace_selftest_test_probe2_cnt || | ||
294 | trace_selftest_test_probe3_cnt || | ||
295 | trace_selftest_test_global_cnt || | ||
296 | trace_selftest_test_dyn_cnt) | ||
297 | ret = -1; | ||
298 | |||
299 | ftrace_enabled = save_ftrace_enabled; | ||
300 | |||
301 | return ret; | ||
302 | } | ||
303 | |||
104 | /* Test dynamic code modification and ftrace filters */ | 304 | /* Test dynamic code modification and ftrace filters */ |
105 | int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | 305 | int trace_selftest_startup_dynamic_tracing(struct tracer *trace, |
106 | struct trace_array *tr, | 306 | struct trace_array *tr, |
@@ -131,7 +331,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
131 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); | 331 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); |
132 | 332 | ||
133 | /* filter only on our function */ | 333 | /* filter only on our function */ |
134 | ftrace_set_filter(func_name, strlen(func_name), 1); | 334 | ftrace_set_global_filter(func_name, strlen(func_name), 1); |
135 | 335 | ||
136 | /* enable tracing */ | 336 | /* enable tracing */ |
137 | ret = tracer_init(trace, tr); | 337 | ret = tracer_init(trace, tr); |
@@ -166,22 +366,30 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
166 | 366 | ||
167 | /* check the trace buffer */ | 367 | /* check the trace buffer */ |
168 | ret = trace_test_buffer(tr, &count); | 368 | ret = trace_test_buffer(tr, &count); |
169 | trace->reset(tr); | ||
170 | tracing_start(); | 369 | tracing_start(); |
171 | 370 | ||
172 | /* we should only have one item */ | 371 | /* we should only have one item */ |
173 | if (!ret && count != 1) { | 372 | if (!ret && count != 1) { |
373 | trace->reset(tr); | ||
174 | printk(KERN_CONT ".. filter failed count=%ld ..", count); | 374 | printk(KERN_CONT ".. filter failed count=%ld ..", count); |
175 | ret = -1; | 375 | ret = -1; |
176 | goto out; | 376 | goto out; |
177 | } | 377 | } |
178 | 378 | ||
379 | /* Test the ops with global tracing running */ | ||
380 | ret = trace_selftest_ops(1); | ||
381 | trace->reset(tr); | ||
382 | |||
179 | out: | 383 | out: |
180 | ftrace_enabled = save_ftrace_enabled; | 384 | ftrace_enabled = save_ftrace_enabled; |
181 | tracer_enabled = save_tracer_enabled; | 385 | tracer_enabled = save_tracer_enabled; |
182 | 386 | ||
183 | /* Enable tracing on all functions again */ | 387 | /* Enable tracing on all functions again */ |
184 | ftrace_set_filter(NULL, 0, 1); | 388 | ftrace_set_global_filter(NULL, 0, 1); |
389 | |||
390 | /* Test the ops with global tracing off */ | ||
391 | if (!ret) | ||
392 | ret = trace_selftest_ops(2); | ||
185 | 393 | ||
186 | return ret; | 394 | return ret; |
187 | } | 395 | } |
diff --git a/kernel/trace/trace_selftest_dynamic.c b/kernel/trace/trace_selftest_dynamic.c index 54dd77cce5bf..b4c475a0a48b 100644 --- a/kernel/trace/trace_selftest_dynamic.c +++ b/kernel/trace/trace_selftest_dynamic.c | |||
@@ -5,3 +5,9 @@ int DYN_FTRACE_TEST_NAME(void) | |||
5 | /* used to call mcount */ | 5 | /* used to call mcount */ |
6 | return 0; | 6 | return 0; |
7 | } | 7 | } |
8 | |||
9 | int DYN_FTRACE_TEST_NAME2(void) | ||
10 | { | ||
11 | /* used to call mcount */ | ||
12 | return 0; | ||
13 | } | ||
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 4c5dead0c239..b0b53b8e4c25 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -133,6 +133,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
133 | static struct ftrace_ops trace_ops __read_mostly = | 133 | static struct ftrace_ops trace_ops __read_mostly = |
134 | { | 134 | { |
135 | .func = stack_trace_call, | 135 | .func = stack_trace_call, |
136 | .flags = FTRACE_OPS_FL_GLOBAL, | ||
136 | }; | 137 | }; |
137 | 138 | ||
138 | static ssize_t | 139 | static ssize_t |
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 68187af4889e..b219f1449c54 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -251,9 +251,9 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
251 | { | 251 | { |
252 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); | 252 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); |
253 | 253 | ||
254 | if (elem->regfunc && !elem->state && active) | 254 | if (elem->regfunc && !jump_label_enabled(&elem->key) && active) |
255 | elem->regfunc(); | 255 | elem->regfunc(); |
256 | else if (elem->unregfunc && elem->state && !active) | 256 | else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active) |
257 | elem->unregfunc(); | 257 | elem->unregfunc(); |
258 | 258 | ||
259 | /* | 259 | /* |
@@ -264,13 +264,10 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
264 | * is used. | 264 | * is used. |
265 | */ | 265 | */ |
266 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); | 266 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); |
267 | if (!elem->state && active) { | 267 | if (active && !jump_label_enabled(&elem->key)) |
268 | jump_label_enable(&elem->state); | 268 | jump_label_inc(&elem->key); |
269 | elem->state = active; | 269 | else if (!active && jump_label_enabled(&elem->key)) |
270 | } else if (elem->state && !active) { | 270 | jump_label_dec(&elem->key); |
271 | jump_label_disable(&elem->state); | ||
272 | elem->state = active; | ||
273 | } | ||
274 | } | 271 | } |
275 | 272 | ||
276 | /* | 273 | /* |
@@ -281,13 +278,11 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
281 | */ | 278 | */ |
282 | static void disable_tracepoint(struct tracepoint *elem) | 279 | static void disable_tracepoint(struct tracepoint *elem) |
283 | { | 280 | { |
284 | if (elem->unregfunc && elem->state) | 281 | if (elem->unregfunc && jump_label_enabled(&elem->key)) |
285 | elem->unregfunc(); | 282 | elem->unregfunc(); |
286 | 283 | ||
287 | if (elem->state) { | 284 | if (jump_label_enabled(&elem->key)) |
288 | jump_label_disable(&elem->state); | 285 | jump_label_dec(&elem->key); |
289 | elem->state = 0; | ||
290 | } | ||
291 | rcu_assign_pointer(elem->funcs, NULL); | 286 | rcu_assign_pointer(elem->funcs, NULL); |
292 | } | 287 | } |
293 | 288 | ||
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index c768bcdda1b7..9b1707b5f646 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -238,6 +238,21 @@ config DETECT_HUNG_TASK | |||
238 | enabled then all held locks will also be reported. This | 238 | enabled then all held locks will also be reported. This |
239 | feature has negligible overhead. | 239 | feature has negligible overhead. |
240 | 240 | ||
241 | config DEFAULT_HUNG_TASK_TIMEOUT | ||
242 | int "Default timeout for hung task detection (in seconds)" | ||
243 | depends on DETECT_HUNG_TASK | ||
244 | default 120 | ||
245 | help | ||
246 | This option controls the default timeout (in seconds) used | ||
247 | to determine when a task has become non-responsive and should | ||
248 | be considered hung. | ||
249 | |||
250 | It can be adjusted at runtime via the kernel.hung_task_timeout_secs | ||
251 | sysctl or by writing a value to /proc/sys/kernel/hung_task_timeout_secs. | ||
252 | |||
253 | A timeout of 0 disables the check. The default is two minutes. | ||
254 | Keeping the default should be fine in most cases. | ||
255 | |||
241 | config BOOTPARAM_HUNG_TASK_PANIC | 256 | config BOOTPARAM_HUNG_TASK_PANIC |
242 | bool "Panic (Reboot) On Hung Tasks" | 257 | bool "Panic (Reboot) On Hung Tasks" |
243 | depends on DETECT_HUNG_TASK | 258 | depends on DETECT_HUNG_TASK |
@@ -398,9 +413,9 @@ config SLUB_STATS | |||
398 | config DEBUG_KMEMLEAK | 413 | config DEBUG_KMEMLEAK |
399 | bool "Kernel memory leak detector" | 414 | bool "Kernel memory leak detector" |
400 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ | 415 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ |
401 | (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE) | 416 | (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE) |
402 | 417 | ||
403 | select DEBUG_FS if SYSFS | 418 | select DEBUG_FS |
404 | select STACKTRACE if STACKTRACE_SUPPORT | 419 | select STACKTRACE if STACKTRACE_SUPPORT |
405 | select KALLSYMS | 420 | select KALLSYMS |
406 | select CRC32 | 421 | select CRC32 |
diff --git a/lib/Makefile b/lib/Makefile index ef0f28571156..4b49a249064b 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -21,7 +21,8 @@ lib-y += kobject.o kref.o klist.o | |||
21 | 21 | ||
22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
24 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o | 24 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ |
25 | bsearch.o | ||
25 | obj-y += kstrtox.o | 26 | obj-y += kstrtox.o |
26 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 27 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
27 | 28 | ||
diff --git a/lib/bsearch.c b/lib/bsearch.c new file mode 100644 index 000000000000..5b54758e2afb --- /dev/null +++ b/lib/bsearch.c | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * A generic implementation of binary search for the Linux kernel | ||
3 | * | ||
4 | * Copyright (C) 2008-2009 Ksplice, Inc. | ||
5 | * Author: Tim Abbott <tabbott@ksplice.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License as | ||
9 | * published by the Free Software Foundation; version 2. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/bsearch.h> | ||
14 | |||
15 | /* | ||
16 | * bsearch - binary search an array of elements | ||
17 | * @key: pointer to item being searched for | ||
18 | * @base: pointer to first element to search | ||
19 | * @num: number of elements | ||
20 | * @size: size of each element | ||
21 | * @cmp: pointer to comparison function | ||
22 | * | ||
23 | * This function does a binary search on the given array. The | ||
24 | * contents of the array should already be in ascending sorted order | ||
25 | * under the provided comparison function. | ||
26 | * | ||
27 | * Note that the key need not have the same type as the elements in | ||
28 | * the array, e.g. key could be a string and the comparison function | ||
29 | * could compare the string with the struct's name field. However, if | ||
30 | * the key and elements in the array are of the same type, you can use | ||
31 | * the same comparison function for both sort() and bsearch(). | ||
32 | */ | ||
33 | void *bsearch(const void *key, const void *base, size_t num, size_t size, | ||
34 | int (*cmp)(const void *key, const void *elt)) | ||
35 | { | ||
36 | size_t start = 0, end = num; | ||
37 | int result; | ||
38 | |||
39 | while (start < end) { | ||
40 | size_t mid = start + (end - start) / 2; | ||
41 | |||
42 | result = cmp(key, base + mid * size); | ||
43 | if (result < 0) | ||
44 | end = mid; | ||
45 | else if (result > 0) | ||
46 | start = mid + 1; | ||
47 | else | ||
48 | return (void *)base + mid * size; | ||
49 | } | ||
50 | |||
51 | return NULL; | ||
52 | } | ||
53 | EXPORT_SYMBOL(bsearch); | ||
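As a usage sketch for the new helper (the table and lookup wrapper below are made up for illustration): the array must already be sorted in ascending order under the same comparison function, and bsearch() returns a pointer into the array or NULL.

#include <linux/bsearch.h>
#include <linux/kernel.h>

/* Hypothetical sorted table mapping ids to names. */
struct sym {
	int id;
	const char *name;
};

static const struct sym sym_table[] = {
	{ 1, "one" }, { 3, "three" }, { 4, "four" }, { 9, "nine" },
};

static int sym_cmp(const void *key, const void *elt)
{
	int id = *(const int *)key;
	const struct sym *s = elt;

	if (id < s->id)
		return -1;
	return id > s->id;
}

static const char *sym_name(int id)
{
	const struct sym *s;

	s = bsearch(&id, sym_table, ARRAY_SIZE(sym_table),
		    sizeof(sym_table[0]), sym_cmp);
	return s ? s->name : NULL;
}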
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 4bfb0471f106..db07bfd9298e 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -649,7 +649,7 @@ out_err: | |||
649 | return -ENOMEM; | 649 | return -ENOMEM; |
650 | } | 650 | } |
651 | 651 | ||
652 | static int device_dma_allocations(struct device *dev) | 652 | static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) |
653 | { | 653 | { |
654 | struct dma_debug_entry *entry; | 654 | struct dma_debug_entry *entry; |
655 | unsigned long flags; | 655 | unsigned long flags; |
@@ -660,8 +660,10 @@ static int device_dma_allocations(struct device *dev) | |||
660 | for (i = 0; i < HASH_SIZE; ++i) { | 660 | for (i = 0; i < HASH_SIZE; ++i) { |
661 | spin_lock(&dma_entry_hash[i].lock); | 661 | spin_lock(&dma_entry_hash[i].lock); |
662 | list_for_each_entry(entry, &dma_entry_hash[i].list, list) { | 662 | list_for_each_entry(entry, &dma_entry_hash[i].list, list) { |
663 | if (entry->dev == dev) | 663 | if (entry->dev == dev) { |
664 | count += 1; | 664 | count += 1; |
665 | *out_entry = entry; | ||
666 | } | ||
665 | } | 667 | } |
666 | spin_unlock(&dma_entry_hash[i].lock); | 668 | spin_unlock(&dma_entry_hash[i].lock); |
667 | } | 669 | } |
@@ -674,6 +676,7 @@ static int device_dma_allocations(struct device *dev) | |||
674 | static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) | 676 | static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) |
675 | { | 677 | { |
676 | struct device *dev = data; | 678 | struct device *dev = data; |
679 | struct dma_debug_entry *uninitialized_var(entry); | ||
677 | int count; | 680 | int count; |
678 | 681 | ||
679 | if (global_disable) | 682 | if (global_disable) |
@@ -681,12 +684,17 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti | |||
681 | 684 | ||
682 | switch (action) { | 685 | switch (action) { |
683 | case BUS_NOTIFY_UNBOUND_DRIVER: | 686 | case BUS_NOTIFY_UNBOUND_DRIVER: |
684 | count = device_dma_allocations(dev); | 687 | count = device_dma_allocations(dev, &entry); |
685 | if (count == 0) | 688 | if (count == 0) |
686 | break; | 689 | break; |
687 | err_printk(dev, NULL, "DMA-API: device driver has pending " | 690 | err_printk(dev, entry, "DMA-API: device driver has pending " |
688 | "DMA allocations while released from device " | 691 | "DMA allocations while released from device " |
689 | "[count=%d]\n", count); | 692 | "[count=%d]\n" |
693 | "One of leaked entries details: " | ||
694 | "[device address=0x%016llx] [size=%llu bytes] " | ||
695 | "[mapped with %s] [mapped as %s]\n", | ||
696 | count, entry->dev_addr, entry->size, | ||
697 | dir2name[entry->direction], type2name[entry->type]); | ||
690 | break; | 698 | break; |
691 | default: | 699 | default: |
692 | break; | 700 | break; |
diff --git a/lib/string.c b/lib/string.c index f71bead1be3e..01fad9b203e1 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -535,6 +535,35 @@ bool sysfs_streq(const char *s1, const char *s2) | |||
535 | } | 535 | } |
536 | EXPORT_SYMBOL(sysfs_streq); | 536 | EXPORT_SYMBOL(sysfs_streq); |
537 | 537 | ||
538 | /** | ||
539 | * strtobool - convert common user inputs into boolean values | ||
540 | * @s: input string | ||
541 | * @res: result | ||
542 | * | ||
543 | * This routine returns 0 iff the first character is one of 'Yy1Nn0'. | ||
544 | * Otherwise it will return -EINVAL. Value pointed to by res is | ||
545 | * updated upon finding a match. | ||
546 | */ | ||
547 | int strtobool(const char *s, bool *res) | ||
548 | { | ||
549 | switch (s[0]) { | ||
550 | case 'y': | ||
551 | case 'Y': | ||
552 | case '1': | ||
553 | *res = true; | ||
554 | break; | ||
555 | case 'n': | ||
556 | case 'N': | ||
557 | case '0': | ||
558 | *res = false; | ||
559 | break; | ||
560 | default: | ||
561 | return -EINVAL; | ||
562 | } | ||
563 | return 0; | ||
564 | } | ||
565 | EXPORT_SYMBOL(strtobool); | ||
566 | |||
538 | #ifndef __HAVE_ARCH_MEMSET | 567 | #ifndef __HAVE_ARCH_MEMSET |
539 | /** | 568 | /** |
540 | * memset - Fill a region of memory with the given value | 569 | * memset - Fill a region of memory with the given value |
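strtobool() above targets the common case of sysfs and module-parameter handlers that accept "y"/"n"/"1"/"0". A hypothetical write-only sysfs attribute using it could look like the sketch below; the attribute and flag names are invented.

#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static bool my_feature_enabled;			/* hypothetical flag */

static ssize_t my_feature_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	bool val;

	/* Accepts y/Y/1 and n/N/0; anything else is rejected. */
	if (strtobool(buf, &val))
		return -EINVAL;

	my_feature_enabled = val;
	return count;
}

static struct kobj_attribute my_feature_attr =
	__ATTR(my_feature, 0200, NULL, my_feature_store);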
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index bc0ac6b333dc..dfd60192bc2e 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -797,7 +797,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
797 | return string(buf, end, uuid, spec); | 797 | return string(buf, end, uuid, spec); |
798 | } | 798 | } |
799 | 799 | ||
800 | int kptr_restrict = 1; | 800 | int kptr_restrict __read_mostly; |
801 | 801 | ||
802 | /* | 802 | /* |
803 | * Show a '%p' thing. A kernel extension is that the '%p' is followed | 803 | * Show a '%p' thing. A kernel extension is that the '%p' is followed |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index c1d5867543e4..aacee45616fc 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -1414,9 +1414,12 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1414 | ++(*pos); | 1414 | ++(*pos); |
1415 | 1415 | ||
1416 | list_for_each_continue_rcu(n, &object_list) { | 1416 | list_for_each_continue_rcu(n, &object_list) { |
1417 | next_obj = list_entry(n, struct kmemleak_object, object_list); | 1417 | struct kmemleak_object *obj = |
1418 | if (get_object(next_obj)) | 1418 | list_entry(n, struct kmemleak_object, object_list); |
1419 | if (get_object(obj)) { | ||
1420 | next_obj = obj; | ||
1419 | break; | 1421 | break; |
1422 | } | ||
1420 | } | 1423 | } |
1421 | 1424 | ||
1422 | put_object(prev_obj); | 1425 | put_object(prev_obj); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9f8a97b9a350..3f8bce264df6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2317,6 +2317,21 @@ void free_pages(unsigned long addr, unsigned int order) | |||
2317 | 2317 | ||
2318 | EXPORT_SYMBOL(free_pages); | 2318 | EXPORT_SYMBOL(free_pages); |
2319 | 2319 | ||
2320 | static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) | ||
2321 | { | ||
2322 | if (addr) { | ||
2323 | unsigned long alloc_end = addr + (PAGE_SIZE << order); | ||
2324 | unsigned long used = addr + PAGE_ALIGN(size); | ||
2325 | |||
2326 | split_page(virt_to_page((void *)addr), order); | ||
2327 | while (used < alloc_end) { | ||
2328 | free_page(used); | ||
2329 | used += PAGE_SIZE; | ||
2330 | } | ||
2331 | } | ||
2332 | return (void *)addr; | ||
2333 | } | ||
2334 | |||
2320 | /** | 2335 | /** |
2321 | * alloc_pages_exact - allocate an exact number physically-contiguous pages. | 2336 | * alloc_pages_exact - allocate an exact number physically-contiguous pages. |
2322 | * @size: the number of bytes to allocate | 2337 | * @size: the number of bytes to allocate |
@@ -2336,22 +2351,33 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) | |||
2336 | unsigned long addr; | 2351 | unsigned long addr; |
2337 | 2352 | ||
2338 | addr = __get_free_pages(gfp_mask, order); | 2353 | addr = __get_free_pages(gfp_mask, order); |
2339 | if (addr) { | 2354 | return make_alloc_exact(addr, order, size); |
2340 | unsigned long alloc_end = addr + (PAGE_SIZE << order); | ||
2341 | unsigned long used = addr + PAGE_ALIGN(size); | ||
2342 | |||
2343 | split_page(virt_to_page((void *)addr), order); | ||
2344 | while (used < alloc_end) { | ||
2345 | free_page(used); | ||
2346 | used += PAGE_SIZE; | ||
2347 | } | ||
2348 | } | ||
2349 | |||
2350 | return (void *)addr; | ||
2351 | } | 2355 | } |
2352 | EXPORT_SYMBOL(alloc_pages_exact); | 2356 | EXPORT_SYMBOL(alloc_pages_exact); |
2353 | 2357 | ||
2354 | /** | 2358 | /** |
2359 | * alloc_pages_exact_nid - allocate an exact number of physically-contiguous | ||
2360 | * pages on a node. | ||
2361 | * @nid: the preferred node ID where memory should be allocated | ||
2362 | * @size: the number of bytes to allocate | ||
2363 | * @gfp_mask: GFP flags for the allocation | ||
2364 | * | ||
2365 | * Like alloc_pages_exact(), but try to allocate on node nid first before falling | ||
2366 | * back. | ||
2367 | * Note this is not alloc_pages_exact_node() which allocates on a specific node, | ||
2368 | * but is not exact. | ||
2369 | */ | ||
2370 | void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) | ||
2371 | { | ||
2372 | unsigned order = get_order(size); | ||
2373 | struct page *p = alloc_pages_node(nid, gfp_mask, order); | ||
2374 | if (!p) | ||
2375 | return NULL; | ||
2376 | return make_alloc_exact((unsigned long)page_address(p), order, size); | ||
2377 | } | ||
2378 | EXPORT_SYMBOL(alloc_pages_exact_nid); | ||
2379 | |||
2380 | /** | ||
2355 | * free_pages_exact - release memory allocated via alloc_pages_exact() | 2381 | * free_pages_exact - release memory allocated via alloc_pages_exact() |
2356 | * @virt: the value returned by alloc_pages_exact. | 2382 | * @virt: the value returned by alloc_pages_exact. |
2357 | * @size: size of allocation, same value as passed to alloc_pages_exact(). | 2383 | * @size: size of allocation, same value as passed to alloc_pages_exact(). |
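To illustrate the new helper pair, a hedged sketch of a caller (the function and its five-page request are hypothetical): get_order(5 * PAGE_SIZE) rounds the request up to order 3, i.e. 8 pages, and make_alloc_exact() then splits the block and frees the 3 unused tail pages, so exactly 5 physically contiguous pages remain, preferably on @nid:

	static void *grab_five_pages(int nid)
	{
		/* rounds up to order 3 internally, then trims back to 5 pages */
		void *buf = alloc_pages_exact_nid(nid, 5 * PAGE_SIZE, GFP_KERNEL);

		/* pair with free_pages_exact(buf, 5 * PAGE_SIZE) when finished */
		return buf;
	}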
@@ -3564,7 +3590,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) | |||
3564 | 3590 | ||
3565 | if (!slab_is_available()) { | 3591 | if (!slab_is_available()) { |
3566 | zone->wait_table = (wait_queue_head_t *) | 3592 | zone->wait_table = (wait_queue_head_t *) |
3567 | alloc_bootmem_node(pgdat, alloc_size); | 3593 | alloc_bootmem_node_nopanic(pgdat, alloc_size); |
3568 | } else { | 3594 | } else { |
3569 | /* | 3595 | /* |
3570 | * This case means that a zone whose size was 0 gets new memory | 3596 | * This case means that a zone whose size was 0 gets new memory |
@@ -4141,7 +4167,8 @@ static void __init setup_usemap(struct pglist_data *pgdat, | |||
4141 | unsigned long usemapsize = usemap_size(zonesize); | 4167 | unsigned long usemapsize = usemap_size(zonesize); |
4142 | zone->pageblock_flags = NULL; | 4168 | zone->pageblock_flags = NULL; |
4143 | if (usemapsize) | 4169 | if (usemapsize) |
4144 | zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); | 4170 | zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, |
4171 | usemapsize); | ||
4145 | } | 4172 | } |
4146 | #else | 4173 | #else |
4147 | static inline void setup_usemap(struct pglist_data *pgdat, | 4174 | static inline void setup_usemap(struct pglist_data *pgdat, |
@@ -4307,7 +4334,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) | |||
4307 | size = (end - start) * sizeof(struct page); | 4334 | size = (end - start) * sizeof(struct page); |
4308 | map = alloc_remap(pgdat->node_id, size); | 4335 | map = alloc_remap(pgdat->node_id, size); |
4309 | if (!map) | 4336 | if (!map) |
4310 | map = alloc_bootmem_node(pgdat, size); | 4337 | map = alloc_bootmem_node_nopanic(pgdat, size); |
4311 | pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); | 4338 | pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); |
4312 | } | 4339 | } |
4313 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 4340 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 99055010cece..2daadc322ba6 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c | |||
@@ -134,7 +134,7 @@ static void *__init_refok alloc_page_cgroup(size_t size, int nid) | |||
134 | { | 134 | { |
135 | void *addr = NULL; | 135 | void *addr = NULL; |
136 | 136 | ||
137 | addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN); | 137 | addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN); |
138 | if (addr) | 138 | if (addr) |
139 | return addr; | 139 | return addr; |
140 | 140 | ||
diff --git a/mm/shmem.c b/mm/shmem.c index 8fa27e4e582a..dfc7069102ee 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -852,7 +852,7 @@ static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_ | |||
852 | 852 | ||
853 | static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) | 853 | static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) |
854 | { | 854 | { |
855 | struct inode *inode; | 855 | struct address_space *mapping; |
856 | unsigned long idx; | 856 | unsigned long idx; |
857 | unsigned long size; | 857 | unsigned long size; |
858 | unsigned long limit; | 858 | unsigned long limit; |
@@ -875,8 +875,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s | |||
875 | if (size > SHMEM_NR_DIRECT) | 875 | if (size > SHMEM_NR_DIRECT) |
876 | size = SHMEM_NR_DIRECT; | 876 | size = SHMEM_NR_DIRECT; |
877 | offset = shmem_find_swp(entry, ptr, ptr+size); | 877 | offset = shmem_find_swp(entry, ptr, ptr+size); |
878 | if (offset >= 0) | 878 | if (offset >= 0) { |
879 | shmem_swp_balance_unmap(); | ||
879 | goto found; | 880 | goto found; |
881 | } | ||
880 | if (!info->i_indirect) | 882 | if (!info->i_indirect) |
881 | goto lost2; | 883 | goto lost2; |
882 | 884 | ||
@@ -914,11 +916,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s | |||
914 | if (size > ENTRIES_PER_PAGE) | 916 | if (size > ENTRIES_PER_PAGE) |
915 | size = ENTRIES_PER_PAGE; | 917 | size = ENTRIES_PER_PAGE; |
916 | offset = shmem_find_swp(entry, ptr, ptr+size); | 918 | offset = shmem_find_swp(entry, ptr, ptr+size); |
917 | shmem_swp_unmap(ptr); | ||
918 | if (offset >= 0) { | 919 | if (offset >= 0) { |
919 | shmem_dir_unmap(dir); | 920 | shmem_dir_unmap(dir); |
920 | goto found; | 921 | goto found; |
921 | } | 922 | } |
923 | shmem_swp_unmap(ptr); | ||
922 | } | 924 | } |
923 | } | 925 | } |
924 | lost1: | 926 | lost1: |
@@ -928,8 +930,7 @@ lost2: | |||
928 | return 0; | 930 | return 0; |
929 | found: | 931 | found: |
930 | idx += offset; | 932 | idx += offset; |
931 | inode = igrab(&info->vfs_inode); | 933 | ptr += offset; |
932 | spin_unlock(&info->lock); | ||
933 | 934 | ||
934 | /* | 935 | /* |
935 | * Move _head_ to start search for next from here. | 936 | * Move _head_ to start search for next from here. |
@@ -940,37 +941,18 @@ found: | |||
940 | */ | 941 | */ |
941 | if (shmem_swaplist.next != &info->swaplist) | 942 | if (shmem_swaplist.next != &info->swaplist) |
942 | list_move_tail(&shmem_swaplist, &info->swaplist); | 943 | list_move_tail(&shmem_swaplist, &info->swaplist); |
943 | mutex_unlock(&shmem_swaplist_mutex); | ||
944 | 944 | ||
945 | error = 1; | ||
946 | if (!inode) | ||
947 | goto out; | ||
948 | /* | 945 | /* |
949 | * Charge page using GFP_KERNEL while we can wait. | 946 | * We rely on shmem_swaplist_mutex, not only to protect the swaplist, |
950 | * Charged back to the user(not to caller) when swap account is used. | 947 | * but also to hold up shmem_evict_inode(): so inode cannot be freed |
951 | * add_to_page_cache() will be called with GFP_NOWAIT. | 948 | * beneath us (pagelock doesn't help until the page is in pagecache). |
952 | */ | 949 | */ |
953 | error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); | 950 | mapping = info->vfs_inode.i_mapping; |
954 | if (error) | 951 | error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); |
955 | goto out; | 952 | /* which does mem_cgroup_uncharge_cache_page on error */ |
956 | error = radix_tree_preload(GFP_KERNEL); | ||
957 | if (error) { | ||
958 | mem_cgroup_uncharge_cache_page(page); | ||
959 | goto out; | ||
960 | } | ||
961 | error = 1; | ||
962 | |||
963 | spin_lock(&info->lock); | ||
964 | ptr = shmem_swp_entry(info, idx, NULL); | ||
965 | if (ptr && ptr->val == entry.val) { | ||
966 | error = add_to_page_cache_locked(page, inode->i_mapping, | ||
967 | idx, GFP_NOWAIT); | ||
968 | /* does mem_cgroup_uncharge_cache_page on error */ | ||
969 | } else /* we must compensate for our precharge above */ | ||
970 | mem_cgroup_uncharge_cache_page(page); | ||
971 | 953 | ||
972 | if (error == -EEXIST) { | 954 | if (error == -EEXIST) { |
973 | struct page *filepage = find_get_page(inode->i_mapping, idx); | 955 | struct page *filepage = find_get_page(mapping, idx); |
974 | error = 1; | 956 | error = 1; |
975 | if (filepage) { | 957 | if (filepage) { |
976 | /* | 958 | /* |
@@ -990,14 +972,8 @@ found: | |||
990 | swap_free(entry); | 972 | swap_free(entry); |
991 | error = 1; /* not an error, but entry was found */ | 973 | error = 1; /* not an error, but entry was found */ |
992 | } | 974 | } |
993 | if (ptr) | 975 | shmem_swp_unmap(ptr); |
994 | shmem_swp_unmap(ptr); | ||
995 | spin_unlock(&info->lock); | 976 | spin_unlock(&info->lock); |
996 | radix_tree_preload_end(); | ||
997 | out: | ||
998 | unlock_page(page); | ||
999 | page_cache_release(page); | ||
1000 | iput(inode); /* allows for NULL */ | ||
1001 | return error; | 977 | return error; |
1002 | } | 978 | } |
1003 | 979 | ||
@@ -1009,6 +985,26 @@ int shmem_unuse(swp_entry_t entry, struct page *page) | |||
1009 | struct list_head *p, *next; | 985 | struct list_head *p, *next; |
1010 | struct shmem_inode_info *info; | 986 | struct shmem_inode_info *info; |
1011 | int found = 0; | 987 | int found = 0; |
988 | int error; | ||
989 | |||
990 | /* | ||
991 | * Charge page using GFP_KERNEL while we can wait, before taking | ||
992 | * the shmem_swaplist_mutex which might hold up shmem_writepage(). | ||
993 | * Charged back to the user (not to caller) when swap account is used. | ||
994 | * add_to_page_cache() will be called with GFP_NOWAIT. | ||
995 | */ | ||
996 | error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); | ||
997 | if (error) | ||
998 | goto out; | ||
999 | /* | ||
1000 | * Try to preload while we can wait, to not make a habit of | ||
1001 | * draining atomic reserves; but don't latch on to this cpu, | ||
1002 | * it's okay if sometimes we get rescheduled after this. | ||
1003 | */ | ||
1004 | error = radix_tree_preload(GFP_KERNEL); | ||
1005 | if (error) | ||
1006 | goto uncharge; | ||
1007 | radix_tree_preload_end(); | ||
1012 | 1008 | ||
1013 | mutex_lock(&shmem_swaplist_mutex); | 1009 | mutex_lock(&shmem_swaplist_mutex); |
1014 | list_for_each_safe(p, next, &shmem_swaplist) { | 1010 | list_for_each_safe(p, next, &shmem_swaplist) { |
@@ -1016,17 +1012,19 @@ int shmem_unuse(swp_entry_t entry, struct page *page) | |||
1016 | found = shmem_unuse_inode(info, entry, page); | 1012 | found = shmem_unuse_inode(info, entry, page); |
1017 | cond_resched(); | 1013 | cond_resched(); |
1018 | if (found) | 1014 | if (found) |
1019 | goto out; | 1015 | break; |
1020 | } | 1016 | } |
1021 | mutex_unlock(&shmem_swaplist_mutex); | 1017 | mutex_unlock(&shmem_swaplist_mutex); |
1022 | /* | 1018 | |
1023 | * Can some race bring us here? We've been holding page lock, | 1019 | uncharge: |
1024 | * so I think not; but would rather try again later than BUG() | 1020 | if (!found) |
1025 | */ | 1021 | mem_cgroup_uncharge_cache_page(page); |
1022 | if (found < 0) | ||
1023 | error = found; | ||
1024 | out: | ||
1026 | unlock_page(page); | 1025 | unlock_page(page); |
1027 | page_cache_release(page); | 1026 | page_cache_release(page); |
1028 | out: | 1027 | return error; |
1029 | return (found < 0) ? found : 0; | ||
1030 | } | 1028 | } |
1031 | 1029 | ||
1032 | /* | 1030 | /* |
@@ -1064,7 +1062,25 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) | |||
1064 | else | 1062 | else |
1065 | swap.val = 0; | 1063 | swap.val = 0; |
1066 | 1064 | ||
1065 | /* | ||
1066 | * Add inode to shmem_unuse()'s list of swapped-out inodes, | ||
1067 | * if it's not already there. Do it now because we cannot take | ||
1068 | * mutex while holding spinlock, and must do so before the page | ||
1069 | * is moved to swap cache, when its pagelock no longer protects | ||
1070 | * the inode from eviction. But don't unlock the mutex until | ||
1071 | * we've taken the spinlock, because shmem_unuse_inode() will | ||
1072 | * prune a !swapped inode from the swaplist under both locks. | ||
1073 | */ | ||
1074 | if (swap.val) { | ||
1075 | mutex_lock(&shmem_swaplist_mutex); | ||
1076 | if (list_empty(&info->swaplist)) | ||
1077 | list_add_tail(&info->swaplist, &shmem_swaplist); | ||
1078 | } | ||
1079 | |||
1067 | spin_lock(&info->lock); | 1080 | spin_lock(&info->lock); |
1081 | if (swap.val) | ||
1082 | mutex_unlock(&shmem_swaplist_mutex); | ||
1083 | |||
1068 | if (index >= info->next_index) { | 1084 | if (index >= info->next_index) { |
1069 | BUG_ON(!(info->flags & SHMEM_TRUNCATE)); | 1085 | BUG_ON(!(info->flags & SHMEM_TRUNCATE)); |
1070 | goto unlock; | 1086 | goto unlock; |
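A condensed sketch of the lock ordering this hunk establishes (identifiers abbreviated, not the literal shmem code): the sleeping mutex is taken before the spinlock and released only once the spinlock is held, so shmem_unuse_inode() cannot prune a !swapped inode from the swaplist in the window between the two:

	mutex_lock(&shmem_swaplist_mutex);		/* may sleep, so taken first */
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	spin_lock(&info->lock);
	mutex_unlock(&shmem_swaplist_mutex);		/* safe: spinlock already held */
	/* ... proceed under info->lock ... */
	spin_unlock(&info->lock);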
@@ -1084,21 +1100,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) | |||
1084 | delete_from_page_cache(page); | 1100 | delete_from_page_cache(page); |
1085 | shmem_swp_set(info, entry, swap.val); | 1101 | shmem_swp_set(info, entry, swap.val); |
1086 | shmem_swp_unmap(entry); | 1102 | shmem_swp_unmap(entry); |
1087 | if (list_empty(&info->swaplist)) | ||
1088 | inode = igrab(inode); | ||
1089 | else | ||
1090 | inode = NULL; | ||
1091 | spin_unlock(&info->lock); | 1103 | spin_unlock(&info->lock); |
1092 | swap_shmem_alloc(swap); | 1104 | swap_shmem_alloc(swap); |
1093 | BUG_ON(page_mapped(page)); | 1105 | BUG_ON(page_mapped(page)); |
1094 | swap_writepage(page, wbc); | 1106 | swap_writepage(page, wbc); |
1095 | if (inode) { | ||
1096 | mutex_lock(&shmem_swaplist_mutex); | ||
1097 | /* move instead of add in case we're racing */ | ||
1098 | list_move_tail(&info->swaplist, &shmem_swaplist); | ||
1099 | mutex_unlock(&shmem_swaplist_mutex); | ||
1100 | iput(inode); | ||
1101 | } | ||
1102 | return 0; | 1107 | return 0; |
1103 | } | 1108 | } |
1104 | 1109 | ||
@@ -1400,20 +1405,14 @@ repeat: | |||
1400 | if (sbinfo->max_blocks) { | 1405 | if (sbinfo->max_blocks) { |
1401 | if (percpu_counter_compare(&sbinfo->used_blocks, | 1406 | if (percpu_counter_compare(&sbinfo->used_blocks, |
1402 | sbinfo->max_blocks) >= 0 || | 1407 | sbinfo->max_blocks) >= 0 || |
1403 | shmem_acct_block(info->flags)) { | 1408 | shmem_acct_block(info->flags)) |
1404 | spin_unlock(&info->lock); | 1409 | goto nospace; |
1405 | error = -ENOSPC; | ||
1406 | goto failed; | ||
1407 | } | ||
1408 | percpu_counter_inc(&sbinfo->used_blocks); | 1410 | percpu_counter_inc(&sbinfo->used_blocks); |
1409 | spin_lock(&inode->i_lock); | 1411 | spin_lock(&inode->i_lock); |
1410 | inode->i_blocks += BLOCKS_PER_PAGE; | 1412 | inode->i_blocks += BLOCKS_PER_PAGE; |
1411 | spin_unlock(&inode->i_lock); | 1413 | spin_unlock(&inode->i_lock); |
1412 | } else if (shmem_acct_block(info->flags)) { | 1414 | } else if (shmem_acct_block(info->flags)) |
1413 | spin_unlock(&info->lock); | 1415 | goto nospace; |
1414 | error = -ENOSPC; | ||
1415 | goto failed; | ||
1416 | } | ||
1417 | 1416 | ||
1418 | if (!filepage) { | 1417 | if (!filepage) { |
1419 | int ret; | 1418 | int ret; |
@@ -1493,6 +1492,24 @@ done: | |||
1493 | error = 0; | 1492 | error = 0; |
1494 | goto out; | 1493 | goto out; |
1495 | 1494 | ||
1495 | nospace: | ||
1496 | /* | ||
1497 | * Perhaps the page was brought in from swap between find_lock_page | ||
1498 | * and taking info->lock? We allow for that at add_to_page_cache_lru, | ||
1499 | * but must also avoid reporting a spurious ENOSPC while working on a | ||
1500 | * full tmpfs. (When filepage has been passed in to shmem_getpage, it | ||
1501 | * is already in page cache, which prevents this race from occurring.) | ||
1502 | */ | ||
1503 | if (!filepage) { | ||
1504 | struct page *page = find_get_page(mapping, idx); | ||
1505 | if (page) { | ||
1506 | spin_unlock(&info->lock); | ||
1507 | page_cache_release(page); | ||
1508 | goto repeat; | ||
1509 | } | ||
1510 | } | ||
1511 | spin_unlock(&info->lock); | ||
1512 | error = -ENOSPC; | ||
1496 | failed: | 1513 | failed: |
1497 | if (*pagep != filepage) { | 1514 | if (*pagep != filepage) { |
1498 | unlock_page(filepage); | 1515 | unlock_page(filepage); |
@@ -396,6 +396,9 @@ static void lru_deactivate_fn(struct page *page, void *arg) | |||
396 | if (!PageLRU(page)) | 396 | if (!PageLRU(page)) |
397 | return; | 397 | return; |
398 | 398 | ||
399 | if (PageUnevictable(page)) | ||
400 | return; | ||
401 | |||
399 | /* Some processes are using the page */ | 402 | /* Some processes are using the page */ |
400 | if (page_mapped(page)) | 403 | if (page_mapped(page)) |
401 | return; | 404 | return; |
diff --git a/mm/vmscan.c b/mm/vmscan.c index f6b435c80079..8bfd45050a61 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -937,7 +937,7 @@ keep_lumpy: | |||
937 | * back off and wait for congestion to clear because further reclaim | 937 | * back off and wait for congestion to clear because further reclaim |
938 | * will encounter the same problem | 938 | * will encounter the same problem |
939 | */ | 939 | */ |
940 | if (nr_dirty == nr_congested && nr_dirty != 0) | 940 | if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc)) |
941 | zone_set_flag(zone, ZONE_CONGESTED); | 941 | zone_set_flag(zone, ZONE_CONGESTED); |
942 | 942 | ||
943 | free_page_list(&free_pages); | 943 | free_page_list(&free_pages); |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 7850412f52b7..0eb1a886b370 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -124,6 +124,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) | |||
124 | 124 | ||
125 | grp->nr_vlans--; | 125 | grp->nr_vlans--; |
126 | 126 | ||
127 | if (vlan->flags & VLAN_FLAG_GVRP) | ||
128 | vlan_gvrp_request_leave(dev); | ||
129 | |||
127 | vlan_group_set_device(grp, vlan_id, NULL); | 130 | vlan_group_set_device(grp, vlan_id, NULL); |
128 | if (!grp->killall) | 131 | if (!grp->killall) |
129 | synchronize_net(); | 132 | synchronize_net(); |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e34ea9e5e28b..b2ff6c8d3603 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -487,9 +487,6 @@ static int vlan_dev_stop(struct net_device *dev) | |||
487 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 487 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
488 | struct net_device *real_dev = vlan->real_dev; | 488 | struct net_device *real_dev = vlan->real_dev; |
489 | 489 | ||
490 | if (vlan->flags & VLAN_FLAG_GVRP) | ||
491 | vlan_gvrp_request_leave(dev); | ||
492 | |||
493 | dev_mc_unsync(real_dev, dev); | 490 | dev_mc_unsync(real_dev, dev); |
494 | dev_uc_unsync(real_dev, dev); | 491 | dev_uc_unsync(real_dev, dev); |
495 | if (dev->flags & IFF_ALLMULTI) | 492 | if (dev->flags & IFF_ALLMULTI) |
diff --git a/net/9p/client.c b/net/9p/client.c index 77367745be9b..a9aa2dd66482 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
@@ -614,7 +614,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) | |||
614 | 614 | ||
615 | err = c->trans_mod->request(c, req); | 615 | err = c->trans_mod->request(c, req); |
616 | if (err < 0) { | 616 | if (err < 0) { |
617 | if (err != -ERESTARTSYS) | 617 | if (err != -ERESTARTSYS && err != -EFAULT) |
618 | c->status = Disconnected; | 618 | c->status = Disconnected; |
619 | goto reterr; | 619 | goto reterr; |
620 | } | 620 | } |
diff --git a/net/9p/protocol.c b/net/9p/protocol.c index b58a501cf3d1..a873277cb996 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c | |||
@@ -674,6 +674,7 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent, | |||
674 | } | 674 | } |
675 | 675 | ||
676 | strcpy(dirent->d_name, nameptr); | 676 | strcpy(dirent->d_name, nameptr); |
677 | kfree(nameptr); | ||
677 | 678 | ||
678 | out: | 679 | out: |
679 | return fake_pdu.offset; | 680 | return fake_pdu.offset; |
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c index e883172f9aa2..9a70ebdec56e 100644 --- a/net/9p/trans_common.c +++ b/net/9p/trans_common.c | |||
@@ -63,7 +63,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, | |||
63 | int nr_pages, u8 rw) | 63 | int nr_pages, u8 rw) |
64 | { | 64 | { |
65 | uint32_t first_page_bytes = 0; | 65 | uint32_t first_page_bytes = 0; |
66 | uint32_t pdata_mapped_pages; | 66 | int32_t pdata_mapped_pages; |
67 | struct trans_rpage_info *rpinfo; | 67 | struct trans_rpage_info *rpinfo; |
68 | 68 | ||
69 | *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); | 69 | *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); |
@@ -75,14 +75,9 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, | |||
75 | rpinfo = req->tc->private; | 75 | rpinfo = req->tc->private; |
76 | pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, | 76 | pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, |
77 | nr_pages, rw, &rpinfo->rp_data[0]); | 77 | nr_pages, rw, &rpinfo->rp_data[0]); |
78 | if (pdata_mapped_pages <= 0) | ||
79 | return pdata_mapped_pages; | ||
78 | 80 | ||
79 | if (pdata_mapped_pages < 0) { | ||
80 | printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p" | ||
81 | "nr_pages:%d\n", pdata_mapped_pages, | ||
82 | req->tc->pubuf, nr_pages); | ||
83 | pdata_mapped_pages = 0; | ||
84 | return -EIO; | ||
85 | } | ||
86 | rpinfo->rp_nr_pages = pdata_mapped_pages; | 81 | rpinfo->rp_nr_pages = pdata_mapped_pages; |
87 | if (*pdata_off) { | 82 | if (*pdata_off) { |
88 | *pdata_len = first_page_bytes; | 83 | *pdata_len = first_page_bytes; |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 94954c74f6ae..42fdffd1d76c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -369,15 +369,6 @@ static void __sco_sock_close(struct sock *sk) | |||
369 | 369 | ||
370 | case BT_CONNECTED: | 370 | case BT_CONNECTED: |
371 | case BT_CONFIG: | 371 | case BT_CONFIG: |
372 | if (sco_pi(sk)->conn) { | ||
373 | sk->sk_state = BT_DISCONN; | ||
374 | sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); | ||
375 | hci_conn_put(sco_pi(sk)->conn->hcon); | ||
376 | sco_pi(sk)->conn = NULL; | ||
377 | } else | ||
378 | sco_chan_del(sk, ECONNRESET); | ||
379 | break; | ||
380 | |||
381 | case BT_CONNECT: | 372 | case BT_CONNECT: |
382 | case BT_DISCONN: | 373 | case BT_DISCONN: |
383 | sco_chan_del(sk, ECONNRESET); | 374 | sco_chan_del(sk, ECONNRESET); |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index f3bc322c5891..74ef4d4846a4 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -737,7 +737,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, | |||
737 | nf_bridge->mask |= BRNF_PKT_TYPE; | 737 | nf_bridge->mask |= BRNF_PKT_TYPE; |
738 | } | 738 | } |
739 | 739 | ||
740 | if (br_parse_ip_options(skb)) | 740 | if (pf == PF_INET && br_parse_ip_options(skb)) |
741 | return NF_DROP; | 741 | return NF_DROP; |
742 | 742 | ||
743 | /* The physdev module checks on this */ | 743 | /* The physdev module checks on this */ |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 893669caa8de..1a92b369c820 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1766,7 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info, | |||
1766 | 1766 | ||
1767 | newinfo->entries_size = size; | 1767 | newinfo->entries_size = size; |
1768 | 1768 | ||
1769 | xt_compat_init_offsets(AF_INET, info->nentries); | 1769 | xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); |
1770 | return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, | 1770 | return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, |
1771 | entries, newinfo); | 1771 | entries, newinfo); |
1772 | } | 1772 | } |
@@ -1882,7 +1882,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, | |||
1882 | struct xt_match *match; | 1882 | struct xt_match *match; |
1883 | struct xt_target *wt; | 1883 | struct xt_target *wt; |
1884 | void *dst = NULL; | 1884 | void *dst = NULL; |
1885 | int off, pad = 0, ret = 0; | 1885 | int off, pad = 0; |
1886 | unsigned int size_kern, entry_offset, match_size = mwt->match_size; | 1886 | unsigned int size_kern, entry_offset, match_size = mwt->match_size; |
1887 | 1887 | ||
1888 | strlcpy(name, mwt->u.name, sizeof(name)); | 1888 | strlcpy(name, mwt->u.name, sizeof(name)); |
@@ -1935,13 +1935,6 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, | |||
1935 | break; | 1935 | break; |
1936 | } | 1936 | } |
1937 | 1937 | ||
1938 | if (!dst) { | ||
1939 | ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, | ||
1940 | off + ebt_compat_entry_padsize()); | ||
1941 | if (ret < 0) | ||
1942 | return ret; | ||
1943 | } | ||
1944 | |||
1945 | state->buf_kern_offset += match_size + off; | 1938 | state->buf_kern_offset += match_size + off; |
1946 | state->buf_user_offset += match_size; | 1939 | state->buf_user_offset += match_size; |
1947 | pad = XT_ALIGN(size_kern) - size_kern; | 1940 | pad = XT_ALIGN(size_kern) - size_kern; |
@@ -2016,50 +2009,6 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, | |||
2016 | return growth; | 2009 | return growth; |
2017 | } | 2010 | } |
2018 | 2011 | ||
2019 | #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \ | ||
2020 | ({ \ | ||
2021 | unsigned int __i; \ | ||
2022 | int __ret = 0; \ | ||
2023 | struct compat_ebt_entry_mwt *__watcher; \ | ||
2024 | \ | ||
2025 | for (__i = e->watchers_offset; \ | ||
2026 | __i < (e)->target_offset; \ | ||
2027 | __i += __watcher->watcher_size + \ | ||
2028 | sizeof(struct compat_ebt_entry_mwt)) { \ | ||
2029 | __watcher = (void *)(e) + __i; \ | ||
2030 | __ret = fn(__watcher , ## args); \ | ||
2031 | if (__ret != 0) \ | ||
2032 | break; \ | ||
2033 | } \ | ||
2034 | if (__ret == 0) { \ | ||
2035 | if (__i != (e)->target_offset) \ | ||
2036 | __ret = -EINVAL; \ | ||
2037 | } \ | ||
2038 | __ret; \ | ||
2039 | }) | ||
2040 | |||
2041 | #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \ | ||
2042 | ({ \ | ||
2043 | unsigned int __i; \ | ||
2044 | int __ret = 0; \ | ||
2045 | struct compat_ebt_entry_mwt *__match; \ | ||
2046 | \ | ||
2047 | for (__i = sizeof(struct ebt_entry); \ | ||
2048 | __i < (e)->watchers_offset; \ | ||
2049 | __i += __match->match_size + \ | ||
2050 | sizeof(struct compat_ebt_entry_mwt)) { \ | ||
2051 | __match = (void *)(e) + __i; \ | ||
2052 | __ret = fn(__match , ## args); \ | ||
2053 | if (__ret != 0) \ | ||
2054 | break; \ | ||
2055 | } \ | ||
2056 | if (__ret == 0) { \ | ||
2057 | if (__i != (e)->watchers_offset) \ | ||
2058 | __ret = -EINVAL; \ | ||
2059 | } \ | ||
2060 | __ret; \ | ||
2061 | }) | ||
2062 | |||
2063 | /* called for all ebt_entry structures. */ | 2012 | /* called for all ebt_entry structures. */ |
2064 | static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, | 2013 | static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, |
2065 | unsigned int *total, | 2014 | unsigned int *total, |
@@ -2132,6 +2081,14 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, | |||
2132 | } | 2081 | } |
2133 | } | 2082 | } |
2134 | 2083 | ||
2084 | if (state->buf_kern_start == NULL) { | ||
2085 | unsigned int offset = buf_start - (char *) base; | ||
2086 | |||
2087 | ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); | ||
2088 | if (ret < 0) | ||
2089 | return ret; | ||
2090 | } | ||
2091 | |||
2135 | startoff = state->buf_user_offset - startoff; | 2092 | startoff = state->buf_user_offset - startoff; |
2136 | 2093 | ||
2137 | BUG_ON(*total < startoff); | 2094 | BUG_ON(*total < startoff); |
@@ -2240,6 +2197,7 @@ static int compat_do_replace(struct net *net, void __user *user, | |||
2240 | 2197 | ||
2241 | xt_compat_lock(NFPROTO_BRIDGE); | 2198 | xt_compat_lock(NFPROTO_BRIDGE); |
2242 | 2199 | ||
2200 | xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); | ||
2243 | ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); | 2201 | ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); |
2244 | if (ret < 0) | 2202 | if (ret < 0) |
2245 | goto out_unlock; | 2203 | goto out_unlock; |
diff --git a/net/core/dev.c b/net/core/dev.c index 856b6ee9a1d5..b624fe4d9bd7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1284,11 +1284,13 @@ static int dev_close_many(struct list_head *head) | |||
1284 | */ | 1284 | */ |
1285 | int dev_close(struct net_device *dev) | 1285 | int dev_close(struct net_device *dev) |
1286 | { | 1286 | { |
1287 | LIST_HEAD(single); | 1287 | if (dev->flags & IFF_UP) { |
1288 | LIST_HEAD(single); | ||
1288 | 1289 | ||
1289 | list_add(&dev->unreg_list, &single); | 1290 | list_add(&dev->unreg_list, &single); |
1290 | dev_close_many(&single); | 1291 | dev_close_many(&single); |
1291 | list_del(&single); | 1292 | list_del(&single); |
1293 | } | ||
1292 | return 0; | 1294 | return 0; |
1293 | } | 1295 | } |
1294 | EXPORT_SYMBOL(dev_close); | 1296 | EXPORT_SYMBOL(dev_close); |
@@ -5184,27 +5186,27 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) | |||
5184 | /* Fix illegal checksum combinations */ | 5186 | /* Fix illegal checksum combinations */ |
5185 | if ((features & NETIF_F_HW_CSUM) && | 5187 | if ((features & NETIF_F_HW_CSUM) && |
5186 | (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | 5188 | (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { |
5187 | netdev_info(dev, "mixed HW and IP checksum settings.\n"); | 5189 | netdev_warn(dev, "mixed HW and IP checksum settings.\n"); |
5188 | features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); | 5190 | features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); |
5189 | } | 5191 | } |
5190 | 5192 | ||
5191 | if ((features & NETIF_F_NO_CSUM) && | 5193 | if ((features & NETIF_F_NO_CSUM) && |
5192 | (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | 5194 | (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { |
5193 | netdev_info(dev, "mixed no checksumming and other settings.\n"); | 5195 | netdev_warn(dev, "mixed no checksumming and other settings.\n"); |
5194 | features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); | 5196 | features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); |
5195 | } | 5197 | } |
5196 | 5198 | ||
5197 | /* Fix illegal SG+CSUM combinations. */ | 5199 | /* Fix illegal SG+CSUM combinations. */ |
5198 | if ((features & NETIF_F_SG) && | 5200 | if ((features & NETIF_F_SG) && |
5199 | !(features & NETIF_F_ALL_CSUM)) { | 5201 | !(features & NETIF_F_ALL_CSUM)) { |
5200 | netdev_info(dev, | 5202 | netdev_dbg(dev, |
5201 | "Dropping NETIF_F_SG since no checksum feature.\n"); | 5203 | "Dropping NETIF_F_SG since no checksum feature.\n"); |
5202 | features &= ~NETIF_F_SG; | 5204 | features &= ~NETIF_F_SG; |
5203 | } | 5205 | } |
5204 | 5206 | ||
5205 | /* TSO requires that SG is present as well. */ | 5207 | /* TSO requires that SG is present as well. */ |
5206 | if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { | 5208 | if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { |
5207 | netdev_info(dev, "Dropping TSO features since no SG feature.\n"); | 5209 | netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); |
5208 | features &= ~NETIF_F_ALL_TSO; | 5210 | features &= ~NETIF_F_ALL_TSO; |
5209 | } | 5211 | } |
5210 | 5212 | ||
@@ -5214,7 +5216,7 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) | |||
5214 | 5216 | ||
5215 | /* Software GSO depends on SG. */ | 5217 | /* Software GSO depends on SG. */ |
5216 | if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { | 5218 | if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { |
5217 | netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); | 5219 | netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); |
5218 | features &= ~NETIF_F_GSO; | 5220 | features &= ~NETIF_F_GSO; |
5219 | } | 5221 | } |
5220 | 5222 | ||
@@ -5224,13 +5226,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) | |||
5224 | if (!((features & NETIF_F_GEN_CSUM) || | 5226 | if (!((features & NETIF_F_GEN_CSUM) || |
5225 | (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) | 5227 | (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) |
5226 | == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | 5228 | == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { |
5227 | netdev_info(dev, | 5229 | netdev_dbg(dev, |
5228 | "Dropping NETIF_F_UFO since no checksum offload features.\n"); | 5230 | "Dropping NETIF_F_UFO since no checksum offload features.\n"); |
5229 | features &= ~NETIF_F_UFO; | 5231 | features &= ~NETIF_F_UFO; |
5230 | } | 5232 | } |
5231 | 5233 | ||
5232 | if (!(features & NETIF_F_SG)) { | 5234 | if (!(features & NETIF_F_SG)) { |
5233 | netdev_info(dev, | 5235 | netdev_dbg(dev, |
5234 | "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); | 5236 | "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); |
5235 | features &= ~NETIF_F_UFO; | 5237 | features &= ~NETIF_F_UFO; |
5236 | } | 5238 | } |
@@ -5412,12 +5414,6 @@ int register_netdevice(struct net_device *dev) | |||
5412 | dev->features |= NETIF_F_SOFT_FEATURES; | 5414 | dev->features |= NETIF_F_SOFT_FEATURES; |
5413 | dev->wanted_features = dev->features & dev->hw_features; | 5415 | dev->wanted_features = dev->features & dev->hw_features; |
5414 | 5416 | ||
5415 | /* Avoid warning from netdev_fix_features() for GSO without SG */ | ||
5416 | if (!(dev->wanted_features & NETIF_F_SG)) { | ||
5417 | dev->wanted_features &= ~NETIF_F_GSO; | ||
5418 | dev->features &= ~NETIF_F_GSO; | ||
5419 | } | ||
5420 | |||
5421 | /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, | 5417 | /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, |
5422 | * vlan_dev_init() will do the dev->features check, so these features | 5418 | * vlan_dev_init() will do the dev->features check, so these features |
5423 | * are enabled only if supported by underlying device. | 5419 | * are enabled only if supported by underlying device. |
diff --git a/net/dccp/options.c b/net/dccp/options.c index f06ffcfc8d71..4b2ab657ac8e 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -123,6 +123,8 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, | |||
123 | case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R: | 123 | case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R: |
124 | if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */ | 124 | if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */ |
125 | break; | 125 | break; |
126 | if (len == 0) | ||
127 | goto out_invalid_option; | ||
126 | rc = dccp_feat_parse_options(sk, dreq, mandatory, opt, | 128 | rc = dccp_feat_parse_options(sk, dreq, mandatory, opt, |
127 | *value, value + 1, len - 1); | 129 | *value, value + 1, len - 1); |
128 | if (rc) | 130 | if (rc) |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index a1151b8adf3c..b1d282f11be7 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -223,31 +223,30 @@ static void ip_expire(unsigned long arg) | |||
223 | 223 | ||
224 | if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { | 224 | if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { |
225 | struct sk_buff *head = qp->q.fragments; | 225 | struct sk_buff *head = qp->q.fragments; |
226 | const struct iphdr *iph; | ||
227 | int err; | ||
226 | 228 | ||
227 | rcu_read_lock(); | 229 | rcu_read_lock(); |
228 | head->dev = dev_get_by_index_rcu(net, qp->iif); | 230 | head->dev = dev_get_by_index_rcu(net, qp->iif); |
229 | if (!head->dev) | 231 | if (!head->dev) |
230 | goto out_rcu_unlock; | 232 | goto out_rcu_unlock; |
231 | 233 | ||
234 | /* skb dst is stale, drop it, and perform route lookup again */ | ||
235 | skb_dst_drop(head); | ||
236 | iph = ip_hdr(head); | ||
237 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, | ||
238 | iph->tos, head->dev); | ||
239 | if (err) | ||
240 | goto out_rcu_unlock; | ||
241 | |||
232 | /* | 242 | /* |
233 | * Only search router table for the head fragment, | 243 | * Only an end host needs to send an ICMP |
234 | * when defraging timeout at PRE_ROUTING HOOK. | 244 | * "Fragment Reassembly Timeout" message, per RFC792. |
235 | */ | 245 | */ |
236 | if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) { | 246 | if (qp->user == IP_DEFRAG_CONNTRACK_IN && |
237 | const struct iphdr *iph = ip_hdr(head); | 247 | skb_rtable(head)->rt_type != RTN_LOCAL) |
238 | int err = ip_route_input(head, iph->daddr, iph->saddr, | 248 | goto out_rcu_unlock; |
239 | iph->tos, head->dev); | ||
240 | if (unlikely(err)) | ||
241 | goto out_rcu_unlock; | ||
242 | |||
243 | /* | ||
244 | * Only an end host needs to send an ICMP | ||
245 | * "Fragment Reassembly Timeout" message, per RFC792. | ||
246 | */ | ||
247 | if (skb_rtable(head)->rt_type != RTN_LOCAL) | ||
248 | goto out_rcu_unlock; | ||
249 | 249 | ||
250 | } | ||
251 | 250 | ||
252 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ | 251 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ |
253 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); | 252 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); |
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 34340c9c95fa..f376b05cca81 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -93,6 +93,7 @@ struct bictcp { | |||
93 | u32 ack_cnt; /* number of acks */ | 93 | u32 ack_cnt; /* number of acks */ |
94 | u32 tcp_cwnd; /* estimated tcp cwnd */ | 94 | u32 tcp_cwnd; /* estimated tcp cwnd */ |
95 | #define ACK_RATIO_SHIFT 4 | 95 | #define ACK_RATIO_SHIFT 4 |
96 | #define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT) | ||
96 | u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ | 97 | u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ |
97 | u8 sample_cnt; /* number of samples to decide curr_rtt */ | 98 | u8 sample_cnt; /* number of samples to decide curr_rtt */ |
98 | u8 found; /* the exit point is found? */ | 99 | u8 found; /* the exit point is found? */ |
@@ -398,8 +399,12 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) | |||
398 | u32 delay; | 399 | u32 delay; |
399 | 400 | ||
400 | if (icsk->icsk_ca_state == TCP_CA_Open) { | 401 | if (icsk->icsk_ca_state == TCP_CA_Open) { |
401 | cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; | 402 | u32 ratio = ca->delayed_ack; |
402 | ca->delayed_ack += cnt; | 403 | |
404 | ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; | ||
405 | ratio += cnt; | ||
406 | |||
407 | ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT); | ||
403 | } | 408 | } |
404 | 409 | ||
405 | /* Some calls are for duplicates without timestamps */ | 410 | /* Some calls are for duplicates without timestamps */ |
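A standalone sketch of the clamped update introduced above (the helper name is hypothetical; the arithmetic mirrors the patch): the new ratio is computed in a 32-bit local and capped at ACK_RATIO_LIMIT before being stored back into the 16-bit delayed_ack field, so it can no longer grow without bound:

	static u16 clamped_delayed_ack(u16 delayed_ack, u32 cnt)
	{
		u32 ratio = delayed_ack;

		ratio -= delayed_ack >> ACK_RATIO_SHIFT;	/* shift is 4 */
		ratio += cnt;
		return min(ratio, ACK_RATIO_LIMIT);		/* 32u << ACK_RATIO_SHIFT */
	}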
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 571aa96a175c..2d51840e53a1 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) | |||
69 | } | 69 | } |
70 | EXPORT_SYMBOL(xfrm4_prepare_output); | 70 | EXPORT_SYMBOL(xfrm4_prepare_output); |
71 | 71 | ||
72 | static int xfrm4_output_finish(struct sk_buff *skb) | 72 | int xfrm4_output_finish(struct sk_buff *skb) |
73 | { | 73 | { |
74 | #ifdef CONFIG_NETFILTER | 74 | #ifdef CONFIG_NETFILTER |
75 | if (!skb_dst(skb)->xfrm) { | 75 | if (!skb_dst(skb)->xfrm) { |
@@ -86,7 +86,11 @@ static int xfrm4_output_finish(struct sk_buff *skb) | |||
86 | 86 | ||
87 | int xfrm4_output(struct sk_buff *skb) | 87 | int xfrm4_output(struct sk_buff *skb) |
88 | { | 88 | { |
89 | struct dst_entry *dst = skb_dst(skb); | ||
90 | struct xfrm_state *x = dst->xfrm; | ||
91 | |||
89 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, | 92 | return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, |
90 | NULL, skb_dst(skb)->dev, xfrm4_output_finish, | 93 | NULL, dst->dev, |
94 | x->outer_mode->afinfo->output_finish, | ||
91 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 95 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
92 | } | 96 | } |
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c index 1717c64628d1..805d63ef4340 100644 --- a/net/ipv4/xfrm4_state.c +++ b/net/ipv4/xfrm4_state.c | |||
@@ -78,6 +78,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = { | |||
78 | .init_tempsel = __xfrm4_init_tempsel, | 78 | .init_tempsel = __xfrm4_init_tempsel, |
79 | .init_temprop = xfrm4_init_temprop, | 79 | .init_temprop = xfrm4_init_temprop, |
80 | .output = xfrm4_output, | 80 | .output = xfrm4_output, |
81 | .output_finish = xfrm4_output_finish, | ||
81 | .extract_input = xfrm4_extract_input, | 82 | .extract_input = xfrm4_extract_input, |
82 | .extract_output = xfrm4_extract_output, | 83 | .extract_output = xfrm4_extract_output, |
83 | .transport_finish = xfrm4_transport_finish, | 84 | .transport_finish = xfrm4_transport_finish, |
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index 28e74488a329..a5a4c5dd5396 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c | |||
@@ -45,6 +45,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) | |||
45 | int tcphoff, needs_ack; | 45 | int tcphoff, needs_ack; |
46 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); | 46 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); |
47 | struct ipv6hdr *ip6h; | 47 | struct ipv6hdr *ip6h; |
48 | #define DEFAULT_TOS_VALUE 0x0U | ||
49 | const __u8 tclass = DEFAULT_TOS_VALUE; | ||
48 | struct dst_entry *dst = NULL; | 50 | struct dst_entry *dst = NULL; |
49 | u8 proto; | 51 | u8 proto; |
50 | struct flowi6 fl6; | 52 | struct flowi6 fl6; |
@@ -124,7 +126,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) | |||
124 | skb_put(nskb, sizeof(struct ipv6hdr)); | 126 | skb_put(nskb, sizeof(struct ipv6hdr)); |
125 | skb_reset_network_header(nskb); | 127 | skb_reset_network_header(nskb); |
126 | ip6h = ipv6_hdr(nskb); | 128 | ip6h = ipv6_hdr(nskb); |
127 | ip6h->version = 6; | 129 | *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20)); |
128 | ip6h->hop_limit = ip6_dst_hoplimit(dst); | 130 | ip6h->hop_limit = ip6_dst_hoplimit(dst); |
129 | ip6h->nexthdr = IPPROTO_TCP; | 131 | ip6h->nexthdr = IPPROTO_TCP; |
130 | ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr); | 132 | ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr); |
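For the header change above, a brief sketch of the bit layout it relies on (helper name hypothetical): the first 32 bits of an IPv6 header hold version (4 bits), traffic class (8 bits) and flow label (20 bits), so a single store of htonl(0x60000000 | (tclass << 20)) sets version = 6, the whole traffic class and a zero flow label, where the old code only assigned ip6h->version:

	static __be32 ipv6_first_word(u8 tclass)
	{
		/* version = 6, traffic class = tclass, flow label = 0 */
		return htonl(0x60000000 | ((u32)tclass << 20));
	}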
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 8e688b3de9ab..49a91c5f5623 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -79,7 +79,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) | |||
79 | } | 79 | } |
80 | EXPORT_SYMBOL(xfrm6_prepare_output); | 80 | EXPORT_SYMBOL(xfrm6_prepare_output); |
81 | 81 | ||
82 | static int xfrm6_output_finish(struct sk_buff *skb) | 82 | int xfrm6_output_finish(struct sk_buff *skb) |
83 | { | 83 | { |
84 | #ifdef CONFIG_NETFILTER | 84 | #ifdef CONFIG_NETFILTER |
85 | IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; | 85 | IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; |
@@ -97,9 +97,9 @@ static int __xfrm6_output(struct sk_buff *skb) | |||
97 | if ((x && x->props.mode == XFRM_MODE_TUNNEL) && | 97 | if ((x && x->props.mode == XFRM_MODE_TUNNEL) && |
98 | ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || | 98 | ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || |
99 | dst_allfrag(skb_dst(skb)))) { | 99 | dst_allfrag(skb_dst(skb)))) { |
100 | return ip6_fragment(skb, xfrm6_output_finish); | 100 | return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); |
101 | } | 101 | } |
102 | return xfrm6_output_finish(skb); | 102 | return x->outer_mode->afinfo->output_finish(skb); |
103 | } | 103 | } |
104 | 104 | ||
105 | int xfrm6_output(struct sk_buff *skb) | 105 | int xfrm6_output(struct sk_buff *skb) |
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index afe941e9415c..248f0b2a7ee9 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c | |||
@@ -178,6 +178,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = { | |||
178 | .tmpl_sort = __xfrm6_tmpl_sort, | 178 | .tmpl_sort = __xfrm6_tmpl_sort, |
179 | .state_sort = __xfrm6_state_sort, | 179 | .state_sort = __xfrm6_state_sort, |
180 | .output = xfrm6_output, | 180 | .output = xfrm6_output, |
181 | .output_finish = xfrm6_output_finish, | ||
181 | .extract_input = xfrm6_extract_input, | 182 | .extract_input = xfrm6_extract_input, |
182 | .extract_output = xfrm6_extract_output, | 183 | .extract_output = xfrm6_extract_output, |
183 | .transport_finish = xfrm6_transport_finish, | 184 | .transport_finish = xfrm6_transport_finish, |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ce4596ed1268..bd1224fd216a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -237,6 +237,10 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) | |||
237 | &local->dynamic_ps_disable_work); | 237 | &local->dynamic_ps_disable_work); |
238 | } | 238 | } |
239 | 239 | ||
240 | /* Don't restart the timer if we're not associated */ | ||
241 | if (!ifmgd->associated) | ||
242 | return TX_CONTINUE; | ||
243 | |||
240 | mod_timer(&local->dynamic_ps_timer, jiffies + | 244 | mod_timer(&local->dynamic_ps_timer, jiffies + |
241 | msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); | 245 | msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); |
242 | 246 | ||
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c index 2dc6de13ac18..059af3120be7 100644 --- a/net/netfilter/ipvs/ip_vs_app.c +++ b/net/netfilter/ipvs/ip_vs_app.c | |||
@@ -572,11 +572,11 @@ static const struct file_operations ip_vs_app_fops = { | |||
572 | .open = ip_vs_app_open, | 572 | .open = ip_vs_app_open, |
573 | .read = seq_read, | 573 | .read = seq_read, |
574 | .llseek = seq_lseek, | 574 | .llseek = seq_lseek, |
575 | .release = seq_release, | 575 | .release = seq_release_net, |
576 | }; | 576 | }; |
577 | #endif | 577 | #endif |
578 | 578 | ||
579 | static int __net_init __ip_vs_app_init(struct net *net) | 579 | int __net_init __ip_vs_app_init(struct net *net) |
580 | { | 580 | { |
581 | struct netns_ipvs *ipvs = net_ipvs(net); | 581 | struct netns_ipvs *ipvs = net_ipvs(net); |
582 | 582 | ||
@@ -585,26 +585,17 @@ static int __net_init __ip_vs_app_init(struct net *net) | |||
585 | return 0; | 585 | return 0; |
586 | } | 586 | } |
587 | 587 | ||
588 | static void __net_exit __ip_vs_app_cleanup(struct net *net) | 588 | void __net_exit __ip_vs_app_cleanup(struct net *net) |
589 | { | 589 | { |
590 | proc_net_remove(net, "ip_vs_app"); | 590 | proc_net_remove(net, "ip_vs_app"); |
591 | } | 591 | } |
592 | 592 | ||
593 | static struct pernet_operations ip_vs_app_ops = { | ||
594 | .init = __ip_vs_app_init, | ||
595 | .exit = __ip_vs_app_cleanup, | ||
596 | }; | ||
597 | |||
598 | int __init ip_vs_app_init(void) | 593 | int __init ip_vs_app_init(void) |
599 | { | 594 | { |
600 | int rv; | 595 | return 0; |
601 | |||
602 | rv = register_pernet_subsys(&ip_vs_app_ops); | ||
603 | return rv; | ||
604 | } | 596 | } |
605 | 597 | ||
606 | 598 | ||
607 | void ip_vs_app_cleanup(void) | 599 | void ip_vs_app_cleanup(void) |
608 | { | 600 | { |
609 | unregister_pernet_subsys(&ip_vs_app_ops); | ||
610 | } | 601 | } |
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index c97bd45975be..bf28ac2fc99b 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
@@ -1046,7 +1046,7 @@ static const struct file_operations ip_vs_conn_fops = { | |||
1046 | .open = ip_vs_conn_open, | 1046 | .open = ip_vs_conn_open, |
1047 | .read = seq_read, | 1047 | .read = seq_read, |
1048 | .llseek = seq_lseek, | 1048 | .llseek = seq_lseek, |
1049 | .release = seq_release, | 1049 | .release = seq_release_net, |
1050 | }; | 1050 | }; |
1051 | 1051 | ||
1052 | static const char *ip_vs_origin_name(unsigned flags) | 1052 | static const char *ip_vs_origin_name(unsigned flags) |
@@ -1114,7 +1114,7 @@ static const struct file_operations ip_vs_conn_sync_fops = { | |||
1114 | .open = ip_vs_conn_sync_open, | 1114 | .open = ip_vs_conn_sync_open, |
1115 | .read = seq_read, | 1115 | .read = seq_read, |
1116 | .llseek = seq_lseek, | 1116 | .llseek = seq_lseek, |
1117 | .release = seq_release, | 1117 | .release = seq_release_net, |
1118 | }; | 1118 | }; |
1119 | 1119 | ||
1120 | #endif | 1120 | #endif |
@@ -1258,22 +1258,17 @@ int __net_init __ip_vs_conn_init(struct net *net) | |||
1258 | return 0; | 1258 | return 0; |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | static void __net_exit __ip_vs_conn_cleanup(struct net *net) | 1261 | void __net_exit __ip_vs_conn_cleanup(struct net *net) |
1262 | { | 1262 | { |
1263 | /* flush all the connection entries first */ | 1263 | /* flush all the connection entries first */ |
1264 | ip_vs_conn_flush(net); | 1264 | ip_vs_conn_flush(net); |
1265 | proc_net_remove(net, "ip_vs_conn"); | 1265 | proc_net_remove(net, "ip_vs_conn"); |
1266 | proc_net_remove(net, "ip_vs_conn_sync"); | 1266 | proc_net_remove(net, "ip_vs_conn_sync"); |
1267 | } | 1267 | } |
1268 | static struct pernet_operations ipvs_conn_ops = { | ||
1269 | .init = __ip_vs_conn_init, | ||
1270 | .exit = __ip_vs_conn_cleanup, | ||
1271 | }; | ||
1272 | 1268 | ||
1273 | int __init ip_vs_conn_init(void) | 1269 | int __init ip_vs_conn_init(void) |
1274 | { | 1270 | { |
1275 | int idx; | 1271 | int idx; |
1276 | int retc; | ||
1277 | 1272 | ||
1278 | /* Compute size and mask */ | 1273 | /* Compute size and mask */ |
1279 | ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; | 1274 | ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; |
@@ -1309,17 +1304,14 @@ int __init ip_vs_conn_init(void) | |||
1309 | rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); | 1304 | rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); |
1310 | } | 1305 | } |
1311 | 1306 | ||
1312 | retc = register_pernet_subsys(&ipvs_conn_ops); | ||
1313 | |||
1314 | /* calculate the random value for connection hash */ | 1307 | /* calculate the random value for connection hash */ |
1315 | get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd)); | 1308 | get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd)); |
1316 | 1309 | ||
1317 | return retc; | 1310 | return 0; |
1318 | } | 1311 | } |
1319 | 1312 | ||
1320 | void ip_vs_conn_cleanup(void) | 1313 | void ip_vs_conn_cleanup(void) |
1321 | { | 1314 | { |
1322 | unregister_pernet_subsys(&ipvs_conn_ops); | ||
1323 | /* Release the empty cache */ | 1315 | /* Release the empty cache */ |
1324 | kmem_cache_destroy(ip_vs_conn_cachep); | 1316 | kmem_cache_destroy(ip_vs_conn_cachep); |
1325 | vfree(ip_vs_conn_tab); | 1317 | vfree(ip_vs_conn_tab); |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 07accf6b2401..a74dae6c5dbc 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -1113,6 +1113,9 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1113 | return NF_ACCEPT; | 1113 | return NF_ACCEPT; |
1114 | 1114 | ||
1115 | net = skb_net(skb); | 1115 | net = skb_net(skb); |
1116 | if (!net_ipvs(net)->enable) | ||
1117 | return NF_ACCEPT; | ||
1118 | |||
1116 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1119 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
1117 | #ifdef CONFIG_IP_VS_IPV6 | 1120 | #ifdef CONFIG_IP_VS_IPV6 |
1118 | if (af == AF_INET6) { | 1121 | if (af == AF_INET6) { |
@@ -1343,6 +1346,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
1343 | return NF_ACCEPT; /* The packet looks wrong, ignore */ | 1346 | return NF_ACCEPT; /* The packet looks wrong, ignore */ |
1344 | 1347 | ||
1345 | net = skb_net(skb); | 1348 | net = skb_net(skb); |
1349 | |||
1346 | pd = ip_vs_proto_data_get(net, cih->protocol); | 1350 | pd = ip_vs_proto_data_get(net, cih->protocol); |
1347 | if (!pd) | 1351 | if (!pd) |
1348 | return NF_ACCEPT; | 1352 | return NF_ACCEPT; |
@@ -1529,6 +1533,11 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1529 | IP_VS_DBG_ADDR(af, &iph.daddr), hooknum); | 1533 | IP_VS_DBG_ADDR(af, &iph.daddr), hooknum); |
1530 | return NF_ACCEPT; | 1534 | return NF_ACCEPT; |
1531 | } | 1535 | } |
1536 | /* ipvs enabled in this netns ? */ | ||
1537 | net = skb_net(skb); | ||
1538 | if (!net_ipvs(net)->enable) | ||
1539 | return NF_ACCEPT; | ||
1540 | |||
1532 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1541 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
1533 | 1542 | ||
1534 | /* Bad... Do not break raw sockets */ | 1543 | /* Bad... Do not break raw sockets */ |
@@ -1562,7 +1571,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1562 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1571 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
1563 | } | 1572 | } |
1564 | 1573 | ||
1565 | net = skb_net(skb); | ||
1566 | /* Protocol supported? */ | 1574 | /* Protocol supported? */ |
1567 | pd = ip_vs_proto_data_get(net, iph.protocol); | 1575 | pd = ip_vs_proto_data_get(net, iph.protocol); |
1568 | if (unlikely(!pd)) | 1576 | if (unlikely(!pd)) |
@@ -1588,7 +1596,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1588 | } | 1596 | } |
1589 | 1597 | ||
1590 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); | 1598 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); |
1591 | net = skb_net(skb); | ||
1592 | ipvs = net_ipvs(net); | 1599 | ipvs = net_ipvs(net); |
1593 | /* Check the server status */ | 1600 | /* Check the server status */ |
1594 | if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 1601 | if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
@@ -1743,10 +1750,16 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, | |||
1743 | int (*okfn)(struct sk_buff *)) | 1750 | int (*okfn)(struct sk_buff *)) |
1744 | { | 1751 | { |
1745 | int r; | 1752 | int r; |
1753 | struct net *net; | ||
1746 | 1754 | ||
1747 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) | 1755 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) |
1748 | return NF_ACCEPT; | 1756 | return NF_ACCEPT; |
1749 | 1757 | ||
1758 | /* ipvs enabled in this netns ? */ | ||
1759 | net = skb_net(skb); | ||
1760 | if (!net_ipvs(net)->enable) | ||
1761 | return NF_ACCEPT; | ||
1762 | |||
1750 | return ip_vs_in_icmp(skb, &r, hooknum); | 1763 | return ip_vs_in_icmp(skb, &r, hooknum); |
1751 | } | 1764 | } |
1752 | 1765 | ||
@@ -1757,10 +1770,16 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | |||
1757 | int (*okfn)(struct sk_buff *)) | 1770 | int (*okfn)(struct sk_buff *)) |
1758 | { | 1771 | { |
1759 | int r; | 1772 | int r; |
1773 | struct net *net; | ||
1760 | 1774 | ||
1761 | if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) | 1775 | if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) |
1762 | return NF_ACCEPT; | 1776 | return NF_ACCEPT; |
1763 | 1777 | ||
1778 | /* ipvs enabled in this netns ? */ | ||
1779 | net = skb_net(skb); | ||
1780 | if (!net_ipvs(net)->enable) | ||
1781 | return NF_ACCEPT; | ||
1782 | |||
1764 | return ip_vs_in_icmp_v6(skb, &r, hooknum); | 1783 | return ip_vs_in_icmp_v6(skb, &r, hooknum); |
1765 | } | 1784 | } |
1766 | #endif | 1785 | #endif |
@@ -1884,19 +1903,70 @@ static int __net_init __ip_vs_init(struct net *net) | |||
1884 | pr_err("%s(): no memory.\n", __func__); | 1903 | pr_err("%s(): no memory.\n", __func__); |
1885 | return -ENOMEM; | 1904 | return -ENOMEM; |
1886 | } | 1905 | } |
1906 | /* Hold the beast until a service is registered */ | ||
1907 | ipvs->enable = 0; | ||
1887 | ipvs->net = net; | 1908 | ipvs->net = net; |
1888 | /* Counters used for creating unique names */ | 1909 | /* Counters used for creating unique names */ |
1889 | ipvs->gen = atomic_read(&ipvs_netns_cnt); | 1910 | ipvs->gen = atomic_read(&ipvs_netns_cnt); |
1890 | atomic_inc(&ipvs_netns_cnt); | 1911 | atomic_inc(&ipvs_netns_cnt); |
1891 | net->ipvs = ipvs; | 1912 | net->ipvs = ipvs; |
1913 | |||
1914 | if (__ip_vs_estimator_init(net) < 0) | ||
1915 | goto estimator_fail; | ||
1916 | |||
1917 | if (__ip_vs_control_init(net) < 0) | ||
1918 | goto control_fail; | ||
1919 | |||
1920 | if (__ip_vs_protocol_init(net) < 0) | ||
1921 | goto protocol_fail; | ||
1922 | |||
1923 | if (__ip_vs_app_init(net) < 0) | ||
1924 | goto app_fail; | ||
1925 | |||
1926 | if (__ip_vs_conn_init(net) < 0) | ||
1927 | goto conn_fail; | ||
1928 | |||
1929 | if (__ip_vs_sync_init(net) < 0) | ||
1930 | goto sync_fail; | ||
1931 | |||
1892 | printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n", | 1932 | printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n", |
1893 | sizeof(struct netns_ipvs), ipvs->gen); | 1933 | sizeof(struct netns_ipvs), ipvs->gen); |
1894 | return 0; | 1934 | return 0; |
1935 | /* | ||
1936 | * Error handling | ||
1937 | */ | ||
1938 | |||
1939 | sync_fail: | ||
1940 | __ip_vs_conn_cleanup(net); | ||
1941 | conn_fail: | ||
1942 | __ip_vs_app_cleanup(net); | ||
1943 | app_fail: | ||
1944 | __ip_vs_protocol_cleanup(net); | ||
1945 | protocol_fail: | ||
1946 | __ip_vs_control_cleanup(net); | ||
1947 | control_fail: | ||
1948 | __ip_vs_estimator_cleanup(net); | ||
1949 | estimator_fail: | ||
1950 | return -ENOMEM; | ||
1895 | } | 1951 | } |
1896 | 1952 | ||
1897 | static void __net_exit __ip_vs_cleanup(struct net *net) | 1953 | static void __net_exit __ip_vs_cleanup(struct net *net) |
1898 | { | 1954 | { |
1899 | IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen); | 1955 | __ip_vs_service_cleanup(net); /* ip_vs_flush() with locks */ |
1956 | __ip_vs_conn_cleanup(net); | ||
1957 | __ip_vs_app_cleanup(net); | ||
1958 | __ip_vs_protocol_cleanup(net); | ||
1959 | __ip_vs_control_cleanup(net); | ||
1960 | __ip_vs_estimator_cleanup(net); | ||
1961 | IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); | ||
1962 | } | ||
1963 | |||
1964 | static void __net_exit __ip_vs_dev_cleanup(struct net *net) | ||
1965 | { | ||
1966 | EnterFunction(2); | ||
1967 | net_ipvs(net)->enable = 0; /* Disable packet reception */ | ||
1968 | __ip_vs_sync_cleanup(net); | ||
1969 | LeaveFunction(2); | ||
1900 | } | 1970 | } |
1901 | 1971 | ||
1902 | static struct pernet_operations ipvs_core_ops = { | 1972 | static struct pernet_operations ipvs_core_ops = { |
@@ -1906,6 +1976,10 @@ static struct pernet_operations ipvs_core_ops = { | |||
1906 | .size = sizeof(struct netns_ipvs), | 1976 | .size = sizeof(struct netns_ipvs), |
1907 | }; | 1977 | }; |
1908 | 1978 | ||
1979 | static struct pernet_operations ipvs_core_dev_ops = { | ||
1980 | .exit = __ip_vs_dev_cleanup, | ||
1981 | }; | ||
1982 | |||
1909 | /* | 1983 | /* |
1910 | * Initialize IP Virtual Server | 1984 | * Initialize IP Virtual Server |
1911 | */ | 1985 | */ |
@@ -1913,10 +1987,6 @@ static int __init ip_vs_init(void) | |||
1913 | { | 1987 | { |
1914 | int ret; | 1988 | int ret; |
1915 | 1989 | ||
1916 | ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ | ||
1917 | if (ret < 0) | ||
1918 | return ret; | ||
1919 | |||
1920 | ip_vs_estimator_init(); | 1990 | ip_vs_estimator_init(); |
1921 | ret = ip_vs_control_init(); | 1991 | ret = ip_vs_control_init(); |
1922 | if (ret < 0) { | 1992 | if (ret < 0) { |
@@ -1944,15 +2014,28 @@ static int __init ip_vs_init(void) | |||
1944 | goto cleanup_conn; | 2014 | goto cleanup_conn; |
1945 | } | 2015 | } |
1946 | 2016 | ||
2017 | ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ | ||
2018 | if (ret < 0) | ||
2019 | goto cleanup_sync; | ||
2020 | |||
2021 | ret = register_pernet_device(&ipvs_core_dev_ops); | ||
2022 | if (ret < 0) | ||
2023 | goto cleanup_sub; | ||
2024 | |||
1947 | ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); | 2025 | ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); |
1948 | if (ret < 0) { | 2026 | if (ret < 0) { |
1949 | pr_err("can't register hooks.\n"); | 2027 | pr_err("can't register hooks.\n"); |
1950 | goto cleanup_sync; | 2028 | goto cleanup_dev; |
1951 | } | 2029 | } |
1952 | 2030 | ||
1953 | pr_info("ipvs loaded.\n"); | 2031 | pr_info("ipvs loaded.\n"); |
2032 | |||
1954 | return ret; | 2033 | return ret; |
1955 | 2034 | ||
2035 | cleanup_dev: | ||
2036 | unregister_pernet_device(&ipvs_core_dev_ops); | ||
2037 | cleanup_sub: | ||
2038 | unregister_pernet_subsys(&ipvs_core_ops); | ||
1956 | cleanup_sync: | 2039 | cleanup_sync: |
1957 | ip_vs_sync_cleanup(); | 2040 | ip_vs_sync_cleanup(); |
1958 | cleanup_conn: | 2041 | cleanup_conn: |
@@ -1964,20 +2047,20 @@ cleanup_sync: | |||
1964 | ip_vs_control_cleanup(); | 2047 | ip_vs_control_cleanup(); |
1965 | cleanup_estimator: | 2048 | cleanup_estimator: |
1966 | ip_vs_estimator_cleanup(); | 2049 | ip_vs_estimator_cleanup(); |
1967 | unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ | ||
1968 | return ret; | 2050 | return ret; |
1969 | } | 2051 | } |
1970 | 2052 | ||
1971 | static void __exit ip_vs_cleanup(void) | 2053 | static void __exit ip_vs_cleanup(void) |
1972 | { | 2054 | { |
1973 | nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); | 2055 | nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); |
2056 | unregister_pernet_device(&ipvs_core_dev_ops); | ||
2057 | unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ | ||
1974 | ip_vs_sync_cleanup(); | 2058 | ip_vs_sync_cleanup(); |
1975 | ip_vs_conn_cleanup(); | 2059 | ip_vs_conn_cleanup(); |
1976 | ip_vs_app_cleanup(); | 2060 | ip_vs_app_cleanup(); |
1977 | ip_vs_protocol_cleanup(); | 2061 | ip_vs_protocol_cleanup(); |
1978 | ip_vs_control_cleanup(); | 2062 | ip_vs_control_cleanup(); |
1979 | ip_vs_estimator_cleanup(); | 2063 | ip_vs_estimator_cleanup(); |
1980 | unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ | ||
1981 | pr_info("ipvs unloaded.\n"); | 2064 | pr_info("ipvs unloaded.\n"); |
1982 | } | 2065 | } |
1983 | 2066 | ||
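The reworked __ip_vs_init() above chains six per-netns constructors and, on failure, unwinds only the stages that already succeeded, in reverse order, through the sync_fail/.../estimator_fail labels. A minimal standalone sketch of that staged-init/goto-unwind idiom (plain userspace C with invented init_*/cleanup_* names, not part of the patch):

    #include <stdio.h>

    static int init_a(void) { puts("init a"); return 0; }
    static int init_b(void) { puts("init b"); return 0; }
    static int init_c(void) { puts("init c"); return -1; }  /* pretend this stage fails */
    static void cleanup_a(void) { puts("cleanup a"); }
    static void cleanup_b(void) { puts("cleanup b"); }

    static int subsys_init(void)
    {
            if (init_a() < 0)
                    goto a_fail;
            if (init_b() < 0)
                    goto b_fail;
            if (init_c() < 0)
                    goto c_fail;
            return 0;

    c_fail:
            cleanup_b();            /* undo init_b() */
    b_fail:
            cleanup_a();            /* undo init_a() */
    a_fail:
            return -1;
    }

    int main(void)
    {
            return subsys_init() ? 1 : 0;
    }

Each label undoes exactly the constructors that completed before the failing one, which is the ordering the hunk relies on.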
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index ae47090bf45f..37890f228b19 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -69,6 +69,11 @@ int ip_vs_get_debug_level(void) | |||
69 | } | 69 | } |
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | |||
73 | /* Protos */ | ||
74 | static void __ip_vs_del_service(struct ip_vs_service *svc); | ||
75 | |||
76 | |||
72 | #ifdef CONFIG_IP_VS_IPV6 | 77 | #ifdef CONFIG_IP_VS_IPV6 |
73 | /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ | 78 | /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ |
74 | static int __ip_vs_addr_is_local_v6(struct net *net, | 79 | static int __ip_vs_addr_is_local_v6(struct net *net, |
@@ -1214,6 +1219,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, | |||
1214 | write_unlock_bh(&__ip_vs_svc_lock); | 1219 | write_unlock_bh(&__ip_vs_svc_lock); |
1215 | 1220 | ||
1216 | *svc_p = svc; | 1221 | *svc_p = svc; |
1222 | /* Now there is a service - full throttle */ | ||
1223 | ipvs->enable = 1; | ||
1217 | return 0; | 1224 | return 0; |
1218 | 1225 | ||
1219 | 1226 | ||
@@ -1472,6 +1479,84 @@ static int ip_vs_flush(struct net *net) | |||
1472 | return 0; | 1479 | return 0; |
1473 | } | 1480 | } |
1474 | 1481 | ||
1482 | /* | ||
1483 | * Delete service by {netns} in the service table. | ||
1484 | * Called by __ip_vs_cleanup() | ||
1485 | */ | ||
1486 | void __ip_vs_service_cleanup(struct net *net) | ||
1487 | { | ||
1488 | EnterFunction(2); | ||
1489 | /* Check for "full" addressed entries */ | ||
1490 | mutex_lock(&__ip_vs_mutex); | ||
1491 | ip_vs_flush(net); | ||
1492 | mutex_unlock(&__ip_vs_mutex); | ||
1493 | LeaveFunction(2); | ||
1494 | } | ||
1495 | /* | ||
1497 | * Release the dst held by dst_cache | ||
1497 | */ | ||
1498 | static inline void | ||
1499 | __ip_vs_dev_reset(struct ip_vs_dest *dest, struct net_device *dev) | ||
1500 | { | ||
1501 | spin_lock_bh(&dest->dst_lock); | ||
1502 | if (dest->dst_cache && dest->dst_cache->dev == dev) { | ||
1503 | IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n", | ||
1504 | dev->name, | ||
1505 | IP_VS_DBG_ADDR(dest->af, &dest->addr), | ||
1506 | ntohs(dest->port), | ||
1507 | atomic_read(&dest->refcnt)); | ||
1508 | ip_vs_dst_reset(dest); | ||
1509 | } | ||
1510 | spin_unlock_bh(&dest->dst_lock); | ||
1511 | |||
1512 | } | ||
1513 | /* | ||
1514 | * Netdev event receiver | ||
1515 | * Currently only NETDEV_UNREGISTER is handled, i.e. if we hold a reference to | ||
1516 | * a device that is "unregister" it must be released. | ||
1517 | */ | ||
1518 | static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, | ||
1519 | void *ptr) | ||
1520 | { | ||
1521 | struct net_device *dev = ptr; | ||
1522 | struct net *net = dev_net(dev); | ||
1523 | struct ip_vs_service *svc; | ||
1524 | struct ip_vs_dest *dest; | ||
1525 | unsigned int idx; | ||
1526 | |||
1527 | if (event != NETDEV_UNREGISTER) | ||
1528 | return NOTIFY_DONE; | ||
1529 | IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); | ||
1530 | EnterFunction(2); | ||
1531 | mutex_lock(&__ip_vs_mutex); | ||
1532 | for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { | ||
1533 | list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { | ||
1534 | if (net_eq(svc->net, net)) { | ||
1535 | list_for_each_entry(dest, &svc->destinations, | ||
1536 | n_list) { | ||
1537 | __ip_vs_dev_reset(dest, dev); | ||
1538 | } | ||
1539 | } | ||
1540 | } | ||
1541 | |||
1542 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { | ||
1543 | if (net_eq(svc->net, net)) { | ||
1544 | list_for_each_entry(dest, &svc->destinations, | ||
1545 | n_list) { | ||
1546 | __ip_vs_dev_reset(dest, dev); | ||
1547 | } | ||
1548 | } | ||
1549 | |||
1550 | } | ||
1551 | } | ||
1552 | |||
1553 | list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) { | ||
1554 | __ip_vs_dev_reset(dest, dev); | ||
1555 | } | ||
1556 | mutex_unlock(&__ip_vs_mutex); | ||
1557 | LeaveFunction(2); | ||
1558 | return NOTIFY_DONE; | ||
1559 | } | ||
1475 | 1560 | ||
1476 | /* | 1561 | /* |
1477 | * Zero counters in a service or all services | 1562 | * Zero counters in a service or all services |
@@ -1981,7 +2066,7 @@ static const struct file_operations ip_vs_info_fops = { | |||
1981 | .open = ip_vs_info_open, | 2066 | .open = ip_vs_info_open, |
1982 | .read = seq_read, | 2067 | .read = seq_read, |
1983 | .llseek = seq_lseek, | 2068 | .llseek = seq_lseek, |
1984 | .release = seq_release_private, | 2069 | .release = seq_release_net, |
1985 | }; | 2070 | }; |
1986 | 2071 | ||
1987 | #endif | 2072 | #endif |
@@ -2024,7 +2109,7 @@ static const struct file_operations ip_vs_stats_fops = { | |||
2024 | .open = ip_vs_stats_seq_open, | 2109 | .open = ip_vs_stats_seq_open, |
2025 | .read = seq_read, | 2110 | .read = seq_read, |
2026 | .llseek = seq_lseek, | 2111 | .llseek = seq_lseek, |
2027 | .release = single_release, | 2112 | .release = single_release_net, |
2028 | }; | 2113 | }; |
2029 | 2114 | ||
2030 | static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) | 2115 | static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) |
@@ -2093,7 +2178,7 @@ static const struct file_operations ip_vs_stats_percpu_fops = { | |||
2093 | .open = ip_vs_stats_percpu_seq_open, | 2178 | .open = ip_vs_stats_percpu_seq_open, |
2094 | .read = seq_read, | 2179 | .read = seq_read, |
2095 | .llseek = seq_lseek, | 2180 | .llseek = seq_lseek, |
2096 | .release = single_release, | 2181 | .release = single_release_net, |
2097 | }; | 2182 | }; |
2098 | #endif | 2183 | #endif |
2099 | 2184 | ||
@@ -3588,6 +3673,10 @@ void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) { } | |||
3588 | 3673 | ||
3589 | #endif | 3674 | #endif |
3590 | 3675 | ||
3676 | static struct notifier_block ip_vs_dst_notifier = { | ||
3677 | .notifier_call = ip_vs_dst_event, | ||
3678 | }; | ||
3679 | |||
3591 | int __net_init __ip_vs_control_init(struct net *net) | 3680 | int __net_init __ip_vs_control_init(struct net *net) |
3592 | { | 3681 | { |
3593 | int idx; | 3682 | int idx; |
@@ -3626,7 +3715,7 @@ err: | |||
3626 | return -ENOMEM; | 3715 | return -ENOMEM; |
3627 | } | 3716 | } |
3628 | 3717 | ||
3629 | static void __net_exit __ip_vs_control_cleanup(struct net *net) | 3718 | void __net_exit __ip_vs_control_cleanup(struct net *net) |
3630 | { | 3719 | { |
3631 | struct netns_ipvs *ipvs = net_ipvs(net); | 3720 | struct netns_ipvs *ipvs = net_ipvs(net); |
3632 | 3721 | ||
@@ -3639,11 +3728,6 @@ static void __net_exit __ip_vs_control_cleanup(struct net *net) | |||
3639 | free_percpu(ipvs->tot_stats.cpustats); | 3728 | free_percpu(ipvs->tot_stats.cpustats); |
3640 | } | 3729 | } |
3641 | 3730 | ||
3642 | static struct pernet_operations ipvs_control_ops = { | ||
3643 | .init = __ip_vs_control_init, | ||
3644 | .exit = __ip_vs_control_cleanup, | ||
3645 | }; | ||
3646 | |||
3647 | int __init ip_vs_control_init(void) | 3731 | int __init ip_vs_control_init(void) |
3648 | { | 3732 | { |
3649 | int idx; | 3733 | int idx; |
@@ -3657,33 +3741,32 @@ int __init ip_vs_control_init(void) | |||
3657 | INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); | 3741 | INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); |
3658 | } | 3742 | } |
3659 | 3743 | ||
3660 | ret = register_pernet_subsys(&ipvs_control_ops); | ||
3661 | if (ret) { | ||
3662 | pr_err("cannot register namespace.\n"); | ||
3663 | goto err; | ||
3664 | } | ||
3665 | |||
3666 | smp_wmb(); /* Do we really need it now ? */ | 3744 | smp_wmb(); /* Do we really need it now ? */ |
3667 | 3745 | ||
3668 | ret = nf_register_sockopt(&ip_vs_sockopts); | 3746 | ret = nf_register_sockopt(&ip_vs_sockopts); |
3669 | if (ret) { | 3747 | if (ret) { |
3670 | pr_err("cannot register sockopt.\n"); | 3748 | pr_err("cannot register sockopt.\n"); |
3671 | goto err_net; | 3749 | goto err_sock; |
3672 | } | 3750 | } |
3673 | 3751 | ||
3674 | ret = ip_vs_genl_register(); | 3752 | ret = ip_vs_genl_register(); |
3675 | if (ret) { | 3753 | if (ret) { |
3676 | pr_err("cannot register Generic Netlink interface.\n"); | 3754 | pr_err("cannot register Generic Netlink interface.\n"); |
3677 | nf_unregister_sockopt(&ip_vs_sockopts); | 3755 | goto err_genl; |
3678 | goto err_net; | ||
3679 | } | 3756 | } |
3680 | 3757 | ||
3758 | ret = register_netdevice_notifier(&ip_vs_dst_notifier); | ||
3759 | if (ret < 0) | ||
3760 | goto err_notf; | ||
3761 | |||
3681 | LeaveFunction(2); | 3762 | LeaveFunction(2); |
3682 | return 0; | 3763 | return 0; |
3683 | 3764 | ||
3684 | err_net: | 3765 | err_notf: |
3685 | unregister_pernet_subsys(&ipvs_control_ops); | 3766 | ip_vs_genl_unregister(); |
3686 | err: | 3767 | err_genl: |
3768 | nf_unregister_sockopt(&ip_vs_sockopts); | ||
3769 | err_sock: | ||
3687 | return ret; | 3770 | return ret; |
3688 | } | 3771 | } |
3689 | 3772 | ||
@@ -3691,7 +3774,6 @@ err: | |||
3691 | void ip_vs_control_cleanup(void) | 3774 | void ip_vs_control_cleanup(void) |
3692 | { | 3775 | { |
3693 | EnterFunction(2); | 3776 | EnterFunction(2); |
3694 | unregister_pernet_subsys(&ipvs_control_ops); | ||
3695 | ip_vs_genl_unregister(); | 3777 | ip_vs_genl_unregister(); |
3696 | nf_unregister_sockopt(&ip_vs_sockopts); | 3778 | nf_unregister_sockopt(&ip_vs_sockopts); |
3697 | LeaveFunction(2); | 3779 | LeaveFunction(2); |
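The new ip_vs_dst_event() notifier above reacts to NETDEV_UNREGISTER by walking every destination of every service in the affected netns and calling __ip_vs_dev_reset() only where the cached route still points at the vanishing device. A toy standalone sketch of that filter-and-drop walk (invented structures and device names, not kernel code):

    #include <stdio.h>
    #include <string.h>

    struct dest {
            const char *cached_dev;         /* device the cached route goes through */
            int has_cache;
    };

    static void dev_reset(struct dest *d, const char *gone)
    {
            if (d->has_cache && strcmp(d->cached_dev, gone) == 0) {
                    printf("dropping cached route via %s\n", gone);
                    d->has_cache = 0;
            }
    }

    int main(void)
    {
            struct dest dests[] = { { "eth0", 1 }, { "eth1", 1 }, { "eth0", 1 } };
            unsigned int i;

            /* pretend eth0 is being unregistered */
            for (i = 0; i < sizeof(dests) / sizeof(dests[0]); i++)
                    dev_reset(&dests[i], "eth0");
            return 0;
    }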
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c index 8c8766ca56ad..508cce98777c 100644 --- a/net/netfilter/ipvs/ip_vs_est.c +++ b/net/netfilter/ipvs/ip_vs_est.c | |||
@@ -192,7 +192,7 @@ void ip_vs_read_estimator(struct ip_vs_stats_user *dst, | |||
192 | dst->outbps = (e->outbps + 0xF) >> 5; | 192 | dst->outbps = (e->outbps + 0xF) >> 5; |
193 | } | 193 | } |
194 | 194 | ||
195 | static int __net_init __ip_vs_estimator_init(struct net *net) | 195 | int __net_init __ip_vs_estimator_init(struct net *net) |
196 | { | 196 | { |
197 | struct netns_ipvs *ipvs = net_ipvs(net); | 197 | struct netns_ipvs *ipvs = net_ipvs(net); |
198 | 198 | ||
@@ -203,24 +203,16 @@ static int __net_init __ip_vs_estimator_init(struct net *net) | |||
203 | return 0; | 203 | return 0; |
204 | } | 204 | } |
205 | 205 | ||
206 | static void __net_exit __ip_vs_estimator_exit(struct net *net) | 206 | void __net_exit __ip_vs_estimator_cleanup(struct net *net) |
207 | { | 207 | { |
208 | del_timer_sync(&net_ipvs(net)->est_timer); | 208 | del_timer_sync(&net_ipvs(net)->est_timer); |
209 | } | 209 | } |
210 | static struct pernet_operations ip_vs_app_ops = { | ||
211 | .init = __ip_vs_estimator_init, | ||
212 | .exit = __ip_vs_estimator_exit, | ||
213 | }; | ||
214 | 210 | ||
215 | int __init ip_vs_estimator_init(void) | 211 | int __init ip_vs_estimator_init(void) |
216 | { | 212 | { |
217 | int rv; | 213 | return 0; |
218 | |||
219 | rv = register_pernet_subsys(&ip_vs_app_ops); | ||
220 | return rv; | ||
221 | } | 214 | } |
222 | 215 | ||
223 | void ip_vs_estimator_cleanup(void) | 216 | void ip_vs_estimator_cleanup(void) |
224 | { | 217 | { |
225 | unregister_pernet_subsys(&ip_vs_app_ops); | ||
226 | } | 218 | } |
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index 17484a4416ef..eb86028536fc 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c | |||
@@ -316,7 +316,7 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, | |||
316 | /* | 316 | /* |
317 | * per network name-space init | 317 | * per network name-space init |
318 | */ | 318 | */ |
319 | static int __net_init __ip_vs_protocol_init(struct net *net) | 319 | int __net_init __ip_vs_protocol_init(struct net *net) |
320 | { | 320 | { |
321 | #ifdef CONFIG_IP_VS_PROTO_TCP | 321 | #ifdef CONFIG_IP_VS_PROTO_TCP |
322 | register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); | 322 | register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); |
@@ -336,7 +336,7 @@ static int __net_init __ip_vs_protocol_init(struct net *net) | |||
336 | return 0; | 336 | return 0; |
337 | } | 337 | } |
338 | 338 | ||
339 | static void __net_exit __ip_vs_protocol_cleanup(struct net *net) | 339 | void __net_exit __ip_vs_protocol_cleanup(struct net *net) |
340 | { | 340 | { |
341 | struct netns_ipvs *ipvs = net_ipvs(net); | 341 | struct netns_ipvs *ipvs = net_ipvs(net); |
342 | struct ip_vs_proto_data *pd; | 342 | struct ip_vs_proto_data *pd; |
@@ -349,11 +349,6 @@ static void __net_exit __ip_vs_protocol_cleanup(struct net *net) | |||
349 | } | 349 | } |
350 | } | 350 | } |
351 | 351 | ||
352 | static struct pernet_operations ipvs_proto_ops = { | ||
353 | .init = __ip_vs_protocol_init, | ||
354 | .exit = __ip_vs_protocol_cleanup, | ||
355 | }; | ||
356 | |||
357 | int __init ip_vs_protocol_init(void) | 352 | int __init ip_vs_protocol_init(void) |
358 | { | 353 | { |
359 | char protocols[64]; | 354 | char protocols[64]; |
@@ -382,7 +377,6 @@ int __init ip_vs_protocol_init(void) | |||
382 | REGISTER_PROTOCOL(&ip_vs_protocol_esp); | 377 | REGISTER_PROTOCOL(&ip_vs_protocol_esp); |
383 | #endif | 378 | #endif |
384 | pr_info("Registered protocols (%s)\n", &protocols[2]); | 379 | pr_info("Registered protocols (%s)\n", &protocols[2]); |
385 | return register_pernet_subsys(&ipvs_proto_ops); | ||
386 | 380 | ||
387 | return 0; | 381 | return 0; |
388 | } | 382 | } |
@@ -393,7 +387,6 @@ void ip_vs_protocol_cleanup(void) | |||
393 | struct ip_vs_protocol *pp; | 387 | struct ip_vs_protocol *pp; |
394 | int i; | 388 | int i; |
395 | 389 | ||
396 | unregister_pernet_subsys(&ipvs_proto_ops); | ||
397 | /* unregister all the ipvs protocols */ | 390 | /* unregister all the ipvs protocols */ |
398 | for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { | 391 | for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { |
399 | while ((pp = ip_vs_proto_table[i]) != NULL) | 392 | while ((pp = ip_vs_proto_table[i]) != NULL) |
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 3e7961e85e9c..e292e5bddc70 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -1303,13 +1303,18 @@ static struct socket *make_send_sock(struct net *net) | |||
1303 | struct socket *sock; | 1303 | struct socket *sock; |
1304 | int result; | 1304 | int result; |
1305 | 1305 | ||
1306 | /* First create a socket */ | 1306 | /* First create a socket, then move it to the right namespace later */ |
1307 | result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); | 1307 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); |
1308 | if (result < 0) { | 1308 | if (result < 0) { |
1309 | pr_err("Error during creation of socket; terminating\n"); | 1309 | pr_err("Error during creation of socket; terminating\n"); |
1310 | return ERR_PTR(result); | 1310 | return ERR_PTR(result); |
1311 | } | 1311 | } |
1312 | 1312 | /* | |
1313 | * Kernel sockets that are part of a namespace should not | ||
1314 | * hold a reference to the namespace, so that the namespace can be stopped. | ||
1315 | * After sk_change_net() the socket should be released with sk_release_kernel(). | ||
1316 | */ | ||
1317 | sk_change_net(sock->sk, net); | ||
1313 | result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); | 1318 | result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); |
1314 | if (result < 0) { | 1319 | if (result < 0) { |
1315 | pr_err("Error setting outbound mcast interface\n"); | 1320 | pr_err("Error setting outbound mcast interface\n"); |
@@ -1334,8 +1339,8 @@ static struct socket *make_send_sock(struct net *net) | |||
1334 | 1339 | ||
1335 | return sock; | 1340 | return sock; |
1336 | 1341 | ||
1337 | error: | 1342 | error: |
1338 | sock_release(sock); | 1343 | sk_release_kernel(sock->sk); |
1339 | return ERR_PTR(result); | 1344 | return ERR_PTR(result); |
1340 | } | 1345 | } |
1341 | 1346 | ||
@@ -1350,12 +1355,17 @@ static struct socket *make_receive_sock(struct net *net) | |||
1350 | int result; | 1355 | int result; |
1351 | 1356 | ||
1352 | /* First create a socket */ | 1357 | /* First create a socket */ |
1353 | result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); | 1358 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); |
1354 | if (result < 0) { | 1359 | if (result < 0) { |
1355 | pr_err("Error during creation of socket; terminating\n"); | 1360 | pr_err("Error during creation of socket; terminating\n"); |
1356 | return ERR_PTR(result); | 1361 | return ERR_PTR(result); |
1357 | } | 1362 | } |
1358 | 1363 | /* | |
1364 | * Kernel sockets that are part of a namespace should not | ||
1365 | * hold a reference to the namespace, so that the namespace can be stopped. | ||
1366 | * After sk_change_net() the socket should be released with sk_release_kernel(). | ||
1367 | */ | ||
1368 | sk_change_net(sock->sk, net); | ||
1359 | /* it is equivalent to the REUSEADDR option in user-space */ | 1369 | /* it is equivalent to the REUSEADDR option in user-space */ |
1360 | sock->sk->sk_reuse = 1; | 1370 | sock->sk->sk_reuse = 1; |
1361 | 1371 | ||
@@ -1377,8 +1387,8 @@ static struct socket *make_receive_sock(struct net *net) | |||
1377 | 1387 | ||
1378 | return sock; | 1388 | return sock; |
1379 | 1389 | ||
1380 | error: | 1390 | error: |
1381 | sock_release(sock); | 1391 | sk_release_kernel(sock->sk); |
1382 | return ERR_PTR(result); | 1392 | return ERR_PTR(result); |
1383 | } | 1393 | } |
1384 | 1394 | ||
@@ -1473,7 +1483,7 @@ static int sync_thread_master(void *data) | |||
1473 | ip_vs_sync_buff_release(sb); | 1483 | ip_vs_sync_buff_release(sb); |
1474 | 1484 | ||
1475 | /* release the sending multicast socket */ | 1485 | /* release the sending multicast socket */ |
1476 | sock_release(tinfo->sock); | 1486 | sk_release_kernel(tinfo->sock->sk); |
1477 | kfree(tinfo); | 1487 | kfree(tinfo); |
1478 | 1488 | ||
1479 | return 0; | 1489 | return 0; |
@@ -1513,7 +1523,7 @@ static int sync_thread_backup(void *data) | |||
1513 | } | 1523 | } |
1514 | 1524 | ||
1515 | /* release the sending multicast socket */ | 1525 | /* release the sending multicast socket */ |
1516 | sock_release(tinfo->sock); | 1526 | sk_release_kernel(tinfo->sock->sk); |
1517 | kfree(tinfo->buf); | 1527 | kfree(tinfo->buf); |
1518 | kfree(tinfo); | 1528 | kfree(tinfo); |
1519 | 1529 | ||
@@ -1601,7 +1611,7 @@ outtinfo: | |||
1601 | outbuf: | 1611 | outbuf: |
1602 | kfree(buf); | 1612 | kfree(buf); |
1603 | outsocket: | 1613 | outsocket: |
1604 | sock_release(sock); | 1614 | sk_release_kernel(sock->sk); |
1605 | out: | 1615 | out: |
1606 | return result; | 1616 | return result; |
1607 | } | 1617 | } |
@@ -1610,6 +1620,7 @@ out: | |||
1610 | int stop_sync_thread(struct net *net, int state) | 1620 | int stop_sync_thread(struct net *net, int state) |
1611 | { | 1621 | { |
1612 | struct netns_ipvs *ipvs = net_ipvs(net); | 1622 | struct netns_ipvs *ipvs = net_ipvs(net); |
1623 | int retc = -EINVAL; | ||
1613 | 1624 | ||
1614 | IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); | 1625 | IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); |
1615 | 1626 | ||
@@ -1629,7 +1640,7 @@ int stop_sync_thread(struct net *net, int state) | |||
1629 | spin_lock_bh(&ipvs->sync_lock); | 1640 | spin_lock_bh(&ipvs->sync_lock); |
1630 | ipvs->sync_state &= ~IP_VS_STATE_MASTER; | 1641 | ipvs->sync_state &= ~IP_VS_STATE_MASTER; |
1631 | spin_unlock_bh(&ipvs->sync_lock); | 1642 | spin_unlock_bh(&ipvs->sync_lock); |
1632 | kthread_stop(ipvs->master_thread); | 1643 | retc = kthread_stop(ipvs->master_thread); |
1633 | ipvs->master_thread = NULL; | 1644 | ipvs->master_thread = NULL; |
1634 | } else if (state == IP_VS_STATE_BACKUP) { | 1645 | } else if (state == IP_VS_STATE_BACKUP) { |
1635 | if (!ipvs->backup_thread) | 1646 | if (!ipvs->backup_thread) |
@@ -1639,22 +1650,20 @@ int stop_sync_thread(struct net *net, int state) | |||
1639 | task_pid_nr(ipvs->backup_thread)); | 1650 | task_pid_nr(ipvs->backup_thread)); |
1640 | 1651 | ||
1641 | ipvs->sync_state &= ~IP_VS_STATE_BACKUP; | 1652 | ipvs->sync_state &= ~IP_VS_STATE_BACKUP; |
1642 | kthread_stop(ipvs->backup_thread); | 1653 | retc = kthread_stop(ipvs->backup_thread); |
1643 | ipvs->backup_thread = NULL; | 1654 | ipvs->backup_thread = NULL; |
1644 | } else { | ||
1645 | return -EINVAL; | ||
1646 | } | 1655 | } |
1647 | 1656 | ||
1648 | /* decrease the module use count */ | 1657 | /* decrease the module use count */ |
1649 | ip_vs_use_count_dec(); | 1658 | ip_vs_use_count_dec(); |
1650 | 1659 | ||
1651 | return 0; | 1660 | return retc; |
1652 | } | 1661 | } |
1653 | 1662 | ||
1654 | /* | 1663 | /* |
1655 | * Initialize data struct for each netns | 1664 | * Initialize data struct for each netns |
1656 | */ | 1665 | */ |
1657 | static int __net_init __ip_vs_sync_init(struct net *net) | 1666 | int __net_init __ip_vs_sync_init(struct net *net) |
1658 | { | 1667 | { |
1659 | struct netns_ipvs *ipvs = net_ipvs(net); | 1668 | struct netns_ipvs *ipvs = net_ipvs(net); |
1660 | 1669 | ||
@@ -1668,24 +1677,24 @@ static int __net_init __ip_vs_sync_init(struct net *net) | |||
1668 | return 0; | 1677 | return 0; |
1669 | } | 1678 | } |
1670 | 1679 | ||
1671 | static void __ip_vs_sync_cleanup(struct net *net) | 1680 | void __ip_vs_sync_cleanup(struct net *net) |
1672 | { | 1681 | { |
1673 | stop_sync_thread(net, IP_VS_STATE_MASTER); | 1682 | int retc; |
1674 | stop_sync_thread(net, IP_VS_STATE_BACKUP); | ||
1675 | } | ||
1676 | 1683 | ||
1677 | static struct pernet_operations ipvs_sync_ops = { | 1684 | retc = stop_sync_thread(net, IP_VS_STATE_MASTER); |
1678 | .init = __ip_vs_sync_init, | 1685 | if (retc && retc != -ESRCH) |
1679 | .exit = __ip_vs_sync_cleanup, | 1686 | pr_err("Failed to stop Master Daemon\n"); |
1680 | }; | ||
1681 | 1687 | ||
1688 | retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); | ||
1689 | if (retc && retc != -ESRCH) | ||
1690 | pr_err("Failed to stop Backup Daemon\n"); | ||
1691 | } | ||
1682 | 1692 | ||
1683 | int __init ip_vs_sync_init(void) | 1693 | int __init ip_vs_sync_init(void) |
1684 | { | 1694 | { |
1685 | return register_pernet_subsys(&ipvs_sync_ops); | 1695 | return 0; |
1686 | } | 1696 | } |
1687 | 1697 | ||
1688 | void ip_vs_sync_cleanup(void) | 1698 | void ip_vs_sync_cleanup(void) |
1689 | { | 1699 | { |
1690 | unregister_pernet_subsys(&ipvs_sync_ops); | ||
1691 | } | 1700 | } |
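The socket changes above all follow one pattern: create the kernel socket without pinning a namespace, move it with sk_change_net(), and tear it down with sk_release_kernel() instead of sock_release() so the netns can still be dismantled. A condensed kernel-context fragment (not standalone code; the make_ns_sock() helper name is invented, the calls simply mirror the hunks):

    static struct socket *make_ns_sock(struct net *net)
    {
            struct socket *sock;
            int err;

            err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
            if (err < 0)
                    return ERR_PTR(err);
            sk_change_net(sock->sk, net);   /* re-home into the target namespace */
            return sock;
    }

    /* on teardown: sk_release_kernel(sock->sk), not sock_release(sock) */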
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 30bf8a167fc8..482e90c61850 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -1334,6 +1334,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, | |||
1334 | struct nf_conn *ct; | 1334 | struct nf_conn *ct; |
1335 | int err = -EINVAL; | 1335 | int err = -EINVAL; |
1336 | struct nf_conntrack_helper *helper; | 1336 | struct nf_conntrack_helper *helper; |
1337 | struct nf_conn_tstamp *tstamp; | ||
1337 | 1338 | ||
1338 | ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); | 1339 | ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); |
1339 | if (IS_ERR(ct)) | 1340 | if (IS_ERR(ct)) |
@@ -1451,6 +1452,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, | |||
1451 | __set_bit(IPS_EXPECTED_BIT, &ct->status); | 1452 | __set_bit(IPS_EXPECTED_BIT, &ct->status); |
1452 | ct->master = master_ct; | 1453 | ct->master = master_ct; |
1453 | } | 1454 | } |
1455 | tstamp = nf_conn_tstamp_find(ct); | ||
1456 | if (tstamp) | ||
1457 | tstamp->start = ktime_to_ns(ktime_get_real()); | ||
1454 | 1458 | ||
1455 | add_timer(&ct->timeout); | 1459 | add_timer(&ct->timeout); |
1456 | nf_conntrack_hash_insert(ct); | 1460 | nf_conntrack_hash_insert(ct); |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index a9adf4c6b299..8a025a585d2f 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -455,6 +455,7 @@ void xt_compat_flush_offsets(u_int8_t af) | |||
455 | vfree(xt[af].compat_tab); | 455 | vfree(xt[af].compat_tab); |
456 | xt[af].compat_tab = NULL; | 456 | xt[af].compat_tab = NULL; |
457 | xt[af].number = 0; | 457 | xt[af].number = 0; |
458 | xt[af].cur = 0; | ||
458 | } | 459 | } |
459 | } | 460 | } |
460 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); | 461 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); |
@@ -473,8 +474,7 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset) | |||
473 | else | 474 | else |
474 | return mid ? tmp[mid - 1].delta : 0; | 475 | return mid ? tmp[mid - 1].delta : 0; |
475 | } | 476 | } |
476 | WARN_ON_ONCE(1); | 477 | return left ? tmp[left - 1].delta : 0; |
477 | return 0; | ||
478 | } | 478 | } |
479 | EXPORT_SYMBOL_GPL(xt_compat_calc_jump); | 479 | EXPORT_SYMBOL_GPL(xt_compat_calc_jump); |
480 | 480 | ||
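The xt_compat_calc_jump() change above stops warning when an offset has no exact table entry and instead returns the delta of the closest preceding entry. A standalone sketch of that floor-style binary search over a made-up (offset, delta) table:

    #include <stdio.h>

    struct compat_delta {
            unsigned int offset;
            int delta;
    };

    static int calc_jump(const struct compat_delta *tab, int n, unsigned int offset)
    {
            int mid, left = 0, right = n - 1;

            while (left <= right) {
                    mid = (left + right) >> 1;
                    if (offset > tab[mid].offset)
                            left = mid + 1;
                    else if (offset < tab[mid].offset)
                            right = mid - 1;
                    else
                            return mid ? tab[mid - 1].delta : 0;
            }
            /* no exact match: fall back to the closest preceding entry */
            return left ? tab[left - 1].delta : 0;
    }

    int main(void)
    {
            const struct compat_delta tab[] = { { 100, 8 }, { 200, 16 }, { 300, 24 } };

            printf("%d %d %d\n",
                   calc_jump(tab, 3, 200),  /* exact hit        -> 8  */
                   calc_jump(tab, 3, 250),  /* between entries  -> 16 */
                   calc_jump(tab, 3, 50));  /* before the table -> 0  */
            return 0;
    }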
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c index 0a229191e55b..ae8271652efa 100644 --- a/net/netfilter/xt_DSCP.c +++ b/net/netfilter/xt_DSCP.c | |||
@@ -99,7 +99,7 @@ tos_tg6(struct sk_buff *skb, const struct xt_action_param *par) | |||
99 | u_int8_t orig, nv; | 99 | u_int8_t orig, nv; |
100 | 100 | ||
101 | orig = ipv6_get_dsfield(iph); | 101 | orig = ipv6_get_dsfield(iph); |
102 | nv = (orig & info->tos_mask) ^ info->tos_value; | 102 | nv = (orig & ~info->tos_mask) ^ info->tos_value; |
103 | 103 | ||
104 | if (orig != nv) { | 104 | if (orig != nv) { |
105 | if (!skb_make_writable(skb, sizeof(struct iphdr))) | 105 | if (!skb_make_writable(skb, sizeof(struct iphdr))) |
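The one-character fix above (info->tos_mask becomes ~info->tos_mask) makes the IPv6 path compute the new traffic-class byte the same way the IPv4 path already does: the bits covered by the mask are zeroed out before the configured value is XORed in, so the bits outside the mask survive. A standalone check with made-up numbers (orig 0x2e, mask 0x03, value 0x01):

    #include <stdio.h>

    int main(void)
    {
            unsigned char orig = 0x2e, tos_mask = 0x03, tos_value = 0x01;
            unsigned char old_nv = (orig & tos_mask) ^ tos_value;   /* old formula -> 0x03, clobbers the unmasked bits */
            unsigned char new_nv = (orig & ~tos_mask) ^ tos_value;  /* fixed formula -> 0x2d, keeps them */

            printf("orig=0x%02x old=0x%02x fixed=0x%02x\n", orig, old_nv, new_nv);
            return 0;
    }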
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 481a86fdc409..61805d7b38aa 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
@@ -272,11 +272,6 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par) | |||
272 | { | 272 | { |
273 | int ret; | 273 | int ret; |
274 | 274 | ||
275 | if (strcmp(par->table, "raw") == 0) { | ||
276 | pr_info("state is undetermined at the time of raw table\n"); | ||
277 | return -EINVAL; | ||
278 | } | ||
279 | |||
280 | ret = nf_ct_l3proto_try_module_get(par->family); | 275 | ret = nf_ct_l3proto_try_module_get(par->family); |
281 | if (ret < 0) | 276 | if (ret < 0) |
282 | pr_info("cannot load conntrack support for proto=%u\n", | 277 | pr_info("cannot load conntrack support for proto=%u\n", |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 15792d8b6272..b4d745ea8ee1 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1406,6 +1406,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1406 | struct net *net = xp_net(policy); | 1406 | struct net *net = xp_net(policy); |
1407 | unsigned long now = jiffies; | 1407 | unsigned long now = jiffies; |
1408 | struct net_device *dev; | 1408 | struct net_device *dev; |
1409 | struct xfrm_mode *inner_mode; | ||
1409 | struct dst_entry *dst_prev = NULL; | 1410 | struct dst_entry *dst_prev = NULL; |
1410 | struct dst_entry *dst0 = NULL; | 1411 | struct dst_entry *dst0 = NULL; |
1411 | int i = 0; | 1412 | int i = 0; |
@@ -1436,6 +1437,17 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1436 | goto put_states; | 1437 | goto put_states; |
1437 | } | 1438 | } |
1438 | 1439 | ||
1440 | if (xfrm[i]->sel.family == AF_UNSPEC) { | ||
1441 | inner_mode = xfrm_ip2inner_mode(xfrm[i], | ||
1442 | xfrm_af2proto(family)); | ||
1443 | if (!inner_mode) { | ||
1444 | err = -EAFNOSUPPORT; | ||
1445 | dst_release(dst); | ||
1446 | goto put_states; | ||
1447 | } | ||
1448 | } else | ||
1449 | inner_mode = xfrm[i]->inner_mode; | ||
1450 | |||
1439 | if (!dst_prev) | 1451 | if (!dst_prev) |
1440 | dst0 = dst1; | 1452 | dst0 = dst1; |
1441 | else { | 1453 | else { |
@@ -1464,7 +1476,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1464 | dst1->lastuse = now; | 1476 | dst1->lastuse = now; |
1465 | 1477 | ||
1466 | dst1->input = dst_discard; | 1478 | dst1->input = dst_discard; |
1467 | dst1->output = xfrm[i]->outer_mode->afinfo->output; | 1479 | dst1->output = inner_mode->afinfo->output; |
1468 | 1480 | ||
1469 | dst1->next = dst_prev; | 1481 | dst1->next = dst_prev; |
1470 | dst_prev = dst1; | 1482 | dst_prev = dst1; |
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index e8a781422feb..47f1b8638df9 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -535,6 +535,9 @@ int xfrm_init_replay(struct xfrm_state *x) | |||
535 | replay_esn->bmp_len * sizeof(__u32) * 8) | 535 | replay_esn->bmp_len * sizeof(__u32) * 8) |
536 | return -EINVAL; | 536 | return -EINVAL; |
537 | 537 | ||
538 | if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0) | ||
539 | return -EINVAL; | ||
540 | |||
538 | if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn) | 541 | if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn) |
539 | x->repl = &xfrm_replay_esn; | 542 | x->repl = &xfrm_replay_esn; |
540 | else | 543 | else |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index d5f925abe4d2..6165622c3e29 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -244,14 +244,19 @@ endif | |||
244 | 244 | ||
245 | ifdef CONFIG_FTRACE_MCOUNT_RECORD | 245 | ifdef CONFIG_FTRACE_MCOUNT_RECORD |
246 | ifdef BUILD_C_RECORDMCOUNT | 246 | ifdef BUILD_C_RECORDMCOUNT |
247 | ifeq ("$(origin RECORDMCOUNT_WARN)", "command line") | ||
248 | RECORDMCOUNT_FLAGS = -w | ||
249 | endif | ||
247 | # Due to recursion, we must skip empty.o. | 250 | # Due to recursion, we must skip empty.o. |
248 | # The empty.o file is created in the make process in order to determine | 251 | # The empty.o file is created in the make process in order to determine |
249 | # the target endianness and word size. It is made before all other C | 252 | # the target endianness and word size. It is made before all other C |
250 | # files, including recordmcount. | 253 | # files, including recordmcount. |
251 | sub_cmd_record_mcount = \ | 254 | sub_cmd_record_mcount = \ |
252 | if [ $(@) != "scripts/mod/empty.o" ]; then \ | 255 | if [ $(@) != "scripts/mod/empty.o" ]; then \ |
253 | $(objtree)/scripts/recordmcount "$(@)"; \ | 256 | $(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) "$(@)"; \ |
254 | fi; | 257 | fi; |
258 | recordmcount_source := $(srctree)/scripts/recordmcount.c \ | ||
259 | $(srctree)/scripts/recordmcount.h | ||
255 | else | 260 | else |
256 | sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ | 261 | sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ |
257 | "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \ | 262 | "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \ |
@@ -259,6 +264,7 @@ sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH | |||
259 | "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \ | 264 | "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \ |
260 | "$(LD)" "$(NM)" "$(RM)" "$(MV)" \ | 265 | "$(LD)" "$(NM)" "$(RM)" "$(MV)" \ |
261 | "$(if $(part-of-module),1,0)" "$(@)"; | 266 | "$(if $(part-of-module),1,0)" "$(@)"; |
267 | recordmcount_source := $(srctree)/scripts/recordmcount.pl | ||
262 | endif | 268 | endif |
263 | cmd_record_mcount = \ | 269 | cmd_record_mcount = \ |
264 | if [ "$(findstring -pg,$(_c_flags))" = "-pg" ]; then \ | 270 | if [ "$(findstring -pg,$(_c_flags))" = "-pg" ]; then \ |
@@ -279,13 +285,13 @@ define rule_cc_o_c | |||
279 | endef | 285 | endef |
280 | 286 | ||
281 | # Built-in and composite module parts | 287 | # Built-in and composite module parts |
282 | $(obj)/%.o: $(src)/%.c FORCE | 288 | $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE |
283 | $(call cmd,force_checksrc) | 289 | $(call cmd,force_checksrc) |
284 | $(call if_changed_rule,cc_o_c) | 290 | $(call if_changed_rule,cc_o_c) |
285 | 291 | ||
286 | # Single-part modules are special since we need to mark them in $(MODVERDIR) | 292 | # Single-part modules are special since we need to mark them in $(MODVERDIR) |
287 | 293 | ||
288 | $(single-used-m): $(obj)/%.o: $(src)/%.c FORCE | 294 | $(single-used-m): $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE |
289 | $(call cmd,force_checksrc) | 295 | $(call cmd,force_checksrc) |
290 | $(call if_changed_rule,cc_o_c) | 296 | $(call if_changed_rule,cc_o_c) |
291 | @{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod) | 297 | @{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod) |
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index cd104afcc5f2..413c53693e62 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c | |||
@@ -420,11 +420,10 @@ static int parse_elf(struct elf_info *info, const char *filename) | |||
420 | return 0; | 420 | return 0; |
421 | } | 421 | } |
422 | 422 | ||
423 | if (hdr->e_shnum == 0) { | 423 | if (hdr->e_shnum == SHN_UNDEF) { |
424 | /* | 424 | /* |
425 | * There are more than 64k sections, | 425 | * There are more than 64k sections, |
426 | * read count from .sh_size. | 426 | * read count from .sh_size. |
427 | * note: it doesn't need shndx2secindex() | ||
428 | */ | 427 | */ |
429 | info->num_sections = TO_NATIVE(sechdrs[0].sh_size); | 428 | info->num_sections = TO_NATIVE(sechdrs[0].sh_size); |
430 | } | 429 | } |
@@ -432,8 +431,7 @@ static int parse_elf(struct elf_info *info, const char *filename) | |||
432 | info->num_sections = hdr->e_shnum; | 431 | info->num_sections = hdr->e_shnum; |
433 | } | 432 | } |
434 | if (hdr->e_shstrndx == SHN_XINDEX) { | 433 | if (hdr->e_shstrndx == SHN_XINDEX) { |
435 | info->secindex_strings = | 434 | info->secindex_strings = TO_NATIVE(sechdrs[0].sh_link); |
436 | shndx2secindex(TO_NATIVE(sechdrs[0].sh_link)); | ||
437 | } | 435 | } |
438 | else { | 436 | else { |
439 | info->secindex_strings = hdr->e_shstrndx; | 437 | info->secindex_strings = hdr->e_shstrndx; |
@@ -489,7 +487,7 @@ static int parse_elf(struct elf_info *info, const char *filename) | |||
489 | sechdrs[i].sh_offset; | 487 | sechdrs[i].sh_offset; |
490 | info->symtab_stop = (void *)hdr + | 488 | info->symtab_stop = (void *)hdr + |
491 | sechdrs[i].sh_offset + sechdrs[i].sh_size; | 489 | sechdrs[i].sh_offset + sechdrs[i].sh_size; |
492 | sh_link_idx = shndx2secindex(sechdrs[i].sh_link); | 490 | sh_link_idx = sechdrs[i].sh_link; |
493 | info->strtab = (void *)hdr + | 491 | info->strtab = (void *)hdr + |
494 | sechdrs[sh_link_idx].sh_offset; | 492 | sechdrs[sh_link_idx].sh_offset; |
495 | } | 493 | } |
@@ -516,11 +514,9 @@ static int parse_elf(struct elf_info *info, const char *filename) | |||
516 | 514 | ||
517 | if (symtab_shndx_idx != ~0U) { | 515 | if (symtab_shndx_idx != ~0U) { |
518 | Elf32_Word *p; | 516 | Elf32_Word *p; |
519 | if (symtab_idx != | 517 | if (symtab_idx != sechdrs[symtab_shndx_idx].sh_link) |
520 | shndx2secindex(sechdrs[symtab_shndx_idx].sh_link)) | ||
521 | fatal("%s: SYMTAB_SHNDX has bad sh_link: %u!=%u\n", | 518 | fatal("%s: SYMTAB_SHNDX has bad sh_link: %u!=%u\n", |
522 | filename, | 519 | filename, sechdrs[symtab_shndx_idx].sh_link, |
523 | shndx2secindex(sechdrs[symtab_shndx_idx].sh_link), | ||
524 | symtab_idx); | 520 | symtab_idx); |
525 | /* Fix endianness */ | 521 | /* Fix endianness */ |
526 | for (p = info->symtab_shndx_start; p < info->symtab_shndx_stop; | 522 | for (p = info->symtab_shndx_start; p < info->symtab_shndx_stop; |
@@ -1446,7 +1442,7 @@ static unsigned int *reloc_location(struct elf_info *elf, | |||
1446 | Elf_Shdr *sechdr, Elf_Rela *r) | 1442 | Elf_Shdr *sechdr, Elf_Rela *r) |
1447 | { | 1443 | { |
1448 | Elf_Shdr *sechdrs = elf->sechdrs; | 1444 | Elf_Shdr *sechdrs = elf->sechdrs; |
1449 | int section = shndx2secindex(sechdr->sh_info); | 1445 | int section = sechdr->sh_info; |
1450 | 1446 | ||
1451 | return (void *)elf->hdr + sechdrs[section].sh_offset + | 1447 | return (void *)elf->hdr + sechdrs[section].sh_offset + |
1452 | r->r_offset; | 1448 | r->r_offset; |
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h index 0388cfccac8d..2031119080dc 100644 --- a/scripts/mod/modpost.h +++ b/scripts/mod/modpost.h | |||
@@ -145,33 +145,22 @@ static inline int is_shndx_special(unsigned int i) | |||
145 | return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE; | 145 | return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE; |
146 | } | 146 | } |
147 | 147 | ||
148 | /* shndx is in [0..SHN_LORESERVE) U (SHN_HIRESERVE, 0xfffffff], thus: | 148 | /* |
149 | * shndx == 0 <=> sechdrs[0] | 149 | * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of |
150 | * ...... | 150 | * the way to -256..-1, to avoid conflicting with real section |
151 | * shndx == SHN_LORESERVE-1 <=> sechdrs[SHN_LORESERVE-1] | 151 | * indices. |
152 | * shndx == SHN_HIRESERVE+1 <=> sechdrs[SHN_LORESERVE] | ||
153 | * shndx == SHN_HIRESERVE+2 <=> sechdrs[SHN_LORESERVE+1] | ||
154 | * ...... | ||
155 | * fyi: sym->st_shndx is uint16, SHN_LORESERVE = ff00, SHN_HIRESERVE = ffff, | ||
156 | * so basically we map 0000..feff -> 0000..feff | ||
157 | * ff00..ffff -> (you are a bad boy, dont do it) | ||
158 | * 10000..xxxx -> ff00..(xxxx-0x100) | ||
159 | */ | 152 | */ |
160 | static inline unsigned int shndx2secindex(unsigned int i) | 153 | #define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1)) |
161 | { | ||
162 | if (i <= SHN_HIRESERVE) | ||
163 | return i; | ||
164 | return i - (SHN_HIRESERVE + 1 - SHN_LORESERVE); | ||
165 | } | ||
166 | 154 | ||
167 | /* Accessor for sym->st_shndx, hides ugliness of "64k sections" */ | 155 | /* Accessor for sym->st_shndx, hides ugliness of "64k sections" */ |
168 | static inline unsigned int get_secindex(const struct elf_info *info, | 156 | static inline unsigned int get_secindex(const struct elf_info *info, |
169 | const Elf_Sym *sym) | 157 | const Elf_Sym *sym) |
170 | { | 158 | { |
159 | if (is_shndx_special(sym->st_shndx)) | ||
160 | return SPECIAL(sym->st_shndx); | ||
171 | if (sym->st_shndx != SHN_XINDEX) | 161 | if (sym->st_shndx != SHN_XINDEX) |
172 | return sym->st_shndx; | 162 | return sym->st_shndx; |
173 | return shndx2secindex(info->symtab_shndx_start[sym - | 163 | return info->symtab_shndx_start[sym - info->symtab_start]; |
174 | info->symtab_start]); | ||
175 | } | 164 | } |
176 | 165 | ||
177 | /* file2alias.c */ | 166 | /* file2alias.c */ |
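The SPECIAL() macro that replaces shndx2secindex() above parks the reserved ELF section indices (SHN_LORESERVE..SHN_HIRESERVE, except SHN_XINDEX) at -256..-1 so they can never collide with a real section number. A small standalone demonstration using the <elf.h> constants (the sample indices are arbitrary):

    #include <elf.h>
    #include <stdio.h>

    #define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))

    static int is_shndx_special(unsigned int i)
    {
            return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
    }

    int main(void)
    {
            unsigned int samples[] = { 1, 42, SHN_ABS, SHN_COMMON, SHN_LORESERVE };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    unsigned int s = samples[i];
                    if (is_shndx_special(s))
                            printf("0x%x -> special %d\n", s, (int)SPECIAL(s));
                    else
                            printf("0x%x -> ordinary %u\n", s, s);
            }
            return 0;
    }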
diff --git a/scripts/module-common.lds b/scripts/module-common.lds index 47a1f9ae0ede..0865b3e752be 100644 --- a/scripts/module-common.lds +++ b/scripts/module-common.lds | |||
@@ -5,4 +5,15 @@ | |||
5 | */ | 5 | */ |
6 | SECTIONS { | 6 | SECTIONS { |
7 | /DISCARD/ : { *(.discard) } | 7 | /DISCARD/ : { *(.discard) } |
8 | |||
9 | __ksymtab : { *(SORT(___ksymtab+*)) } | ||
10 | __ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) } | ||
11 | __ksymtab_unused : { *(SORT(___ksymtab_unused+*)) } | ||
12 | __ksymtab_unused_gpl : { *(SORT(___ksymtab_unused_gpl+*)) } | ||
13 | __ksymtab_gpl_future : { *(SORT(___ksymtab_gpl_future+*)) } | ||
14 | __kcrctab : { *(SORT(___kcrctab+*)) } | ||
15 | __kcrctab_gpl : { *(SORT(___kcrctab_gpl+*)) } | ||
16 | __kcrctab_unused : { *(SORT(___kcrctab_unused+*)) } | ||
17 | __kcrctab_unused_gpl : { *(SORT(___kcrctab_unused_gpl+*)) } | ||
18 | __kcrctab_gpl_future : { *(SORT(___kcrctab_gpl_future+*)) } | ||
8 | } | 19 | } |
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index f9f6f52db772..ee52cb8e17ad 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <sys/types.h> | 24 | #include <sys/types.h> |
25 | #include <sys/mman.h> | 25 | #include <sys/mman.h> |
26 | #include <sys/stat.h> | 26 | #include <sys/stat.h> |
27 | #include <getopt.h> | ||
27 | #include <elf.h> | 28 | #include <elf.h> |
28 | #include <fcntl.h> | 29 | #include <fcntl.h> |
29 | #include <setjmp.h> | 30 | #include <setjmp.h> |
@@ -39,6 +40,7 @@ static char gpfx; /* prefix for global symbol name (sometimes '_') */ | |||
39 | static struct stat sb; /* Remember .st_size, etc. */ | 40 | static struct stat sb; /* Remember .st_size, etc. */ |
40 | static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */ | 41 | static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */ |
41 | static const char *altmcount; /* alternate mcount symbol name */ | 42 | static const char *altmcount; /* alternate mcount symbol name */ |
43 | static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */ | ||
42 | 44 | ||
43 | /* setjmp() return values */ | 45 | /* setjmp() return values */ |
44 | enum { | 46 | enum { |
@@ -78,7 +80,7 @@ static off_t | |||
78 | ulseek(int const fd, off_t const offset, int const whence) | 80 | ulseek(int const fd, off_t const offset, int const whence) |
79 | { | 81 | { |
80 | off_t const w = lseek(fd, offset, whence); | 82 | off_t const w = lseek(fd, offset, whence); |
81 | if ((off_t)-1 == w) { | 83 | if (w == (off_t)-1) { |
82 | perror("lseek"); | 84 | perror("lseek"); |
83 | fail_file(); | 85 | fail_file(); |
84 | } | 86 | } |
@@ -111,13 +113,41 @@ static void * | |||
111 | umalloc(size_t size) | 113 | umalloc(size_t size) |
112 | { | 114 | { |
113 | void *const addr = malloc(size); | 115 | void *const addr = malloc(size); |
114 | if (0 == addr) { | 116 | if (addr == 0) { |
115 | fprintf(stderr, "malloc failed: %zu bytes\n", size); | 117 | fprintf(stderr, "malloc failed: %zu bytes\n", size); |
116 | fail_file(); | 118 | fail_file(); |
117 | } | 119 | } |
118 | return addr; | 120 | return addr; |
119 | } | 121 | } |
120 | 122 | ||
123 | static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 }; | ||
124 | static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 }; | ||
125 | static unsigned char *ideal_nop; | ||
126 | |||
127 | static char rel_type_nop; | ||
128 | |||
129 | static int (*make_nop)(void *map, size_t const offset); | ||
130 | |||
131 | static int make_nop_x86(void *map, size_t const offset) | ||
132 | { | ||
133 | uint32_t *ptr; | ||
134 | unsigned char *op; | ||
135 | |||
136 | /* Confirm we have 0xe8 0x0 0x0 0x0 0x0 */ | ||
137 | ptr = map + offset; | ||
138 | if (*ptr != 0) | ||
139 | return -1; | ||
140 | |||
141 | op = map + offset - 1; | ||
142 | if (*op != 0xe8) | ||
143 | return -1; | ||
144 | |||
145 | /* convert to nop */ | ||
146 | ulseek(fd_map, offset - 1, SEEK_SET); | ||
147 | uwrite(fd_map, ideal_nop, 5); | ||
148 | return 0; | ||
149 | } | ||
150 | |||
121 | /* | 151 | /* |
122 | * Get the whole file as a programming convenience in order to avoid | 152 | * Get the whole file as a programming convenience in order to avoid |
123 | * malloc+lseek+read+free of many pieces. If successful, then mmap | 153 | * malloc+lseek+read+free of many pieces. If successful, then mmap |
@@ -136,7 +166,7 @@ static void *mmap_file(char const *fname) | |||
136 | void *addr; | 166 | void *addr; |
137 | 167 | ||
138 | fd_map = open(fname, O_RDWR); | 168 | fd_map = open(fname, O_RDWR); |
139 | if (0 > fd_map || 0 > fstat(fd_map, &sb)) { | 169 | if (fd_map < 0 || fstat(fd_map, &sb) < 0) { |
140 | perror(fname); | 170 | perror(fname); |
141 | fail_file(); | 171 | fail_file(); |
142 | } | 172 | } |
@@ -147,7 +177,7 @@ static void *mmap_file(char const *fname) | |||
147 | addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, | 177 | addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, |
148 | fd_map, 0); | 178 | fd_map, 0); |
149 | mmap_failed = 0; | 179 | mmap_failed = 0; |
150 | if (MAP_FAILED == addr) { | 180 | if (addr == MAP_FAILED) { |
151 | mmap_failed = 1; | 181 | mmap_failed = 1; |
152 | addr = umalloc(sb.st_size); | 182 | addr = umalloc(sb.st_size); |
153 | uread(fd_map, addr, sb.st_size); | 183 | uread(fd_map, addr, sb.st_size); |
@@ -206,12 +236,13 @@ static uint32_t (*w2)(uint16_t); | |||
206 | static int | 236 | static int |
207 | is_mcounted_section_name(char const *const txtname) | 237 | is_mcounted_section_name(char const *const txtname) |
208 | { | 238 | { |
209 | return 0 == strcmp(".text", txtname) || | 239 | return strcmp(".text", txtname) == 0 || |
210 | 0 == strcmp(".ref.text", txtname) || | 240 | strcmp(".ref.text", txtname) == 0 || |
211 | 0 == strcmp(".sched.text", txtname) || | 241 | strcmp(".sched.text", txtname) == 0 || |
212 | 0 == strcmp(".spinlock.text", txtname) || | 242 | strcmp(".spinlock.text", txtname) == 0 || |
213 | 0 == strcmp(".irqentry.text", txtname) || | 243 | strcmp(".irqentry.text", txtname) == 0 || |
214 | 0 == strcmp(".text.unlikely", txtname); | 244 | strcmp(".kprobes.text", txtname) == 0 || |
245 | strcmp(".text.unlikely", txtname) == 0; | ||
215 | } | 246 | } |
216 | 247 | ||
217 | /* 32 bit and 64 bit are very similar */ | 248 | /* 32 bit and 64 bit are very similar */ |
@@ -264,43 +295,48 @@ do_file(char const *const fname) | |||
264 | w8 = w8nat; | 295 | w8 = w8nat; |
265 | switch (ehdr->e_ident[EI_DATA]) { | 296 | switch (ehdr->e_ident[EI_DATA]) { |
266 | static unsigned int const endian = 1; | 297 | static unsigned int const endian = 1; |
267 | default: { | 298 | default: |
268 | fprintf(stderr, "unrecognized ELF data encoding %d: %s\n", | 299 | fprintf(stderr, "unrecognized ELF data encoding %d: %s\n", |
269 | ehdr->e_ident[EI_DATA], fname); | 300 | ehdr->e_ident[EI_DATA], fname); |
270 | fail_file(); | 301 | fail_file(); |
271 | } break; | 302 | break; |
272 | case ELFDATA2LSB: { | 303 | case ELFDATA2LSB: |
273 | if (1 != *(unsigned char const *)&endian) { | 304 | if (*(unsigned char const *)&endian != 1) { |
274 | /* main() is big endian, file.o is little endian. */ | 305 | /* main() is big endian, file.o is little endian. */ |
275 | w = w4rev; | 306 | w = w4rev; |
276 | w2 = w2rev; | 307 | w2 = w2rev; |
277 | w8 = w8rev; | 308 | w8 = w8rev; |
278 | } | 309 | } |
279 | } break; | 310 | break; |
280 | case ELFDATA2MSB: { | 311 | case ELFDATA2MSB: |
281 | if (0 != *(unsigned char const *)&endian) { | 312 | if (*(unsigned char const *)&endian != 0) { |
282 | /* main() is little endian, file.o is big endian. */ | 313 | /* main() is little endian, file.o is big endian. */ |
283 | w = w4rev; | 314 | w = w4rev; |
284 | w2 = w2rev; | 315 | w2 = w2rev; |
285 | w8 = w8rev; | 316 | w8 = w8rev; |
286 | } | 317 | } |
287 | } break; | 318 | break; |
288 | } /* end switch */ | 319 | } /* end switch */ |
289 | if (0 != memcmp(ELFMAG, ehdr->e_ident, SELFMAG) | 320 | if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 |
290 | || ET_REL != w2(ehdr->e_type) | 321 | || w2(ehdr->e_type) != ET_REL |
291 | || EV_CURRENT != ehdr->e_ident[EI_VERSION]) { | 322 | || ehdr->e_ident[EI_VERSION] != EV_CURRENT) { |
292 | fprintf(stderr, "unrecognized ET_REL file %s\n", fname); | 323 | fprintf(stderr, "unrecognized ET_REL file %s\n", fname); |
293 | fail_file(); | 324 | fail_file(); |
294 | } | 325 | } |
295 | 326 | ||
296 | gpfx = 0; | 327 | gpfx = 0; |
297 | switch (w2(ehdr->e_machine)) { | 328 | switch (w2(ehdr->e_machine)) { |
298 | default: { | 329 | default: |
299 | fprintf(stderr, "unrecognized e_machine %d %s\n", | 330 | fprintf(stderr, "unrecognized e_machine %d %s\n", |
300 | w2(ehdr->e_machine), fname); | 331 | w2(ehdr->e_machine), fname); |
301 | fail_file(); | 332 | fail_file(); |
302 | } break; | 333 | break; |
303 | case EM_386: reltype = R_386_32; break; | 334 | case EM_386: |
335 | reltype = R_386_32; | ||
336 | make_nop = make_nop_x86; | ||
337 | ideal_nop = ideal_nop5_x86_32; | ||
338 | mcount_adjust_32 = -1; | ||
339 | break; | ||
304 | case EM_ARM: reltype = R_ARM_ABS32; | 340 | case EM_ARM: reltype = R_ARM_ABS32; |
305 | altmcount = "__gnu_mcount_nc"; | 341 | altmcount = "__gnu_mcount_nc"; |
306 | break; | 342 | break; |
@@ -311,67 +347,91 @@ do_file(char const *const fname) | |||
311 | case EM_S390: /* reltype: e_class */ gpfx = '_'; break; | 347 | case EM_S390: /* reltype: e_class */ gpfx = '_'; break; |
312 | case EM_SH: reltype = R_SH_DIR32; break; | 348 | case EM_SH: reltype = R_SH_DIR32; break; |
313 | case EM_SPARCV9: reltype = R_SPARC_64; gpfx = '_'; break; | 349 | case EM_SPARCV9: reltype = R_SPARC_64; gpfx = '_'; break; |
314 | case EM_X86_64: reltype = R_X86_64_64; break; | 350 | case EM_X86_64: |
351 | make_nop = make_nop_x86; | ||
352 | ideal_nop = ideal_nop5_x86_64; | ||
353 | reltype = R_X86_64_64; | ||
354 | mcount_adjust_64 = -1; | ||
355 | break; | ||
315 | } /* end switch */ | 356 | } /* end switch */ |
316 | 357 | ||
317 | switch (ehdr->e_ident[EI_CLASS]) { | 358 | switch (ehdr->e_ident[EI_CLASS]) { |
318 | default: { | 359 | default: |
319 | fprintf(stderr, "unrecognized ELF class %d %s\n", | 360 | fprintf(stderr, "unrecognized ELF class %d %s\n", |
320 | ehdr->e_ident[EI_CLASS], fname); | 361 | ehdr->e_ident[EI_CLASS], fname); |
321 | fail_file(); | 362 | fail_file(); |
322 | } break; | 363 | break; |
323 | case ELFCLASS32: { | 364 | case ELFCLASS32: |
324 | if (sizeof(Elf32_Ehdr) != w2(ehdr->e_ehsize) | 365 | if (w2(ehdr->e_ehsize) != sizeof(Elf32_Ehdr) |
325 | || sizeof(Elf32_Shdr) != w2(ehdr->e_shentsize)) { | 366 | || w2(ehdr->e_shentsize) != sizeof(Elf32_Shdr)) { |
326 | fprintf(stderr, | 367 | fprintf(stderr, |
327 | "unrecognized ET_REL file: %s\n", fname); | 368 | "unrecognized ET_REL file: %s\n", fname); |
328 | fail_file(); | 369 | fail_file(); |
329 | } | 370 | } |
330 | if (EM_S390 == w2(ehdr->e_machine)) | 371 | if (w2(ehdr->e_machine) == EM_S390) { |
331 | reltype = R_390_32; | 372 | reltype = R_390_32; |
332 | if (EM_MIPS == w2(ehdr->e_machine)) { | 373 | mcount_adjust_32 = -4; |
374 | } | ||
375 | if (w2(ehdr->e_machine) == EM_MIPS) { | ||
333 | reltype = R_MIPS_32; | 376 | reltype = R_MIPS_32; |
334 | is_fake_mcount32 = MIPS32_is_fake_mcount; | 377 | is_fake_mcount32 = MIPS32_is_fake_mcount; |
335 | } | 378 | } |
336 | do32(ehdr, fname, reltype); | 379 | do32(ehdr, fname, reltype); |
337 | } break; | 380 | break; |
338 | case ELFCLASS64: { | 381 | case ELFCLASS64: { |
339 | Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr; | 382 | Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr; |
340 | if (sizeof(Elf64_Ehdr) != w2(ghdr->e_ehsize) | 383 | if (w2(ghdr->e_ehsize) != sizeof(Elf64_Ehdr) |
341 | || sizeof(Elf64_Shdr) != w2(ghdr->e_shentsize)) { | 384 | || w2(ghdr->e_shentsize) != sizeof(Elf64_Shdr)) { |
342 | fprintf(stderr, | 385 | fprintf(stderr, |
343 | "unrecognized ET_REL file: %s\n", fname); | 386 | "unrecognized ET_REL file: %s\n", fname); |
344 | fail_file(); | 387 | fail_file(); |
345 | } | 388 | } |
346 | if (EM_S390 == w2(ghdr->e_machine)) | 389 | if (w2(ghdr->e_machine) == EM_S390) { |
347 | reltype = R_390_64; | 390 | reltype = R_390_64; |
348 | if (EM_MIPS == w2(ghdr->e_machine)) { | 391 | mcount_adjust_64 = -8; |
392 | } | ||
393 | if (w2(ghdr->e_machine) == EM_MIPS) { | ||
349 | reltype = R_MIPS_64; | 394 | reltype = R_MIPS_64; |
350 | Elf64_r_sym = MIPS64_r_sym; | 395 | Elf64_r_sym = MIPS64_r_sym; |
351 | Elf64_r_info = MIPS64_r_info; | 396 | Elf64_r_info = MIPS64_r_info; |
352 | is_fake_mcount64 = MIPS64_is_fake_mcount; | 397 | is_fake_mcount64 = MIPS64_is_fake_mcount; |
353 | } | 398 | } |
354 | do64(ghdr, fname, reltype); | 399 | do64(ghdr, fname, reltype); |
355 | } break; | 400 | break; |
401 | } | ||
356 | } /* end switch */ | 402 | } /* end switch */ |
357 | 403 | ||
358 | cleanup(); | 404 | cleanup(); |
359 | } | 405 | } |
360 | 406 | ||
361 | int | 407 | int |
362 | main(int argc, char const *argv[]) | 408 | main(int argc, char *argv[]) |
363 | { | 409 | { |
364 | const char ftrace[] = "/ftrace.o"; | 410 | const char ftrace[] = "/ftrace.o"; |
365 | int ftrace_size = sizeof(ftrace) - 1; | 411 | int ftrace_size = sizeof(ftrace) - 1; |
366 | int n_error = 0; /* gcc-4.3.0 false positive complaint */ | 412 | int n_error = 0; /* gcc-4.3.0 false positive complaint */ |
413 | int c; | ||
414 | int i; | ||
415 | |||
416 | while ((c = getopt(argc, argv, "w")) >= 0) { | ||
417 | switch (c) { | ||
418 | case 'w': | ||
419 | warn_on_notrace_sect = 1; | ||
420 | break; | ||
421 | default: | ||
422 | fprintf(stderr, "usage: recordmcount [-w] file.o...\n"); | ||
423 | return 0; | ||
424 | } | ||
425 | } | ||
367 | 426 | ||
368 | if (argc <= 1) { | 427 | if ((argc - optind) < 1) { |
369 | fprintf(stderr, "usage: recordmcount file.o...\n"); | 428 | fprintf(stderr, "usage: recordmcount [-w] file.o...\n"); |
370 | return 0; | 429 | return 0; |
371 | } | 430 | } |
372 | 431 | ||
373 | /* Process each file in turn, allowing deep failure. */ | 432 | /* Process each file in turn, allowing deep failure. */ |
374 | for (--argc, ++argv; 0 < argc; --argc, ++argv) { | 433 | for (i = optind; i < argc; i++) { |
434 | char *file = argv[i]; | ||
375 | int const sjval = setjmp(jmpenv); | 435 | int const sjval = setjmp(jmpenv); |
376 | int len; | 436 | int len; |
377 | 437 | ||
@@ -380,29 +440,29 @@ main(int argc, char const *argv[]) | |||
380 | * function but does not call it. Since ftrace.o should | 440 | * function but does not call it. Since ftrace.o should |
381 | * not be traced anyway, we just skip it. | 441 | * not be traced anyway, we just skip it. |
382 | */ | 442 | */ |
383 | len = strlen(argv[0]); | 443 | len = strlen(file); |
384 | if (len >= ftrace_size && | 444 | if (len >= ftrace_size && |
385 | strcmp(argv[0] + (len - ftrace_size), ftrace) == 0) | 445 | strcmp(file + (len - ftrace_size), ftrace) == 0) |
386 | continue; | 446 | continue; |
387 | 447 | ||
388 | switch (sjval) { | 448 | switch (sjval) { |
389 | default: { | 449 | default: |
390 | fprintf(stderr, "internal error: %s\n", argv[0]); | 450 | fprintf(stderr, "internal error: %s\n", file); |
391 | exit(1); | 451 | exit(1); |
392 | } break; | 452 | break; |
393 | case SJ_SETJMP: { /* normal sequence */ | 453 | case SJ_SETJMP: /* normal sequence */ |
394 | /* Avoid problems if early cleanup() */ | 454 | /* Avoid problems if early cleanup() */ |
395 | fd_map = -1; | 455 | fd_map = -1; |
396 | ehdr_curr = NULL; | 456 | ehdr_curr = NULL; |
397 | mmap_failed = 1; | 457 | mmap_failed = 1; |
398 | do_file(argv[0]); | 458 | do_file(file); |
399 | } break; | 459 | break; |
400 | case SJ_FAIL: { /* error in do_file or below */ | 460 | case SJ_FAIL: /* error in do_file or below */ |
401 | ++n_error; | 461 | ++n_error; |
402 | } break; | 462 | break; |
403 | case SJ_SUCCEED: { /* premature success */ | 463 | case SJ_SUCCEED: /* premature success */ |
404 | /* do nothing */ | 464 | /* do nothing */ |
405 | } break; | 465 | break; |
406 | } /* end switch */ | 466 | } /* end switch */ |
407 | } | 467 | } |
408 | return !!n_error; | 468 | return !!n_error; |
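
The per-file loop above leans on setjmp()/longjmp(): fail_file() (and succeed_file()) deep inside do_file() unwind straight back to the loop, so one unreadable object file only bumps n_error instead of aborting the whole run. A minimal self-contained sketch of that recovery pattern (illustrative code, not taken from recordmcount itself):

    /* Sketch: per-file error recovery with setjmp()/longjmp(). */
    #include <setjmp.h>
    #include <stdio.h>
    #include <string.h>

    enum { SJ_SETJMP = 0, SJ_FAIL, SJ_SUCCEED };  /* setjmp() itself returns 0 */
    static jmp_buf env;

    static void process(const char *name)
    {
        if (strlen(name) == 0)
            longjmp(env, SJ_FAIL);            /* analogous to fail_file() */
        printf("processed %s\n", name);
    }

    int main(int argc, char *argv[])
    {
        int n_error = 0;
        int i;

        for (i = 1; i < argc; i++) {
            switch (setjmp(env)) {
            case SJ_SETJMP:                   /* direct return: do the work */
                process(argv[i]);
                break;
            case SJ_FAIL:                     /* a helper bailed out */
                ++n_error;
                break;
            case SJ_SUCCEED:                  /* premature but harmless success */
                break;
            }
        }
        return !!n_error;
    }
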
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index baf187bee983..4be60364a405 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h | |||
@@ -22,11 +22,15 @@ | |||
22 | #undef is_fake_mcount | 22 | #undef is_fake_mcount |
23 | #undef fn_is_fake_mcount | 23 | #undef fn_is_fake_mcount |
24 | #undef MIPS_is_fake_mcount | 24 | #undef MIPS_is_fake_mcount |
25 | #undef mcount_adjust | ||
25 | #undef sift_rel_mcount | 26 | #undef sift_rel_mcount |
27 | #undef nop_mcount | ||
26 | #undef find_secsym_ndx | 28 | #undef find_secsym_ndx |
27 | #undef __has_rel_mcount | 29 | #undef __has_rel_mcount |
28 | #undef has_rel_mcount | 30 | #undef has_rel_mcount |
29 | #undef tot_relsize | 31 | #undef tot_relsize |
32 | #undef get_mcountsym | ||
33 | #undef get_sym_str_and_relp | ||
30 | #undef do_func | 34 | #undef do_func |
31 | #undef Elf_Addr | 35 | #undef Elf_Addr |
32 | #undef Elf_Ehdr | 36 | #undef Elf_Ehdr |
@@ -49,14 +53,18 @@ | |||
49 | #ifdef RECORD_MCOUNT_64 | 53 | #ifdef RECORD_MCOUNT_64 |
50 | # define append_func append64 | 54 | # define append_func append64 |
51 | # define sift_rel_mcount sift64_rel_mcount | 55 | # define sift_rel_mcount sift64_rel_mcount |
56 | # define nop_mcount nop_mcount_64 | ||
52 | # define find_secsym_ndx find64_secsym_ndx | 57 | # define find_secsym_ndx find64_secsym_ndx |
53 | # define __has_rel_mcount __has64_rel_mcount | 58 | # define __has_rel_mcount __has64_rel_mcount |
54 | # define has_rel_mcount has64_rel_mcount | 59 | # define has_rel_mcount has64_rel_mcount |
55 | # define tot_relsize tot64_relsize | 60 | # define tot_relsize tot64_relsize |
61 | # define get_sym_str_and_relp get_sym_str_and_relp_64 | ||
56 | # define do_func do64 | 62 | # define do_func do64 |
63 | # define get_mcountsym get_mcountsym_64 | ||
57 | # define is_fake_mcount is_fake_mcount64 | 64 | # define is_fake_mcount is_fake_mcount64 |
58 | # define fn_is_fake_mcount fn_is_fake_mcount64 | 65 | # define fn_is_fake_mcount fn_is_fake_mcount64 |
59 | # define MIPS_is_fake_mcount MIPS64_is_fake_mcount | 66 | # define MIPS_is_fake_mcount MIPS64_is_fake_mcount |
67 | # define mcount_adjust mcount_adjust_64 | ||
60 | # define Elf_Addr Elf64_Addr | 68 | # define Elf_Addr Elf64_Addr |
61 | # define Elf_Ehdr Elf64_Ehdr | 69 | # define Elf_Ehdr Elf64_Ehdr |
62 | # define Elf_Shdr Elf64_Shdr | 70 | # define Elf_Shdr Elf64_Shdr |
@@ -77,14 +85,18 @@ | |||
77 | #else | 85 | #else |
78 | # define append_func append32 | 86 | # define append_func append32 |
79 | # define sift_rel_mcount sift32_rel_mcount | 87 | # define sift_rel_mcount sift32_rel_mcount |
88 | # define nop_mcount nop_mcount_32 | ||
80 | # define find_secsym_ndx find32_secsym_ndx | 89 | # define find_secsym_ndx find32_secsym_ndx |
81 | # define __has_rel_mcount __has32_rel_mcount | 90 | # define __has_rel_mcount __has32_rel_mcount |
82 | # define has_rel_mcount has32_rel_mcount | 91 | # define has_rel_mcount has32_rel_mcount |
83 | # define tot_relsize tot32_relsize | 92 | # define tot_relsize tot32_relsize |
93 | # define get_sym_str_and_relp get_sym_str_and_relp_32 | ||
84 | # define do_func do32 | 94 | # define do_func do32 |
95 | # define get_mcountsym get_mcountsym_32 | ||
85 | # define is_fake_mcount is_fake_mcount32 | 96 | # define is_fake_mcount is_fake_mcount32 |
86 | # define fn_is_fake_mcount fn_is_fake_mcount32 | 97 | # define fn_is_fake_mcount fn_is_fake_mcount32 |
87 | # define MIPS_is_fake_mcount MIPS32_is_fake_mcount | 98 | # define MIPS_is_fake_mcount MIPS32_is_fake_mcount |
99 | # define mcount_adjust mcount_adjust_32 | ||
88 | # define Elf_Addr Elf32_Addr | 100 | # define Elf_Addr Elf32_Addr |
89 | # define Elf_Ehdr Elf32_Ehdr | 101 | # define Elf_Ehdr Elf32_Ehdr |
90 | # define Elf_Shdr Elf32_Shdr | 102 | # define Elf_Shdr Elf32_Shdr |
@@ -123,6 +135,8 @@ static void fn_ELF_R_INFO(Elf_Rel *const rp, unsigned sym, unsigned type) | |||
123 | } | 135 | } |
124 | static void (*Elf_r_info)(Elf_Rel *const rp, unsigned sym, unsigned type) = fn_ELF_R_INFO; | 136 | static void (*Elf_r_info)(Elf_Rel *const rp, unsigned sym, unsigned type) = fn_ELF_R_INFO; |
125 | 137 | ||
138 | static int mcount_adjust = 0; | ||
139 | |||
126 | /* | 140 | /* |
127 | * MIPS mcount long call has 2 _mcount symbols, only the position of the 1st | 141 | * MIPS mcount long call has 2 _mcount symbols, only the position of the 1st |
128 | * _mcount symbol is needed for dynamic function tracer, with it, to disable | 142 | * _mcount symbol is needed for dynamic function tracer, with it, to disable |
@@ -234,6 +248,49 @@ static void append_func(Elf_Ehdr *const ehdr, | |||
234 | uwrite(fd_map, ehdr, sizeof(*ehdr)); | 248 | uwrite(fd_map, ehdr, sizeof(*ehdr)); |
235 | } | 249 | } |
236 | 250 | ||
251 | static unsigned get_mcountsym(Elf_Sym const *const sym0, | ||
252 | Elf_Rel const *relp, | ||
253 | char const *const str0) | ||
254 | { | ||
255 | unsigned mcountsym = 0; | ||
256 | |||
257 | Elf_Sym const *const symp = | ||
258 | &sym0[Elf_r_sym(relp)]; | ||
259 | char const *symname = &str0[w(symp->st_name)]; | ||
260 | char const *mcount = gpfx == '_' ? "_mcount" : "mcount"; | ||
261 | |||
262 | if (symname[0] == '.') | ||
263 | ++symname; /* ppc64 hack */ | ||
264 | if (strcmp(mcount, symname) == 0 || | ||
265 | (altmcount && strcmp(altmcount, symname) == 0)) | ||
266 | mcountsym = Elf_r_sym(relp); | ||
267 | |||
268 | return mcountsym; | ||
269 | } | ||
270 | |||
271 | static void get_sym_str_and_relp(Elf_Shdr const *const relhdr, | ||
272 | Elf_Ehdr const *const ehdr, | ||
273 | Elf_Sym const **sym0, | ||
274 | char const **str0, | ||
275 | Elf_Rel const **relp) | ||
276 | { | ||
277 | Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) | ||
278 | + (void *)ehdr); | ||
279 | unsigned const symsec_sh_link = w(relhdr->sh_link); | ||
280 | Elf_Shdr const *const symsec = &shdr0[symsec_sh_link]; | ||
281 | Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)]; | ||
282 | Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset) | ||
283 | + (void *)ehdr); | ||
284 | |||
285 | *sym0 = (Elf_Sym const *)(_w(symsec->sh_offset) | ||
286 | + (void *)ehdr); | ||
287 | |||
288 | *str0 = (char const *)(_w(strsec->sh_offset) | ||
289 | + (void *)ehdr); | ||
290 | |||
291 | *relp = rel0; | ||
292 | } | ||
293 | |||
237 | /* | 294 | /* |
238 | * Look at the relocations in order to find the calls to mcount. | 295 | * Look at the relocations in order to find the calls to mcount. |
239 | * Accumulate the section offsets that are found, and their relocation info, | 296 | * Accumulate the section offsets that are found, and their relocation info, |
@@ -250,47 +307,27 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, | |||
250 | { | 307 | { |
251 | uint_t *const mloc0 = mlocp; | 308 | uint_t *const mloc0 = mlocp; |
252 | Elf_Rel *mrelp = *mrelpp; | 309 | Elf_Rel *mrelp = *mrelpp; |
253 | Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) | 310 | Elf_Sym const *sym0; |
254 | + (void *)ehdr); | 311 | char const *str0; |
255 | unsigned const symsec_sh_link = w(relhdr->sh_link); | 312 | Elf_Rel const *relp; |
256 | Elf_Shdr const *const symsec = &shdr0[symsec_sh_link]; | ||
257 | Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symsec->sh_offset) | ||
258 | + (void *)ehdr); | ||
259 | |||
260 | Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)]; | ||
261 | char const *const str0 = (char const *)(_w(strsec->sh_offset) | ||
262 | + (void *)ehdr); | ||
263 | |||
264 | Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset) | ||
265 | + (void *)ehdr); | ||
266 | unsigned rel_entsize = _w(relhdr->sh_entsize); | 313 | unsigned rel_entsize = _w(relhdr->sh_entsize); |
267 | unsigned const nrel = _w(relhdr->sh_size) / rel_entsize; | 314 | unsigned const nrel = _w(relhdr->sh_size) / rel_entsize; |
268 | Elf_Rel const *relp = rel0; | ||
269 | |||
270 | unsigned mcountsym = 0; | 315 | unsigned mcountsym = 0; |
271 | unsigned t; | 316 | unsigned t; |
272 | 317 | ||
318 | get_sym_str_and_relp(relhdr, ehdr, &sym0, &str0, &relp); | ||
319 | |||
273 | for (t = nrel; t; --t) { | 320 | for (t = nrel; t; --t) { |
274 | if (!mcountsym) { | 321 | if (!mcountsym) |
275 | Elf_Sym const *const symp = | 322 | mcountsym = get_mcountsym(sym0, relp, str0); |
276 | &sym0[Elf_r_sym(relp)]; | ||
277 | char const *symname = &str0[w(symp->st_name)]; | ||
278 | char const *mcount = '_' == gpfx ? "_mcount" : "mcount"; | ||
279 | |||
280 | if ('.' == symname[0]) | ||
281 | ++symname; /* ppc64 hack */ | ||
282 | if (0 == strcmp(mcount, symname) || | ||
283 | (altmcount && 0 == strcmp(altmcount, symname))) | ||
284 | mcountsym = Elf_r_sym(relp); | ||
285 | } | ||
286 | 323 | ||
287 | if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { | 324 | if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { |
288 | uint_t const addend = _w(_w(relp->r_offset) - recval); | 325 | uint_t const addend = |
289 | 326 | _w(_w(relp->r_offset) - recval + mcount_adjust); | |
290 | mrelp->r_offset = _w(offbase | 327 | mrelp->r_offset = _w(offbase |
291 | + ((void *)mlocp - (void *)mloc0)); | 328 | + ((void *)mlocp - (void *)mloc0)); |
292 | Elf_r_info(mrelp, recsym, reltype); | 329 | Elf_r_info(mrelp, recsym, reltype); |
293 | if (sizeof(Elf_Rela) == rel_entsize) { | 330 | if (rel_entsize == sizeof(Elf_Rela)) { |
294 | ((Elf_Rela *)mrelp)->r_addend = addend; | 331 | ((Elf_Rela *)mrelp)->r_addend = addend; |
295 | *mlocp++ = 0; | 332 | *mlocp++ = 0; |
296 | } else | 333 | } else |
@@ -304,6 +341,63 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, | |||
304 | return mlocp; | 341 | return mlocp; |
305 | } | 342 | } |
306 | 343 | ||
344 | /* | ||
345 | * Read the relocation table again, but this time it is called on sections | ||
346 | * that are not going to be traced. The mcount calls here will be converted | ||
347 | * into nops. | ||
348 | */ | ||
349 | static void nop_mcount(Elf_Shdr const *const relhdr, | ||
350 | Elf_Ehdr const *const ehdr, | ||
351 | const char *const txtname) | ||
352 | { | ||
353 | Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) | ||
354 | + (void *)ehdr); | ||
355 | Elf_Sym const *sym0; | ||
356 | char const *str0; | ||
357 | Elf_Rel const *relp; | ||
358 | Elf_Shdr const *const shdr = &shdr0[w(relhdr->sh_info)]; | ||
359 | unsigned rel_entsize = _w(relhdr->sh_entsize); | ||
360 | unsigned const nrel = _w(relhdr->sh_size) / rel_entsize; | ||
361 | unsigned mcountsym = 0; | ||
362 | unsigned t; | ||
363 | int once = 0; | ||
364 | |||
365 | get_sym_str_and_relp(relhdr, ehdr, &sym0, &str0, &relp); | ||
366 | |||
367 | for (t = nrel; t; --t) { | ||
368 | int ret = -1; | ||
369 | |||
370 | if (!mcountsym) | ||
371 | mcountsym = get_mcountsym(sym0, relp, str0); | ||
372 | |||
373 | if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { | ||
374 | if (make_nop) | ||
375 | ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset); | ||
376 | if (warn_on_notrace_sect && !once) { | ||
377 | printf("Section %s has mcount callers being ignored\n", | ||
378 | txtname); | ||
379 | once = 1; | ||
380 | /* just warn? */ | ||
381 | if (!make_nop) | ||
382 | return; | ||
383 | } | ||
384 | } | ||
385 | |||
386 | /* | ||
387 | * If we successfully removed the mcount, mark the relocation | ||
388 | * as a nop (don't do anything with it). | ||
389 | */ | ||
390 | if (!ret) { | ||
391 | Elf_Rel rel; | ||
392 | rel = *(Elf_Rel *)relp; | ||
393 | Elf_r_info(&rel, Elf_r_sym(relp), rel_type_nop); | ||
394 | ulseek(fd_map, (void *)relp - (void *)ehdr, SEEK_SET); | ||
395 | uwrite(fd_map, &rel, sizeof(rel)); | ||
396 | } | ||
397 | relp = (Elf_Rel const *)(rel_entsize + (void *)relp); | ||
398 | } | ||
399 | } | ||
400 | |||
307 | 401 | ||
308 | /* | 402 | /* |
309 | * Find a symbol in the given section, to be used as the base for relocating | 403 | * Find a symbol in the given section, to be used as the base for relocating |
@@ -354,13 +448,13 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */ | |||
354 | Elf_Shdr const *const txthdr = &shdr0[w(relhdr->sh_info)]; | 448 | Elf_Shdr const *const txthdr = &shdr0[w(relhdr->sh_info)]; |
355 | char const *const txtname = &shstrtab[w(txthdr->sh_name)]; | 449 | char const *const txtname = &shstrtab[w(txthdr->sh_name)]; |
356 | 450 | ||
357 | if (0 == strcmp("__mcount_loc", txtname)) { | 451 | if (strcmp("__mcount_loc", txtname) == 0) { |
358 | fprintf(stderr, "warning: __mcount_loc already exists: %s\n", | 452 | fprintf(stderr, "warning: __mcount_loc already exists: %s\n", |
359 | fname); | 453 | fname); |
360 | succeed_file(); | 454 | succeed_file(); |
361 | } | 455 | } |
362 | if (SHT_PROGBITS != w(txthdr->sh_type) || | 456 | if (w(txthdr->sh_type) != SHT_PROGBITS || |
363 | !is_mcounted_section_name(txtname)) | 457 | !(w(txthdr->sh_flags) & SHF_EXECINSTR)) |
364 | return NULL; | 458 | return NULL; |
365 | return txtname; | 459 | return txtname; |
366 | } | 460 | } |
@@ -370,7 +464,7 @@ static char const *has_rel_mcount(Elf_Shdr const *const relhdr, | |||
370 | char const *const shstrtab, | 464 | char const *const shstrtab, |
371 | char const *const fname) | 465 | char const *const fname) |
372 | { | 466 | { |
373 | if (SHT_REL != w(relhdr->sh_type) && SHT_RELA != w(relhdr->sh_type)) | 467 | if (w(relhdr->sh_type) != SHT_REL && w(relhdr->sh_type) != SHT_RELA) |
374 | return NULL; | 468 | return NULL; |
375 | return __has_rel_mcount(relhdr, shdr0, shstrtab, fname); | 469 | return __has_rel_mcount(relhdr, shdr0, shstrtab, fname); |
376 | } | 470 | } |
@@ -383,9 +477,11 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, | |||
383 | { | 477 | { |
384 | unsigned totrelsz = 0; | 478 | unsigned totrelsz = 0; |
385 | Elf_Shdr const *shdrp = shdr0; | 479 | Elf_Shdr const *shdrp = shdr0; |
480 | char const *txtname; | ||
386 | 481 | ||
387 | for (; nhdr; --nhdr, ++shdrp) { | 482 | for (; nhdr; --nhdr, ++shdrp) { |
388 | if (has_rel_mcount(shdrp, shdr0, shstrtab, fname)) | 483 | txtname = has_rel_mcount(shdrp, shdr0, shstrtab, fname); |
484 | if (txtname && is_mcounted_section_name(txtname)) | ||
389 | totrelsz += _w(shdrp->sh_size); | 485 | totrelsz += _w(shdrp->sh_size); |
390 | } | 486 | } |
391 | return totrelsz; | 487 | return totrelsz; |
@@ -421,7 +517,7 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) | |||
421 | for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { | 517 | for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { |
422 | char const *const txtname = has_rel_mcount(relhdr, shdr0, | 518 | char const *const txtname = has_rel_mcount(relhdr, shdr0, |
423 | shstrtab, fname); | 519 | shstrtab, fname); |
424 | if (txtname) { | 520 | if (txtname && is_mcounted_section_name(txtname)) { |
425 | uint_t recval = 0; | 521 | uint_t recval = 0; |
426 | unsigned const recsym = find_secsym_ndx( | 522 | unsigned const recsym = find_secsym_ndx( |
427 | w(relhdr->sh_info), txtname, &recval, | 523 | w(relhdr->sh_info), txtname, &recval, |
@@ -432,6 +528,12 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) | |||
432 | mlocp = sift_rel_mcount(mlocp, | 528 | mlocp = sift_rel_mcount(mlocp, |
433 | (void *)mlocp - (void *)mloc0, &mrelp, | 529 | (void *)mlocp - (void *)mloc0, &mrelp, |
434 | relhdr, ehdr, recsym, recval, reltype); | 530 | relhdr, ehdr, recsym, recval, reltype); |
531 | } else if (txtname && (warn_on_notrace_sect || make_nop)) { | ||
532 | /* | ||
533 | * This section is ignored by ftrace, but still | ||
534 | * has mcount calls. Convert them to nops now. | ||
535 | */ | ||
536 | nop_mcount(relhdr, ehdr, txtname); | ||
435 | } | 537 | } |
436 | } | 538 | } |
437 | if (mloc0 != mlocp) { | 539 | if (mloc0 != mlocp) { |
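
The mcount_adjust value introduced above shifts each recorded location from the byte the relocation is applied to back to the first byte of the mcount call, so ftrace can later overwrite the whole instruction with a nop. On x86-64 the call's rel32 operand starts one byte after the opcode, hence -1; the larger s390 values presumably account for its longer mcount call sequence. A small illustrative helper (not part of the patch) for the addend arithmetic done in sift_rel_mcount():

    #include <stdint.h>
    #include <stdio.h>

    /* Addend recorded against the section's reference symbol for one mcount
     * call site: the relocation offset, made relative to that symbol and
     * pulled back by mcount_adjust to the start of the call instruction. */
    static int64_t mcount_loc_addend(uint64_t r_offset, uint64_t recval,
                                     int64_t mcount_adjust)
    {
        return (int64_t)(r_offset - recval) + mcount_adjust;
    }

    int main(void)
    {
        /* hypothetical x86-64 site: reloc applied one byte into the call */
        printf("0x%llx\n",
               (unsigned long long)mcount_loc_addend(0x131, 0x100, -1));
        return 0;
    }
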
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 4be0deea71ca..858966ab019c 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl | |||
@@ -134,6 +134,7 @@ my %text_sections = ( | |||
134 | ".sched.text" => 1, | 134 | ".sched.text" => 1, |
135 | ".spinlock.text" => 1, | 135 | ".spinlock.text" => 1, |
136 | ".irqentry.text" => 1, | 136 | ".irqentry.text" => 1, |
137 | ".kprobes.text" => 1, | ||
137 | ".text.unlikely" => 1, | 138 | ".text.unlikely" => 1, |
138 | ); | 139 | ); |
139 | 140 | ||
@@ -222,6 +223,7 @@ if ($arch eq "x86_64") { | |||
222 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; | 223 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; |
223 | $type = ".quad"; | 224 | $type = ".quad"; |
224 | $alignment = 8; | 225 | $alignment = 8; |
226 | $mcount_adjust = -1; | ||
225 | 227 | ||
226 | # force flags for this arch | 228 | # force flags for this arch |
227 | $ld .= " -m elf_x86_64"; | 229 | $ld .= " -m elf_x86_64"; |
@@ -231,6 +233,7 @@ if ($arch eq "x86_64") { | |||
231 | 233 | ||
232 | } elsif ($arch eq "i386") { | 234 | } elsif ($arch eq "i386") { |
233 | $alignment = 4; | 235 | $alignment = 4; |
236 | $mcount_adjust = -1; | ||
234 | 237 | ||
235 | # force flags for this arch | 238 | # force flags for this arch |
236 | $ld .= " -m elf_i386"; | 239 | $ld .= " -m elf_i386"; |
@@ -240,12 +243,14 @@ if ($arch eq "x86_64") { | |||
240 | 243 | ||
241 | } elsif ($arch eq "s390" && $bits == 32) { | 244 | } elsif ($arch eq "s390" && $bits == 32) { |
242 | $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$"; | 245 | $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$"; |
246 | $mcount_adjust = -4; | ||
243 | $alignment = 4; | 247 | $alignment = 4; |
244 | $ld .= " -m elf_s390"; | 248 | $ld .= " -m elf_s390"; |
245 | $cc .= " -m31"; | 249 | $cc .= " -m31"; |
246 | 250 | ||
247 | } elsif ($arch eq "s390" && $bits == 64) { | 251 | } elsif ($arch eq "s390" && $bits == 64) { |
248 | $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; | 252 | $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; |
253 | $mcount_adjust = -8; | ||
249 | $alignment = 8; | 254 | $alignment = 8; |
250 | $type = ".quad"; | 255 | $type = ".quad"; |
251 | $ld .= " -m elf64_s390"; | 256 | $ld .= " -m elf64_s390"; |
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index e6e7ce0d3d55..7102457661d6 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c | |||
@@ -1819,8 +1819,6 @@ static int filename_trans_read(struct policydb *p, void *fp) | |||
1819 | goto out; | 1819 | goto out; |
1820 | nel = le32_to_cpu(buf[0]); | 1820 | nel = le32_to_cpu(buf[0]); |
1821 | 1821 | ||
1822 | printk(KERN_ERR "%s: nel=%d\n", __func__, nel); | ||
1823 | |||
1824 | last = p->filename_trans; | 1822 | last = p->filename_trans; |
1825 | while (last && last->next) | 1823 | while (last && last->next) |
1826 | last = last->next; | 1824 | last = last->next; |
@@ -1857,8 +1855,6 @@ static int filename_trans_read(struct policydb *p, void *fp) | |||
1857 | goto out; | 1855 | goto out; |
1858 | name[len] = 0; | 1856 | name[len] = 0; |
1859 | 1857 | ||
1860 | printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name); | ||
1861 | |||
1862 | rc = next_entry(buf, fp, sizeof(u32) * 4); | 1858 | rc = next_entry(buf, fp, sizeof(u32) * 4); |
1863 | if (rc) | 1859 | if (rc) |
1864 | goto out; | 1860 | goto out; |
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c index 2727befd158e..b04d28039c16 100644 --- a/sound/soc/codecs/ssm2602.c +++ b/sound/soc/codecs/ssm2602.c | |||
@@ -139,7 +139,7 @@ SOC_DOUBLE_R("Capture Volume", SSM2602_LINVOL, SSM2602_RINVOL, 0, 31, 0), | |||
139 | SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1), | 139 | SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1), |
140 | 140 | ||
141 | SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0), | 141 | SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0), |
142 | SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0), | 142 | SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0), |
143 | SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1), | 143 | SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1), |
144 | 144 | ||
145 | SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1), | 145 | SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1), |
@@ -602,7 +602,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = { | |||
602 | .read = ssm2602_read_reg_cache, | 602 | .read = ssm2602_read_reg_cache, |
603 | .write = ssm2602_write, | 603 | .write = ssm2602_write, |
604 | .set_bias_level = ssm2602_set_bias_level, | 604 | .set_bias_level = ssm2602_set_bias_level, |
605 | .reg_cache_size = sizeof(ssm2602_reg), | 605 | .reg_cache_size = ARRAY_SIZE(ssm2602_reg), |
606 | .reg_word_size = sizeof(u16), | 606 | .reg_word_size = sizeof(u16), |
607 | .reg_cache_default = ssm2602_reg, | 607 | .reg_cache_default = ssm2602_reg, |
608 | }; | 608 | }; |
@@ -614,7 +614,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = { | |||
614 | * low = 0x1a | 614 | * low = 0x1a |
615 | * high = 0x1b | 615 | * high = 0x1b |
616 | */ | 616 | */ |
617 | static int ssm2602_i2c_probe(struct i2c_client *i2c, | 617 | static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c, |
618 | const struct i2c_device_id *id) | 618 | const struct i2c_device_id *id) |
619 | { | 619 | { |
620 | struct ssm2602_priv *ssm2602; | 620 | struct ssm2602_priv *ssm2602; |
@@ -635,7 +635,7 @@ static int ssm2602_i2c_probe(struct i2c_client *i2c, | |||
635 | return ret; | 635 | return ret; |
636 | } | 636 | } |
637 | 637 | ||
638 | static int ssm2602_i2c_remove(struct i2c_client *client) | 638 | static int __devexit ssm2602_i2c_remove(struct i2c_client *client) |
639 | { | 639 | { |
640 | snd_soc_unregister_codec(&client->dev); | 640 | snd_soc_unregister_codec(&client->dev); |
641 | kfree(i2c_get_clientdata(client)); | 641 | kfree(i2c_get_clientdata(client)); |
@@ -655,7 +655,7 @@ static struct i2c_driver ssm2602_i2c_driver = { | |||
655 | .owner = THIS_MODULE, | 655 | .owner = THIS_MODULE, |
656 | }, | 656 | }, |
657 | .probe = ssm2602_i2c_probe, | 657 | .probe = ssm2602_i2c_probe, |
658 | .remove = ssm2602_i2c_remove, | 658 | .remove = __devexit_p(ssm2602_i2c_remove), |
659 | .id_table = ssm2602_i2c_id, | 659 | .id_table = ssm2602_i2c_id, |
660 | }; | 660 | }; |
661 | #endif | 661 | #endif |
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c index 48ffd406a71d..a7b8f301bad3 100644 --- a/sound/soc/codecs/uda134x.c +++ b/sound/soc/codecs/uda134x.c | |||
@@ -601,9 +601,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = { | |||
601 | .reg_cache_step = 1, | 601 | .reg_cache_step = 1, |
602 | .read = uda134x_read_reg_cache, | 602 | .read = uda134x_read_reg_cache, |
603 | .write = uda134x_write, | 603 | .write = uda134x_write, |
604 | #ifdef POWER_OFF_ON_STANDBY | ||
605 | .set_bias_level = uda134x_set_bias_level, | 604 | .set_bias_level = uda134x_set_bias_level, |
606 | #endif | ||
607 | }; | 605 | }; |
608 | 606 | ||
609 | static int __devinit uda134x_codec_probe(struct platform_device *pdev) | 607 | static int __devinit uda134x_codec_probe(struct platform_device *pdev) |
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index f52b623bb692..824d1c8c8a35 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c | |||
@@ -692,7 +692,7 @@ SOC_ENUM("DRC Smoothing Threshold", drc_smoothing), | |||
692 | SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup), | 692 | SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup), |
693 | 693 | ||
694 | SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT, | 694 | SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT, |
695 | WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv), | 695 | WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv), |
696 | SOC_ENUM("ADC Companding Mode", adc_companding), | 696 | SOC_ENUM("ADC Companding Mode", adc_companding), |
697 | SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0), | 697 | SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0), |
698 | 698 | ||
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c index 419bf4f5534a..cd22a54b2f14 100644 --- a/sound/soc/jz4740/jz4740-i2s.c +++ b/sound/soc/jz4740/jz4740-i2s.c | |||
@@ -133,7 +133,7 @@ static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream, | |||
133 | struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); | 133 | struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); |
134 | uint32_t conf; | 134 | uint32_t conf; |
135 | 135 | ||
136 | if (!dai->active) | 136 | if (dai->active) |
137 | return; | 137 | return; |
138 | 138 | ||
139 | conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); | 139 | conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); |
diff --git a/sound/soc/mid-x86/sst_platform.c b/sound/soc/mid-x86/sst_platform.c index d567c322a2fb..6b1f9d3bf34e 100644 --- a/sound/soc/mid-x86/sst_platform.c +++ b/sound/soc/mid-x86/sst_platform.c | |||
@@ -376,6 +376,11 @@ static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream, | |||
376 | return 0; | 376 | return 0; |
377 | } | 377 | } |
378 | 378 | ||
379 | static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream) | ||
380 | { | ||
381 | return snd_pcm_lib_free_pages(substream); | ||
382 | } | ||
383 | |||
379 | static struct snd_pcm_ops sst_platform_ops = { | 384 | static struct snd_pcm_ops sst_platform_ops = { |
380 | .open = sst_platform_open, | 385 | .open = sst_platform_open, |
381 | .close = sst_platform_close, | 386 | .close = sst_platform_close, |
@@ -384,6 +389,7 @@ static struct snd_pcm_ops sst_platform_ops = { | |||
384 | .trigger = sst_platform_pcm_trigger, | 389 | .trigger = sst_platform_pcm_trigger, |
385 | .pointer = sst_platform_pcm_pointer, | 390 | .pointer = sst_platform_pcm_pointer, |
386 | .hw_params = sst_platform_pcm_hw_params, | 391 | .hw_params = sst_platform_pcm_hw_params, |
392 | .hw_free = sst_platform_pcm_hw_free, | ||
387 | }; | 393 | }; |
388 | 394 | ||
389 | static void sst_pcm_free(struct snd_pcm *pcm) | 395 | static void sst_pcm_free(struct snd_pcm *pcm) |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index d8562ce4de7a..dd55d1069468 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -3291,6 +3291,8 @@ int snd_soc_register_card(struct snd_soc_card *card) | |||
3291 | if (!card->name || !card->dev) | 3291 | if (!card->name || !card->dev) |
3292 | return -EINVAL; | 3292 | return -EINVAL; |
3293 | 3293 | ||
3294 | dev_set_drvdata(card->dev, card); | ||
3295 | |||
3294 | snd_soc_initialize_card_lists(card); | 3296 | snd_soc_initialize_card_lists(card); |
3295 | 3297 | ||
3296 | soc_init_card_debugfs(card); | 3298 | soc_init_card_debugfs(card); |
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 66f040b30729..86c87e214b11 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt | |||
@@ -113,13 +113,61 @@ OPTIONS | |||
113 | Do various checks like samples ordering and lost events. | 113 | Do various checks like samples ordering and lost events. |
114 | 114 | ||
115 | -f:: | 115 | -f:: |
116 | --fields | 116 | --fields:: |
117 | Comma separated list of fields to print. Options are: | 117 | Comma separated list of fields to print. Options are: |
118 | comm, tid, pid, time, cpu, event, trace, sym. Field | 118 | comm, tid, pid, time, cpu, event, trace, sym. Field |
119 | list must be prepended with the type, trace, sw or hw, | 119 | list can be prepended with the type, trace, sw or hw, |
120 | to indicate to which event type the field list applies. | 120 | to indicate to which event type the field list applies. |
121 | e.g., -f sw:comm,tid,time,sym and -f trace:time,cpu,trace | 121 | e.g., -f sw:comm,tid,time,sym and -f trace:time,cpu,trace |
122 | 122 | ||
123 | perf script -f <fields> | ||
124 | |||
125 | is equivalent to: | ||
126 | |||
127 | perf script -f trace:<fields> -f sw:<fields> -f hw:<fields> | ||
128 | |||
129 | i.e., the specified fields apply to all event types if the type string | ||
130 | is not given. | ||
131 | |||
132 | The arguments are processed in the order received. A later usage can | ||
133 | reset a prior request. e.g.: | ||
134 | |||
135 | -f trace: -f comm,tid,time,sym | ||
136 | |||
137 | The first -f suppresses trace events (field list is ""), but then the | ||
138 | second invocation sets the fields to comm,tid,time,sym. In this case a | ||
139 | warning is given to the user: | ||
140 | |||
141 | "Overriding previous field request for all events." | ||
142 | |||
143 | Alternatively, consider the order: | ||
144 | |||
145 | -f comm,tid,time,sym -f trace: | ||
146 | |||
147 | The first -f sets the fields for all events and the second -f | ||
148 | suppresses trace events. The user is given a warning message about | ||
149 | the override, and the result of the above is that only S/W and H/W | ||
150 | events are displayed with the given fields. | ||
151 | |||
152 | For the 'wildcard' option, if a user-selected field is invalid for an | ||
153 | event type, a message is displayed to the user that the field is | ||
154 | ignored for that type. For example: | ||
155 | |||
156 | $ perf script -f comm,tid,trace | ||
157 | 'trace' not valid for hardware events. Ignoring. | ||
158 | 'trace' not valid for software events. Ignoring. | ||
159 | |||
160 | Alternatively, if the type is given and an invalid field is specified, it | ||
161 | is an error. For example: | ||
162 | |||
163 | perf script -v -f sw:comm,tid,trace | ||
164 | 'trace' not valid for software events. | ||
165 | |||
166 | At this point usage is displayed, and perf-script exits. | ||
167 | |||
168 | Finally, a user may not set fields to none for all event types. | ||
169 | i.e., -f "" is not allowed. | ||
170 | |||
123 | -k:: | 171 | -k:: |
124 | --vmlinux=<file>:: | 172 | --vmlinux=<file>:: |
125 | vmlinux pathname | 173 | vmlinux pathname |
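
The field-list syntax documented above reduces to an optional "type:" prefix followed by comma-separated field names. A toy parser showing how such a string splits (illustrative only; the real logic is the parse_output_fields() rework in builtin-script.c further down):

    #include <stdio.h>
    #include <string.h>

    /* Split "sw:comm,tid,time,sym" (or a bare "comm,tid,...") into a type
     * prefix and its individual field names. */
    static void parse_fields(char *arg)
    {
        const char *type = "all";            /* no prefix: applies to every type */
        char *fields = strchr(arg, ':');
        char *tok;

        if (fields) {
            *fields++ = '\0';
            type = arg;                      /* "hw", "sw", "trace" or "raw" */
        } else {
            fields = arg;
        }

        for (tok = strtok(fields, ","); tok; tok = strtok(NULL, ","))
            printf("%s: field '%s'\n", type, tok);
    }

    int main(void)
    {
        char a[] = "sw:comm,tid,time,sym";
        char b[] = "comm,tid,trace";

        parse_fields(a);
        parse_fields(b);
        return 0;
    }
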
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 0c542563ea6c..1455413ec7a7 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -5,6 +5,8 @@ endif | |||
5 | # The default target of this Makefile is... | 5 | # The default target of this Makefile is... |
6 | all: | 6 | all: |
7 | 7 | ||
8 | include config/utilities.mak | ||
9 | |||
8 | ifneq ($(OUTPUT),) | 10 | ifneq ($(OUTPUT),) |
9 | # check that the output directory actually exists | 11 | # check that the output directory actually exists |
10 | OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) | 12 | OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) |
@@ -13,6 +15,12 @@ endif | |||
13 | 15 | ||
14 | # Define V to have a more verbose compile. | 16 | # Define V to have a more verbose compile. |
15 | # | 17 | # |
18 | # Define PYTHON to point to the python binary if the default | ||
19 | # `python' is not correct; for example: PYTHON=python2 | ||
20 | # | ||
21 | # Define PYTHON_CONFIG to point to the python-config binary if | ||
22 | # the default `$(PYTHON)-config' is not correct. | ||
23 | # | ||
16 | # Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 | 24 | # Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 |
17 | # | 25 | # |
18 | # Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. | 26 | # Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. |
@@ -134,7 +142,7 @@ INSTALL = install | |||
134 | # explicitly what architecture to check for. Fix this up for yours.. | 142 | # explicitly what architecture to check for. Fix this up for yours.. |
135 | SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ | 143 | SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ |
136 | 144 | ||
137 | -include feature-tests.mak | 145 | -include config/feature-tests.mak |
138 | 146 | ||
139 | ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y) | 147 | ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y) |
140 | CFLAGS := $(CFLAGS) -fstack-protector-all | 148 | CFLAGS := $(CFLAGS) -fstack-protector-all |
@@ -169,12 +177,10 @@ grep-libs = $(filter -l%,$(1)) | |||
169 | strip-libs = $(filter-out -l%,$(1)) | 177 | strip-libs = $(filter-out -l%,$(1)) |
170 | 178 | ||
171 | $(OUTPUT)python/perf.so: $(PYRF_OBJS) | 179 | $(OUTPUT)python/perf.so: $(PYRF_OBJS) |
172 | $(QUIET_GEN)( \ | 180 | $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ |
173 | export CFLAGS="$(BASIC_CFLAGS)"; \ | 181 | --quiet build_ext \ |
174 | python util/setup.py --quiet build_ext --build-lib='$(OUTPUT)python' \ | 182 | --build-lib='$(OUTPUT)python' \ |
175 | --build-temp='$(OUTPUT)python/temp' \ | 183 | --build-temp='$(OUTPUT)python/temp' |
176 | ) | ||
177 | |||
178 | # | 184 | # |
179 | # No Perl scripts right now: | 185 | # No Perl scripts right now: |
180 | # | 186 | # |
@@ -479,24 +485,74 @@ else | |||
479 | endif | 485 | endif |
480 | endif | 486 | endif |
481 | 487 | ||
482 | ifdef NO_LIBPYTHON | 488 | disable-python = $(eval $(disable-python_code)) |
483 | BASIC_CFLAGS += -DNO_LIBPYTHON | 489 | define disable-python_code |
490 | BASIC_CFLAGS += -DNO_LIBPYTHON | ||
491 | $(if $(1),$(warning No $(1) was found)) | ||
492 | $(warning Python support won't be built) | ||
493 | endef | ||
494 | |||
495 | override PYTHON := \ | ||
496 | $(call get-executable-or-default,PYTHON,python) | ||
497 | |||
498 | ifndef PYTHON | ||
499 | $(call disable-python,python interpreter) | ||
500 | python-clean := | ||
484 | else | 501 | else |
485 | PYTHON_EMBED_LDOPTS = $(shell python-config --ldflags 2>/dev/null) | 502 | |
486 | PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) | 503 | PYTHON_WORD := $(call shell-wordify,$(PYTHON)) |
487 | PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) | 504 | |
488 | PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null` | 505 | python-clean := $(PYTHON_WORD) util/setup.py clean \ |
489 | FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) | 506 | --build-lib='$(OUTPUT)python' \ |
490 | ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y) | 507 | --build-temp='$(OUTPUT)python/temp' |
491 | msg := $(warning No Python.h found, install python-dev[el] to have python support in 'perf script' and to build the python bindings) | 508 | |
492 | BASIC_CFLAGS += -DNO_LIBPYTHON | 509 | ifdef NO_LIBPYTHON |
493 | else | 510 | $(call disable-python) |
494 | ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS) | 511 | else |
495 | EXTLIBS += $(PYTHON_EMBED_LIBADD) | 512 | |
496 | LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o | 513 | override PYTHON_CONFIG := \ |
497 | LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o | 514 | $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON)-config) |
498 | LANG_BINDINGS += $(OUTPUT)python/perf.so | 515 | |
499 | endif | 516 | ifndef PYTHON_CONFIG |
517 | $(call disable-python,python-config tool) | ||
518 | else | ||
519 | |||
520 | PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) | ||
521 | |||
522 | PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) | ||
523 | PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) | ||
524 | PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) | ||
525 | PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) | ||
526 | FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) | ||
527 | |||
528 | ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y) | ||
529 | $(call disable-python,Python.h (for Python 2.x)) | ||
530 | else | ||
531 | |||
532 | ifneq ($(call try-cc,$(SOURCE_PYTHON_VERSION),$(FLAGS_PYTHON_EMBED)),y) | ||
533 | $(warning Python 3 is not yet supported; please set) | ||
534 | $(warning PYTHON and/or PYTHON_CONFIG appropriately.) | ||
535 | $(warning If you also have Python 2 installed, then) | ||
536 | $(warning try something like:) | ||
537 | $(warning $(and ,)) | ||
538 | $(warning $(and ,) make PYTHON=python2) | ||
539 | $(warning $(and ,)) | ||
540 | $(warning Otherwise, disable Python support entirely:) | ||
541 | $(warning $(and ,)) | ||
542 | $(warning $(and ,) make NO_LIBPYTHON=1) | ||
543 | $(warning $(and ,)) | ||
544 | $(error $(and ,)) | ||
545 | else | ||
546 | ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS) | ||
547 | EXTLIBS += $(PYTHON_EMBED_LIBADD) | ||
548 | LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o | ||
549 | LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o | ||
550 | LANG_BINDINGS += $(OUTPUT)python/perf.so | ||
551 | endif | ||
552 | |||
553 | endif | ||
554 | endif | ||
555 | endif | ||
500 | endif | 556 | endif |
501 | 557 | ||
502 | ifdef NO_DEMANGLE | 558 | ifdef NO_DEMANGLE |
@@ -837,8 +893,7 @@ clean: | |||
837 | $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* | 893 | $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* |
838 | $(MAKE) -C Documentation/ clean | 894 | $(MAKE) -C Documentation/ clean |
839 | $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS | 895 | $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS |
840 | @python util/setup.py clean --build-lib='$(OUTPUT)python' \ | 896 | $(python-clean) |
841 | --build-temp='$(OUTPUT)python/temp' | ||
842 | 897 | ||
843 | .PHONY: all install clean strip | 898 | .PHONY: all install clean strip |
844 | .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell | 899 | .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell |
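
The try-cc feature tests above compile small probe programs from config/feature-tests.mak with the flags reported by $(PYTHON_CONFIG). The Python-embedding probe is roughly of this shape (an approximation of SOURCE_PYTHON_EMBED, not its exact source):

    /* If this builds and links with `python-config --cflags --ldflags`, the
     * Makefile enables perf's Python scripting support; otherwise it falls
     * back to -DNO_LIBPYTHON. */
    #include <Python.h>

    int main(void)
    {
        Py_Initialize();
        Py_Finalize();
        return 0;
    }
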
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 416538248a4b..0974f957b8fa 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -427,7 +427,7 @@ static void mmap_read_all(void) | |||
427 | { | 427 | { |
428 | int i; | 428 | int i; |
429 | 429 | ||
430 | for (i = 0; i < evsel_list->cpus->nr; i++) { | 430 | for (i = 0; i < evsel_list->nr_mmaps; i++) { |
431 | if (evsel_list->mmap[i].base) | 431 | if (evsel_list->mmap[i].base) |
432 | mmap_read(&evsel_list->mmap[i]); | 432 | mmap_read(&evsel_list->mmap[i]); |
433 | } | 433 | } |
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index ac574ea23917..974f6d3f4e53 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -49,57 +49,169 @@ struct output_option { | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | /* default set to maintain compatibility with current format */ | 51 | /* default set to maintain compatibility with current format */ |
52 | static u64 output_fields[PERF_TYPE_MAX] = { | 52 | static struct { |
53 | [PERF_TYPE_HARDWARE] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ | 53 | bool user_set; |
54 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ | 54 | bool wildcard_set; |
55 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, | 55 | u64 fields; |
56 | 56 | u64 invalid_fields; | |
57 | [PERF_TYPE_SOFTWARE] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ | 57 | } output[PERF_TYPE_MAX] = { |
58 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ | 58 | |
59 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, | 59 | [PERF_TYPE_HARDWARE] = { |
60 | 60 | .user_set = false, | |
61 | [PERF_TYPE_TRACEPOINT] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ | 61 | |
62 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ | 62 | .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | |
63 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE, | 63 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | |
64 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, | ||
65 | |||
66 | .invalid_fields = PERF_OUTPUT_TRACE, | ||
67 | }, | ||
68 | |||
69 | [PERF_TYPE_SOFTWARE] = { | ||
70 | .user_set = false, | ||
71 | |||
72 | .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | | ||
73 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | | ||
74 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, | ||
75 | |||
76 | .invalid_fields = PERF_OUTPUT_TRACE, | ||
77 | }, | ||
78 | |||
79 | [PERF_TYPE_TRACEPOINT] = { | ||
80 | .user_set = false, | ||
81 | |||
82 | .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | | ||
83 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | | ||
84 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE, | ||
85 | }, | ||
86 | |||
87 | [PERF_TYPE_RAW] = { | ||
88 | .user_set = false, | ||
89 | |||
90 | .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | | ||
91 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | | ||
92 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, | ||
93 | |||
94 | .invalid_fields = PERF_OUTPUT_TRACE, | ||
95 | }, | ||
64 | }; | 96 | }; |
65 | 97 | ||
66 | static bool output_set_by_user; | 98 | static bool output_set_by_user(void) |
99 | { | ||
100 | int j; | ||
101 | for (j = 0; j < PERF_TYPE_MAX; ++j) { | ||
102 | if (output[j].user_set) | ||
103 | return true; | ||
104 | } | ||
105 | return false; | ||
106 | } | ||
107 | |||
108 | static const char *output_field2str(enum perf_output_field field) | ||
109 | { | ||
110 | int i, imax = ARRAY_SIZE(all_output_options); | ||
111 | const char *str = ""; | ||
112 | |||
113 | for (i = 0; i < imax; ++i) { | ||
114 | if (all_output_options[i].field == field) { | ||
115 | str = all_output_options[i].str; | ||
116 | break; | ||
117 | } | ||
118 | } | ||
119 | return str; | ||
120 | } | ||
67 | 121 | ||
68 | #define PRINT_FIELD(x) (output_fields[attr->type] & PERF_OUTPUT_##x) | 122 | #define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x) |
69 | 123 | ||
70 | static int perf_session__check_attr(struct perf_session *session, | 124 | static int perf_event_attr__check_stype(struct perf_event_attr *attr, |
71 | struct perf_event_attr *attr) | 125 | u64 sample_type, const char *sample_msg, |
126 | enum perf_output_field field) | ||
72 | { | 127 | { |
128 | int type = attr->type; | ||
129 | const char *evname; | ||
130 | |||
131 | if (attr->sample_type & sample_type) | ||
132 | return 0; | ||
133 | |||
134 | if (output[type].user_set) { | ||
135 | evname = __event_name(attr->type, attr->config); | ||
136 | pr_err("Samples for '%s' event do not have %s attribute set. " | ||
137 | "Cannot print '%s' field.\n", | ||
138 | evname, sample_msg, output_field2str(field)); | ||
139 | return -1; | ||
140 | } | ||
141 | |||
142 | /* user did not ask for it explicitly so remove from the default list */ | ||
143 | output[type].fields &= ~field; | ||
144 | evname = __event_name(attr->type, attr->config); | ||
145 | pr_debug("Samples for '%s' event do not have %s attribute set. " | ||
146 | "Skipping '%s' field.\n", | ||
147 | evname, sample_msg, output_field2str(field)); | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | static int perf_evsel__check_attr(struct perf_evsel *evsel, | ||
153 | struct perf_session *session) | ||
154 | { | ||
155 | struct perf_event_attr *attr = &evsel->attr; | ||
156 | |||
73 | if (PRINT_FIELD(TRACE) && | 157 | if (PRINT_FIELD(TRACE) && |
74 | !perf_session__has_traces(session, "record -R")) | 158 | !perf_session__has_traces(session, "record -R")) |
75 | return -EINVAL; | 159 | return -EINVAL; |
76 | 160 | ||
77 | if (PRINT_FIELD(SYM)) { | 161 | if (PRINT_FIELD(SYM)) { |
78 | if (!(session->sample_type & PERF_SAMPLE_IP)) { | 162 | if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP", |
79 | pr_err("Samples do not contain IP data.\n"); | 163 | PERF_OUTPUT_SYM)) |
80 | return -EINVAL; | 164 | return -EINVAL; |
81 | } | 165 | |
82 | if (!no_callchain && | 166 | if (!no_callchain && |
83 | !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) | 167 | !(attr->sample_type & PERF_SAMPLE_CALLCHAIN)) |
84 | symbol_conf.use_callchain = false; | 168 | symbol_conf.use_callchain = false; |
85 | } | 169 | } |
86 | 170 | ||
87 | if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) && | 171 | if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) && |
88 | !(session->sample_type & PERF_SAMPLE_TID)) { | 172 | perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID", |
89 | pr_err("Samples do not contain TID/PID data.\n"); | 173 | PERF_OUTPUT_TID|PERF_OUTPUT_PID)) |
90 | return -EINVAL; | 174 | return -EINVAL; |
91 | } | ||
92 | 175 | ||
93 | if (PRINT_FIELD(TIME) && | 176 | if (PRINT_FIELD(TIME) && |
94 | !(session->sample_type & PERF_SAMPLE_TIME)) { | 177 | perf_event_attr__check_stype(attr, PERF_SAMPLE_TIME, "TIME", |
95 | pr_err("Samples do not contain timestamps.\n"); | 178 | PERF_OUTPUT_TIME)) |
96 | return -EINVAL; | 179 | return -EINVAL; |
97 | } | ||
98 | 180 | ||
99 | if (PRINT_FIELD(CPU) && | 181 | if (PRINT_FIELD(CPU) && |
100 | !(session->sample_type & PERF_SAMPLE_CPU)) { | 182 | perf_event_attr__check_stype(attr, PERF_SAMPLE_CPU, "CPU", |
101 | pr_err("Samples do not contain cpu.\n"); | 183 | PERF_OUTPUT_CPU)) |
102 | return -EINVAL; | 184 | return -EINVAL; |
185 | |||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * verify all user requested events exist and the samples | ||
191 | * have the expected data | ||
192 | */ | ||
193 | static int perf_session__check_output_opt(struct perf_session *session) | ||
194 | { | ||
195 | int j; | ||
196 | struct perf_evsel *evsel; | ||
197 | |||
198 | for (j = 0; j < PERF_TYPE_MAX; ++j) { | ||
199 | evsel = perf_session__find_first_evtype(session, j); | ||
200 | |||
201 | /* | ||
202 | * even if fields is set to 0 (ie., show nothing) event must | ||
203 | * exist if user explicitly includes it on the command line | ||
204 | */ | ||
205 | if (!evsel && output[j].user_set && !output[j].wildcard_set) { | ||
206 | pr_err("%s events do not exist. " | ||
207 | "Remove corresponding -f option to proceed.\n", | ||
208 | event_type(j)); | ||
209 | return -1; | ||
210 | } | ||
211 | |||
212 | if (evsel && output[j].fields && | ||
213 | perf_evsel__check_attr(evsel, session)) | ||
214 | return -1; | ||
103 | } | 215 | } |
104 | 216 | ||
105 | return 0; | 217 | return 0; |
@@ -168,10 +280,7 @@ static void process_event(union perf_event *event __unused, | |||
168 | { | 280 | { |
169 | struct perf_event_attr *attr = &evsel->attr; | 281 | struct perf_event_attr *attr = &evsel->attr; |
170 | 282 | ||
171 | if (output_fields[attr->type] == 0) | 283 | if (output[attr->type].fields == 0) |
172 | return; | ||
173 | |||
174 | if (perf_session__check_attr(session, attr) < 0) | ||
175 | return; | 284 | return; |
176 | 285 | ||
177 | print_sample_start(sample, thread, attr); | 286 | print_sample_start(sample, thread, attr); |
@@ -451,6 +560,7 @@ static int parse_output_fields(const struct option *opt __used, | |||
451 | { | 560 | { |
452 | char *tok; | 561 | char *tok; |
453 | int i, imax = sizeof(all_output_options) / sizeof(struct output_option); | 562 | int i, imax = sizeof(all_output_options) / sizeof(struct output_option); |
563 | int j; | ||
454 | int rc = 0; | 564 | int rc = 0; |
455 | char *str = strdup(arg); | 565 | char *str = strdup(arg); |
456 | int type = -1; | 566 | int type = -1; |
@@ -458,52 +568,99 @@ static int parse_output_fields(const struct option *opt __used, | |||
458 | if (!str) | 568 | if (!str) |
459 | return -ENOMEM; | 569 | return -ENOMEM; |
460 | 570 | ||
461 | tok = strtok(str, ":"); | 571 | /* first word can state for which event type the user is specifying |
462 | if (!tok) { | 572 | * the fields. If no type exists, the specified fields apply to all |
463 | fprintf(stderr, | 573 | * event types found in the file minus the invalid fields for a type. |
464 | "Invalid field string - not prepended with type."); | ||
465 | return -EINVAL; | ||
466 | } | ||
467 | |||
468 | /* first word should state which event type user | ||
469 | * is specifying the fields | ||
470 | */ | 574 | */ |
471 | if (!strcmp(tok, "hw")) | 575 | tok = strchr(str, ':'); |
472 | type = PERF_TYPE_HARDWARE; | 576 | if (tok) { |
473 | else if (!strcmp(tok, "sw")) | 577 | *tok = '\0'; |
474 | type = PERF_TYPE_SOFTWARE; | 578 | tok++; |
475 | else if (!strcmp(tok, "trace")) | 579 | if (!strcmp(str, "hw")) |
476 | type = PERF_TYPE_TRACEPOINT; | 580 | type = PERF_TYPE_HARDWARE; |
477 | else { | 581 | else if (!strcmp(str, "sw")) |
478 | fprintf(stderr, "Invalid event type in field string."); | 582 | type = PERF_TYPE_SOFTWARE; |
479 | return -EINVAL; | 583 | else if (!strcmp(str, "trace")) |
584 | type = PERF_TYPE_TRACEPOINT; | ||
585 | else if (!strcmp(str, "raw")) | ||
586 | type = PERF_TYPE_RAW; | ||
587 | else { | ||
588 | fprintf(stderr, "Invalid event type in field string.\n"); | ||
589 | return -EINVAL; | ||
590 | } | ||
591 | |||
592 | if (output[type].user_set) | ||
593 | pr_warning("Overriding previous field request for %s events.\n", | ||
594 | event_type(type)); | ||
595 | |||
596 | output[type].fields = 0; | ||
597 | output[type].user_set = true; | ||
598 | output[type].wildcard_set = false; | ||
599 | |||
600 | } else { | ||
601 | tok = str; | ||
602 | if (strlen(str) == 0) { | ||
603 | fprintf(stderr, | ||
604 | "Cannot set fields to 'none' for all event types.\n"); | ||
605 | rc = -EINVAL; | ||
606 | goto out; | ||
607 | } | ||
608 | |||
609 | if (output_set_by_user()) | ||
610 | pr_warning("Overriding previous field request for all events.\n"); | ||
611 | |||
612 | for (j = 0; j < PERF_TYPE_MAX; ++j) { | ||
613 | output[j].fields = 0; | ||
614 | output[j].user_set = true; | ||
615 | output[j].wildcard_set = true; | ||
616 | } | ||
480 | } | 617 | } |
481 | 618 | ||
482 | output_fields[type] = 0; | 619 | tok = strtok(tok, ","); |
483 | while (1) { | 620 | while (tok) { |
484 | tok = strtok(NULL, ","); | ||
485 | if (!tok) | ||
486 | break; | ||
487 | for (i = 0; i < imax; ++i) { | 621 | for (i = 0; i < imax; ++i) { |
488 | if (strcmp(tok, all_output_options[i].str) == 0) { | 622 | if (strcmp(tok, all_output_options[i].str) == 0) |
489 | output_fields[type] |= all_output_options[i].field; | ||
490 | break; | 623 | break; |
491 | } | ||
492 | } | 624 | } |
493 | if (i == imax) { | 625 | if (i == imax) { |
494 | fprintf(stderr, "Invalid field requested."); | 626 | fprintf(stderr, "Invalid field requested.\n"); |
495 | rc = -EINVAL; | 627 | rc = -EINVAL; |
496 | break; | 628 | goto out; |
497 | } | 629 | } |
498 | } | ||
499 | 630 | ||
500 | if (output_fields[type] == 0) { | 631 | if (type == -1) { |
501 | pr_debug("No fields requested for %s type. " | 632 | /* add user option to all events types for |
502 | "Events will not be displayed\n", event_type(type)); | 633 | * which it is valid |
634 | */ | ||
635 | for (j = 0; j < PERF_TYPE_MAX; ++j) { | ||
636 | if (output[j].invalid_fields & all_output_options[i].field) { | ||
637 | pr_warning("\'%s\' not valid for %s events. Ignoring.\n", | ||
638 | all_output_options[i].str, event_type(j)); | ||
639 | } else | ||
640 | output[j].fields |= all_output_options[i].field; | ||
641 | } | ||
642 | } else { | ||
643 | if (output[type].invalid_fields & all_output_options[i].field) { | ||
644 | fprintf(stderr, "\'%s\' not valid for %s events.\n", | ||
645 | all_output_options[i].str, event_type(type)); | ||
646 | |||
647 | rc = -EINVAL; | ||
648 | goto out; | ||
649 | } | ||
650 | output[type].fields |= all_output_options[i].field; | ||
651 | } | ||
652 | |||
653 | tok = strtok(NULL, ","); | ||
503 | } | 654 | } |
504 | 655 | ||
505 | output_set_by_user = true; | 656 | if (type >= 0) { |
657 | if (output[type].fields == 0) { | ||
658 | pr_debug("No fields requested for %s type. " | ||
659 | "Events will not be displayed.\n", event_type(type)); | ||
660 | } | ||
661 | } | ||
506 | 662 | ||
663 | out: | ||
507 | free(str); | 664 | free(str); |
508 | return rc; | 665 | return rc; |
509 | } | 666 | } |
@@ -829,7 +986,7 @@ static const struct option options[] = { | |||
829 | OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", | 986 | OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", |
830 | "Look for files with symbols relative to this directory"), | 987 | "Look for files with symbols relative to this directory"), |
831 | OPT_CALLBACK('f', "fields", NULL, "str", | 988 | OPT_CALLBACK('f', "fields", NULL, "str", |
832 | "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace. Fields: comm,tid,pid,time,cpu,event,trace,sym", | 989 | "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,sym", |
833 | parse_output_fields), | 990 | parse_output_fields), |
834 | 991 | ||
835 | OPT_END() | 992 | OPT_END() |
@@ -1020,7 +1177,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) | |||
1020 | struct stat perf_stat; | 1177 | struct stat perf_stat; |
1021 | int input; | 1178 | int input; |
1022 | 1179 | ||
1023 | if (output_set_by_user) { | 1180 | if (output_set_by_user()) { |
1024 | fprintf(stderr, | 1181 | fprintf(stderr, |
1025 | "custom fields not supported for generated scripts"); | 1182 | "custom fields not supported for generated scripts"); |
1026 | return -1; | 1183 | return -1; |
@@ -1060,6 +1217,11 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) | |||
1060 | pr_debug("perf script started with script %s\n\n", script_name); | 1217 | pr_debug("perf script started with script %s\n\n", script_name); |
1061 | } | 1218 | } |
1062 | 1219 | ||
1220 | |||
1221 | err = perf_session__check_output_opt(session); | ||
1222 | if (err < 0) | ||
1223 | goto out; | ||
1224 | |||
1063 | err = __cmd_script(session); | 1225 | err = __cmd_script(session); |
1064 | 1226 | ||
1065 | perf_session__delete(session); | 1227 | perf_session__delete(session); |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 03f0e45f1479..a9f06715e44d 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -6,24 +6,28 @@ | |||
6 | * | 6 | * |
7 | * Sample output: | 7 | * Sample output: |
8 | 8 | ||
9 | $ perf stat ~/hackbench 10 | 9 | $ perf stat ./hackbench 10 |
10 | Time: 0.104 | ||
11 | 10 | ||
12 | Performance counter stats for '/home/mingo/hackbench': | 11 | Time: 0.118 |
13 | 12 | ||
14 | 1255.538611 task clock ticks # 10.143 CPU utilization factor | 13 | Performance counter stats for './hackbench 10': |
15 | 54011 context switches # 0.043 M/sec | ||
16 | 385 CPU migrations # 0.000 M/sec | ||
17 | 17755 pagefaults # 0.014 M/sec | ||
18 | 3808323185 CPU cycles # 3033.219 M/sec | ||
19 | 1575111190 instructions # 1254.530 M/sec | ||
20 | 17367895 cache references # 13.833 M/sec | ||
21 | 7674421 cache misses # 6.112 M/sec | ||
22 | 14 | ||
23 | Wall-clock time elapsed: 123.786620 msecs | 15 | 1708.761321 task-clock # 11.037 CPUs utilized |
16 | 41,190 context-switches # 0.024 M/sec | ||
17 | 6,735 CPU-migrations # 0.004 M/sec | ||
18 | 17,318 page-faults # 0.010 M/sec | ||
19 | 5,205,202,243 cycles # 3.046 GHz | ||
20 | 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle | ||
21 | 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle | ||
22 | 2,603,501,247 instructions # 0.50 insns per cycle | ||
23 | # 1.48 stalled cycles per insn | ||
24 | 484,357,498 branches # 283.455 M/sec | ||
25 | 6,388,934 branch-misses # 1.32% of all branches | ||
26 | |||
27 | 0.154822978 seconds time elapsed | ||
24 | 28 | ||
25 | * | 29 | * |
26 | * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com> | 30 | * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com> |
27 | * | 31 | * |
28 | * Improvements and fixes by: | 32 | * Improvements and fixes by: |
29 | * | 33 | * |
@@ -46,6 +50,7 @@ | |||
46 | #include "util/evlist.h" | 50 | #include "util/evlist.h" |
47 | #include "util/evsel.h" | 51 | #include "util/evsel.h" |
48 | #include "util/debug.h" | 52 | #include "util/debug.h" |
53 | #include "util/color.h" | ||
49 | #include "util/header.h" | 54 | #include "util/header.h" |
50 | #include "util/cpumap.h" | 55 | #include "util/cpumap.h" |
51 | #include "util/thread.h" | 56 | #include "util/thread.h" |
@@ -65,14 +70,107 @@ static struct perf_event_attr default_attrs[] = { | |||
65 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, | 70 | { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, |
66 | 71 | ||
67 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, | 72 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, |
73 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, | ||
74 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, | ||
68 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, | 75 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, |
69 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, | 76 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, |
70 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, | 77 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, |
71 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, | ||
72 | { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, | ||
73 | 78 | ||
74 | }; | 79 | }; |
75 | 80 | ||
81 | /* | ||
82 | * Detailed stats (-d), covering the L1 and last level data caches: | ||
83 | */ | ||
84 | static struct perf_event_attr detailed_attrs[] = { | ||
85 | |||
86 | { .type = PERF_TYPE_HW_CACHE, | ||
87 | .config = | ||
88 | PERF_COUNT_HW_CACHE_L1D << 0 | | ||
89 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | | ||
90 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, | ||
91 | |||
92 | { .type = PERF_TYPE_HW_CACHE, | ||
93 | .config = | ||
94 | PERF_COUNT_HW_CACHE_L1D << 0 | | ||
95 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | | ||
96 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, | ||
97 | |||
98 | { .type = PERF_TYPE_HW_CACHE, | ||
99 | .config = | ||
100 | PERF_COUNT_HW_CACHE_LL << 0 | | ||
101 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | | ||
102 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, | ||
103 | |||
104 | { .type = PERF_TYPE_HW_CACHE, | ||
105 | .config = | ||
106 | PERF_COUNT_HW_CACHE_LL << 0 | | ||
107 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | | ||
108 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, | ||
109 | }; | ||
110 | |||
111 | /* | ||
112 | * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: | ||
113 | */ | ||
114 | static struct perf_event_attr very_detailed_attrs[] = { | ||
115 | |||
116 | { .type = PERF_TYPE_HW_CACHE, | ||
117 | .config = | ||
118 | PERF_COUNT_HW_CACHE_L1I << 0 | | ||
119 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | | ||
120 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, | ||
121 | |||
122 | { .type = PERF_TYPE_HW_CACHE, | ||
123 | .config = | ||
124 | PERF_COUNT_HW_CACHE_L1I << 0 | | ||
125 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | | ||
126 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, | ||
127 | |||
128 | { .type = PERF_TYPE_HW_CACHE, | ||
129 | .config = | ||
130 | PERF_COUNT_HW_CACHE_DTLB << 0 | | ||
131 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | | ||
132 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, | ||
133 | |||
134 | { .type = PERF_TYPE_HW_CACHE, | ||
135 | .config = | ||
136 | PERF_COUNT_HW_CACHE_DTLB << 0 | | ||
137 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | | ||
138 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, | ||
139 | |||
140 | { .type = PERF_TYPE_HW_CACHE, | ||
141 | .config = | ||
142 | PERF_COUNT_HW_CACHE_ITLB << 0 | | ||
143 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | | ||
144 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, | ||
145 | |||
146 | { .type = PERF_TYPE_HW_CACHE, | ||
147 | .config = | ||
148 | PERF_COUNT_HW_CACHE_ITLB << 0 | | ||
149 | (PERF_COUNT_HW_CACHE_OP_READ << 8) | | ||
150 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, | ||
151 | |||
152 | }; | ||
153 | |||
154 | /* | ||
155 | * Very, very detailed stats (-d -d -d), adding prefetch events: | ||
156 | */ | ||
157 | static struct perf_event_attr very_very_detailed_attrs[] = { | ||
158 | |||
159 | { .type = PERF_TYPE_HW_CACHE, | ||
160 | .config = | ||
161 | PERF_COUNT_HW_CACHE_L1D << 0 | | ||
162 | (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | | ||
163 | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, | ||
164 | |||
165 | { .type = PERF_TYPE_HW_CACHE, | ||
166 | .config = | ||
167 | PERF_COUNT_HW_CACHE_L1D << 0 | | ||
168 | (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | | ||
169 | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, | ||
170 | }; | ||
171 | |||
172 | |||
173 | |||
76 | struct perf_evlist *evsel_list; | 174 | struct perf_evlist *evsel_list; |
77 | 175 | ||
78 | static bool system_wide = false; | 176 | static bool system_wide = false; |
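All of the detailed_attrs, very_detailed_attrs and very_very_detailed_attrs entries above pack the same three PERF_TYPE_HW_CACHE bitfields: cache id in bits 0-7, operation in bits 8-15 and result in bits 16-23. A minimal sketch of that encoding on its own, assuming a Linux toolchain with <linux/perf_event.h>; the helper name is made up, only the shift layout comes from the hunk:

#include <linux/perf_event.h>
#include <stdio.h>
#include <inttypes.h>

/* Hypothetical helper: pack (cache, op, result) the way the attr tables do. */
static uint64_t hw_cache_config(uint64_t cache, uint64_t op, uint64_t result)
{
	return cache | (op << 8) | (result << 16);
}

int main(void)
{
	/* L1-dcache load misses, as used by the -d attribute table: */
	uint64_t cfg = hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
				       PERF_COUNT_HW_CACHE_OP_READ,
				       PERF_COUNT_HW_CACHE_RESULT_MISS);

	printf("type=%d config=0x%" PRIx64 "\n", PERF_TYPE_HW_CACHE, cfg);
	return 0;
}
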
@@ -86,6 +184,8 @@ static pid_t target_pid = -1; | |||
86 | static pid_t target_tid = -1; | 184 | static pid_t target_tid = -1; |
87 | static pid_t child_pid = -1; | 185 | static pid_t child_pid = -1; |
88 | static bool null_run = false; | 186 | static bool null_run = false; |
187 | static int detailed_run = 0; | ||
188 | static bool sync_run = false; | ||
89 | static bool big_num = true; | 189 | static bool big_num = true; |
90 | static int big_num_opt = -1; | 190 | static int big_num_opt = -1; |
91 | static const char *cpu_list; | 191 | static const char *cpu_list; |
@@ -156,7 +256,15 @@ static double stddev_stats(struct stats *stats) | |||
156 | 256 | ||
157 | struct stats runtime_nsecs_stats[MAX_NR_CPUS]; | 257 | struct stats runtime_nsecs_stats[MAX_NR_CPUS]; |
158 | struct stats runtime_cycles_stats[MAX_NR_CPUS]; | 258 | struct stats runtime_cycles_stats[MAX_NR_CPUS]; |
259 | struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS]; | ||
260 | struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS]; | ||
159 | struct stats runtime_branches_stats[MAX_NR_CPUS]; | 261 | struct stats runtime_branches_stats[MAX_NR_CPUS]; |
262 | struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; | ||
263 | struct stats runtime_l1_dcache_stats[MAX_NR_CPUS]; | ||
264 | struct stats runtime_l1_icache_stats[MAX_NR_CPUS]; | ||
265 | struct stats runtime_ll_cache_stats[MAX_NR_CPUS]; | ||
266 | struct stats runtime_itlb_cache_stats[MAX_NR_CPUS]; | ||
267 | struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; | ||
160 | struct stats walltime_nsecs_stats; | 268 | struct stats walltime_nsecs_stats; |
161 | 269 | ||
162 | static int create_perf_stat_counter(struct perf_evsel *evsel) | 270 | static int create_perf_stat_counter(struct perf_evsel *evsel) |
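The runtime_*_stats arrays above are perf "struct stats" accumulators fed by update_stats() and read back with avg_stats()/stddev_stats() (and, further down, print_noise_pct()). Their exact implementation is not part of this diff; the sketch below is only a generic online mean/variance accumulator (Welford's method) showing what those calls conceptually provide, with all names invented here:

#include <math.h>
#include <stdio.h>

/* Illustrative running-stats accumulator; not perf's struct stats itself. */
struct running_stats {
	double n, mean, M2;
};

static void stats_update(struct running_stats *s, double val)
{
	double delta = val - s->mean;

	s->n    += 1.0;
	s->mean += delta / s->n;
	s->M2   += delta * (val - s->mean);
}

static double stats_avg(const struct running_stats *s)
{
	return s->mean;
}

static double stats_stddev(const struct running_stats *s)
{
	return s->n > 1.0 ? sqrt(s->M2 / (s->n - 1.0)) : 0.0;
}

int main(void)
{
	struct running_stats cycles = { 0, 0, 0 };
	double samples[] = { 5.20e9, 5.18e9, 5.23e9 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		stats_update(&cycles, samples[i]);

	/* relative noise, comparable to what print_noise_pct() formats below */
	printf("avg=%.3e stddev=%.3e ( +-%6.2f%% )\n",
	       stats_avg(&cycles), stats_stddev(&cycles),
	       100.0 * stats_stddev(&cycles) / stats_avg(&cycles));
	return 0;
}
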
@@ -193,6 +301,37 @@ static inline int nsec_counter(struct perf_evsel *evsel) | |||
193 | } | 301 | } |
194 | 302 | ||
195 | /* | 303 | /* |
304 | * Update various tracking values we maintain to print | ||
305 | * more semantic information such as miss/hit ratios, | ||
306 | * instruction rates, etc: | ||
307 | */ | ||
308 | static void update_shadow_stats(struct perf_evsel *counter, u64 *count) | ||
309 | { | ||
310 | if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) | ||
311 | update_stats(&runtime_nsecs_stats[0], count[0]); | ||
312 | else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) | ||
313 | update_stats(&runtime_cycles_stats[0], count[0]); | ||
314 | else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) | ||
315 | update_stats(&runtime_stalled_cycles_front_stats[0], count[0]); | ||
316 | else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) | ||
317 | update_stats(&runtime_stalled_cycles_back_stats[0], count[0]); | ||
318 | else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) | ||
319 | update_stats(&runtime_branches_stats[0], count[0]); | ||
320 | else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) | ||
321 | update_stats(&runtime_cacherefs_stats[0], count[0]); | ||
322 | else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) | ||
323 | update_stats(&runtime_l1_dcache_stats[0], count[0]); | ||
324 | else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I)) | ||
325 | update_stats(&runtime_l1_icache_stats[0], count[0]); | ||
326 | else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL)) | ||
327 | update_stats(&runtime_ll_cache_stats[0], count[0]); | ||
328 | else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB)) | ||
329 | update_stats(&runtime_dtlb_cache_stats[0], count[0]); | ||
330 | else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB)) | ||
331 | update_stats(&runtime_itlb_cache_stats[0], count[0]); | ||
332 | } | ||
333 | |||
334 | /* | ||
196 | * Read out the results of a single counter: | 335 | * Read out the results of a single counter: |
197 | * aggregate counts across CPUs in system-wide mode | 336 | * aggregate counts across CPUs in system-wide mode |
198 | */ | 337 | */ |
@@ -217,12 +356,7 @@ static int read_counter_aggr(struct perf_evsel *counter) | |||
217 | /* | 356 | /* |
218 | * Save the full runtime - to allow normalization during printout: | 357 | * Save the full runtime - to allow normalization during printout: |
219 | */ | 358 | */ |
220 | if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) | 359 | update_shadow_stats(counter, count); |
221 | update_stats(&runtime_nsecs_stats[0], count[0]); | ||
222 | if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) | ||
223 | update_stats(&runtime_cycles_stats[0], count[0]); | ||
224 | if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) | ||
225 | update_stats(&runtime_branches_stats[0], count[0]); | ||
226 | 360 | ||
227 | return 0; | 361 | return 0; |
228 | } | 362 | } |
@@ -242,12 +376,7 @@ static int read_counter(struct perf_evsel *counter) | |||
242 | 376 | ||
243 | count = counter->counts->cpu[cpu].values; | 377 | count = counter->counts->cpu[cpu].values; |
244 | 378 | ||
245 | if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) | 379 | update_shadow_stats(counter, count); |
246 | update_stats(&runtime_nsecs_stats[cpu], count[0]); | ||
247 | if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) | ||
248 | update_stats(&runtime_cycles_stats[cpu], count[0]); | ||
249 | if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) | ||
250 | update_stats(&runtime_branches_stats[cpu], count[0]); | ||
251 | } | 380 | } |
252 | 381 | ||
253 | return 0; | 382 | return 0; |
@@ -315,13 +444,18 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
315 | 444 | ||
316 | list_for_each_entry(counter, &evsel_list->entries, node) { | 445 | list_for_each_entry(counter, &evsel_list->entries, node) { |
317 | if (create_perf_stat_counter(counter) < 0) { | 446 | if (create_perf_stat_counter(counter) < 0) { |
318 | if (errno == -EPERM || errno == -EACCES) { | 447 | if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) { |
448 | if (verbose) | ||
449 | ui__warning("%s event is not supported by the kernel.\n", | ||
450 | event_name(counter)); | ||
451 | continue; | ||
452 | } | ||
453 | |||
454 | if (errno == EPERM || errno == EACCES) { | ||
319 | error("You may not have permission to collect %sstats.\n" | 455 | error("You may not have permission to collect %sstats.\n" |
320 | "\t Consider tweaking" | 456 | "\t Consider tweaking" |
321 | " /proc/sys/kernel/perf_event_paranoid or running as root.", | 457 | " /proc/sys/kernel/perf_event_paranoid or running as root.", |
322 | system_wide ? "system-wide " : ""); | 458 | system_wide ? "system-wide " : ""); |
323 | } else if (errno == ENOENT) { | ||
324 | error("%s event is not supported. ", event_name(counter)); | ||
325 | } else { | 459 | } else { |
326 | error("open_counter returned with %d (%s). " | 460 | error("open_counter returned with %d (%s). " |
327 | "/bin/dmesg may provide additional information.\n", | 461 | "/bin/dmesg may provide additional information.\n", |
@@ -372,6 +506,16 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
372 | return WEXITSTATUS(status); | 506 | return WEXITSTATUS(status); |
373 | } | 507 | } |
374 | 508 | ||
509 | static void print_noise_pct(double total, double avg) | ||
510 | { | ||
511 | double pct = 0.0; | ||
512 | |||
513 | if (avg) | ||
514 | pct = 100.0*total/avg; | ||
515 | |||
516 | fprintf(stderr, " ( +-%6.2f%% )", pct); | ||
517 | } | ||
518 | |||
375 | static void print_noise(struct perf_evsel *evsel, double avg) | 519 | static void print_noise(struct perf_evsel *evsel, double avg) |
376 | { | 520 | { |
377 | struct perf_stat *ps; | 521 | struct perf_stat *ps; |
@@ -380,15 +524,14 @@ static void print_noise(struct perf_evsel *evsel, double avg) | |||
380 | return; | 524 | return; |
381 | 525 | ||
382 | ps = evsel->priv; | 526 | ps = evsel->priv; |
383 | fprintf(stderr, " ( +- %7.3f%% )", | 527 | print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); |
384 | 100 * stddev_stats(&ps->res_stats[0]) / avg); | ||
385 | } | 528 | } |
386 | 529 | ||
387 | static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) | 530 | static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) |
388 | { | 531 | { |
389 | double msecs = avg / 1e6; | 532 | double msecs = avg / 1e6; |
390 | char cpustr[16] = { '\0', }; | 533 | char cpustr[16] = { '\0', }; |
391 | const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-24s"; | 534 | const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s"; |
392 | 535 | ||
393 | if (no_aggr) | 536 | if (no_aggr) |
394 | sprintf(cpustr, "CPU%*d%s", | 537 | sprintf(cpustr, "CPU%*d%s", |
@@ -404,8 +547,191 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) | |||
404 | return; | 547 | return; |
405 | 548 | ||
406 | if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) | 549 | if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) |
407 | fprintf(stderr, " # %10.3f CPUs ", | 550 | fprintf(stderr, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats)); |
408 | avg / avg_stats(&walltime_nsecs_stats)); | 551 | } |
552 | |||
553 | static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg) | ||
554 | { | ||
555 | double total, ratio = 0.0; | ||
556 | const char *color; | ||
557 | |||
558 | total = avg_stats(&runtime_cycles_stats[cpu]); | ||
559 | |||
560 | if (total) | ||
561 | ratio = avg / total * 100.0; | ||
562 | |||
563 | color = PERF_COLOR_NORMAL; | ||
564 | if (ratio > 50.0) | ||
565 | color = PERF_COLOR_RED; | ||
566 | else if (ratio > 30.0) | ||
567 | color = PERF_COLOR_MAGENTA; | ||
568 | else if (ratio > 10.0) | ||
569 | color = PERF_COLOR_YELLOW; | ||
570 | |||
571 | fprintf(stderr, " # "); | ||
572 | color_fprintf(stderr, color, "%6.2f%%", ratio); | ||
573 | fprintf(stderr, " frontend cycles idle "); | ||
574 | } | ||
575 | |||
576 | static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg) | ||
577 | { | ||
578 | double total, ratio = 0.0; | ||
579 | const char *color; | ||
580 | |||
581 | total = avg_stats(&runtime_cycles_stats[cpu]); | ||
582 | |||
583 | if (total) | ||
584 | ratio = avg / total * 100.0; | ||
585 | |||
586 | color = PERF_COLOR_NORMAL; | ||
587 | if (ratio > 75.0) | ||
588 | color = PERF_COLOR_RED; | ||
589 | else if (ratio > 50.0) | ||
590 | color = PERF_COLOR_MAGENTA; | ||
591 | else if (ratio > 20.0) | ||
592 | color = PERF_COLOR_YELLOW; | ||
593 | |||
594 | fprintf(stderr, " # "); | ||
595 | color_fprintf(stderr, color, "%6.2f%%", ratio); | ||
596 | fprintf(stderr, " backend cycles idle "); | ||
597 | } | ||
598 | |||
599 | static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) | ||
600 | { | ||
601 | double total, ratio = 0.0; | ||
602 | const char *color; | ||
603 | |||
604 | total = avg_stats(&runtime_branches_stats[cpu]); | ||
605 | |||
606 | if (total) | ||
607 | ratio = avg / total * 100.0; | ||
608 | |||
609 | color = PERF_COLOR_NORMAL; | ||
610 | if (ratio > 20.0) | ||
611 | color = PERF_COLOR_RED; | ||
612 | else if (ratio > 10.0) | ||
613 | color = PERF_COLOR_MAGENTA; | ||
614 | else if (ratio > 5.0) | ||
615 | color = PERF_COLOR_YELLOW; | ||
616 | |||
617 | fprintf(stderr, " # "); | ||
618 | color_fprintf(stderr, color, "%6.2f%%", ratio); | ||
619 | fprintf(stderr, " of all branches "); | ||
620 | } | ||
621 | |||
622 | static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | ||
623 | { | ||
624 | double total, ratio = 0.0; | ||
625 | const char *color; | ||
626 | |||
627 | total = avg_stats(&runtime_l1_dcache_stats[cpu]); | ||
628 | |||
629 | if (total) | ||
630 | ratio = avg / total * 100.0; | ||
631 | |||
632 | color = PERF_COLOR_NORMAL; | ||
633 | if (ratio > 20.0) | ||
634 | color = PERF_COLOR_RED; | ||
635 | else if (ratio > 10.0) | ||
636 | color = PERF_COLOR_MAGENTA; | ||
637 | else if (ratio > 5.0) | ||
638 | color = PERF_COLOR_YELLOW; | ||
639 | |||
640 | fprintf(stderr, " # "); | ||
641 | color_fprintf(stderr, color, "%6.2f%%", ratio); | ||
642 | fprintf(stderr, " of all L1-dcache hits "); | ||
643 | } | ||
644 | |||
645 | static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | ||
646 | { | ||
647 | double total, ratio = 0.0; | ||
648 | const char *color; | ||
649 | |||
650 | total = avg_stats(&runtime_l1_icache_stats[cpu]); | ||
651 | |||
652 | if (total) | ||
653 | ratio = avg / total * 100.0; | ||
654 | |||
655 | color = PERF_COLOR_NORMAL; | ||
656 | if (ratio > 20.0) | ||
657 | color = PERF_COLOR_RED; | ||
658 | else if (ratio > 10.0) | ||
659 | color = PERF_COLOR_MAGENTA; | ||
660 | else if (ratio > 5.0) | ||
661 | color = PERF_COLOR_YELLOW; | ||
662 | |||
663 | fprintf(stderr, " # "); | ||
664 | color_fprintf(stderr, color, "%6.2f%%", ratio); | ||
665 | fprintf(stderr, " of all L1-icache hits "); | ||
666 | } | ||
667 | |||
668 | static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | ||
669 | { | ||
670 | double total, ratio = 0.0; | ||
671 | const char *color; | ||
672 | |||
673 | total = avg_stats(&runtime_dtlb_cache_stats[cpu]); | ||
674 | |||
675 | if (total) | ||
676 | ratio = avg / total * 100.0; | ||
677 | |||
678 | color = PERF_COLOR_NORMAL; | ||
679 | if (ratio > 20.0) | ||
680 | color = PERF_COLOR_RED; | ||
681 | else if (ratio > 10.0) | ||
682 | color = PERF_COLOR_MAGENTA; | ||
683 | else if (ratio > 5.0) | ||
684 | color = PERF_COLOR_YELLOW; | ||
685 | |||
686 | fprintf(stderr, " # "); | ||
687 | color_fprintf(stderr, color, "%6.2f%%", ratio); | ||
688 | fprintf(stderr, " of all dTLB cache hits "); | ||
689 | } | ||
690 | |||
691 | static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | ||
692 | { | ||
693 | double total, ratio = 0.0; | ||
694 | const char *color; | ||
695 | |||
696 | total = avg_stats(&runtime_itlb_cache_stats[cpu]); | ||
697 | |||
698 | if (total) | ||
699 | ratio = avg / total * 100.0; | ||
700 | |||
701 | color = PERF_COLOR_NORMAL; | ||
702 | if (ratio > 20.0) | ||
703 | color = PERF_COLOR_RED; | ||
704 | else if (ratio > 10.0) | ||
705 | color = PERF_COLOR_MAGENTA; | ||
706 | else if (ratio > 5.0) | ||
707 | color = PERF_COLOR_YELLOW; | ||
708 | |||
709 | fprintf(stderr, " # "); | ||
710 | color_fprintf(stderr, color, "%6.2f%%", ratio); | ||
711 | fprintf(stderr, " of all iTLB cache hits "); | ||
712 | } | ||
713 | |||
714 | static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) | ||
715 | { | ||
716 | double total, ratio = 0.0; | ||
717 | const char *color; | ||
718 | |||
719 | total = avg_stats(&runtime_ll_cache_stats[cpu]); | ||
720 | |||
721 | if (total) | ||
722 | ratio = avg / total * 100.0; | ||
723 | |||
724 | color = PERF_COLOR_NORMAL; | ||
725 | if (ratio > 20.0) | ||
726 | color = PERF_COLOR_RED; | ||
727 | else if (ratio > 10.0) | ||
728 | color = PERF_COLOR_MAGENTA; | ||
729 | else if (ratio > 5.0) | ||
730 | color = PERF_COLOR_YELLOW; | ||
731 | |||
732 | fprintf(stderr, " # "); | ||
733 | color_fprintf(stderr, color, "%6.2f%%", ratio); | ||
734 | fprintf(stderr, " of all LL-cache hits "); | ||
409 | } | 735 | } |
410 | 736 | ||
411 | static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) | 737 | static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) |
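The cache- and branch-miss printers above all repeat the same 5/10/20% colour ladder, while the two stalled-cycles printers use higher cut-offs (10/30/50 for the frontend, 20/50/75 for the backend). One possible consolidation, purely as a sketch; the patch itself keeps the per-event copies:

#include <stdio.h>

/* Invented helper; perf keeps a per-event copy of this ladder in each printer. */
static const char *ratio_color(double ratio, double red, double magenta, double yellow)
{
	if (ratio > red)
		return "red";
	if (ratio > magenta)
		return "magenta";
	if (ratio > yellow)
		return "yellow";
	return "normal";
}

int main(void)
{
	double miss_pct = 6.7;	/* e.g. L1-dcache misses as a % of all L1-dcache hits */

	/* Cache and branch-miss printers pass 20/10/5; frontend stalls would pass
	 * 50/30/10 and backend stalls 75/50/20, as in the hunk above. */
	printf("%6.2f%% -> %s\n", miss_pct, ratio_color(miss_pct, 20.0, 10.0, 5.0));
	return 0;
}
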
@@ -417,9 +743,9 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) | |||
417 | if (csv_output) | 743 | if (csv_output) |
418 | fmt = "%s%.0f%s%s"; | 744 | fmt = "%s%.0f%s%s"; |
419 | else if (big_num) | 745 | else if (big_num) |
420 | fmt = "%s%'18.0f%s%-24s"; | 746 | fmt = "%s%'18.0f%s%-25s"; |
421 | else | 747 | else |
422 | fmt = "%s%18.0f%s%-24s"; | 748 | fmt = "%s%18.0f%s%-25s"; |
423 | 749 | ||
424 | if (no_aggr) | 750 | if (no_aggr) |
425 | sprintf(cpustr, "CPU%*d%s", | 751 | sprintf(cpustr, "CPU%*d%s", |
@@ -442,23 +768,83 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) | |||
442 | if (total) | 768 | if (total) |
443 | ratio = avg / total; | 769 | ratio = avg / total; |
444 | 770 | ||
445 | fprintf(stderr, " # %10.3f IPC ", ratio); | 771 | fprintf(stderr, " # %5.2f insns per cycle ", ratio); |
772 | |||
773 | total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]); | ||
774 | total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu])); | ||
775 | |||
776 | if (total && avg) { | ||
777 | ratio = total / avg; | ||
778 | fprintf(stderr, "\n # %5.2f stalled cycles per insn", ratio); | ||
779 | } | ||
780 | |||
446 | } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && | 781 | } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && |
447 | runtime_branches_stats[cpu].n != 0) { | 782 | runtime_branches_stats[cpu].n != 0) { |
448 | total = avg_stats(&runtime_branches_stats[cpu]); | 783 | print_branch_misses(cpu, evsel, avg); |
784 | } else if ( | ||
785 | evsel->attr.type == PERF_TYPE_HW_CACHE && | ||
786 | evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D | | ||
787 | ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | | ||
788 | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && | ||
789 | runtime_l1_dcache_stats[cpu].n != 0) { | ||
790 | print_l1_dcache_misses(cpu, evsel, avg); | ||
791 | } else if ( | ||
792 | evsel->attr.type == PERF_TYPE_HW_CACHE && | ||
793 | evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I | | ||
794 | ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | | ||
795 | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && | ||
796 | runtime_l1_icache_stats[cpu].n != 0) { | ||
797 | print_l1_icache_misses(cpu, evsel, avg); | ||
798 | } else if ( | ||
799 | evsel->attr.type == PERF_TYPE_HW_CACHE && | ||
800 | evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB | | ||
801 | ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | | ||
802 | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && | ||
803 | runtime_dtlb_cache_stats[cpu].n != 0) { | ||
804 | print_dtlb_cache_misses(cpu, evsel, avg); | ||
805 | } else if ( | ||
806 | evsel->attr.type == PERF_TYPE_HW_CACHE && | ||
807 | evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB | | ||
808 | ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | | ||
809 | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && | ||
810 | runtime_itlb_cache_stats[cpu].n != 0) { | ||
811 | print_itlb_cache_misses(cpu, evsel, avg); | ||
812 | } else if ( | ||
813 | evsel->attr.type == PERF_TYPE_HW_CACHE && | ||
814 | evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL | | ||
815 | ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | | ||
816 | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && | ||
817 | runtime_ll_cache_stats[cpu].n != 0) { | ||
818 | print_ll_cache_misses(cpu, evsel, avg); | ||
819 | } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && | ||
820 | runtime_cacherefs_stats[cpu].n != 0) { | ||
821 | total = avg_stats(&runtime_cacherefs_stats[cpu]); | ||
449 | 822 | ||
450 | if (total) | 823 | if (total) |
451 | ratio = avg * 100 / total; | 824 | ratio = avg * 100 / total; |
452 | 825 | ||
453 | fprintf(stderr, " # %10.3f %% ", ratio); | 826 | fprintf(stderr, " # %8.3f %% of all cache refs ", ratio); |
827 | |||
828 | } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) { | ||
829 | print_stalled_cycles_frontend(cpu, evsel, avg); | ||
830 | } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { | ||
831 | print_stalled_cycles_backend(cpu, evsel, avg); | ||
832 | } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { | ||
833 | total = avg_stats(&runtime_nsecs_stats[cpu]); | ||
454 | 834 | ||
835 | if (total) | ||
836 | ratio = 1.0 * avg / total; | ||
837 | |||
838 | fprintf(stderr, " # %8.3f GHz ", ratio); | ||
455 | } else if (runtime_nsecs_stats[cpu].n != 0) { | 839 | } else if (runtime_nsecs_stats[cpu].n != 0) { |
456 | total = avg_stats(&runtime_nsecs_stats[cpu]); | 840 | total = avg_stats(&runtime_nsecs_stats[cpu]); |
457 | 841 | ||
458 | if (total) | 842 | if (total) |
459 | ratio = 1000.0 * avg / total; | 843 | ratio = 1000.0 * avg / total; |
460 | 844 | ||
461 | fprintf(stderr, " # %10.3f M/sec", ratio); | 845 | fprintf(stderr, " # %8.3f M/sec ", ratio); |
846 | } else { | ||
847 | fprintf(stderr, " "); | ||
462 | } | 848 | } |
463 | } | 849 | } |
464 | 850 | ||
@@ -505,8 +891,7 @@ static void print_counter_aggr(struct perf_evsel *counter) | |||
505 | avg_enabled = avg_stats(&ps->res_stats[1]); | 891 | avg_enabled = avg_stats(&ps->res_stats[1]); |
506 | avg_running = avg_stats(&ps->res_stats[2]); | 892 | avg_running = avg_stats(&ps->res_stats[2]); |
507 | 893 | ||
508 | fprintf(stderr, " (scaled from %.2f%%)", | 894 | fprintf(stderr, " [%5.2f%%]", 100 * avg_running / avg_enabled); |
509 | 100 * avg_running / avg_enabled); | ||
510 | } | 895 | } |
511 | fprintf(stderr, "\n"); | 896 | fprintf(stderr, "\n"); |
512 | } | 897 | } |
@@ -548,10 +933,8 @@ static void print_counter(struct perf_evsel *counter) | |||
548 | if (!csv_output) { | 933 | if (!csv_output) { |
549 | print_noise(counter, 1.0); | 934 | print_noise(counter, 1.0); |
550 | 935 | ||
551 | if (run != ena) { | 936 | if (run != ena) |
552 | fprintf(stderr, " (scaled from %.2f%%)", | 937 | fprintf(stderr, " (%.2f%%)", 100.0 * run / ena); |
553 | 100.0 * run / ena); | ||
554 | } | ||
555 | } | 938 | } |
556 | fputc('\n', stderr); | 939 | fputc('\n', stderr); |
557 | } | 940 | } |
@@ -591,13 +974,14 @@ static void print_stat(int argc, const char **argv) | |||
591 | } | 974 | } |
592 | 975 | ||
593 | if (!csv_output) { | 976 | if (!csv_output) { |
594 | fprintf(stderr, "\n"); | 977 | if (!null_run) |
595 | fprintf(stderr, " %18.9f seconds time elapsed", | 978 | fprintf(stderr, "\n"); |
979 | fprintf(stderr, " %17.9f seconds time elapsed", | ||
596 | avg_stats(&walltime_nsecs_stats)/1e9); | 980 | avg_stats(&walltime_nsecs_stats)/1e9); |
597 | if (run_count > 1) { | 981 | if (run_count > 1) { |
598 | fprintf(stderr, " ( +- %7.3f%% )", | 982 | fprintf(stderr, " "); |
599 | 100*stddev_stats(&walltime_nsecs_stats) / | 983 | print_noise_pct(stddev_stats(&walltime_nsecs_stats), |
600 | avg_stats(&walltime_nsecs_stats)); | 984 | avg_stats(&walltime_nsecs_stats)); |
601 | } | 985 | } |
602 | fprintf(stderr, "\n\n"); | 986 | fprintf(stderr, "\n\n"); |
603 | } | 987 | } |
@@ -659,6 +1043,10 @@ static const struct option options[] = { | |||
659 | "repeat command and print average + stddev (max: 100)"), | 1043 | "repeat command and print average + stddev (max: 100)"), |
660 | OPT_BOOLEAN('n', "null", &null_run, | 1044 | OPT_BOOLEAN('n', "null", &null_run, |
661 | "null run - dont start any counters"), | 1045 | "null run - dont start any counters"), |
1046 | OPT_INCR('d', "detailed", &detailed_run, | ||
1047 | "detailed run - start a lot of events"), | ||
1048 | OPT_BOOLEAN('S', "sync", &sync_run, | ||
1049 | "call sync() before starting a run"), | ||
662 | OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, | 1050 | OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, |
663 | "print large numbers with thousands\' separators", | 1051 | "print large numbers with thousands\' separators", |
664 | stat__set_big_num), | 1052 | stat__set_big_num), |
@@ -674,6 +1062,70 @@ static const struct option options[] = { | |||
674 | OPT_END() | 1062 | OPT_END() |
675 | }; | 1063 | }; |
676 | 1064 | ||
1065 | /* | ||
1066 | * Add default attributes, if there were no attributes specified or | ||
1067 | * if -d/--detailed, -d -d or -d -d -d is used: | ||
1068 | */ | ||
1069 | static int add_default_attributes(void) | ||
1070 | { | ||
1071 | struct perf_evsel *pos; | ||
1072 | size_t attr_nr = 0; | ||
1073 | size_t c; | ||
1074 | |||
1075 | /* Set attrs if no event is selected and !null_run: */ | ||
1076 | if (null_run) | ||
1077 | return 0; | ||
1078 | |||
1079 | if (!evsel_list->nr_entries) { | ||
1080 | for (c = 0; c < ARRAY_SIZE(default_attrs); c++) { | ||
1081 | pos = perf_evsel__new(default_attrs + c, c + attr_nr); | ||
1082 | if (pos == NULL) | ||
1083 | return -1; | ||
1084 | perf_evlist__add(evsel_list, pos); | ||
1085 | } | ||
1086 | attr_nr += c; | ||
1087 | } | ||
1088 | |||
1089 | /* Detailed events get appended to the event list: */ | ||
1090 | |||
1091 | if (detailed_run < 1) | ||
1092 | return 0; | ||
1093 | |||
1094 | /* Append detailed run extra attributes: */ | ||
1095 | for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) { | ||
1096 | pos = perf_evsel__new(detailed_attrs + c, c + attr_nr); | ||
1097 | if (pos == NULL) | ||
1098 | return -1; | ||
1099 | perf_evlist__add(evsel_list, pos); | ||
1100 | } | ||
1101 | attr_nr += c; | ||
1102 | |||
1103 | if (detailed_run < 2) | ||
1104 | return 0; | ||
1105 | |||
1106 | /* Append very detailed run extra attributes: */ | ||
1107 | for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) { | ||
1108 | pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr); | ||
1109 | if (pos == NULL) | ||
1110 | return -1; | ||
1111 | perf_evlist__add(evsel_list, pos); | ||
1112 | } | ||
1113 | |||
1114 | if (detailed_run < 3) | ||
1115 | return 0; | ||
1116 | |||
1117 | /* Append very, very detailed run extra attributes: */ | ||
1118 | for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) { | ||
1119 | pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr); | ||
1120 | if (pos == NULL) | ||
1121 | return -1; | ||
1122 | perf_evlist__add(evsel_list, pos); | ||
1123 | } | ||
1124 | |||
1125 | |||
1126 | return 0; | ||
1127 | } | ||
1128 | |||
677 | int cmd_stat(int argc, const char **argv, const char *prefix __used) | 1129 | int cmd_stat(int argc, const char **argv, const char *prefix __used) |
678 | { | 1130 | { |
679 | struct perf_evsel *pos; | 1131 | struct perf_evsel *pos; |
@@ -719,17 +1171,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
719 | usage_with_options(stat_usage, options); | 1171 | usage_with_options(stat_usage, options); |
720 | } | 1172 | } |
721 | 1173 | ||
722 | /* Set attrs and nr_counters if no event is selected and !null_run */ | 1174 | if (add_default_attributes()) |
723 | if (!null_run && !evsel_list->nr_entries) { | 1175 | goto out; |
724 | size_t c; | ||
725 | |||
726 | for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) { | ||
727 | pos = perf_evsel__new(&default_attrs[c], c); | ||
728 | if (pos == NULL) | ||
729 | goto out; | ||
730 | perf_evlist__add(evsel_list, pos); | ||
731 | } | ||
732 | } | ||
733 | 1176 | ||
734 | if (target_pid != -1) | 1177 | if (target_pid != -1) |
735 | target_tid = target_pid; | 1178 | target_tid = target_pid; |
@@ -773,6 +1216,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
773 | for (run_idx = 0; run_idx < run_count; run_idx++) { | 1216 | for (run_idx = 0; run_idx < run_count; run_idx++) { |
774 | if (run_count != 1 && verbose) | 1217 | if (run_count != 1 && verbose) |
775 | fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1); | 1218 | fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1); |
1219 | |||
1220 | if (sync_run) | ||
1221 | sync(); | ||
1222 | |||
776 | status = run_perf_stat(argc, argv); | 1223 | status = run_perf_stat(argc, argv); |
777 | } | 1224 | } |
778 | 1225 | ||
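The new -S/--sync option simply calls sync() before each repetition, presumably so that writeback triggered by earlier activity does not skew the measured run. A rough, hypothetical illustration of timing a workload after such a flush (the workload argument and the timing loop are invented for the example):

#include <unistd.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	struct timespec t0, t1;

	/* Flush dirty file data first, which is what perf stat -S does per run. */
	sync();

	clock_gettime(CLOCK_MONOTONIC, &t0);
	if (argc > 1)
		system(argv[1]);	/* workload under test (illustrative only) */
	clock_gettime(CLOCK_MONOTONIC, &t1);

	printf("%.9f seconds time elapsed\n",
	       (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9);
	return 0;
}
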
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 11e3c8458362..2f9a337b182f 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c | |||
@@ -549,7 +549,7 @@ static int test__basic_mmap(void) | |||
549 | ++foo; | 549 | ++foo; |
550 | } | 550 | } |
551 | 551 | ||
552 | while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) { | 552 | while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { |
553 | struct perf_sample sample; | 553 | struct perf_sample sample; |
554 | 554 | ||
555 | if (event->header.type != PERF_RECORD_SAMPLE) { | 555 | if (event->header.type != PERF_RECORD_SAMPLE) { |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 7e3d6e310bf8..ebfc7cf5f63b 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -801,12 +801,12 @@ static void perf_event__process_sample(const union perf_event *event, | |||
801 | } | 801 | } |
802 | } | 802 | } |
803 | 803 | ||
804 | static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu) | 804 | static void perf_session__mmap_read_idx(struct perf_session *self, int idx) |
805 | { | 805 | { |
806 | struct perf_sample sample; | 806 | struct perf_sample sample; |
807 | union perf_event *event; | 807 | union perf_event *event; |
808 | 808 | ||
809 | while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) { | 809 | while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) { |
810 | perf_session__parse_sample(self, event, &sample); | 810 | perf_session__parse_sample(self, event, &sample); |
811 | 811 | ||
812 | if (event->header.type == PERF_RECORD_SAMPLE) | 812 | if (event->header.type == PERF_RECORD_SAMPLE) |
@@ -820,8 +820,8 @@ static void perf_session__mmap_read(struct perf_session *self) | |||
820 | { | 820 | { |
821 | int i; | 821 | int i; |
822 | 822 | ||
823 | for (i = 0; i < top.evlist->cpus->nr; i++) | 823 | for (i = 0; i < top.evlist->nr_mmaps; i++) |
824 | perf_session__mmap_read_cpu(self, i); | 824 | perf_session__mmap_read_idx(self, i); |
825 | } | 825 | } |
826 | 826 | ||
827 | static void start_counters(struct perf_evlist *evlist) | 827 | static void start_counters(struct perf_evlist *evlist) |
diff --git a/tools/perf/feature-tests.mak b/tools/perf/config/feature-tests.mak index b041ca67a2cb..6170fd2531b5 100644 --- a/tools/perf/feature-tests.mak +++ b/tools/perf/config/feature-tests.mak | |||
@@ -79,9 +79,15 @@ endef | |||
79 | endif | 79 | endif |
80 | 80 | ||
81 | ifndef NO_LIBPYTHON | 81 | ifndef NO_LIBPYTHON |
82 | define SOURCE_PYTHON_VERSION | ||
83 | #include <Python.h> | ||
84 | #if PY_VERSION_HEX >= 0x03000000 | ||
85 | #error | ||
86 | #endif | ||
87 | int main(void){} | ||
88 | endef | ||
82 | define SOURCE_PYTHON_EMBED | 89 | define SOURCE_PYTHON_EMBED |
83 | #include <Python.h> | 90 | #include <Python.h> |
84 | |||
85 | int main(void) | 91 | int main(void) |
86 | { | 92 | { |
87 | Py_Initialize(); | 93 | Py_Initialize(); |
@@ -120,11 +126,3 @@ int main(void) | |||
120 | return 0; | 126 | return 0; |
121 | } | 127 | } |
122 | endef | 128 | endef |
123 | |||
124 | # try-cc | ||
125 | # Usage: option = $(call try-cc, source-to-build, cc-options) | ||
126 | try-cc = $(shell sh -c \ | ||
127 | 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \ | ||
128 | echo "$(1)" | \ | ||
129 | $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ | ||
130 | rm -f "$$TMP"') | ||
diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak new file mode 100644 index 000000000000..8046182a19eb --- /dev/null +++ b/tools/perf/config/utilities.mak | |||
@@ -0,0 +1,188 @@ | |||
1 | # This allows us to work with the newline character: | ||
2 | define newline | ||
3 | |||
4 | |||
5 | endef | ||
6 | newline := $(newline) | ||
7 | |||
8 | # nl-escape | ||
9 | # | ||
10 | # Usage: escape = $(call nl-escape[,escape]) | ||
11 | # | ||
12 | # This is used as the common way to specify | ||
13 | # what should replace a newline when escaping | ||
14 | # newlines; the default is a bizarre string. | ||
15 | # | ||
16 | nl-escape = $(or $(1),m822df3020w6a44id34bt574ctac44eb9f4n) | ||
17 | |||
18 | # escape-nl | ||
19 | # | ||
20 | # Usage: escaped-text = $(call escape-nl,text[,escape]) | ||
21 | # | ||
22 | # GNU make's $(shell ...) function converts to a | ||
23 | # single space each newline character in the output | ||
24 | # produced during the expansion; this may not be | ||
25 | # desirable. | ||
26 | # | ||
27 | # The only solution is to change each newline into | ||
28 | # something that won't be converted, so that the | ||
29 | # information can be recovered later with | ||
30 | # $(call unescape-nl...) | ||
31 | # | ||
32 | escape-nl = $(subst $(newline),$(call nl-escape,$(2)),$(1)) | ||
33 | |||
34 | # unescape-nl | ||
35 | # | ||
36 | # Usage: text = $(call unescape-nl,escaped-text[,escape]) | ||
37 | # | ||
38 | # See escape-nl. | ||
39 | # | ||
40 | unescape-nl = $(subst $(call nl-escape,$(2)),$(newline),$(1)) | ||
41 | |||
42 | # shell-escape-nl | ||
43 | # | ||
44 | # Usage: $(shell some-command | $(call shell-escape-nl[,escape])) | ||
45 | # | ||
46 | # Use this to escape newlines from within a shell call; | ||
47 | # the default escape is a bizarre string. | ||
48 | # | ||
49 | # NOTE: The escape is used directly as a string constant | ||
50 | # in an `awk' program that is delimited by shell | ||
51 | # single-quotes, so be wary of the characters | ||
52 | # that are chosen. | ||
53 | # | ||
54 | define shell-escape-nl | ||
55 | awk 'NR==1 {t=$$0} NR>1 {t=t "$(nl-escape)" $$0} END {printf t}' | ||
56 | endef | ||
57 | |||
58 | # shell-unescape-nl | ||
59 | # | ||
60 | # Usage: $(shell some-command | $(call shell-unescape-nl[,escape])) | ||
61 | # | ||
62 | # Use this to unescape newlines from within a shell call; | ||
63 | # the default escape is a bizarre string. | ||
64 | # | ||
65 | # NOTE: The escape is used directly as an extended regular | ||
66 | # expression constant in an `awk' program that is | ||
67 | # delimited by shell single-quotes, so be wary | ||
68 | # of the characters that are chosen. | ||
69 | # | ||
70 | # (The bash shell has a bug where `{gsub(...),...}' is | ||
71 | # misinterpreted as a brace expansion; this can be | ||
72 | # overcome by putting a space between `{' and `gsub'). | ||
73 | # | ||
74 | define shell-unescape-nl | ||
75 | awk 'NR==1 {t=$$0} NR>1 {t=t "\n" $$0} END { gsub(/$(nl-escape)/,"\n",t); printf t }' | ||
76 | endef | ||
77 | |||
78 | # escape-for-shell-sq | ||
79 | # | ||
80 | # Usage: embeddable-text = $(call escape-for-shell-sq,text) | ||
81 | # | ||
82 | # This function produces text that is suitable for | ||
83 | # embedding in a shell string that is delimited by | ||
84 | # single-quotes. | ||
85 | # | ||
86 | escape-for-shell-sq = $(subst ','\'',$(1)) | ||
87 | |||
88 | # shell-sq | ||
89 | # | ||
90 | # Usage: single-quoted-and-escaped-text = $(call shell-sq,text) | ||
91 | # | ||
92 | shell-sq = '$(escape-for-shell-sq)' | ||
93 | |||
94 | # shell-wordify | ||
95 | # | ||
96 | # Usage: wordified-text = $(call shell-wordify,text) | ||
97 | # | ||
98 | # For instance: | ||
99 | # | ||
100 | # |define text | ||
101 | # |hello | ||
102 | # |world | ||
103 | # |endef | ||
104 | # | | ||
105 | # |target: | ||
106 | # | echo $(call shell-wordify,$(text)) | ||
107 | # | ||
108 | # At least GNU make gets confused by expanding a newline | ||
109 | # within the context of a command line of a makefile rule | ||
110 | # (this is in contrast to a `$(shell ...)' function call, | ||
111 | # which can handle it just fine). | ||
112 | # | ||
113 | # This function avoids the problem by producing a string | ||
114 | # that works as a shell word, regardless of whether or | ||
115 | # not it contains a newline. | ||
116 | # | ||
117 | # If the text to be wordified contains a newline, then | ||
118 | # an intricate shell command substitution is constructed | ||
119 | # to render the text as a single line; when the shell | ||
120 | # processes the resulting escaped text, it transforms | ||
121 | # it into the original unescaped text. | ||
122 | # | ||
123 | # If the text does not contain a newline, then this function | ||
124 | # produces the same results as the `$(shell-sq)' function. | ||
125 | # | ||
126 | shell-wordify = $(if $(findstring $(newline),$(1)),$(_sw-esc-nl),$(shell-sq)) | ||
127 | define _sw-esc-nl | ||
128 | "$$(echo $(call escape-nl,$(shell-sq),$(2)) | $(call shell-unescape-nl,$(2)))" | ||
129 | endef | ||
130 | |||
131 | # is-absolute | ||
132 | # | ||
133 | # Usage: bool-value = $(call is-absolute,path) | ||
134 | # | ||
135 | is-absolute = $(shell echo $(shell-sq) | grep ^/ -q && echo y) | ||
136 | |||
137 | # lookup | ||
138 | # | ||
139 | # Usage: absolute-executable-path-or-empty = $(call lookup,path) | ||
140 | # | ||
141 | # (It's necessary to use `sh -c' because GNU make messes up by | ||
142 | # trying too hard and getting things wrong). | ||
143 | # | ||
144 | lookup = $(call unescape-nl,$(shell sh -c $(_l-sh))) | ||
145 | _l-sh = $(call shell-sq,command -v $(shell-sq) | $(call shell-escape-nl,)) | ||
146 | |||
147 | # is-executable | ||
148 | # | ||
149 | # Usage: bool-value = $(call is-executable,path) | ||
150 | # | ||
151 | # (It's necessary to use `sh -c' because GNU make messes up by | ||
152 | # trying too hard and getting things wrong). | ||
153 | # | ||
154 | is-executable = $(call _is-executable-helper,$(shell-sq)) | ||
155 | _is-executable-helper = $(shell sh -c $(_is-executable-sh)) | ||
156 | _is-executable-sh = $(call shell-sq,test -f $(1) -a -x $(1) && echo y) | ||
157 | |||
158 | # get-executable | ||
159 | # | ||
160 | # Usage: absolute-executable-path-or-empty = $(call get-executable,path) | ||
161 | # | ||
162 | # The goal is to get an absolute path for an executable; | ||
163 | # the `command -v' is defined by POSIX, but it's not | ||
164 | # necessarily very portable, so it's only used if | ||
165 | # relative path resolution is requested, as determined | ||
166 | # by the presence of a leading `/'. | ||
167 | # | ||
168 | get-executable = $(if $(1),$(if $(is-absolute),$(_ge-abspath),$(lookup))) | ||
169 | _ge-abspath = $(if $(is-executable),$(1)) | ||
170 | |||
171 | # get-supplied-or-default-executable | ||
172 | # | ||
173 | # Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default) | ||
174 | # | ||
175 | define get-executable-or-default | ||
176 | $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2))) | ||
177 | endef | ||
178 | _ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2))) | ||
179 | _gea_warn = $(warning The path '$(1)' is not executable.) | ||
180 | _gea_err = $(if $(1),$(error Please set '$(1)' appropriately)) | ||
181 | |||
182 | # try-cc | ||
183 | # Usage: option = $(call try-cc, source-to-build, cc-options) | ||
184 | try-cc = $(shell sh -c \ | ||
185 | 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \ | ||
186 | echo "$(1)" | \ | ||
187 | $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ | ||
188 | rm -f "$$TMP"') | ||
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 45da8d186b49..23eb22b05d27 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -166,11 +166,11 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) | |||
166 | return NULL; | 166 | return NULL; |
167 | } | 167 | } |
168 | 168 | ||
169 | union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) | 169 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) |
170 | { | 170 | { |
171 | /* XXX Move this to perf.c, making it generally available */ | 171 | /* XXX Move this to perf.c, making it generally available */ |
172 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); | 172 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); |
173 | struct perf_mmap *md = &evlist->mmap[cpu]; | 173 | struct perf_mmap *md = &evlist->mmap[idx]; |
174 | unsigned int head = perf_mmap__read_head(md); | 174 | unsigned int head = perf_mmap__read_head(md); |
175 | unsigned int old = md->prev; | 175 | unsigned int old = md->prev; |
176 | unsigned char *data = md->base + page_size; | 176 | unsigned char *data = md->base + page_size; |
@@ -235,31 +235,37 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) | |||
235 | 235 | ||
236 | void perf_evlist__munmap(struct perf_evlist *evlist) | 236 | void perf_evlist__munmap(struct perf_evlist *evlist) |
237 | { | 237 | { |
238 | int cpu; | 238 | int i; |
239 | 239 | ||
240 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | 240 | for (i = 0; i < evlist->nr_mmaps; i++) { |
241 | if (evlist->mmap[cpu].base != NULL) { | 241 | if (evlist->mmap[i].base != NULL) { |
242 | munmap(evlist->mmap[cpu].base, evlist->mmap_len); | 242 | munmap(evlist->mmap[i].base, evlist->mmap_len); |
243 | evlist->mmap[cpu].base = NULL; | 243 | evlist->mmap[i].base = NULL; |
244 | } | 244 | } |
245 | } | 245 | } |
246 | |||
247 | free(evlist->mmap); | ||
248 | evlist->mmap = NULL; | ||
246 | } | 249 | } |
247 | 250 | ||
248 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist) | 251 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist) |
249 | { | 252 | { |
250 | evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap)); | 253 | evlist->nr_mmaps = evlist->cpus->nr; |
254 | if (evlist->cpus->map[0] == -1) | ||
255 | evlist->nr_mmaps = evlist->threads->nr; | ||
256 | evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); | ||
251 | return evlist->mmap != NULL ? 0 : -ENOMEM; | 257 | return evlist->mmap != NULL ? 0 : -ENOMEM; |
252 | } | 258 | } |
253 | 259 | ||
254 | static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel, | 260 | static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel, |
255 | int cpu, int prot, int mask, int fd) | 261 | int idx, int prot, int mask, int fd) |
256 | { | 262 | { |
257 | evlist->mmap[cpu].prev = 0; | 263 | evlist->mmap[idx].prev = 0; |
258 | evlist->mmap[cpu].mask = mask; | 264 | evlist->mmap[idx].mask = mask; |
259 | evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot, | 265 | evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, |
260 | MAP_SHARED, fd, 0); | 266 | MAP_SHARED, fd, 0); |
261 | if (evlist->mmap[cpu].base == MAP_FAILED) { | 267 | if (evlist->mmap[idx].base == MAP_FAILED) { |
262 | if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit) | 268 | if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit) |
263 | ui__warning("Inherit is not allowed on per-task " | 269 | ui__warning("Inherit is not allowed on per-task " |
264 | "events using mmap.\n"); | 270 | "events using mmap.\n"); |
265 | return -1; | 271 | return -1; |
@@ -269,6 +275,86 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev | |||
269 | return 0; | 275 | return 0; |
270 | } | 276 | } |
271 | 277 | ||
278 | static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask) | ||
279 | { | ||
280 | struct perf_evsel *evsel; | ||
281 | int cpu, thread; | ||
282 | |||
283 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | ||
284 | int output = -1; | ||
285 | |||
286 | for (thread = 0; thread < evlist->threads->nr; thread++) { | ||
287 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
288 | int fd = FD(evsel, cpu, thread); | ||
289 | |||
290 | if (output == -1) { | ||
291 | output = fd; | ||
292 | if (__perf_evlist__mmap(evlist, evsel, cpu, | ||
293 | prot, mask, output) < 0) | ||
294 | goto out_unmap; | ||
295 | } else { | ||
296 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) | ||
297 | goto out_unmap; | ||
298 | } | ||
299 | |||
300 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
301 | perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) | ||
302 | goto out_unmap; | ||
303 | } | ||
304 | } | ||
305 | } | ||
306 | |||
307 | return 0; | ||
308 | |||
309 | out_unmap: | ||
310 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | ||
311 | if (evlist->mmap[cpu].base != NULL) { | ||
312 | munmap(evlist->mmap[cpu].base, evlist->mmap_len); | ||
313 | evlist->mmap[cpu].base = NULL; | ||
314 | } | ||
315 | } | ||
316 | return -1; | ||
317 | } | ||
318 | |||
319 | static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask) | ||
320 | { | ||
321 | struct perf_evsel *evsel; | ||
322 | int thread; | ||
323 | |||
324 | for (thread = 0; thread < evlist->threads->nr; thread++) { | ||
325 | int output = -1; | ||
326 | |||
327 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
328 | int fd = FD(evsel, 0, thread); | ||
329 | |||
330 | if (output == -1) { | ||
331 | output = fd; | ||
332 | if (__perf_evlist__mmap(evlist, evsel, thread, | ||
333 | prot, mask, output) < 0) | ||
334 | goto out_unmap; | ||
335 | } else { | ||
336 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) | ||
337 | goto out_unmap; | ||
338 | } | ||
339 | |||
340 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
341 | perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0) | ||
342 | goto out_unmap; | ||
343 | } | ||
344 | } | ||
345 | |||
346 | return 0; | ||
347 | |||
348 | out_unmap: | ||
349 | for (thread = 0; thread < evlist->threads->nr; thread++) { | ||
350 | if (evlist->mmap[thread].base != NULL) { | ||
351 | munmap(evlist->mmap[thread].base, evlist->mmap_len); | ||
352 | evlist->mmap[thread].base = NULL; | ||
353 | } | ||
354 | } | ||
355 | return -1; | ||
356 | } | ||
357 | |||
272 | /** perf_evlist__mmap - Create per cpu maps to receive events | 358 | /** perf_evlist__mmap - Create per cpu maps to receive events |
273 | * | 359 | * |
274 | * @evlist - list of events | 360 | * @evlist - list of events |
@@ -287,11 +373,11 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev | |||
287 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) | 373 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) |
288 | { | 374 | { |
289 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); | 375 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); |
290 | int mask = pages * page_size - 1, cpu; | 376 | int mask = pages * page_size - 1; |
291 | struct perf_evsel *first_evsel, *evsel; | 377 | struct perf_evsel *evsel; |
292 | const struct cpu_map *cpus = evlist->cpus; | 378 | const struct cpu_map *cpus = evlist->cpus; |
293 | const struct thread_map *threads = evlist->threads; | 379 | const struct thread_map *threads = evlist->threads; |
294 | int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); | 380 | int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); |
295 | 381 | ||
296 | if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) | 382 | if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) |
297 | return -ENOMEM; | 383 | return -ENOMEM; |
@@ -301,43 +387,18 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) | |||
301 | 387 | ||
302 | evlist->overwrite = overwrite; | 388 | evlist->overwrite = overwrite; |
303 | evlist->mmap_len = (pages + 1) * page_size; | 389 | evlist->mmap_len = (pages + 1) * page_size; |
304 | first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
305 | 390 | ||
306 | list_for_each_entry(evsel, &evlist->entries, node) { | 391 | list_for_each_entry(evsel, &evlist->entries, node) { |
307 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | 392 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && |
308 | evsel->sample_id == NULL && | 393 | evsel->sample_id == NULL && |
309 | perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) | 394 | perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) |
310 | return -ENOMEM; | 395 | return -ENOMEM; |
311 | |||
312 | for (cpu = 0; cpu < cpus->nr; cpu++) { | ||
313 | for (thread = 0; thread < threads->nr; thread++) { | ||
314 | int fd = FD(evsel, cpu, thread); | ||
315 | |||
316 | if (evsel->idx || thread) { | ||
317 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, | ||
318 | FD(first_evsel, cpu, 0)) != 0) | ||
319 | goto out_unmap; | ||
320 | } else if (__perf_evlist__mmap(evlist, evsel, cpu, | ||
321 | prot, mask, fd) < 0) | ||
322 | goto out_unmap; | ||
323 | |||
324 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
325 | perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) | ||
326 | goto out_unmap; | ||
327 | } | ||
328 | } | ||
329 | } | 396 | } |
330 | 397 | ||
331 | return 0; | 398 | if (evlist->cpus->map[0] == -1) |
399 | return perf_evlist__mmap_per_thread(evlist, prot, mask); | ||
332 | 400 | ||
333 | out_unmap: | 401 | return perf_evlist__mmap_per_cpu(evlist, prot, mask); |
334 | for (cpu = 0; cpu < cpus->nr; cpu++) { | ||
335 | if (evlist->mmap[cpu].base != NULL) { | ||
336 | munmap(evlist->mmap[cpu].base, evlist->mmap_len); | ||
337 | evlist->mmap[cpu].base = NULL; | ||
338 | } | ||
339 | } | ||
340 | return -1; | ||
341 | } | 402 | } |
342 | 403 | ||
343 | int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, | 404 | int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, |
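The per-cpu and per-thread helpers above mmap a single ring buffer for the first fd of each CPU (or thread) and steer every other event into it with PERF_EVENT_IOC_SET_OUTPUT. A bare-bones sketch of that redirection for two software counters on the current task, with error handling trimmed and the attribute choice picked only for illustration:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static int perf_open(__u64 config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size          = sizeof(attr);
	attr.type          = PERF_TYPE_SOFTWARE;
	attr.config        = config;
	attr.sample_period = 1;
	attr.sample_type   = PERF_SAMPLE_TID;

	/* monitor the current task on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	long page = sysconf(_SC_PAGE_SIZE);
	int fd1 = perf_open(PERF_COUNT_SW_CONTEXT_SWITCHES);
	int fd2 = perf_open(PERF_COUNT_SW_PAGE_FAULTS);
	void *ring;

	if (fd1 < 0 || fd2 < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* One ring buffer for fd1 (1 metadata page + 8 data pages) ... */
	ring = mmap(NULL, (8 + 1) * page, PROT_READ | PROT_WRITE, MAP_SHARED, fd1, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* ... and fd2's samples are steered into that same ring, as the
	 * per-cpu/per-thread helpers above do for every additional event. */
	if (ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1) != 0) {
		perror("PERF_EVENT_IOC_SET_OUTPUT");
		return 1;
	}

	printf("both counters now feed the ring mapped at %p\n", ring);
	return 0;
}
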
@@ -348,7 +409,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, | |||
348 | if (evlist->threads == NULL) | 409 | if (evlist->threads == NULL) |
349 | return -1; | 410 | return -1; |
350 | 411 | ||
351 | if (target_tid != -1) | 412 | if (cpu_list == NULL && target_tid != -1) |
352 | evlist->cpus = cpu_map__dummy_new(); | 413 | evlist->cpus = cpu_map__dummy_new(); |
353 | else | 414 | else |
354 | evlist->cpus = cpu_map__new(cpu_list); | 415 | evlist->cpus = cpu_map__new(cpu_list); |
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 8b1cb7a4c5f1..7109d7add14e 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
@@ -17,6 +17,7 @@ struct perf_evlist { | |||
17 | struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; | 17 | struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; |
18 | int nr_entries; | 18 | int nr_entries; |
19 | int nr_fds; | 19 | int nr_fds; |
20 | int nr_mmaps; | ||
20 | int mmap_len; | 21 | int mmap_len; |
21 | bool overwrite; | 22 | bool overwrite; |
22 | union perf_event event_copy; | 23 | union perf_event event_copy; |
@@ -46,7 +47,7 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); | |||
46 | 47 | ||
47 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); | 48 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); |
48 | 49 | ||
49 | union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu); | 50 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); |
50 | 51 | ||
51 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist); | 52 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist); |
52 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); | 53 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); |
diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h new file mode 100644 index 000000000000..6789d788d494 --- /dev/null +++ b/tools/perf/util/include/asm/alternative-asm.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _PERF_ASM_ALTERNATIVE_ASM_H | ||
2 | #define _PERF_ASM_ALTERNATIVE_ASM_H | ||
3 | |||
4 | /* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */ | ||
5 | |||
6 | #define altinstruction_entry # | ||
7 | |||
8 | #endif | ||
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 952b4ae3d954..41982c373faf 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -31,34 +31,36 @@ char debugfs_path[MAXPATHLEN]; | |||
31 | #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x | 31 | #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x |
32 | 32 | ||
33 | static struct event_symbol event_symbols[] = { | 33 | static struct event_symbol event_symbols[] = { |
34 | { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, | 34 | { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, |
35 | { CHW(INSTRUCTIONS), "instructions", "" }, | 35 | { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" }, |
36 | { CHW(CACHE_REFERENCES), "cache-references", "" }, | 36 | { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" }, |
37 | { CHW(CACHE_MISSES), "cache-misses", "" }, | 37 | { CHW(INSTRUCTIONS), "instructions", "" }, |
38 | { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, | 38 | { CHW(CACHE_REFERENCES), "cache-references", "" }, |
39 | { CHW(BRANCH_MISSES), "branch-misses", "" }, | 39 | { CHW(CACHE_MISSES), "cache-misses", "" }, |
40 | { CHW(BUS_CYCLES), "bus-cycles", "" }, | 40 | { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, |
41 | 41 | { CHW(BRANCH_MISSES), "branch-misses", "" }, | |
42 | { CSW(CPU_CLOCK), "cpu-clock", "" }, | 42 | { CHW(BUS_CYCLES), "bus-cycles", "" }, |
43 | { CSW(TASK_CLOCK), "task-clock", "" }, | 43 | |
44 | { CSW(PAGE_FAULTS), "page-faults", "faults" }, | 44 | { CSW(CPU_CLOCK), "cpu-clock", "" }, |
45 | { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, | 45 | { CSW(TASK_CLOCK), "task-clock", "" }, |
46 | { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, | 46 | { CSW(PAGE_FAULTS), "page-faults", "faults" }, |
47 | { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, | 47 | { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, |
48 | { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, | 48 | { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, |
49 | { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, | 49 | { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, |
50 | { CSW(EMULATION_FAULTS), "emulation-faults", "" }, | 50 | { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, |
51 | { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, | ||
52 | { CSW(EMULATION_FAULTS), "emulation-faults", "" }, | ||
51 | }; | 53 | }; |
52 | 54 | ||
53 | #define __PERF_EVENT_FIELD(config, name) \ | 55 | #define __PERF_EVENT_FIELD(config, name) \ |
54 | ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) | 56 | ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) |
55 | 57 | ||
56 | #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) | 58 | #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) |
57 | #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) | 59 | #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) |
58 | #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) | 60 | #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) |
59 | #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) | 61 | #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) |
60 | 62 | ||
61 | static const char *hw_event_names[] = { | 63 | static const char *hw_event_names[PERF_COUNT_HW_MAX] = { |
62 | "cycles", | 64 | "cycles", |
63 | "instructions", | 65 | "instructions", |
64 | "cache-references", | 66 | "cache-references", |
@@ -66,11 +68,13 @@ static const char *hw_event_names[] = { | |||
66 | "branches", | 68 | "branches", |
67 | "branch-misses", | 69 | "branch-misses", |
68 | "bus-cycles", | 70 | "bus-cycles", |
71 | "stalled-cycles-frontend", | ||
72 | "stalled-cycles-backend", | ||
69 | }; | 73 | }; |
70 | 74 | ||
71 | static const char *sw_event_names[] = { | 75 | static const char *sw_event_names[PERF_COUNT_SW_MAX] = { |
72 | "cpu-clock-msecs", | 76 | "cpu-clock", |
73 | "task-clock-msecs", | 77 | "task-clock", |
74 | "page-faults", | 78 | "page-faults", |
75 | "context-switches", | 79 | "context-switches", |
76 | "CPU-migrations", | 80 | "CPU-migrations", |
@@ -307,7 +311,7 @@ const char *__event_name(int type, u64 config) | |||
307 | 311 | ||
308 | switch (type) { | 312 | switch (type) { |
309 | case PERF_TYPE_HARDWARE: | 313 | case PERF_TYPE_HARDWARE: |
310 | if (config < PERF_COUNT_HW_MAX) | 314 | if (config < PERF_COUNT_HW_MAX && hw_event_names[config]) |
311 | return hw_event_names[config]; | 315 | return hw_event_names[config]; |
312 | return "unknown-hardware"; | 316 | return "unknown-hardware"; |
313 | 317 | ||
@@ -333,7 +337,7 @@ const char *__event_name(int type, u64 config) | |||
333 | } | 337 | } |
334 | 338 | ||
335 | case PERF_TYPE_SOFTWARE: | 339 | case PERF_TYPE_SOFTWARE: |
336 | if (config < PERF_COUNT_SW_MAX) | 340 | if (config < PERF_COUNT_SW_MAX && sw_event_names[config]) |
337 | return sw_event_names[config]; | 341 | return sw_event_names[config]; |
338 | return "unknown-software"; | 342 | return "unknown-software"; |
339 | 343 | ||
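
    [Editor's sketch] Because the name tables are now sized to PERF_COUNT_HW_MAX and
    PERF_COUNT_SW_MAX, a slot can legitimately be NULL when a newer counter has no string yet,
    hence the extra check added above. The lookup pattern, sketched outside the patch:

        static const char *hw_name_or_unknown(u64 config)
        {
                /* a bounds check alone is not enough once the array is sparse */
                if (config < PERF_COUNT_HW_MAX && hw_event_names[config])
                        return hw_event_names[config];
                return "unknown-hardware";
        }
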
@@ -648,13 +652,15 @@ static int check_events(const char *str, unsigned int i) | |||
648 | int n; | 652 | int n; |
649 | 653 | ||
650 | n = strlen(event_symbols[i].symbol); | 654 | n = strlen(event_symbols[i].symbol); |
651 | if (!strncmp(str, event_symbols[i].symbol, n)) | 655 | if (!strncasecmp(str, event_symbols[i].symbol, n)) |
652 | return n; | 656 | return n; |
653 | 657 | ||
654 | n = strlen(event_symbols[i].alias); | 658 | n = strlen(event_symbols[i].alias); |
655 | if (n) | 659 | if (n) { |
656 | if (!strncmp(str, event_symbols[i].alias, n)) | 660 | if (!strncasecmp(str, event_symbols[i].alias, n)) |
657 | return n; | 661 | return n; |
662 | } | ||
663 | |||
658 | return 0; | 664 | return 0; |
659 | } | 665 | } |
660 | 666 | ||
@@ -718,15 +724,22 @@ parse_numeric_event(const char **strp, struct perf_event_attr *attr) | |||
718 | return EVT_FAILED; | 724 | return EVT_FAILED; |
719 | } | 725 | } |
720 | 726 | ||
721 | static enum event_result | 727 | static int |
722 | parse_event_modifier(const char **strp, struct perf_event_attr *attr) | 728 | parse_event_modifier(const char **strp, struct perf_event_attr *attr) |
723 | { | 729 | { |
724 | const char *str = *strp; | 730 | const char *str = *strp; |
725 | int exclude = 0; | 731 | int exclude = 0; |
726 | int eu = 0, ek = 0, eh = 0, precise = 0; | 732 | int eu = 0, ek = 0, eh = 0, precise = 0; |
727 | 733 | ||
728 | if (*str++ != ':') | 734 | if (!*str) |
735 | return 0; | ||
736 | |||
737 | if (*str == ',') | ||
729 | return 0; | 738 | return 0; |
739 | |||
740 | if (*str++ != ':') | ||
741 | return -1; | ||
742 | |||
730 | while (*str) { | 743 | while (*str) { |
731 | if (*str == 'u') { | 744 | if (*str == 'u') { |
732 | if (!exclude) | 745 | if (!exclude) |
@@ -747,14 +760,16 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr) | |||
747 | 760 | ||
748 | ++str; | 761 | ++str; |
749 | } | 762 | } |
750 | if (str >= *strp + 2) { | 763 | if (str < *strp + 2) |
751 | *strp = str; | 764 | return -1; |
752 | attr->exclude_user = eu; | 765 | |
753 | attr->exclude_kernel = ek; | 766 | *strp = str; |
754 | attr->exclude_hv = eh; | 767 | |
755 | attr->precise_ip = precise; | 768 | attr->exclude_user = eu; |
756 | return 1; | 769 | attr->exclude_kernel = ek; |
757 | } | 770 | attr->exclude_hv = eh; |
771 | attr->precise_ip = precise; | ||
772 | |||
758 | return 0; | 773 | return 0; |
759 | } | 774 | } |
760 | 775 | ||
@@ -797,7 +812,12 @@ parse_event_symbols(const struct option *opt, const char **str, | |||
797 | return EVT_FAILED; | 812 | return EVT_FAILED; |
798 | 813 | ||
799 | modifier: | 814 | modifier: |
800 | parse_event_modifier(str, attr); | 815 | if (parse_event_modifier(str, attr) < 0) { |
816 | fprintf(stderr, "invalid event modifier: '%s'\n", *str); | ||
817 | fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n"); | ||
818 | |||
819 | return EVT_FAILED; | ||
820 | } | ||
801 | 821 | ||
802 | return ret; | 822 | return ret; |
803 | } | 823 | } |
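
    [Editor's note] Net effect of the revised parse_event_modifier() contract (0 on success or no
    modifier, -1 on a malformed one): a bad suffix now aborts the parse instead of being silently
    ignored. Illustrative behaviour, assuming the usual u/k/h/p letters handled by the elided part
    of the loop (they match the eu/ek/eh/precise locals):

        /*   "cycles"        -> no modifier (or a trailing ','), returns 0
         *   "cycles:ukhp"   -> letters applied to exclude_user/kernel/hv and
         *                      precise_ip as before, returns 0
         *   "cycles:"       -> ':' with no letters after it, returns -1
         *   "cycles:x"      -> unrecognized letter, returns -1; the caller now
         *                      prints "invalid event modifier: ..." and fails
         *                      with EVT_FAILED
         */
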
@@ -912,7 +932,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob) | |||
912 | 932 | ||
913 | snprintf(evt_path, MAXPATHLEN, "%s:%s", | 933 | snprintf(evt_path, MAXPATHLEN, "%s:%s", |
914 | sys_dirent.d_name, evt_dirent.d_name); | 934 | sys_dirent.d_name, evt_dirent.d_name); |
915 | printf(" %-42s [%s]\n", evt_path, | 935 | printf(" %-50s [%s]\n", evt_path, |
916 | event_type_descriptors[PERF_TYPE_TRACEPOINT]); | 936 | event_type_descriptors[PERF_TYPE_TRACEPOINT]); |
917 | } | 937 | } |
918 | closedir(evt_dir); | 938 | closedir(evt_dir); |
@@ -977,7 +997,7 @@ void print_events_type(u8 type) | |||
977 | else | 997 | else |
978 | snprintf(name, sizeof(name), "%s", syms->symbol); | 998 | snprintf(name, sizeof(name), "%s", syms->symbol); |
979 | 999 | ||
980 | printf(" %-42s [%s]\n", name, | 1000 | printf(" %-50s [%s]\n", name, |
981 | event_type_descriptors[type]); | 1001 | event_type_descriptors[type]); |
982 | } | 1002 | } |
983 | } | 1003 | } |
@@ -995,11 +1015,10 @@ int print_hwcache_events(const char *event_glob) | |||
995 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | 1015 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { |
996 | char *name = event_cache_name(type, op, i); | 1016 | char *name = event_cache_name(type, op, i); |
997 | 1017 | ||
998 | if (event_glob != NULL && | 1018 | if (event_glob != NULL && !strglobmatch(name, event_glob)) |
999 | !strglobmatch(name, event_glob)) | ||
1000 | continue; | 1019 | continue; |
1001 | 1020 | ||
1002 | printf(" %-42s [%s]\n", name, | 1021 | printf(" %-50s [%s]\n", name, |
1003 | event_type_descriptors[PERF_TYPE_HW_CACHE]); | 1022 | event_type_descriptors[PERF_TYPE_HW_CACHE]); |
1004 | ++printed; | 1023 | ++printed; |
1005 | } | 1024 | } |
@@ -1009,14 +1028,16 @@ int print_hwcache_events(const char *event_glob) | |||
1009 | return printed; | 1028 | return printed; |
1010 | } | 1029 | } |
1011 | 1030 | ||
1031 | #define MAX_NAME_LEN 100 | ||
1032 | |||
1012 | /* | 1033 | /* |
1013 | * Print the help text for the event symbols: | 1034 | * Print the help text for the event symbols: |
1014 | */ | 1035 | */ |
1015 | void print_events(const char *event_glob) | 1036 | void print_events(const char *event_glob) |
1016 | { | 1037 | { |
1017 | struct event_symbol *syms = event_symbols; | ||
1018 | unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; | 1038 | unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; |
1019 | char name[40]; | 1039 | struct event_symbol *syms = event_symbols; |
1040 | char name[MAX_NAME_LEN]; | ||
1020 | 1041 | ||
1021 | printf("\n"); | 1042 | printf("\n"); |
1022 | printf("List of pre-defined events (to be used in -e):\n"); | 1043 | printf("List of pre-defined events (to be used in -e):\n"); |
@@ -1036,10 +1057,10 @@ void print_events(const char *event_glob) | |||
1036 | continue; | 1057 | continue; |
1037 | 1058 | ||
1038 | if (strlen(syms->alias)) | 1059 | if (strlen(syms->alias)) |
1039 | sprintf(name, "%s OR %s", syms->symbol, syms->alias); | 1060 | snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); |
1040 | else | 1061 | else |
1041 | strcpy(name, syms->symbol); | 1062 | strncpy(name, syms->symbol, MAX_NAME_LEN); |
1042 | printf(" %-42s [%s]\n", name, | 1063 | printf(" %-50s [%s]\n", name, |
1043 | event_type_descriptors[type]); | 1064 | event_type_descriptors[type]); |
1044 | 1065 | ||
1045 | prev_type = type; | 1066 | prev_type = type; |
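
    [Editor's note] The larger buffer presumably goes with the new event names: the longest
    combined "symbol OR alias" entry no longer fits the old 40-byte array, for example:

        /* "stalled-cycles-frontend OR idle-cycles-frontend" is 47 characters
         * plus the terminating NUL, i.e. 48 bytes, more than the old
         * char name[40]; hence name[MAX_NAME_LEN] together with the
         * snprintf()/strncpy() conversion above.
         */
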
@@ -1056,12 +1077,12 @@ void print_events(const char *event_glob) | |||
1056 | return; | 1077 | return; |
1057 | 1078 | ||
1058 | printf("\n"); | 1079 | printf("\n"); |
1059 | printf(" %-42s [%s]\n", | 1080 | printf(" %-50s [%s]\n", |
1060 | "rNNN (see 'perf list --help' on how to encode it)", | 1081 | "rNNN (see 'perf list --help' on how to encode it)", |
1061 | event_type_descriptors[PERF_TYPE_RAW]); | 1082 | event_type_descriptors[PERF_TYPE_RAW]); |
1062 | printf("\n"); | 1083 | printf("\n"); |
1063 | 1084 | ||
1064 | printf(" %-42s [%s]\n", | 1085 | printf(" %-50s [%s]\n", |
1065 | "mem:<addr>[:access]", | 1086 | "mem:<addr>[:access]", |
1066 | event_type_descriptors[PERF_TYPE_BREAKPOINT]); | 1087 | event_type_descriptors[PERF_TYPE_BREAKPOINT]); |
1067 | printf("\n"); | 1088 | printf("\n"); |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index b7c85ce466a1..3b9d0b800d5c 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -1471,6 +1471,38 @@ static int find_probe_point_by_func(struct probe_finder *pf) | |||
1471 | return _param.retval; | 1471 | return _param.retval; |
1472 | } | 1472 | } |
1473 | 1473 | ||
1474 | struct pubname_callback_param { | ||
1475 | char *function; | ||
1476 | char *file; | ||
1477 | Dwarf_Die *cu_die; | ||
1478 | Dwarf_Die *sp_die; | ||
1479 | int found; | ||
1480 | }; | ||
1481 | |||
1482 | static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) | ||
1483 | { | ||
1484 | struct pubname_callback_param *param = data; | ||
1485 | |||
1486 | if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) { | ||
1487 | if (dwarf_tag(param->sp_die) != DW_TAG_subprogram) | ||
1488 | return DWARF_CB_OK; | ||
1489 | |||
1490 | if (die_compare_name(param->sp_die, param->function)) { | ||
1491 | if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die)) | ||
1492 | return DWARF_CB_OK; | ||
1493 | |||
1494 | if (param->file && | ||
1495 | strtailcmp(param->file, dwarf_decl_file(param->sp_die))) | ||
1496 | return DWARF_CB_OK; | ||
1497 | |||
1498 | param->found = 1; | ||
1499 | return DWARF_CB_ABORT; | ||
1500 | } | ||
1501 | } | ||
1502 | |||
1503 | return DWARF_CB_OK; | ||
1504 | } | ||
1505 | |||
1474 | /* Find probe points from debuginfo */ | 1506 | /* Find probe points from debuginfo */ |
1475 | static int find_probes(int fd, struct probe_finder *pf) | 1507 | static int find_probes(int fd, struct probe_finder *pf) |
1476 | { | 1508 | { |
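
    [Editor's sketch] For reference, a minimal stand-alone sketch of the elfutils libdw interface
    that pubname_search_cb() above plugs into (assumed API: dwarf_getpubnames() walks
    .debug_pubnames and invokes the callback per global, which returns DWARF_CB_OK to continue or
    DWARF_CB_ABORT to stop); the next hunk wires the real callback into find_probes() the same way:

        #include <elfutils/libdw.h>

        static int count_globals_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
        {
                int *count = data;

                (void)dbg;
                (*count)++;   /* gl->name, gl->die_offset, gl->cu_offset usable here */
                return DWARF_CB_OK;
        }

        static int count_globals(Dwarf *dbg)
        {
                int count = 0;

                dwarf_getpubnames(dbg, count_globals_cb, &count, 0);
                return count;
        }
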
@@ -1498,6 +1530,28 @@ static int find_probes(int fd, struct probe_finder *pf) | |||
1498 | 1530 | ||
1499 | off = 0; | 1531 | off = 0; |
1500 | line_list__init(&pf->lcache); | 1532 | line_list__init(&pf->lcache); |
1533 | |||
1534 | /* Fastpath: lookup by function name from .debug_pubnames section */ | ||
1535 | if (pp->function) { | ||
1536 | struct pubname_callback_param pubname_param = { | ||
1537 | .function = pp->function, | ||
1538 | .file = pp->file, | ||
1539 | .cu_die = &pf->cu_die, | ||
1540 | .sp_die = &pf->sp_die, | ||
1541 | .found = 0, | ||
1542 | }; | ||
1543 | struct dwarf_callback_param probe_param = { | ||
1544 | .data = pf, | ||
1545 | }; | ||
1546 | |||
1547 | dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); | ||
1548 | if (pubname_param.found) { | ||
1549 | ret = probe_point_search_cb(&pf->sp_die, &probe_param); | ||
1550 | if (ret) | ||
1551 | goto found; | ||
1552 | } | ||
1553 | } | ||
1554 | |||
1501 | /* Loop on CUs (Compilation Unit) */ | 1555 | /* Loop on CUs (Compilation Unit) */ |
1502 | while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { | 1556 | while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { |
1503 | /* Get the DIE(Debugging Information Entry) of this CU */ | 1557 | /* Get the DIE(Debugging Information Entry) of this CU */ |
@@ -1525,6 +1579,8 @@ static int find_probes(int fd, struct probe_finder *pf) | |||
1525 | } | 1579 | } |
1526 | off = noff; | 1580 | off = noff; |
1527 | } | 1581 | } |
1582 | |||
1583 | found: | ||
1528 | line_list__free(&pf->lcache); | 1584 | line_list__free(&pf->lcache); |
1529 | if (dwfl) | 1585 | if (dwfl) |
1530 | dwfl_end(dwfl); | 1586 | dwfl_end(dwfl); |
@@ -1946,6 +2002,22 @@ int find_line_range(int fd, struct line_range *lr) | |||
1946 | return -EBADF; | 2002 | return -EBADF; |
1947 | } | 2003 | } |
1948 | 2004 | ||
2005 | /* Fastpath: lookup by function name from .debug_pubnames section */ | ||
2006 | if (lr->function) { | ||
2007 | struct pubname_callback_param pubname_param = { | ||
2008 | .function = lr->function, .file = lr->file, | ||
2009 | .cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0}; | ||
2010 | struct dwarf_callback_param line_range_param = { | ||
2011 | .data = (void *)&lf, .retval = 0}; | ||
2012 | |||
2013 | dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); | ||
2014 | if (pubname_param.found) { | ||
2015 | line_range_search_cb(&lf.sp_die, &line_range_param); | ||
2016 | if (lf.found) | ||
2017 | goto found; | ||
2018 | } | ||
2019 | } | ||
2020 | |||
1949 | /* Loop on CUs (Compilation Unit) */ | 2021 | /* Loop on CUs (Compilation Unit) */ |
1950 | while (!lf.found && ret >= 0) { | 2022 | while (!lf.found && ret >= 0) { |
1951 | if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0) | 2023 | if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0) |
@@ -1974,6 +2046,7 @@ int find_line_range(int fd, struct line_range *lr) | |||
1974 | off = noff; | 2046 | off = noff; |
1975 | } | 2047 | } |
1976 | 2048 | ||
2049 | found: | ||
1977 | /* Store comp_dir */ | 2050 | /* Store comp_dir */ |
1978 | if (lf.found) { | 2051 | if (lf.found) { |
1979 | comp_dir = cu_get_comp_dir(&lf.cu_die); | 2052 | comp_dir = cu_get_comp_dir(&lf.cu_die); |
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index beaefc3c1223..605730a366db 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h | |||
@@ -49,6 +49,7 @@ struct probe_finder { | |||
49 | Dwarf_Addr addr; /* Address */ | 49 | Dwarf_Addr addr; /* Address */ |
50 | const char *fname; /* Real file name */ | 50 | const char *fname; /* Real file name */ |
51 | Dwarf_Die cu_die; /* Current CU */ | 51 | Dwarf_Die cu_die; /* Current CU */ |
52 | Dwarf_Die sp_die; | ||
52 | struct list_head lcache; /* Line cache for lazy match */ | 53 | struct list_head lcache; /* Line cache for lazy match */ |
53 | 54 | ||
54 | /* For variable searching */ | 55 | /* For variable searching */ |
@@ -83,6 +84,7 @@ struct line_finder { | |||
83 | int lno_s; /* Start line number */ | 84 | int lno_s; /* Start line number */ |
84 | int lno_e; /* End line number */ | 85 | int lno_e; /* End line number */ |
85 | Dwarf_Die cu_die; /* Current CU */ | 86 | Dwarf_Die cu_die; /* Current CU */ |
87 | Dwarf_Die sp_die; | ||
86 | int found; | 88 | int found; |
87 | }; | 89 | }; |
88 | 90 | ||
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index f5e38451fdc5..b5c7d818001c 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c | |||
@@ -680,7 +680,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, | |||
680 | &cpu, &sample_id_all)) | 680 | &cpu, &sample_id_all)) |
681 | return NULL; | 681 | return NULL; |
682 | 682 | ||
683 | event = perf_evlist__read_on_cpu(evlist, cpu); | 683 | event = perf_evlist__mmap_read(evlist, cpu); |
684 | if (event != NULL) { | 684 | if (event != NULL) { |
685 | struct perf_evsel *first; | 685 | struct perf_evsel *first; |
686 | PyObject *pyevent = pyrf_event__new(event); | 686 | PyObject *pyevent = pyrf_event__new(event); |
@@ -810,6 +810,9 @@ static struct { | |||
810 | { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, | 810 | { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, |
811 | { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, | 811 | { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, |
812 | 812 | ||
813 | { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, | ||
814 | { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, | ||
815 | |||
813 | { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, | 816 | { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, |
814 | { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, | 817 | { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, |
815 | { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, | 818 | { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, |
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index caa224522fea..fff66741f18d 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -1156,6 +1156,18 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) | |||
1156 | return ret; | 1156 | return ret; |
1157 | } | 1157 | } |
1158 | 1158 | ||
1159 | struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | ||
1160 | unsigned int type) | ||
1161 | { | ||
1162 | struct perf_evsel *pos; | ||
1163 | |||
1164 | list_for_each_entry(pos, &session->evlist->entries, node) { | ||
1165 | if (pos->attr.type == type) | ||
1166 | return pos; | ||
1167 | } | ||
1168 | return NULL; | ||
1169 | } | ||
1170 | |||
1159 | void perf_session__print_symbols(union perf_event *event, | 1171 | void perf_session__print_symbols(union perf_event *event, |
1160 | struct perf_sample *sample, | 1172 | struct perf_sample *sample, |
1161 | struct perf_session *session) | 1173 | struct perf_session *session) |
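
    [Editor's sketch] A minimal usage sketch for the new helper (caller context invented): it walks
    the session's evlist and returns the first evsel whose attr.type matches, or NULL.

        struct perf_evsel *evsel;

        evsel = perf_session__find_first_evtype(session, PERF_TYPE_TRACEPOINT);
        if (evsel == NULL)
                pr_debug("session contains no tracepoint events\n");
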
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 1ac481fc1100..8daaa2d15396 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -162,6 +162,9 @@ static inline int perf_session__parse_sample(struct perf_session *session, | |||
162 | session->sample_id_all, sample); | 162 | session->sample_id_all, sample); |
163 | } | 163 | } |
164 | 164 | ||
165 | struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | ||
166 | unsigned int type); | ||
167 | |||
165 | void perf_session__print_symbols(union perf_event *event, | 168 | void perf_session__print_symbols(union perf_event *event, |
166 | struct perf_sample *sample, | 169 | struct perf_sample *sample, |
167 | struct perf_session *session); | 170 | struct perf_session *session); |
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index f06c10f092ba..516876dfbe52 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -31,13 +31,13 @@ | |||
31 | #define NT_GNU_BUILD_ID 3 | 31 | #define NT_GNU_BUILD_ID 3 |
32 | #endif | 32 | #endif |
33 | 33 | ||
34 | static bool dso__build_id_equal(const struct dso *self, u8 *build_id); | 34 | static bool dso__build_id_equal(const struct dso *dso, u8 *build_id); |
35 | static int elf_read_build_id(Elf *elf, void *bf, size_t size); | 35 | static int elf_read_build_id(Elf *elf, void *bf, size_t size); |
36 | static void dsos__add(struct list_head *head, struct dso *dso); | 36 | static void dsos__add(struct list_head *head, struct dso *dso); |
37 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); | 37 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); |
38 | static int dso__load_kernel_sym(struct dso *self, struct map *map, | 38 | static int dso__load_kernel_sym(struct dso *dso, struct map *map, |
39 | symbol_filter_t filter); | 39 | symbol_filter_t filter); |
40 | static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, | 40 | static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, |
41 | symbol_filter_t filter); | 41 | symbol_filter_t filter); |
42 | static int vmlinux_path__nr_entries; | 42 | static int vmlinux_path__nr_entries; |
43 | static char **vmlinux_path; | 43 | static char **vmlinux_path; |
@@ -49,27 +49,27 @@ struct symbol_conf symbol_conf = { | |||
49 | .symfs = "", | 49 | .symfs = "", |
50 | }; | 50 | }; |
51 | 51 | ||
52 | int dso__name_len(const struct dso *self) | 52 | int dso__name_len(const struct dso *dso) |
53 | { | 53 | { |
54 | if (verbose) | 54 | if (verbose) |
55 | return self->long_name_len; | 55 | return dso->long_name_len; |
56 | 56 | ||
57 | return self->short_name_len; | 57 | return dso->short_name_len; |
58 | } | 58 | } |
59 | 59 | ||
60 | bool dso__loaded(const struct dso *self, enum map_type type) | 60 | bool dso__loaded(const struct dso *dso, enum map_type type) |
61 | { | 61 | { |
62 | return self->loaded & (1 << type); | 62 | return dso->loaded & (1 << type); |
63 | } | 63 | } |
64 | 64 | ||
65 | bool dso__sorted_by_name(const struct dso *self, enum map_type type) | 65 | bool dso__sorted_by_name(const struct dso *dso, enum map_type type) |
66 | { | 66 | { |
67 | return self->sorted_by_name & (1 << type); | 67 | return dso->sorted_by_name & (1 << type); |
68 | } | 68 | } |
69 | 69 | ||
70 | static void dso__set_sorted_by_name(struct dso *self, enum map_type type) | 70 | static void dso__set_sorted_by_name(struct dso *dso, enum map_type type) |
71 | { | 71 | { |
72 | self->sorted_by_name |= (1 << type); | 72 | dso->sorted_by_name |= (1 << type); |
73 | } | 73 | } |
74 | 74 | ||
75 | bool symbol_type__is_a(char symbol_type, enum map_type map_type) | 75 | bool symbol_type__is_a(char symbol_type, enum map_type map_type) |
@@ -84,9 +84,9 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type) | |||
84 | } | 84 | } |
85 | } | 85 | } |
86 | 86 | ||
87 | static void symbols__fixup_end(struct rb_root *self) | 87 | static void symbols__fixup_end(struct rb_root *symbols) |
88 | { | 88 | { |
89 | struct rb_node *nd, *prevnd = rb_first(self); | 89 | struct rb_node *nd, *prevnd = rb_first(symbols); |
90 | struct symbol *curr, *prev; | 90 | struct symbol *curr, *prev; |
91 | 91 | ||
92 | if (prevnd == NULL) | 92 | if (prevnd == NULL) |
@@ -107,10 +107,10 @@ static void symbols__fixup_end(struct rb_root *self) | |||
107 | curr->end = roundup(curr->start, 4096); | 107 | curr->end = roundup(curr->start, 4096); |
108 | } | 108 | } |
109 | 109 | ||
110 | static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) | 110 | static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) |
111 | { | 111 | { |
112 | struct map *prev, *curr; | 112 | struct map *prev, *curr; |
113 | struct rb_node *nd, *prevnd = rb_first(&self->maps[type]); | 113 | struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); |
114 | 114 | ||
115 | if (prevnd == NULL) | 115 | if (prevnd == NULL) |
116 | return; | 116 | return; |
@@ -130,128 +130,128 @@ static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) | |||
130 | curr->end = ~0ULL; | 130 | curr->end = ~0ULL; |
131 | } | 131 | } |
132 | 132 | ||
133 | static void map_groups__fixup_end(struct map_groups *self) | 133 | static void map_groups__fixup_end(struct map_groups *mg) |
134 | { | 134 | { |
135 | int i; | 135 | int i; |
136 | for (i = 0; i < MAP__NR_TYPES; ++i) | 136 | for (i = 0; i < MAP__NR_TYPES; ++i) |
137 | __map_groups__fixup_end(self, i); | 137 | __map_groups__fixup_end(mg, i); |
138 | } | 138 | } |
139 | 139 | ||
140 | static struct symbol *symbol__new(u64 start, u64 len, u8 binding, | 140 | static struct symbol *symbol__new(u64 start, u64 len, u8 binding, |
141 | const char *name) | 141 | const char *name) |
142 | { | 142 | { |
143 | size_t namelen = strlen(name) + 1; | 143 | size_t namelen = strlen(name) + 1; |
144 | struct symbol *self = calloc(1, (symbol_conf.priv_size + | 144 | struct symbol *sym = calloc(1, (symbol_conf.priv_size + |
145 | sizeof(*self) + namelen)); | 145 | sizeof(*sym) + namelen)); |
146 | if (self == NULL) | 146 | if (sym == NULL) |
147 | return NULL; | 147 | return NULL; |
148 | 148 | ||
149 | if (symbol_conf.priv_size) | 149 | if (symbol_conf.priv_size) |
150 | self = ((void *)self) + symbol_conf.priv_size; | 150 | sym = ((void *)sym) + symbol_conf.priv_size; |
151 | |||
152 | self->start = start; | ||
153 | self->end = len ? start + len - 1 : start; | ||
154 | self->binding = binding; | ||
155 | self->namelen = namelen - 1; | ||
156 | 151 | ||
157 | pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", __func__, name, start, self->end); | 152 | sym->start = start; |
153 | sym->end = len ? start + len - 1 : start; | ||
154 | sym->binding = binding; | ||
155 | sym->namelen = namelen - 1; | ||
158 | 156 | ||
159 | memcpy(self->name, name, namelen); | 157 | pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", |
158 | __func__, name, start, sym->end); | ||
159 | memcpy(sym->name, name, namelen); | ||
160 | 160 | ||
161 | return self; | 161 | return sym; |
162 | } | 162 | } |
163 | 163 | ||
164 | void symbol__delete(struct symbol *self) | 164 | void symbol__delete(struct symbol *sym) |
165 | { | 165 | { |
166 | free(((void *)self) - symbol_conf.priv_size); | 166 | free(((void *)sym) - symbol_conf.priv_size); |
167 | } | 167 | } |
168 | 168 | ||
169 | static size_t symbol__fprintf(struct symbol *self, FILE *fp) | 169 | static size_t symbol__fprintf(struct symbol *sym, FILE *fp) |
170 | { | 170 | { |
171 | return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n", | 171 | return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n", |
172 | self->start, self->end, | 172 | sym->start, sym->end, |
173 | self->binding == STB_GLOBAL ? 'g' : | 173 | sym->binding == STB_GLOBAL ? 'g' : |
174 | self->binding == STB_LOCAL ? 'l' : 'w', | 174 | sym->binding == STB_LOCAL ? 'l' : 'w', |
175 | self->name); | 175 | sym->name); |
176 | } | 176 | } |
177 | 177 | ||
178 | void dso__set_long_name(struct dso *self, char *name) | 178 | void dso__set_long_name(struct dso *dso, char *name) |
179 | { | 179 | { |
180 | if (name == NULL) | 180 | if (name == NULL) |
181 | return; | 181 | return; |
182 | self->long_name = name; | 182 | dso->long_name = name; |
183 | self->long_name_len = strlen(name); | 183 | dso->long_name_len = strlen(name); |
184 | } | 184 | } |
185 | 185 | ||
186 | static void dso__set_short_name(struct dso *self, const char *name) | 186 | static void dso__set_short_name(struct dso *dso, const char *name) |
187 | { | 187 | { |
188 | if (name == NULL) | 188 | if (name == NULL) |
189 | return; | 189 | return; |
190 | self->short_name = name; | 190 | dso->short_name = name; |
191 | self->short_name_len = strlen(name); | 191 | dso->short_name_len = strlen(name); |
192 | } | 192 | } |
193 | 193 | ||
194 | static void dso__set_basename(struct dso *self) | 194 | static void dso__set_basename(struct dso *dso) |
195 | { | 195 | { |
196 | dso__set_short_name(self, basename(self->long_name)); | 196 | dso__set_short_name(dso, basename(dso->long_name)); |
197 | } | 197 | } |
198 | 198 | ||
199 | struct dso *dso__new(const char *name) | 199 | struct dso *dso__new(const char *name) |
200 | { | 200 | { |
201 | struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1); | 201 | struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); |
202 | 202 | ||
203 | if (self != NULL) { | 203 | if (dso != NULL) { |
204 | int i; | 204 | int i; |
205 | strcpy(self->name, name); | 205 | strcpy(dso->name, name); |
206 | dso__set_long_name(self, self->name); | 206 | dso__set_long_name(dso, dso->name); |
207 | dso__set_short_name(self, self->name); | 207 | dso__set_short_name(dso, dso->name); |
208 | for (i = 0; i < MAP__NR_TYPES; ++i) | 208 | for (i = 0; i < MAP__NR_TYPES; ++i) |
209 | self->symbols[i] = self->symbol_names[i] = RB_ROOT; | 209 | dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; |
210 | self->symtab_type = SYMTAB__NOT_FOUND; | 210 | dso->symtab_type = SYMTAB__NOT_FOUND; |
211 | self->loaded = 0; | 211 | dso->loaded = 0; |
212 | self->sorted_by_name = 0; | 212 | dso->sorted_by_name = 0; |
213 | self->has_build_id = 0; | 213 | dso->has_build_id = 0; |
214 | self->kernel = DSO_TYPE_USER; | 214 | dso->kernel = DSO_TYPE_USER; |
215 | INIT_LIST_HEAD(&self->node); | 215 | INIT_LIST_HEAD(&dso->node); |
216 | } | 216 | } |
217 | 217 | ||
218 | return self; | 218 | return dso; |
219 | } | 219 | } |
220 | 220 | ||
221 | static void symbols__delete(struct rb_root *self) | 221 | static void symbols__delete(struct rb_root *symbols) |
222 | { | 222 | { |
223 | struct symbol *pos; | 223 | struct symbol *pos; |
224 | struct rb_node *next = rb_first(self); | 224 | struct rb_node *next = rb_first(symbols); |
225 | 225 | ||
226 | while (next) { | 226 | while (next) { |
227 | pos = rb_entry(next, struct symbol, rb_node); | 227 | pos = rb_entry(next, struct symbol, rb_node); |
228 | next = rb_next(&pos->rb_node); | 228 | next = rb_next(&pos->rb_node); |
229 | rb_erase(&pos->rb_node, self); | 229 | rb_erase(&pos->rb_node, symbols); |
230 | symbol__delete(pos); | 230 | symbol__delete(pos); |
231 | } | 231 | } |
232 | } | 232 | } |
233 | 233 | ||
234 | void dso__delete(struct dso *self) | 234 | void dso__delete(struct dso *dso) |
235 | { | 235 | { |
236 | int i; | 236 | int i; |
237 | for (i = 0; i < MAP__NR_TYPES; ++i) | 237 | for (i = 0; i < MAP__NR_TYPES; ++i) |
238 | symbols__delete(&self->symbols[i]); | 238 | symbols__delete(&dso->symbols[i]); |
239 | if (self->sname_alloc) | 239 | if (dso->sname_alloc) |
240 | free((char *)self->short_name); | 240 | free((char *)dso->short_name); |
241 | if (self->lname_alloc) | 241 | if (dso->lname_alloc) |
242 | free(self->long_name); | 242 | free(dso->long_name); |
243 | free(self); | 243 | free(dso); |
244 | } | 244 | } |
245 | 245 | ||
246 | void dso__set_build_id(struct dso *self, void *build_id) | 246 | void dso__set_build_id(struct dso *dso, void *build_id) |
247 | { | 247 | { |
248 | memcpy(self->build_id, build_id, sizeof(self->build_id)); | 248 | memcpy(dso->build_id, build_id, sizeof(dso->build_id)); |
249 | self->has_build_id = 1; | 249 | dso->has_build_id = 1; |
250 | } | 250 | } |
251 | 251 | ||
252 | static void symbols__insert(struct rb_root *self, struct symbol *sym) | 252 | static void symbols__insert(struct rb_root *symbols, struct symbol *sym) |
253 | { | 253 | { |
254 | struct rb_node **p = &self->rb_node; | 254 | struct rb_node **p = &symbols->rb_node; |
255 | struct rb_node *parent = NULL; | 255 | struct rb_node *parent = NULL; |
256 | const u64 ip = sym->start; | 256 | const u64 ip = sym->start; |
257 | struct symbol *s; | 257 | struct symbol *s; |
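
    [Editor's note] The self-to-dso/sym rename does not change the allocation scheme handled by
    symbol__new()/symbol__delete() above: symbol_conf.priv_size bytes of per-tool private data sit
    in front of the struct, so deletion steps back by that amount. A sketch of the layout and an
    accessor (the accessor name here is illustrative):

        /*  [ symbol_conf.priv_size bytes ][ struct symbol ][ name bytes ]
         *                                  ^ pointer returned by symbol__new()
         */
        static void *symbol_private_area(struct symbol *sym)
        {
                return ((void *)sym) - symbol_conf.priv_size;
        }
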
@@ -265,17 +265,17 @@ static void symbols__insert(struct rb_root *self, struct symbol *sym) | |||
265 | p = &(*p)->rb_right; | 265 | p = &(*p)->rb_right; |
266 | } | 266 | } |
267 | rb_link_node(&sym->rb_node, parent, p); | 267 | rb_link_node(&sym->rb_node, parent, p); |
268 | rb_insert_color(&sym->rb_node, self); | 268 | rb_insert_color(&sym->rb_node, symbols); |
269 | } | 269 | } |
270 | 270 | ||
271 | static struct symbol *symbols__find(struct rb_root *self, u64 ip) | 271 | static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) |
272 | { | 272 | { |
273 | struct rb_node *n; | 273 | struct rb_node *n; |
274 | 274 | ||
275 | if (self == NULL) | 275 | if (symbols == NULL) |
276 | return NULL; | 276 | return NULL; |
277 | 277 | ||
278 | n = self->rb_node; | 278 | n = symbols->rb_node; |
279 | 279 | ||
280 | while (n) { | 280 | while (n) { |
281 | struct symbol *s = rb_entry(n, struct symbol, rb_node); | 281 | struct symbol *s = rb_entry(n, struct symbol, rb_node); |
@@ -296,9 +296,9 @@ struct symbol_name_rb_node { | |||
296 | struct symbol sym; | 296 | struct symbol sym; |
297 | }; | 297 | }; |
298 | 298 | ||
299 | static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) | 299 | static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) |
300 | { | 300 | { |
301 | struct rb_node **p = &self->rb_node; | 301 | struct rb_node **p = &symbols->rb_node; |
302 | struct rb_node *parent = NULL; | 302 | struct rb_node *parent = NULL; |
303 | struct symbol_name_rb_node *symn, *s; | 303 | struct symbol_name_rb_node *symn, *s; |
304 | 304 | ||
@@ -313,27 +313,29 @@ static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) | |||
313 | p = &(*p)->rb_right; | 313 | p = &(*p)->rb_right; |
314 | } | 314 | } |
315 | rb_link_node(&symn->rb_node, parent, p); | 315 | rb_link_node(&symn->rb_node, parent, p); |
316 | rb_insert_color(&symn->rb_node, self); | 316 | rb_insert_color(&symn->rb_node, symbols); |
317 | } | 317 | } |
318 | 318 | ||
319 | static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source) | 319 | static void symbols__sort_by_name(struct rb_root *symbols, |
320 | struct rb_root *source) | ||
320 | { | 321 | { |
321 | struct rb_node *nd; | 322 | struct rb_node *nd; |
322 | 323 | ||
323 | for (nd = rb_first(source); nd; nd = rb_next(nd)) { | 324 | for (nd = rb_first(source); nd; nd = rb_next(nd)) { |
324 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); | 325 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); |
325 | symbols__insert_by_name(self, pos); | 326 | symbols__insert_by_name(symbols, pos); |
326 | } | 327 | } |
327 | } | 328 | } |
328 | 329 | ||
329 | static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name) | 330 | static struct symbol *symbols__find_by_name(struct rb_root *symbols, |
331 | const char *name) | ||
330 | { | 332 | { |
331 | struct rb_node *n; | 333 | struct rb_node *n; |
332 | 334 | ||
333 | if (self == NULL) | 335 | if (symbols == NULL) |
334 | return NULL; | 336 | return NULL; |
335 | 337 | ||
336 | n = self->rb_node; | 338 | n = symbols->rb_node; |
337 | 339 | ||
338 | while (n) { | 340 | while (n) { |
339 | struct symbol_name_rb_node *s; | 341 | struct symbol_name_rb_node *s; |
@@ -353,29 +355,29 @@ static struct symbol *symbols__find_by_name(struct rb_root *self, const char *na | |||
353 | return NULL; | 355 | return NULL; |
354 | } | 356 | } |
355 | 357 | ||
356 | struct symbol *dso__find_symbol(struct dso *self, | 358 | struct symbol *dso__find_symbol(struct dso *dso, |
357 | enum map_type type, u64 addr) | 359 | enum map_type type, u64 addr) |
358 | { | 360 | { |
359 | return symbols__find(&self->symbols[type], addr); | 361 | return symbols__find(&dso->symbols[type], addr); |
360 | } | 362 | } |
361 | 363 | ||
362 | struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, | 364 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, |
363 | const char *name) | 365 | const char *name) |
364 | { | 366 | { |
365 | return symbols__find_by_name(&self->symbol_names[type], name); | 367 | return symbols__find_by_name(&dso->symbol_names[type], name); |
366 | } | 368 | } |
367 | 369 | ||
368 | void dso__sort_by_name(struct dso *self, enum map_type type) | 370 | void dso__sort_by_name(struct dso *dso, enum map_type type) |
369 | { | 371 | { |
370 | dso__set_sorted_by_name(self, type); | 372 | dso__set_sorted_by_name(dso, type); |
371 | return symbols__sort_by_name(&self->symbol_names[type], | 373 | return symbols__sort_by_name(&dso->symbol_names[type], |
372 | &self->symbols[type]); | 374 | &dso->symbols[type]); |
373 | } | 375 | } |
374 | 376 | ||
375 | int build_id__sprintf(const u8 *self, int len, char *bf) | 377 | int build_id__sprintf(const u8 *build_id, int len, char *bf) |
376 | { | 378 | { |
377 | char *bid = bf; | 379 | char *bid = bf; |
378 | const u8 *raw = self; | 380 | const u8 *raw = build_id; |
379 | int i; | 381 | int i; |
380 | 382 | ||
381 | for (i = 0; i < len; ++i) { | 383 | for (i = 0; i < len; ++i) { |
@@ -384,24 +386,25 @@ int build_id__sprintf(const u8 *self, int len, char *bf) | |||
384 | bid += 2; | 386 | bid += 2; |
385 | } | 387 | } |
386 | 388 | ||
387 | return raw - self; | 389 | return raw - build_id; |
388 | } | 390 | } |
389 | 391 | ||
390 | size_t dso__fprintf_buildid(struct dso *self, FILE *fp) | 392 | size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) |
391 | { | 393 | { |
392 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | 394 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; |
393 | 395 | ||
394 | build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); | 396 | build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); |
395 | return fprintf(fp, "%s", sbuild_id); | 397 | return fprintf(fp, "%s", sbuild_id); |
396 | } | 398 | } |
397 | 399 | ||
398 | size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp) | 400 | size_t dso__fprintf_symbols_by_name(struct dso *dso, |
401 | enum map_type type, FILE *fp) | ||
399 | { | 402 | { |
400 | size_t ret = 0; | 403 | size_t ret = 0; |
401 | struct rb_node *nd; | 404 | struct rb_node *nd; |
402 | struct symbol_name_rb_node *pos; | 405 | struct symbol_name_rb_node *pos; |
403 | 406 | ||
404 | for (nd = rb_first(&self->symbol_names[type]); nd; nd = rb_next(nd)) { | 407 | for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) { |
405 | pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); | 408 | pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); |
406 | fprintf(fp, "%s\n", pos->sym.name); | 409 | fprintf(fp, "%s\n", pos->sym.name); |
407 | } | 410 | } |
@@ -409,18 +412,18 @@ size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE * | |||
409 | return ret; | 412 | return ret; |
410 | } | 413 | } |
411 | 414 | ||
412 | size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) | 415 | size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) |
413 | { | 416 | { |
414 | struct rb_node *nd; | 417 | struct rb_node *nd; |
415 | size_t ret = fprintf(fp, "dso: %s (", self->short_name); | 418 | size_t ret = fprintf(fp, "dso: %s (", dso->short_name); |
416 | 419 | ||
417 | if (self->short_name != self->long_name) | 420 | if (dso->short_name != dso->long_name) |
418 | ret += fprintf(fp, "%s, ", self->long_name); | 421 | ret += fprintf(fp, "%s, ", dso->long_name); |
419 | ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], | 422 | ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], |
420 | self->loaded ? "" : "NOT "); | 423 | dso->loaded ? "" : "NOT "); |
421 | ret += dso__fprintf_buildid(self, fp); | 424 | ret += dso__fprintf_buildid(dso, fp); |
422 | ret += fprintf(fp, ")\n"); | 425 | ret += fprintf(fp, ")\n"); |
423 | for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) { | 426 | for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { |
424 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); | 427 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); |
425 | ret += symbol__fprintf(pos, fp); | 428 | ret += symbol__fprintf(pos, fp); |
426 | } | 429 | } |
@@ -543,10 +546,10 @@ static int map__process_kallsym_symbol(void *arg, const char *name, | |||
543 | * so that we can in the next step set the symbol ->end address and then | 546 | * so that we can in the next step set the symbol ->end address and then |
544 | * call kernel_maps__split_kallsyms. | 547 | * call kernel_maps__split_kallsyms. |
545 | */ | 548 | */ |
546 | static int dso__load_all_kallsyms(struct dso *self, const char *filename, | 549 | static int dso__load_all_kallsyms(struct dso *dso, const char *filename, |
547 | struct map *map) | 550 | struct map *map) |
548 | { | 551 | { |
549 | struct process_kallsyms_args args = { .map = map, .dso = self, }; | 552 | struct process_kallsyms_args args = { .map = map, .dso = dso, }; |
550 | return kallsyms__parse(filename, &args, map__process_kallsym_symbol); | 553 | return kallsyms__parse(filename, &args, map__process_kallsym_symbol); |
551 | } | 554 | } |
552 | 555 | ||
@@ -555,7 +558,7 @@ static int dso__load_all_kallsyms(struct dso *self, const char *filename, | |||
555 | * kernel range is broken in several maps, named [kernel].N, as we don't have | 558 | * kernel range is broken in several maps, named [kernel].N, as we don't have |
556 | * the original ELF section names vmlinux have. | 559 | * the original ELF section names vmlinux have. |
557 | */ | 560 | */ |
558 | static int dso__split_kallsyms(struct dso *self, struct map *map, | 561 | static int dso__split_kallsyms(struct dso *dso, struct map *map, |
559 | symbol_filter_t filter) | 562 | symbol_filter_t filter) |
560 | { | 563 | { |
561 | struct map_groups *kmaps = map__kmap(map)->kmaps; | 564 | struct map_groups *kmaps = map__kmap(map)->kmaps; |
@@ -563,7 +566,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, | |||
563 | struct map *curr_map = map; | 566 | struct map *curr_map = map; |
564 | struct symbol *pos; | 567 | struct symbol *pos; |
565 | int count = 0, moved = 0; | 568 | int count = 0, moved = 0; |
566 | struct rb_root *root = &self->symbols[map->type]; | 569 | struct rb_root *root = &dso->symbols[map->type]; |
567 | struct rb_node *next = rb_first(root); | 570 | struct rb_node *next = rb_first(root); |
568 | int kernel_range = 0; | 571 | int kernel_range = 0; |
569 | 572 | ||
@@ -582,7 +585,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, | |||
582 | 585 | ||
583 | if (strcmp(curr_map->dso->short_name, module)) { | 586 | if (strcmp(curr_map->dso->short_name, module)) { |
584 | if (curr_map != map && | 587 | if (curr_map != map && |
585 | self->kernel == DSO_TYPE_GUEST_KERNEL && | 588 | dso->kernel == DSO_TYPE_GUEST_KERNEL && |
586 | machine__is_default_guest(machine)) { | 589 | machine__is_default_guest(machine)) { |
587 | /* | 590 | /* |
588 | * We assume all symbols of a module are | 591 | * We assume all symbols of a module are |
@@ -618,14 +621,14 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, | |||
618 | pos->end = curr_map->map_ip(curr_map, pos->end); | 621 | pos->end = curr_map->map_ip(curr_map, pos->end); |
619 | } else if (curr_map != map) { | 622 | } else if (curr_map != map) { |
620 | char dso_name[PATH_MAX]; | 623 | char dso_name[PATH_MAX]; |
621 | struct dso *dso; | 624 | struct dso *ndso; |
622 | 625 | ||
623 | if (count == 0) { | 626 | if (count == 0) { |
624 | curr_map = map; | 627 | curr_map = map; |
625 | goto filter_symbol; | 628 | goto filter_symbol; |
626 | } | 629 | } |
627 | 630 | ||
628 | if (self->kernel == DSO_TYPE_GUEST_KERNEL) | 631 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) |
629 | snprintf(dso_name, sizeof(dso_name), | 632 | snprintf(dso_name, sizeof(dso_name), |
630 | "[guest.kernel].%d", | 633 | "[guest.kernel].%d", |
631 | kernel_range++); | 634 | kernel_range++); |
@@ -634,15 +637,15 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, | |||
634 | "[kernel].%d", | 637 | "[kernel].%d", |
635 | kernel_range++); | 638 | kernel_range++); |
636 | 639 | ||
637 | dso = dso__new(dso_name); | 640 | ndso = dso__new(dso_name); |
638 | if (dso == NULL) | 641 | if (ndso == NULL) |
639 | return -1; | 642 | return -1; |
640 | 643 | ||
641 | dso->kernel = self->kernel; | 644 | ndso->kernel = dso->kernel; |
642 | 645 | ||
643 | curr_map = map__new2(pos->start, dso, map->type); | 646 | curr_map = map__new2(pos->start, ndso, map->type); |
644 | if (curr_map == NULL) { | 647 | if (curr_map == NULL) { |
645 | dso__delete(dso); | 648 | dso__delete(ndso); |
646 | return -1; | 649 | return -1; |
647 | } | 650 | } |
648 | 651 | ||
@@ -665,7 +668,7 @@ discard_symbol: rb_erase(&pos->rb_node, root); | |||
665 | } | 668 | } |
666 | 669 | ||
667 | if (curr_map != map && | 670 | if (curr_map != map && |
668 | self->kernel == DSO_TYPE_GUEST_KERNEL && | 671 | dso->kernel == DSO_TYPE_GUEST_KERNEL && |
669 | machine__is_default_guest(kmaps->machine)) { | 672 | machine__is_default_guest(kmaps->machine)) { |
670 | dso__set_loaded(curr_map->dso, curr_map->type); | 673 | dso__set_loaded(curr_map->dso, curr_map->type); |
671 | } | 674 | } |
@@ -673,21 +676,21 @@ discard_symbol: rb_erase(&pos->rb_node, root); | |||
673 | return count + moved; | 676 | return count + moved; |
674 | } | 677 | } |
675 | 678 | ||
676 | int dso__load_kallsyms(struct dso *self, const char *filename, | 679 | int dso__load_kallsyms(struct dso *dso, const char *filename, |
677 | struct map *map, symbol_filter_t filter) | 680 | struct map *map, symbol_filter_t filter) |
678 | { | 681 | { |
679 | if (dso__load_all_kallsyms(self, filename, map) < 0) | 682 | if (dso__load_all_kallsyms(dso, filename, map) < 0) |
680 | return -1; | 683 | return -1; |
681 | 684 | ||
682 | if (self->kernel == DSO_TYPE_GUEST_KERNEL) | 685 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) |
683 | self->symtab_type = SYMTAB__GUEST_KALLSYMS; | 686 | dso->symtab_type = SYMTAB__GUEST_KALLSYMS; |
684 | else | 687 | else |
685 | self->symtab_type = SYMTAB__KALLSYMS; | 688 | dso->symtab_type = SYMTAB__KALLSYMS; |
686 | 689 | ||
687 | return dso__split_kallsyms(self, map, filter); | 690 | return dso__split_kallsyms(dso, map, filter); |
688 | } | 691 | } |
689 | 692 | ||
690 | static int dso__load_perf_map(struct dso *self, struct map *map, | 693 | static int dso__load_perf_map(struct dso *dso, struct map *map, |
691 | symbol_filter_t filter) | 694 | symbol_filter_t filter) |
692 | { | 695 | { |
693 | char *line = NULL; | 696 | char *line = NULL; |
@@ -695,7 +698,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map, | |||
695 | FILE *file; | 698 | FILE *file; |
696 | int nr_syms = 0; | 699 | int nr_syms = 0; |
697 | 700 | ||
698 | file = fopen(self->long_name, "r"); | 701 | file = fopen(dso->long_name, "r"); |
699 | if (file == NULL) | 702 | if (file == NULL) |
700 | goto out_failure; | 703 | goto out_failure; |
701 | 704 | ||
@@ -733,7 +736,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map, | |||
733 | if (filter && filter(map, sym)) | 736 | if (filter && filter(map, sym)) |
734 | symbol__delete(sym); | 737 | symbol__delete(sym); |
735 | else { | 738 | else { |
736 | symbols__insert(&self->symbols[map->type], sym); | 739 | symbols__insert(&dso->symbols[map->type], sym); |
737 | nr_syms++; | 740 | nr_syms++; |
738 | } | 741 | } |
739 | } | 742 | } |
@@ -752,7 +755,7 @@ out_failure: | |||
752 | /** | 755 | /** |
753 | * elf_symtab__for_each_symbol - iterate thru all the symbols | 756 | * elf_symtab__for_each_symbol - iterate thru all the symbols |
754 | * | 757 | * |
755 | * @self: struct elf_symtab instance to iterate | 758 | * @syms: struct elf_symtab instance to iterate |
756 | * @idx: uint32_t idx | 759 | * @idx: uint32_t idx |
757 | * @sym: GElf_Sym iterator | 760 | * @sym: GElf_Sym iterator |
758 | */ | 761 | */ |
@@ -852,7 +855,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, | |||
852 | * And always look at the original dso, not at debuginfo packages, that | 855 | * And always look at the original dso, not at debuginfo packages, that |
853 | * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). | 856 | * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). |
854 | */ | 857 | */ |
855 | static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, | 858 | static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, |
856 | symbol_filter_t filter) | 859 | symbol_filter_t filter) |
857 | { | 860 | { |
858 | uint32_t nr_rel_entries, idx; | 861 | uint32_t nr_rel_entries, idx; |
@@ -871,7 +874,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, | |||
871 | char name[PATH_MAX]; | 874 | char name[PATH_MAX]; |
872 | 875 | ||
873 | snprintf(name, sizeof(name), "%s%s", | 876 | snprintf(name, sizeof(name), "%s%s", |
874 | symbol_conf.symfs, self->long_name); | 877 | symbol_conf.symfs, dso->long_name); |
875 | fd = open(name, O_RDONLY); | 878 | fd = open(name, O_RDONLY); |
876 | if (fd < 0) | 879 | if (fd < 0) |
877 | goto out; | 880 | goto out; |
@@ -947,7 +950,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, | |||
947 | if (filter && filter(map, f)) | 950 | if (filter && filter(map, f)) |
948 | symbol__delete(f); | 951 | symbol__delete(f); |
949 | else { | 952 | else { |
950 | symbols__insert(&self->symbols[map->type], f); | 953 | symbols__insert(&dso->symbols[map->type], f); |
951 | ++nr; | 954 | ++nr; |
952 | } | 955 | } |
953 | } | 956 | } |
@@ -969,7 +972,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, | |||
969 | if (filter && filter(map, f)) | 972 | if (filter && filter(map, f)) |
970 | symbol__delete(f); | 973 | symbol__delete(f); |
971 | else { | 974 | else { |
972 | symbols__insert(&self->symbols[map->type], f); | 975 | symbols__insert(&dso->symbols[map->type], f); |
973 | ++nr; | 976 | ++nr; |
974 | } | 977 | } |
975 | } | 978 | } |
@@ -985,29 +988,30 @@ out_close: | |||
985 | return nr; | 988 | return nr; |
986 | out: | 989 | out: |
987 | pr_debug("%s: problems reading %s PLT info.\n", | 990 | pr_debug("%s: problems reading %s PLT info.\n", |
988 | __func__, self->long_name); | 991 | __func__, dso->long_name); |
989 | return 0; | 992 | return 0; |
990 | } | 993 | } |
991 | 994 | ||
992 | static bool elf_sym__is_a(GElf_Sym *self, enum map_type type) | 995 | static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) |
993 | { | 996 | { |
994 | switch (type) { | 997 | switch (type) { |
995 | case MAP__FUNCTION: | 998 | case MAP__FUNCTION: |
996 | return elf_sym__is_function(self); | 999 | return elf_sym__is_function(sym); |
997 | case MAP__VARIABLE: | 1000 | case MAP__VARIABLE: |
998 | return elf_sym__is_object(self); | 1001 | return elf_sym__is_object(sym); |
999 | default: | 1002 | default: |
1000 | return false; | 1003 | return false; |
1001 | } | 1004 | } |
1002 | } | 1005 | } |
1003 | 1006 | ||
1004 | static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type) | 1007 | static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, |
1008 | enum map_type type) | ||
1005 | { | 1009 | { |
1006 | switch (type) { | 1010 | switch (type) { |
1007 | case MAP__FUNCTION: | 1011 | case MAP__FUNCTION: |
1008 | return elf_sec__is_text(self, secstrs); | 1012 | return elf_sec__is_text(shdr, secstrs); |
1009 | case MAP__VARIABLE: | 1013 | case MAP__VARIABLE: |
1010 | return elf_sec__is_data(self, secstrs); | 1014 | return elf_sec__is_data(shdr, secstrs); |
1011 | default: | 1015 | default: |
1012 | return false; | 1016 | return false; |
1013 | } | 1017 | } |
@@ -1032,13 +1036,13 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) | |||
1032 | return -1; | 1036 | return -1; |
1033 | } | 1037 | } |
1034 | 1038 | ||
1035 | static int dso__load_sym(struct dso *self, struct map *map, const char *name, | 1039 | static int dso__load_sym(struct dso *dso, struct map *map, const char *name, |
1036 | int fd, symbol_filter_t filter, int kmodule, | 1040 | int fd, symbol_filter_t filter, int kmodule, |
1037 | int want_symtab) | 1041 | int want_symtab) |
1038 | { | 1042 | { |
1039 | struct kmap *kmap = self->kernel ? map__kmap(map) : NULL; | 1043 | struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; |
1040 | struct map *curr_map = map; | 1044 | struct map *curr_map = map; |
1041 | struct dso *curr_dso = self; | 1045 | struct dso *curr_dso = dso; |
1042 | Elf_Data *symstrs, *secstrs; | 1046 | Elf_Data *symstrs, *secstrs; |
1043 | uint32_t nr_syms; | 1047 | uint32_t nr_syms; |
1044 | int err = -1; | 1048 | int err = -1; |
@@ -1064,14 +1068,14 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1064 | } | 1068 | } |
1065 | 1069 | ||
1066 | /* Always reject images with a mismatched build-id: */ | 1070 | /* Always reject images with a mismatched build-id: */ |
1067 | if (self->has_build_id) { | 1071 | if (dso->has_build_id) { |
1068 | u8 build_id[BUILD_ID_SIZE]; | 1072 | u8 build_id[BUILD_ID_SIZE]; |
1069 | 1073 | ||
1070 | if (elf_read_build_id(elf, build_id, | 1074 | if (elf_read_build_id(elf, build_id, |
1071 | BUILD_ID_SIZE) != BUILD_ID_SIZE) | 1075 | BUILD_ID_SIZE) != BUILD_ID_SIZE) |
1072 | goto out_elf_end; | 1076 | goto out_elf_end; |
1073 | 1077 | ||
1074 | if (!dso__build_id_equal(self, build_id)) | 1078 | if (!dso__build_id_equal(dso, build_id)) |
1075 | goto out_elf_end; | 1079 | goto out_elf_end; |
1076 | } | 1080 | } |
1077 | 1081 | ||
@@ -1112,13 +1116,14 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1112 | nr_syms = shdr.sh_size / shdr.sh_entsize; | 1116 | nr_syms = shdr.sh_size / shdr.sh_entsize; |
1113 | 1117 | ||
1114 | memset(&sym, 0, sizeof(sym)); | 1118 | memset(&sym, 0, sizeof(sym)); |
1115 | if (self->kernel == DSO_TYPE_USER) { | 1119 | if (dso->kernel == DSO_TYPE_USER) { |
1116 | self->adjust_symbols = (ehdr.e_type == ET_EXEC || | 1120 | dso->adjust_symbols = (ehdr.e_type == ET_EXEC || |
1117 | elf_section_by_name(elf, &ehdr, &shdr, | 1121 | elf_section_by_name(elf, &ehdr, &shdr, |
1118 | ".gnu.prelink_undo", | 1122 | ".gnu.prelink_undo", |
1119 | NULL) != NULL); | 1123 | NULL) != NULL); |
1120 | } else self->adjust_symbols = 0; | 1124 | } else { |
1121 | 1125 | dso->adjust_symbols = 0; | |
1126 | } | ||
1122 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { | 1127 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { |
1123 | struct symbol *f; | 1128 | struct symbol *f; |
1124 | const char *elf_name = elf_sym__name(&sym, symstrs); | 1129 | const char *elf_name = elf_sym__name(&sym, symstrs); |
@@ -1168,22 +1173,22 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1168 | (sym.st_value & 1)) | 1173 | (sym.st_value & 1)) |
1169 | --sym.st_value; | 1174 | --sym.st_value; |
1170 | 1175 | ||
1171 | if (self->kernel != DSO_TYPE_USER || kmodule) { | 1176 | if (dso->kernel != DSO_TYPE_USER || kmodule) { |
1172 | char dso_name[PATH_MAX]; | 1177 | char dso_name[PATH_MAX]; |
1173 | 1178 | ||
1174 | if (strcmp(section_name, | 1179 | if (strcmp(section_name, |
1175 | (curr_dso->short_name + | 1180 | (curr_dso->short_name + |
1176 | self->short_name_len)) == 0) | 1181 | dso->short_name_len)) == 0) |
1177 | goto new_symbol; | 1182 | goto new_symbol; |
1178 | 1183 | ||
1179 | if (strcmp(section_name, ".text") == 0) { | 1184 | if (strcmp(section_name, ".text") == 0) { |
1180 | curr_map = map; | 1185 | curr_map = map; |
1181 | curr_dso = self; | 1186 | curr_dso = dso; |
1182 | goto new_symbol; | 1187 | goto new_symbol; |
1183 | } | 1188 | } |
1184 | 1189 | ||
1185 | snprintf(dso_name, sizeof(dso_name), | 1190 | snprintf(dso_name, sizeof(dso_name), |
1186 | "%s%s", self->short_name, section_name); | 1191 | "%s%s", dso->short_name, section_name); |
1187 | 1192 | ||
1188 | curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); | 1193 | curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); |
1189 | if (curr_map == NULL) { | 1194 | if (curr_map == NULL) { |
@@ -1195,9 +1200,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1195 | curr_dso = dso__new(dso_name); | 1200 | curr_dso = dso__new(dso_name); |
1196 | if (curr_dso == NULL) | 1201 | if (curr_dso == NULL) |
1197 | goto out_elf_end; | 1202 | goto out_elf_end; |
1198 | curr_dso->kernel = self->kernel; | 1203 | curr_dso->kernel = dso->kernel; |
1199 | curr_dso->long_name = self->long_name; | 1204 | curr_dso->long_name = dso->long_name; |
1200 | curr_dso->long_name_len = self->long_name_len; | 1205 | curr_dso->long_name_len = dso->long_name_len; |
1201 | curr_map = map__new2(start, curr_dso, | 1206 | curr_map = map__new2(start, curr_dso, |
1202 | map->type); | 1207 | map->type); |
1203 | if (curr_map == NULL) { | 1208 | if (curr_map == NULL) { |
@@ -1206,9 +1211,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1206 | } | 1211 | } |
1207 | curr_map->map_ip = identity__map_ip; | 1212 | curr_map->map_ip = identity__map_ip; |
1208 | curr_map->unmap_ip = identity__map_ip; | 1213 | curr_map->unmap_ip = identity__map_ip; |
1209 | curr_dso->symtab_type = self->symtab_type; | 1214 | curr_dso->symtab_type = dso->symtab_type; |
1210 | map_groups__insert(kmap->kmaps, curr_map); | 1215 | map_groups__insert(kmap->kmaps, curr_map); |
1211 | dsos__add(&self->node, curr_dso); | 1216 | dsos__add(&dso->node, curr_dso); |
1212 | dso__set_loaded(curr_dso, map->type); | 1217 | dso__set_loaded(curr_dso, map->type); |
1213 | } else | 1218 | } else |
1214 | curr_dso = curr_map->dso; | 1219 | curr_dso = curr_map->dso; |
@@ -1250,7 +1255,7 @@ new_symbol: | |||
1250 | * For misannotated, zeroed, ASM function sizes. | 1255 | * For misannotated, zeroed, ASM function sizes. |
1251 | */ | 1256 | */ |
1252 | if (nr > 0) { | 1257 | if (nr > 0) { |
1253 | symbols__fixup_end(&self->symbols[map->type]); | 1258 | symbols__fixup_end(&dso->symbols[map->type]); |
1254 | if (kmap) { | 1259 | if (kmap) { |
1255 | /* | 1260 | /* |
1256 | * We need to fixup this here too because we create new | 1261 | * We need to fixup this here too because we create new |
@@ -1266,9 +1271,9 @@ out_close: | |||
1266 | return err; | 1271 | return err; |
1267 | } | 1272 | } |
1268 | 1273 | ||
1269 | static bool dso__build_id_equal(const struct dso *self, u8 *build_id) | 1274 | static bool dso__build_id_equal(const struct dso *dso, u8 *build_id) |
1270 | { | 1275 | { |
1271 | return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; | 1276 | return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; |
1272 | } | 1277 | } |
1273 | 1278 | ||
1274 | bool __dsos__read_build_ids(struct list_head *head, bool with_hits) | 1279 | bool __dsos__read_build_ids(struct list_head *head, bool with_hits) |
@@ -1429,7 +1434,7 @@ out: | |||
1429 | return err; | 1434 | return err; |
1430 | } | 1435 | } |
1431 | 1436 | ||
1432 | char dso__symtab_origin(const struct dso *self) | 1437 | char dso__symtab_origin(const struct dso *dso) |
1433 | { | 1438 | { |
1434 | static const char origin[] = { | 1439 | static const char origin[] = { |
1435 | [SYMTAB__KALLSYMS] = 'k', | 1440 | [SYMTAB__KALLSYMS] = 'k', |
@@ -1444,12 +1449,12 @@ char dso__symtab_origin(const struct dso *self) | |||
1444 | [SYMTAB__GUEST_KMODULE] = 'G', | 1449 | [SYMTAB__GUEST_KMODULE] = 'G', |
1445 | }; | 1450 | }; |
1446 | 1451 | ||
1447 | if (self == NULL || self->symtab_type == SYMTAB__NOT_FOUND) | 1452 | if (dso == NULL || dso->symtab_type == SYMTAB__NOT_FOUND) |
1448 | return '!'; | 1453 | return '!'; |
1449 | return origin[self->symtab_type]; | 1454 | return origin[dso->symtab_type]; |
1450 | } | 1455 | } |
1451 | 1456 | ||
1452 | int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | 1457 | int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) |
1453 | { | 1458 | { |
1454 | int size = PATH_MAX; | 1459 | int size = PATH_MAX; |
1455 | char *name; | 1460 | char *name; |
@@ -1459,12 +1464,12 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1459 | const char *root_dir; | 1464 | const char *root_dir; |
1460 | int want_symtab; | 1465 | int want_symtab; |
1461 | 1466 | ||
1462 | dso__set_loaded(self, map->type); | 1467 | dso__set_loaded(dso, map->type); |
1463 | 1468 | ||
1464 | if (self->kernel == DSO_TYPE_KERNEL) | 1469 | if (dso->kernel == DSO_TYPE_KERNEL) |
1465 | return dso__load_kernel_sym(self, map, filter); | 1470 | return dso__load_kernel_sym(dso, map, filter); |
1466 | else if (self->kernel == DSO_TYPE_GUEST_KERNEL) | 1471 | else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) |
1467 | return dso__load_guest_kernel_sym(self, map, filter); | 1472 | return dso__load_guest_kernel_sym(dso, map, filter); |
1468 | 1473 | ||
1469 | if (map->groups && map->groups->machine) | 1474 | if (map->groups && map->groups->machine) |
1470 | machine = map->groups->machine; | 1475 | machine = map->groups->machine; |
@@ -1475,11 +1480,11 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1475 | if (!name) | 1480 | if (!name) |
1476 | return -1; | 1481 | return -1; |
1477 | 1482 | ||
1478 | self->adjust_symbols = 0; | 1483 | dso->adjust_symbols = 0; |
1479 | 1484 | ||
1480 | if (strncmp(self->name, "/tmp/perf-", 10) == 0) { | 1485 | if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { |
1481 | ret = dso__load_perf_map(self, map, filter); | 1486 | ret = dso__load_perf_map(dso, map, filter); |
1482 | self->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : | 1487 | dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : |
1483 | SYMTAB__NOT_FOUND; | 1488 | SYMTAB__NOT_FOUND; |
1484 | return ret; | 1489 | return ret; |
1485 | } | 1490 | } |
@@ -1490,33 +1495,33 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1490 | */ | 1495 | */ |
1491 | want_symtab = 1; | 1496 | want_symtab = 1; |
1492 | restart: | 1497 | restart: |
1493 | for (self->symtab_type = SYMTAB__BUILD_ID_CACHE; | 1498 | for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE; |
1494 | self->symtab_type != SYMTAB__NOT_FOUND; | 1499 | dso->symtab_type != SYMTAB__NOT_FOUND; |
1495 | self->symtab_type++) { | 1500 | dso->symtab_type++) { |
1496 | switch (self->symtab_type) { | 1501 | switch (dso->symtab_type) { |
1497 | case SYMTAB__BUILD_ID_CACHE: | 1502 | case SYMTAB__BUILD_ID_CACHE: |
1498 | /* skip the locally configured cache if a symfs is given */ | 1503 | /* skip the locally configured cache if a symfs is given */ |
1499 | if (symbol_conf.symfs[0] || | 1504 | if (symbol_conf.symfs[0] || |
1500 | (dso__build_id_filename(self, name, size) == NULL)) { | 1505 | (dso__build_id_filename(dso, name, size) == NULL)) { |
1501 | continue; | 1506 | continue; |
1502 | } | 1507 | } |
1503 | break; | 1508 | break; |
1504 | case SYMTAB__FEDORA_DEBUGINFO: | 1509 | case SYMTAB__FEDORA_DEBUGINFO: |
1505 | snprintf(name, size, "%s/usr/lib/debug%s.debug", | 1510 | snprintf(name, size, "%s/usr/lib/debug%s.debug", |
1506 | symbol_conf.symfs, self->long_name); | 1511 | symbol_conf.symfs, dso->long_name); |
1507 | break; | 1512 | break; |
1508 | case SYMTAB__UBUNTU_DEBUGINFO: | 1513 | case SYMTAB__UBUNTU_DEBUGINFO: |
1509 | snprintf(name, size, "%s/usr/lib/debug%s", | 1514 | snprintf(name, size, "%s/usr/lib/debug%s", |
1510 | symbol_conf.symfs, self->long_name); | 1515 | symbol_conf.symfs, dso->long_name); |
1511 | break; | 1516 | break; |
1512 | case SYMTAB__BUILDID_DEBUGINFO: { | 1517 | case SYMTAB__BUILDID_DEBUGINFO: { |
1513 | char build_id_hex[BUILD_ID_SIZE * 2 + 1]; | 1518 | char build_id_hex[BUILD_ID_SIZE * 2 + 1]; |
1514 | 1519 | ||
1515 | if (!self->has_build_id) | 1520 | if (!dso->has_build_id) |
1516 | continue; | 1521 | continue; |
1517 | 1522 | ||
1518 | build_id__sprintf(self->build_id, | 1523 | build_id__sprintf(dso->build_id, |
1519 | sizeof(self->build_id), | 1524 | sizeof(dso->build_id), |
1520 | build_id_hex); | 1525 | build_id_hex); |
1521 | snprintf(name, size, | 1526 | snprintf(name, size, |
1522 | "%s/usr/lib/debug/.build-id/%.2s/%s.debug", | 1527 | "%s/usr/lib/debug/.build-id/%.2s/%s.debug", |
@@ -1525,7 +1530,7 @@ restart: | |||
1525 | break; | 1530 | break; |
1526 | case SYMTAB__SYSTEM_PATH_DSO: | 1531 | case SYMTAB__SYSTEM_PATH_DSO: |
1527 | snprintf(name, size, "%s%s", | 1532 | snprintf(name, size, "%s%s", |
1528 | symbol_conf.symfs, self->long_name); | 1533 | symbol_conf.symfs, dso->long_name); |
1529 | break; | 1534 | break; |
1530 | case SYMTAB__GUEST_KMODULE: | 1535 | case SYMTAB__GUEST_KMODULE: |
1531 | if (map->groups && machine) | 1536 | if (map->groups && machine) |
@@ -1533,12 +1538,12 @@ restart: | |||
1533 | else | 1538 | else |
1534 | root_dir = ""; | 1539 | root_dir = ""; |
1535 | snprintf(name, size, "%s%s%s", symbol_conf.symfs, | 1540 | snprintf(name, size, "%s%s%s", symbol_conf.symfs, |
1536 | root_dir, self->long_name); | 1541 | root_dir, dso->long_name); |
1537 | break; | 1542 | break; |
1538 | 1543 | ||
1539 | case SYMTAB__SYSTEM_PATH_KMODULE: | 1544 | case SYMTAB__SYSTEM_PATH_KMODULE: |
1540 | snprintf(name, size, "%s%s", symbol_conf.symfs, | 1545 | snprintf(name, size, "%s%s", symbol_conf.symfs, |
1541 | self->long_name); | 1546 | dso->long_name); |
1542 | break; | 1547 | break; |
1543 | default:; | 1548 | default:; |
1544 | } | 1549 | } |
@@ -1548,7 +1553,7 @@ restart: | |||
1548 | if (fd < 0) | 1553 | if (fd < 0) |
1549 | continue; | 1554 | continue; |
1550 | 1555 | ||
1551 | ret = dso__load_sym(self, map, name, fd, filter, 0, | 1556 | ret = dso__load_sym(dso, map, name, fd, filter, 0, |
1552 | want_symtab); | 1557 | want_symtab); |
1553 | close(fd); | 1558 | close(fd); |
1554 | 1559 | ||
@@ -1560,7 +1565,8 @@ restart: | |||
1560 | continue; | 1565 | continue; |
1561 | 1566 | ||
1562 | if (ret > 0) { | 1567 | if (ret > 0) { |
1563 | int nr_plt = dso__synthesize_plt_symbols(self, map, filter); | 1568 | int nr_plt = dso__synthesize_plt_symbols(dso, map, |
1569 | filter); | ||
1564 | if (nr_plt > 0) | 1570 | if (nr_plt > 0) |
1565 | ret += nr_plt; | 1571 | ret += nr_plt; |
1566 | break; | 1572 | break; |
@@ -1577,17 +1583,17 @@ restart: | |||
1577 | } | 1583 | } |
1578 | 1584 | ||
1579 | free(name); | 1585 | free(name); |
1580 | if (ret < 0 && strstr(self->name, " (deleted)") != NULL) | 1586 | if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) |
1581 | return 0; | 1587 | return 0; |
1582 | return ret; | 1588 | return ret; |
1583 | } | 1589 | } |
1584 | 1590 | ||
1585 | struct map *map_groups__find_by_name(struct map_groups *self, | 1591 | struct map *map_groups__find_by_name(struct map_groups *mg, |
1586 | enum map_type type, const char *name) | 1592 | enum map_type type, const char *name) |
1587 | { | 1593 | { |
1588 | struct rb_node *nd; | 1594 | struct rb_node *nd; |
1589 | 1595 | ||
1590 | for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { | 1596 | for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { |
1591 | struct map *map = rb_entry(nd, struct map, rb_node); | 1597 | struct map *map = rb_entry(nd, struct map, rb_node); |
1592 | 1598 | ||
1593 | if (map->dso && strcmp(map->dso->short_name, name) == 0) | 1599 | if (map->dso && strcmp(map->dso->short_name, name) == 0) |
@@ -1597,28 +1603,28 @@ struct map *map_groups__find_by_name(struct map_groups *self, | |||
1597 | return NULL; | 1603 | return NULL; |
1598 | } | 1604 | } |
1599 | 1605 | ||
1600 | static int dso__kernel_module_get_build_id(struct dso *self, | 1606 | static int dso__kernel_module_get_build_id(struct dso *dso, |
1601 | const char *root_dir) | 1607 | const char *root_dir) |
1602 | { | 1608 | { |
1603 | char filename[PATH_MAX]; | 1609 | char filename[PATH_MAX]; |
1604 | /* | 1610 | /* |
1605 | * kernel module short names are of the form "[module]" and | 1611 | * kernel module short names are of the form "[module]" and |
1606 | * we need just "module" here. | 1612 | * we need just "module" here. |
1607 | */ | 1613 | */ |
1608 | const char *name = self->short_name + 1; | 1614 | const char *name = dso->short_name + 1; |
1609 | 1615 | ||
1610 | snprintf(filename, sizeof(filename), | 1616 | snprintf(filename, sizeof(filename), |
1611 | "%s/sys/module/%.*s/notes/.note.gnu.build-id", | 1617 | "%s/sys/module/%.*s/notes/.note.gnu.build-id", |
1612 | root_dir, (int)strlen(name) - 1, name); | 1618 | root_dir, (int)strlen(name) - 1, name); |
1613 | 1619 | ||
1614 | if (sysfs__read_build_id(filename, self->build_id, | 1620 | if (sysfs__read_build_id(filename, dso->build_id, |
1615 | sizeof(self->build_id)) == 0) | 1621 | sizeof(dso->build_id)) == 0) |
1616 | self->has_build_id = true; | 1622 | dso->has_build_id = true; |
1617 | 1623 | ||
1618 | return 0; | 1624 | return 0; |
1619 | } | 1625 | } |
1620 | 1626 | ||
1621 | static int map_groups__set_modules_path_dir(struct map_groups *self, | 1627 | static int map_groups__set_modules_path_dir(struct map_groups *mg, |
1622 | const char *dir_name) | 1628 | const char *dir_name) |
1623 | { | 1629 | { |
1624 | struct dirent *dent; | 1630 | struct dirent *dent; |
@@ -1646,7 +1652,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *self, | |||
1646 | 1652 | ||
1647 | snprintf(path, sizeof(path), "%s/%s", | 1653 | snprintf(path, sizeof(path), "%s/%s", |
1648 | dir_name, dent->d_name); | 1654 | dir_name, dent->d_name); |
1649 | ret = map_groups__set_modules_path_dir(self, path); | 1655 | ret = map_groups__set_modules_path_dir(mg, path); |
1650 | if (ret < 0) | 1656 | if (ret < 0) |
1651 | goto out; | 1657 | goto out; |
1652 | } else { | 1658 | } else { |
@@ -1661,7 +1667,8 @@ static int map_groups__set_modules_path_dir(struct map_groups *self, | |||
1661 | (int)(dot - dent->d_name), dent->d_name); | 1667 | (int)(dot - dent->d_name), dent->d_name); |
1662 | 1668 | ||
1663 | strxfrchar(dso_name, '-', '_'); | 1669 | strxfrchar(dso_name, '-', '_'); |
1664 | map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name); | 1670 | map = map_groups__find_by_name(mg, MAP__FUNCTION, |
1671 | dso_name); | ||
1665 | if (map == NULL) | 1672 | if (map == NULL) |
1666 | continue; | 1673 | continue; |
1667 | 1674 | ||
@@ -1711,20 +1718,20 @@ static char *get_kernel_version(const char *root_dir) | |||
1711 | return strdup(name); | 1718 | return strdup(name); |
1712 | } | 1719 | } |
1713 | 1720 | ||
1714 | static int machine__set_modules_path(struct machine *self) | 1721 | static int machine__set_modules_path(struct machine *machine) |
1715 | { | 1722 | { |
1716 | char *version; | 1723 | char *version; |
1717 | char modules_path[PATH_MAX]; | 1724 | char modules_path[PATH_MAX]; |
1718 | 1725 | ||
1719 | version = get_kernel_version(self->root_dir); | 1726 | version = get_kernel_version(machine->root_dir); |
1720 | if (!version) | 1727 | if (!version) |
1721 | return -1; | 1728 | return -1; |
1722 | 1729 | ||
1723 | snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", | 1730 | snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", |
1724 | self->root_dir, version); | 1731 | machine->root_dir, version); |
1725 | free(version); | 1732 | free(version); |
1726 | 1733 | ||
1727 | return map_groups__set_modules_path_dir(&self->kmaps, modules_path); | 1734 | return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); |
1728 | } | 1735 | } |
1729 | 1736 | ||
1730 | /* | 1737 | /* |
@@ -1734,23 +1741,23 @@ static int machine__set_modules_path(struct machine *self) | |||
1734 | */ | 1741 | */ |
1735 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) | 1742 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) |
1736 | { | 1743 | { |
1737 | struct map *self = calloc(1, (sizeof(*self) + | 1744 | struct map *map = calloc(1, (sizeof(*map) + |
1738 | (dso->kernel ? sizeof(struct kmap) : 0))); | 1745 | (dso->kernel ? sizeof(struct kmap) : 0))); |
1739 | if (self != NULL) { | 1746 | if (map != NULL) { |
1740 | /* | 1747 | /* |
1741 | * ->end will be filled after we load all the symbols | 1748 | * ->end will be filled after we load all the symbols |
1742 | */ | 1749 | */ |
1743 | map__init(self, type, start, 0, 0, dso); | 1750 | map__init(map, type, start, 0, 0, dso); |
1744 | } | 1751 | } |
1745 | 1752 | ||
1746 | return self; | 1753 | return map; |
1747 | } | 1754 | } |
1748 | 1755 | ||
1749 | struct map *machine__new_module(struct machine *self, u64 start, | 1756 | struct map *machine__new_module(struct machine *machine, u64 start, |
1750 | const char *filename) | 1757 | const char *filename) |
1751 | { | 1758 | { |
1752 | struct map *map; | 1759 | struct map *map; |
1753 | struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename); | 1760 | struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); |
1754 | 1761 | ||
1755 | if (dso == NULL) | 1762 | if (dso == NULL) |
1756 | return NULL; | 1763 | return NULL; |
@@ -1759,15 +1766,15 @@ struct map *machine__new_module(struct machine *self, u64 start, | |||
1759 | if (map == NULL) | 1766 | if (map == NULL) |
1760 | return NULL; | 1767 | return NULL; |
1761 | 1768 | ||
1762 | if (machine__is_host(self)) | 1769 | if (machine__is_host(machine)) |
1763 | dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE; | 1770 | dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE; |
1764 | else | 1771 | else |
1765 | dso->symtab_type = SYMTAB__GUEST_KMODULE; | 1772 | dso->symtab_type = SYMTAB__GUEST_KMODULE; |
1766 | map_groups__insert(&self->kmaps, map); | 1773 | map_groups__insert(&machine->kmaps, map); |
1767 | return map; | 1774 | return map; |
1768 | } | 1775 | } |
1769 | 1776 | ||
1770 | static int machine__create_modules(struct machine *self) | 1777 | static int machine__create_modules(struct machine *machine) |
1771 | { | 1778 | { |
1772 | char *line = NULL; | 1779 | char *line = NULL; |
1773 | size_t n; | 1780 | size_t n; |
@@ -1776,10 +1783,10 @@ static int machine__create_modules(struct machine *self) | |||
1776 | const char *modules; | 1783 | const char *modules; |
1777 | char path[PATH_MAX]; | 1784 | char path[PATH_MAX]; |
1778 | 1785 | ||
1779 | if (machine__is_default_guest(self)) | 1786 | if (machine__is_default_guest(machine)) |
1780 | modules = symbol_conf.default_guest_modules; | 1787 | modules = symbol_conf.default_guest_modules; |
1781 | else { | 1788 | else { |
1782 | sprintf(path, "%s/proc/modules", self->root_dir); | 1789 | sprintf(path, "%s/proc/modules", machine->root_dir); |
1783 | modules = path; | 1790 | modules = path; |
1784 | } | 1791 | } |
1785 | 1792 | ||
@@ -1815,16 +1822,16 @@ static int machine__create_modules(struct machine *self) | |||
1815 | *sep = '\0'; | 1822 | *sep = '\0'; |
1816 | 1823 | ||
1817 | snprintf(name, sizeof(name), "[%s]", line); | 1824 | snprintf(name, sizeof(name), "[%s]", line); |
1818 | map = machine__new_module(self, start, name); | 1825 | map = machine__new_module(machine, start, name); |
1819 | if (map == NULL) | 1826 | if (map == NULL) |
1820 | goto out_delete_line; | 1827 | goto out_delete_line; |
1821 | dso__kernel_module_get_build_id(map->dso, self->root_dir); | 1828 | dso__kernel_module_get_build_id(map->dso, machine->root_dir); |
1822 | } | 1829 | } |
1823 | 1830 | ||
1824 | free(line); | 1831 | free(line); |
1825 | fclose(file); | 1832 | fclose(file); |
1826 | 1833 | ||
1827 | return machine__set_modules_path(self); | 1834 | return machine__set_modules_path(machine); |
1828 | 1835 | ||
1829 | out_delete_line: | 1836 | out_delete_line: |
1830 | free(line); | 1837 | free(line); |
@@ -1832,7 +1839,7 @@ out_failure: | |||
1832 | return -1; | 1839 | return -1; |
1833 | } | 1840 | } |
1834 | 1841 | ||
1835 | int dso__load_vmlinux(struct dso *self, struct map *map, | 1842 | int dso__load_vmlinux(struct dso *dso, struct map *map, |
1836 | const char *vmlinux, symbol_filter_t filter) | 1843 | const char *vmlinux, symbol_filter_t filter) |
1837 | { | 1844 | { |
1838 | int err = -1, fd; | 1845 | int err = -1, fd; |
@@ -1844,9 +1851,9 @@ int dso__load_vmlinux(struct dso *self, struct map *map, | |||
1844 | if (fd < 0) | 1851 | if (fd < 0) |
1845 | return -1; | 1852 | return -1; |
1846 | 1853 | ||
1847 | dso__set_long_name(self, (char *)vmlinux); | 1854 | dso__set_long_name(dso, (char *)vmlinux); |
1848 | dso__set_loaded(self, map->type); | 1855 | dso__set_loaded(dso, map->type); |
1849 | err = dso__load_sym(self, map, symfs_vmlinux, fd, filter, 0, 0); | 1856 | err = dso__load_sym(dso, map, symfs_vmlinux, fd, filter, 0, 0); |
1850 | close(fd); | 1857 | close(fd); |
1851 | 1858 | ||
1852 | if (err > 0) | 1859 | if (err > 0) |
@@ -1855,7 +1862,7 @@ int dso__load_vmlinux(struct dso *self, struct map *map, | |||
1855 | return err; | 1862 | return err; |
1856 | } | 1863 | } |
1857 | 1864 | ||
1858 | int dso__load_vmlinux_path(struct dso *self, struct map *map, | 1865 | int dso__load_vmlinux_path(struct dso *dso, struct map *map, |
1859 | symbol_filter_t filter) | 1866 | symbol_filter_t filter) |
1860 | { | 1867 | { |
1861 | int i, err = 0; | 1868 | int i, err = 0; |
@@ -1864,20 +1871,20 @@ int dso__load_vmlinux_path(struct dso *self, struct map *map, | |||
1864 | pr_debug("Looking at the vmlinux_path (%d entries long)\n", | 1871 | pr_debug("Looking at the vmlinux_path (%d entries long)\n", |
1865 | vmlinux_path__nr_entries + 1); | 1872 | vmlinux_path__nr_entries + 1); |
1866 | 1873 | ||
1867 | filename = dso__build_id_filename(self, NULL, 0); | 1874 | filename = dso__build_id_filename(dso, NULL, 0); |
1868 | if (filename != NULL) { | 1875 | if (filename != NULL) { |
1869 | err = dso__load_vmlinux(self, map, filename, filter); | 1876 | err = dso__load_vmlinux(dso, map, filename, filter); |
1870 | if (err > 0) { | 1877 | if (err > 0) { |
1871 | dso__set_long_name(self, filename); | 1878 | dso__set_long_name(dso, filename); |
1872 | goto out; | 1879 | goto out; |
1873 | } | 1880 | } |
1874 | free(filename); | 1881 | free(filename); |
1875 | } | 1882 | } |
1876 | 1883 | ||
1877 | for (i = 0; i < vmlinux_path__nr_entries; ++i) { | 1884 | for (i = 0; i < vmlinux_path__nr_entries; ++i) { |
1878 | err = dso__load_vmlinux(self, map, vmlinux_path[i], filter); | 1885 | err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter); |
1879 | if (err > 0) { | 1886 | if (err > 0) { |
1880 | dso__set_long_name(self, strdup(vmlinux_path[i])); | 1887 | dso__set_long_name(dso, strdup(vmlinux_path[i])); |
1881 | break; | 1888 | break; |
1882 | } | 1889 | } |
1883 | } | 1890 | } |
@@ -1885,7 +1892,7 @@ out: | |||
1885 | return err; | 1892 | return err; |
1886 | } | 1893 | } |
1887 | 1894 | ||
1888 | static int dso__load_kernel_sym(struct dso *self, struct map *map, | 1895 | static int dso__load_kernel_sym(struct dso *dso, struct map *map, |
1889 | symbol_filter_t filter) | 1896 | symbol_filter_t filter) |
1890 | { | 1897 | { |
1891 | int err; | 1898 | int err; |
@@ -1912,10 +1919,10 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1912 | } | 1919 | } |
1913 | 1920 | ||
1914 | if (symbol_conf.vmlinux_name != NULL) { | 1921 | if (symbol_conf.vmlinux_name != NULL) { |
1915 | err = dso__load_vmlinux(self, map, | 1922 | err = dso__load_vmlinux(dso, map, |
1916 | symbol_conf.vmlinux_name, filter); | 1923 | symbol_conf.vmlinux_name, filter); |
1917 | if (err > 0) { | 1924 | if (err > 0) { |
1918 | dso__set_long_name(self, | 1925 | dso__set_long_name(dso, |
1919 | strdup(symbol_conf.vmlinux_name)); | 1926 | strdup(symbol_conf.vmlinux_name)); |
1920 | goto out_fixup; | 1927 | goto out_fixup; |
1921 | } | 1928 | } |
@@ -1923,7 +1930,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1923 | } | 1930 | } |
1924 | 1931 | ||
1925 | if (vmlinux_path != NULL) { | 1932 | if (vmlinux_path != NULL) { |
1926 | err = dso__load_vmlinux_path(self, map, filter); | 1933 | err = dso__load_vmlinux_path(dso, map, filter); |
1927 | if (err > 0) | 1934 | if (err > 0) |
1928 | goto out_fixup; | 1935 | goto out_fixup; |
1929 | } | 1936 | } |
@@ -1937,13 +1944,13 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1937 | * we have a build-id, so check if it is the same as the running kernel, | 1944 | * we have a build-id, so check if it is the same as the running kernel, |
1938 | * using it if it is. | 1945 | * using it if it is. |
1939 | */ | 1946 | */ |
1940 | if (self->has_build_id) { | 1947 | if (dso->has_build_id) { |
1941 | u8 kallsyms_build_id[BUILD_ID_SIZE]; | 1948 | u8 kallsyms_build_id[BUILD_ID_SIZE]; |
1942 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | 1949 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; |
1943 | 1950 | ||
1944 | if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, | 1951 | if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, |
1945 | sizeof(kallsyms_build_id)) == 0) { | 1952 | sizeof(kallsyms_build_id)) == 0) { |
1946 | if (dso__build_id_equal(self, kallsyms_build_id)) { | 1953 | if (dso__build_id_equal(dso, kallsyms_build_id)) { |
1947 | kallsyms_filename = "/proc/kallsyms"; | 1954 | kallsyms_filename = "/proc/kallsyms"; |
1948 | goto do_kallsyms; | 1955 | goto do_kallsyms; |
1949 | } | 1956 | } |
@@ -1952,7 +1959,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1952 | * Now look if we have it on the build-id cache in | 1959 | * Now look if we have it on the build-id cache in |
1953 | * $HOME/.debug/[kernel.kallsyms]. | 1960 | * $HOME/.debug/[kernel.kallsyms]. |
1954 | */ | 1961 | */ |
1955 | build_id__sprintf(self->build_id, sizeof(self->build_id), | 1962 | build_id__sprintf(dso->build_id, sizeof(dso->build_id), |
1956 | sbuild_id); | 1963 | sbuild_id); |
1957 | 1964 | ||
1958 | if (asprintf(&kallsyms_allocated_filename, | 1965 | if (asprintf(&kallsyms_allocated_filename, |
@@ -1979,7 +1986,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1979 | } | 1986 | } |
1980 | 1987 | ||
1981 | do_kallsyms: | 1988 | do_kallsyms: |
1982 | err = dso__load_kallsyms(self, kallsyms_filename, map, filter); | 1989 | err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); |
1983 | if (err > 0) | 1990 | if (err > 0) |
1984 | pr_debug("Using %s for symbols\n", kallsyms_filename); | 1991 | pr_debug("Using %s for symbols\n", kallsyms_filename); |
1985 | free(kallsyms_allocated_filename); | 1992 | free(kallsyms_allocated_filename); |
@@ -1987,7 +1994,7 @@ do_kallsyms: | |||
1987 | if (err > 0) { | 1994 | if (err > 0) { |
1988 | out_fixup: | 1995 | out_fixup: |
1989 | if (kallsyms_filename != NULL) | 1996 | if (kallsyms_filename != NULL) |
1990 | dso__set_long_name(self, strdup("[kernel.kallsyms]")); | 1997 | dso__set_long_name(dso, strdup("[kernel.kallsyms]")); |
1991 | map__fixup_start(map); | 1998 | map__fixup_start(map); |
1992 | map__fixup_end(map); | 1999 | map__fixup_end(map); |
1993 | } | 2000 | } |
@@ -1995,8 +2002,8 @@ out_fixup: | |||
1995 | return err; | 2002 | return err; |
1996 | } | 2003 | } |
1997 | 2004 | ||
1998 | static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, | 2005 | static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, |
1999 | symbol_filter_t filter) | 2006 | symbol_filter_t filter) |
2000 | { | 2007 | { |
2001 | int err; | 2008 | int err; |
2002 | const char *kallsyms_filename = NULL; | 2009 | const char *kallsyms_filename = NULL; |
@@ -2016,7 +2023,7 @@ static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, | |||
2016 | * Or use file guest_kallsyms inputted by user on commandline | 2023 | * Or use file guest_kallsyms inputted by user on commandline |
2017 | */ | 2024 | */ |
2018 | if (symbol_conf.default_guest_vmlinux_name != NULL) { | 2025 | if (symbol_conf.default_guest_vmlinux_name != NULL) { |
2019 | err = dso__load_vmlinux(self, map, | 2026 | err = dso__load_vmlinux(dso, map, |
2020 | symbol_conf.default_guest_vmlinux_name, filter); | 2027 | symbol_conf.default_guest_vmlinux_name, filter); |
2021 | goto out_try_fixup; | 2028 | goto out_try_fixup; |
2022 | } | 2029 | } |
@@ -2029,7 +2036,7 @@ static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, | |||
2029 | kallsyms_filename = path; | 2036 | kallsyms_filename = path; |
2030 | } | 2037 | } |
2031 | 2038 | ||
2032 | err = dso__load_kallsyms(self, kallsyms_filename, map, filter); | 2039 | err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); |
2033 | if (err > 0) | 2040 | if (err > 0) |
2034 | pr_debug("Using %s for symbols\n", kallsyms_filename); | 2041 | pr_debug("Using %s for symbols\n", kallsyms_filename); |
2035 | 2042 | ||
@@ -2037,7 +2044,7 @@ out_try_fixup: | |||
2037 | if (err > 0) { | 2044 | if (err > 0) { |
2038 | if (kallsyms_filename != NULL) { | 2045 | if (kallsyms_filename != NULL) { |
2039 | machine__mmap_name(machine, path, sizeof(path)); | 2046 | machine__mmap_name(machine, path, sizeof(path)); |
2040 | dso__set_long_name(self, strdup(path)); | 2047 | dso__set_long_name(dso, strdup(path)); |
2041 | } | 2048 | } |
2042 | map__fixup_start(map); | 2049 | map__fixup_start(map); |
2043 | map__fixup_end(map); | 2050 | map__fixup_end(map); |
@@ -2090,12 +2097,12 @@ size_t __dsos__fprintf(struct list_head *head, FILE *fp) | |||
2090 | return ret; | 2097 | return ret; |
2091 | } | 2098 | } |
2092 | 2099 | ||
2093 | size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp) | 2100 | size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) |
2094 | { | 2101 | { |
2095 | struct rb_node *nd; | 2102 | struct rb_node *nd; |
2096 | size_t ret = 0; | 2103 | size_t ret = 0; |
2097 | 2104 | ||
2098 | for (nd = rb_first(self); nd; nd = rb_next(nd)) { | 2105 | for (nd = rb_first(machines); nd; nd = rb_next(nd)) { |
2099 | struct machine *pos = rb_entry(nd, struct machine, rb_node); | 2106 | struct machine *pos = rb_entry(nd, struct machine, rb_node); |
2100 | ret += __dsos__fprintf(&pos->kernel_dsos, fp); | 2107 | ret += __dsos__fprintf(&pos->kernel_dsos, fp); |
2101 | ret += __dsos__fprintf(&pos->user_dsos, fp); | 2108 | ret += __dsos__fprintf(&pos->user_dsos, fp); |
@@ -2119,18 +2126,20 @@ static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, | |||
2119 | return ret; | 2126 | return ret; |
2120 | } | 2127 | } |
2121 | 2128 | ||
2122 | size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits) | 2129 | size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, |
2130 | bool with_hits) | ||
2123 | { | 2131 | { |
2124 | return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) + | 2132 | return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) + |
2125 | __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits); | 2133 | __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits); |
2126 | } | 2134 | } |
2127 | 2135 | ||
2128 | size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits) | 2136 | size_t machines__fprintf_dsos_buildid(struct rb_root *machines, |
2137 | FILE *fp, bool with_hits) | ||
2129 | { | 2138 | { |
2130 | struct rb_node *nd; | 2139 | struct rb_node *nd; |
2131 | size_t ret = 0; | 2140 | size_t ret = 0; |
2132 | 2141 | ||
2133 | for (nd = rb_first(self); nd; nd = rb_next(nd)) { | 2142 | for (nd = rb_first(machines); nd; nd = rb_next(nd)) { |
2134 | struct machine *pos = rb_entry(nd, struct machine, rb_node); | 2143 | struct machine *pos = rb_entry(nd, struct machine, rb_node); |
2135 | ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); | 2144 | ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); |
2136 | } | 2145 | } |
@@ -2139,59 +2148,59 @@ size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_ | |||
2139 | 2148 | ||
2140 | struct dso *dso__new_kernel(const char *name) | 2149 | struct dso *dso__new_kernel(const char *name) |
2141 | { | 2150 | { |
2142 | struct dso *self = dso__new(name ?: "[kernel.kallsyms]"); | 2151 | struct dso *dso = dso__new(name ?: "[kernel.kallsyms]"); |
2143 | 2152 | ||
2144 | if (self != NULL) { | 2153 | if (dso != NULL) { |
2145 | dso__set_short_name(self, "[kernel]"); | 2154 | dso__set_short_name(dso, "[kernel]"); |
2146 | self->kernel = DSO_TYPE_KERNEL; | 2155 | dso->kernel = DSO_TYPE_KERNEL; |
2147 | } | 2156 | } |
2148 | 2157 | ||
2149 | return self; | 2158 | return dso; |
2150 | } | 2159 | } |
2151 | 2160 | ||
2152 | static struct dso *dso__new_guest_kernel(struct machine *machine, | 2161 | static struct dso *dso__new_guest_kernel(struct machine *machine, |
2153 | const char *name) | 2162 | const char *name) |
2154 | { | 2163 | { |
2155 | char bf[PATH_MAX]; | 2164 | char bf[PATH_MAX]; |
2156 | struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf))); | 2165 | struct dso *dso = dso__new(name ?: machine__mmap_name(machine, bf, |
2157 | 2166 | sizeof(bf))); | |
2158 | if (self != NULL) { | 2167 | if (dso != NULL) { |
2159 | dso__set_short_name(self, "[guest.kernel]"); | 2168 | dso__set_short_name(dso, "[guest.kernel]"); |
2160 | self->kernel = DSO_TYPE_GUEST_KERNEL; | 2169 | dso->kernel = DSO_TYPE_GUEST_KERNEL; |
2161 | } | 2170 | } |
2162 | 2171 | ||
2163 | return self; | 2172 | return dso; |
2164 | } | 2173 | } |
2165 | 2174 | ||
2166 | void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine) | 2175 | void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) |
2167 | { | 2176 | { |
2168 | char path[PATH_MAX]; | 2177 | char path[PATH_MAX]; |
2169 | 2178 | ||
2170 | if (machine__is_default_guest(machine)) | 2179 | if (machine__is_default_guest(machine)) |
2171 | return; | 2180 | return; |
2172 | sprintf(path, "%s/sys/kernel/notes", machine->root_dir); | 2181 | sprintf(path, "%s/sys/kernel/notes", machine->root_dir); |
2173 | if (sysfs__read_build_id(path, self->build_id, | 2182 | if (sysfs__read_build_id(path, dso->build_id, |
2174 | sizeof(self->build_id)) == 0) | 2183 | sizeof(dso->build_id)) == 0) |
2175 | self->has_build_id = true; | 2184 | dso->has_build_id = true; |
2176 | } | 2185 | } |
2177 | 2186 | ||
2178 | static struct dso *machine__create_kernel(struct machine *self) | 2187 | static struct dso *machine__create_kernel(struct machine *machine) |
2179 | { | 2188 | { |
2180 | const char *vmlinux_name = NULL; | 2189 | const char *vmlinux_name = NULL; |
2181 | struct dso *kernel; | 2190 | struct dso *kernel; |
2182 | 2191 | ||
2183 | if (machine__is_host(self)) { | 2192 | if (machine__is_host(machine)) { |
2184 | vmlinux_name = symbol_conf.vmlinux_name; | 2193 | vmlinux_name = symbol_conf.vmlinux_name; |
2185 | kernel = dso__new_kernel(vmlinux_name); | 2194 | kernel = dso__new_kernel(vmlinux_name); |
2186 | } else { | 2195 | } else { |
2187 | if (machine__is_default_guest(self)) | 2196 | if (machine__is_default_guest(machine)) |
2188 | vmlinux_name = symbol_conf.default_guest_vmlinux_name; | 2197 | vmlinux_name = symbol_conf.default_guest_vmlinux_name; |
2189 | kernel = dso__new_guest_kernel(self, vmlinux_name); | 2198 | kernel = dso__new_guest_kernel(machine, vmlinux_name); |
2190 | } | 2199 | } |
2191 | 2200 | ||
2192 | if (kernel != NULL) { | 2201 | if (kernel != NULL) { |
2193 | dso__read_running_kernel_build_id(kernel, self); | 2202 | dso__read_running_kernel_build_id(kernel, machine); |
2194 | dsos__add(&self->kernel_dsos, kernel); | 2203 | dsos__add(&machine->kernel_dsos, kernel); |
2195 | } | 2204 | } |
2196 | return kernel; | 2205 | return kernel; |
2197 | } | 2206 | } |
@@ -2236,41 +2245,43 @@ static u64 machine__get_kernel_start_addr(struct machine *machine) | |||
2236 | return args.start; | 2245 | return args.start; |
2237 | } | 2246 | } |
2238 | 2247 | ||
2239 | int __machine__create_kernel_maps(struct machine *self, struct dso *kernel) | 2248 | int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) |
2240 | { | 2249 | { |
2241 | enum map_type type; | 2250 | enum map_type type; |
2242 | u64 start = machine__get_kernel_start_addr(self); | 2251 | u64 start = machine__get_kernel_start_addr(machine); |
2243 | 2252 | ||
2244 | for (type = 0; type < MAP__NR_TYPES; ++type) { | 2253 | for (type = 0; type < MAP__NR_TYPES; ++type) { |
2245 | struct kmap *kmap; | 2254 | struct kmap *kmap; |
2246 | 2255 | ||
2247 | self->vmlinux_maps[type] = map__new2(start, kernel, type); | 2256 | machine->vmlinux_maps[type] = map__new2(start, kernel, type); |
2248 | if (self->vmlinux_maps[type] == NULL) | 2257 | if (machine->vmlinux_maps[type] == NULL) |
2249 | return -1; | 2258 | return -1; |
2250 | 2259 | ||
2251 | self->vmlinux_maps[type]->map_ip = | 2260 | machine->vmlinux_maps[type]->map_ip = |
2252 | self->vmlinux_maps[type]->unmap_ip = identity__map_ip; | 2261 | machine->vmlinux_maps[type]->unmap_ip = |
2253 | 2262 | identity__map_ip; | |
2254 | kmap = map__kmap(self->vmlinux_maps[type]); | 2263 | kmap = map__kmap(machine->vmlinux_maps[type]); |
2255 | kmap->kmaps = &self->kmaps; | 2264 | kmap->kmaps = &machine->kmaps; |
2256 | map_groups__insert(&self->kmaps, self->vmlinux_maps[type]); | 2265 | map_groups__insert(&machine->kmaps, |
2266 | machine->vmlinux_maps[type]); | ||
2257 | } | 2267 | } |
2258 | 2268 | ||
2259 | return 0; | 2269 | return 0; |
2260 | } | 2270 | } |
2261 | 2271 | ||
2262 | void machine__destroy_kernel_maps(struct machine *self) | 2272 | void machine__destroy_kernel_maps(struct machine *machine) |
2263 | { | 2273 | { |
2264 | enum map_type type; | 2274 | enum map_type type; |
2265 | 2275 | ||
2266 | for (type = 0; type < MAP__NR_TYPES; ++type) { | 2276 | for (type = 0; type < MAP__NR_TYPES; ++type) { |
2267 | struct kmap *kmap; | 2277 | struct kmap *kmap; |
2268 | 2278 | ||
2269 | if (self->vmlinux_maps[type] == NULL) | 2279 | if (machine->vmlinux_maps[type] == NULL) |
2270 | continue; | 2280 | continue; |
2271 | 2281 | ||
2272 | kmap = map__kmap(self->vmlinux_maps[type]); | 2282 | kmap = map__kmap(machine->vmlinux_maps[type]); |
2273 | map_groups__remove(&self->kmaps, self->vmlinux_maps[type]); | 2283 | map_groups__remove(&machine->kmaps, |
2284 | machine->vmlinux_maps[type]); | ||
2274 | if (kmap->ref_reloc_sym) { | 2285 | if (kmap->ref_reloc_sym) { |
2275 | /* | 2286 | /* |
2276 | * ref_reloc_sym is shared among all maps, so free just | 2287 | * ref_reloc_sym is shared among all maps, so free just |
@@ -2284,25 +2295,25 @@ void machine__destroy_kernel_maps(struct machine *self) | |||
2284 | kmap->ref_reloc_sym = NULL; | 2295 | kmap->ref_reloc_sym = NULL; |
2285 | } | 2296 | } |
2286 | 2297 | ||
2287 | map__delete(self->vmlinux_maps[type]); | 2298 | map__delete(machine->vmlinux_maps[type]); |
2288 | self->vmlinux_maps[type] = NULL; | 2299 | machine->vmlinux_maps[type] = NULL; |
2289 | } | 2300 | } |
2290 | } | 2301 | } |
2291 | 2302 | ||
2292 | int machine__create_kernel_maps(struct machine *self) | 2303 | int machine__create_kernel_maps(struct machine *machine) |
2293 | { | 2304 | { |
2294 | struct dso *kernel = machine__create_kernel(self); | 2305 | struct dso *kernel = machine__create_kernel(machine); |
2295 | 2306 | ||
2296 | if (kernel == NULL || | 2307 | if (kernel == NULL || |
2297 | __machine__create_kernel_maps(self, kernel) < 0) | 2308 | __machine__create_kernel_maps(machine, kernel) < 0) |
2298 | return -1; | 2309 | return -1; |
2299 | 2310 | ||
2300 | if (symbol_conf.use_modules && machine__create_modules(self) < 0) | 2311 | if (symbol_conf.use_modules && machine__create_modules(machine) < 0) |
2301 | pr_debug("Problems creating module maps, continuing anyway...\n"); | 2312 | pr_debug("Problems creating module maps, continuing anyway...\n"); |
2302 | /* | 2313 | /* |
2303 | * Now that we have all the maps created, just set the ->end of them: | 2314 | * Now that we have all the maps created, just set the ->end of them: |
2304 | */ | 2315 | */ |
2305 | map_groups__fixup_end(&self->kmaps); | 2316 | map_groups__fixup_end(&machine->kmaps); |
2306 | return 0; | 2317 | return 0; |
2307 | } | 2318 | } |
2308 | 2319 | ||
@@ -2366,11 +2377,11 @@ out_fail: | |||
2366 | return -1; | 2377 | return -1; |
2367 | } | 2378 | } |
2368 | 2379 | ||
2369 | size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp) | 2380 | size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) |
2370 | { | 2381 | { |
2371 | int i; | 2382 | int i; |
2372 | size_t printed = 0; | 2383 | size_t printed = 0; |
2373 | struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso; | 2384 | struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; |
2374 | 2385 | ||
2375 | if (kdso->has_build_id) { | 2386 | if (kdso->has_build_id) { |
2376 | char filename[PATH_MAX]; | 2387 | char filename[PATH_MAX]; |
@@ -2467,9 +2478,9 @@ void symbol__exit(void) | |||
2467 | symbol_conf.initialized = false; | 2478 | symbol_conf.initialized = false; |
2468 | } | 2479 | } |
2469 | 2480 | ||
2470 | int machines__create_kernel_maps(struct rb_root *self, pid_t pid) | 2481 | int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) |
2471 | { | 2482 | { |
2472 | struct machine *machine = machines__findnew(self, pid); | 2483 | struct machine *machine = machines__findnew(machines, pid); |
2473 | 2484 | ||
2474 | if (machine == NULL) | 2485 | if (machine == NULL) |
2475 | return -1; | 2486 | return -1; |
@@ -2520,7 +2531,7 @@ char *strxfrchar(char *s, char from, char to) | |||
2520 | return s; | 2531 | return s; |
2521 | } | 2532 | } |
2522 | 2533 | ||
2523 | int machines__create_guest_kernel_maps(struct rb_root *self) | 2534 | int machines__create_guest_kernel_maps(struct rb_root *machines) |
2524 | { | 2535 | { |
2525 | int ret = 0; | 2536 | int ret = 0; |
2526 | struct dirent **namelist = NULL; | 2537 | struct dirent **namelist = NULL; |
@@ -2531,7 +2542,7 @@ int machines__create_guest_kernel_maps(struct rb_root *self) | |||
2531 | if (symbol_conf.default_guest_vmlinux_name || | 2542 | if (symbol_conf.default_guest_vmlinux_name || |
2532 | symbol_conf.default_guest_modules || | 2543 | symbol_conf.default_guest_modules || |
2533 | symbol_conf.default_guest_kallsyms) { | 2544 | symbol_conf.default_guest_kallsyms) { |
2534 | machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID); | 2545 | machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); |
2535 | } | 2546 | } |
2536 | 2547 | ||
2537 | if (symbol_conf.guestmount) { | 2548 | if (symbol_conf.guestmount) { |
@@ -2552,7 +2563,7 @@ int machines__create_guest_kernel_maps(struct rb_root *self) | |||
2552 | pr_debug("Can't access file %s\n", path); | 2563 | pr_debug("Can't access file %s\n", path); |
2553 | goto failure; | 2564 | goto failure; |
2554 | } | 2565 | } |
2555 | machines__create_kernel_maps(self, pid); | 2566 | machines__create_kernel_maps(machines, pid); |
2556 | } | 2567 | } |
2557 | failure: | 2568 | failure: |
2558 | free(namelist); | 2569 | free(namelist); |
@@ -2561,23 +2572,23 @@ failure: | |||
2561 | return ret; | 2572 | return ret; |
2562 | } | 2573 | } |
2563 | 2574 | ||
2564 | void machines__destroy_guest_kernel_maps(struct rb_root *self) | 2575 | void machines__destroy_guest_kernel_maps(struct rb_root *machines) |
2565 | { | 2576 | { |
2566 | struct rb_node *next = rb_first(self); | 2577 | struct rb_node *next = rb_first(machines); |
2567 | 2578 | ||
2568 | while (next) { | 2579 | while (next) { |
2569 | struct machine *pos = rb_entry(next, struct machine, rb_node); | 2580 | struct machine *pos = rb_entry(next, struct machine, rb_node); |
2570 | 2581 | ||
2571 | next = rb_next(&pos->rb_node); | 2582 | next = rb_next(&pos->rb_node); |
2572 | rb_erase(&pos->rb_node, self); | 2583 | rb_erase(&pos->rb_node, machines); |
2573 | machine__delete(pos); | 2584 | machine__delete(pos); |
2574 | } | 2585 | } |
2575 | } | 2586 | } |
2576 | 2587 | ||
2577 | int machine__load_kallsyms(struct machine *self, const char *filename, | 2588 | int machine__load_kallsyms(struct machine *machine, const char *filename, |
2578 | enum map_type type, symbol_filter_t filter) | 2589 | enum map_type type, symbol_filter_t filter) |
2579 | { | 2590 | { |
2580 | struct map *map = self->vmlinux_maps[type]; | 2591 | struct map *map = machine->vmlinux_maps[type]; |
2581 | int ret = dso__load_kallsyms(map->dso, filename, map, filter); | 2592 | int ret = dso__load_kallsyms(map->dso, filename, map, filter); |
2582 | 2593 | ||
2583 | if (ret > 0) { | 2594 | if (ret > 0) { |
@@ -2587,16 +2598,16 @@ int machine__load_kallsyms(struct machine *self, const char *filename, | |||
2587 | * kernel, with modules between them, fixup the end of all | 2598 | * kernel, with modules between them, fixup the end of all |
2588 | * sections. | 2599 | * sections. |
2589 | */ | 2600 | */ |
2590 | __map_groups__fixup_end(&self->kmaps, type); | 2601 | __map_groups__fixup_end(&machine->kmaps, type); |
2591 | } | 2602 | } |
2592 | 2603 | ||
2593 | return ret; | 2604 | return ret; |
2594 | } | 2605 | } |
2595 | 2606 | ||
2596 | int machine__load_vmlinux_path(struct machine *self, enum map_type type, | 2607 | int machine__load_vmlinux_path(struct machine *machine, enum map_type type, |
2597 | symbol_filter_t filter) | 2608 | symbol_filter_t filter) |
2598 | { | 2609 | { |
2599 | struct map *map = self->vmlinux_maps[type]; | 2610 | struct map *map = machine->vmlinux_maps[type]; |
2600 | int ret = dso__load_vmlinux_path(map->dso, map, filter); | 2611 | int ret = dso__load_vmlinux_path(map->dso, map, filter); |
2601 | 2612 | ||
2602 | if (ret > 0) { | 2613 | if (ret > 0) { |
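The symbol.c hunks above are a mechanical rename of the OO-style "self" parameter to names that say what the object actually is (dso, map, mg, machine, machines), plus small style cleanups (braces around one-line else branches, long argument lists re-wrapped); behaviour is unchanged. As a minimal sketch of how the renamed entry points read at a call site, assuming the perf-internal headers are on the include path; report_dso() is an invented helper, not part of this patch:

/*
 * Illustrative sketch only, not part of this patch: a caller of the
 * renamed dso__load()/dso__symtab_origin() API.  Assumes the perf
 * util headers (symbol.h and friends) are available to the build.
 */
#include <stdio.h>
#include "util/symbol.h"

static int report_dso(struct dso *dso, struct map *map)
{
	int nr = dso__load(dso, map, NULL);	/* NULL: no symbol filter */

	if (nr < 0)
		return nr;	/* negative means the load failed */

	/*
	 * dso__symtab_origin() yields a single origin character
	 * ('k' for kallsyms, 'G' for a guest kmodule, ...), or '!'
	 * when no symbol table was found.
	 */
	printf("%s: %d symbols (origin %c)\n",
	       dso->long_name, nr, dso__symtab_origin(dso));
	return 0;
}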
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 713b0b40cc4a..242de0101a86 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -62,7 +62,7 @@ struct symbol { | |||
62 | char name[0]; | 62 | char name[0]; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | void symbol__delete(struct symbol *self); | 65 | void symbol__delete(struct symbol *sym); |
66 | 66 | ||
67 | struct strlist; | 67 | struct strlist; |
68 | 68 | ||
@@ -96,9 +96,9 @@ struct symbol_conf { | |||
96 | 96 | ||
97 | extern struct symbol_conf symbol_conf; | 97 | extern struct symbol_conf symbol_conf; |
98 | 98 | ||
99 | static inline void *symbol__priv(struct symbol *self) | 99 | static inline void *symbol__priv(struct symbol *sym) |
100 | { | 100 | { |
101 | return ((void *)self) - symbol_conf.priv_size; | 101 | return ((void *)sym) - symbol_conf.priv_size; |
102 | } | 102 | } |
103 | 103 | ||
104 | struct ref_reloc_sym { | 104 | struct ref_reloc_sym { |
@@ -155,43 +155,45 @@ struct dso { | |||
155 | 155 | ||
156 | struct dso *dso__new(const char *name); | 156 | struct dso *dso__new(const char *name); |
157 | struct dso *dso__new_kernel(const char *name); | 157 | struct dso *dso__new_kernel(const char *name); |
158 | void dso__delete(struct dso *self); | 158 | void dso__delete(struct dso *dso); |
159 | 159 | ||
160 | int dso__name_len(const struct dso *self); | 160 | int dso__name_len(const struct dso *dso); |
161 | 161 | ||
162 | bool dso__loaded(const struct dso *self, enum map_type type); | 162 | bool dso__loaded(const struct dso *dso, enum map_type type); |
163 | bool dso__sorted_by_name(const struct dso *self, enum map_type type); | 163 | bool dso__sorted_by_name(const struct dso *dso, enum map_type type); |
164 | 164 | ||
165 | static inline void dso__set_loaded(struct dso *self, enum map_type type) | 165 | static inline void dso__set_loaded(struct dso *dso, enum map_type type) |
166 | { | 166 | { |
167 | self->loaded |= (1 << type); | 167 | dso->loaded |= (1 << type); |
168 | } | 168 | } |
169 | 169 | ||
170 | void dso__sort_by_name(struct dso *self, enum map_type type); | 170 | void dso__sort_by_name(struct dso *dso, enum map_type type); |
171 | 171 | ||
172 | struct dso *__dsos__findnew(struct list_head *head, const char *name); | 172 | struct dso *__dsos__findnew(struct list_head *head, const char *name); |
173 | 173 | ||
174 | int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); | 174 | int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); |
175 | int dso__load_vmlinux(struct dso *self, struct map *map, | 175 | int dso__load_vmlinux(struct dso *dso, struct map *map, |
176 | const char *vmlinux, symbol_filter_t filter); | 176 | const char *vmlinux, symbol_filter_t filter); |
177 | int dso__load_vmlinux_path(struct dso *self, struct map *map, | 177 | int dso__load_vmlinux_path(struct dso *dso, struct map *map, |
178 | symbol_filter_t filter); | 178 | symbol_filter_t filter); |
179 | int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, | 179 | int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, |
180 | symbol_filter_t filter); | 180 | symbol_filter_t filter); |
181 | int machine__load_kallsyms(struct machine *self, const char *filename, | 181 | int machine__load_kallsyms(struct machine *machine, const char *filename, |
182 | enum map_type type, symbol_filter_t filter); | 182 | enum map_type type, symbol_filter_t filter); |
183 | int machine__load_vmlinux_path(struct machine *self, enum map_type type, | 183 | int machine__load_vmlinux_path(struct machine *machine, enum map_type type, |
184 | symbol_filter_t filter); | 184 | symbol_filter_t filter); |
185 | 185 | ||
186 | size_t __dsos__fprintf(struct list_head *head, FILE *fp); | 186 | size_t __dsos__fprintf(struct list_head *head, FILE *fp); |
187 | 187 | ||
188 | size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits); | 188 | size_t machine__fprintf_dsos_buildid(struct machine *machine, |
189 | size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp); | 189 | FILE *fp, bool with_hits); |
190 | size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits); | 190 | size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); |
191 | 191 | size_t machines__fprintf_dsos_buildid(struct rb_root *machines, | |
192 | size_t dso__fprintf_buildid(struct dso *self, FILE *fp); | 192 | FILE *fp, bool with_hits); |
193 | size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp); | 193 | size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); |
194 | size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); | 194 | size_t dso__fprintf_symbols_by_name(struct dso *dso, |
195 | enum map_type type, FILE *fp); | ||
196 | size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); | ||
195 | 197 | ||
196 | enum symtab_type { | 198 | enum symtab_type { |
197 | SYMTAB__KALLSYMS = 0, | 199 | SYMTAB__KALLSYMS = 0, |
@@ -207,34 +209,36 @@ enum symtab_type { | |||
207 | SYMTAB__NOT_FOUND, | 209 | SYMTAB__NOT_FOUND, |
208 | }; | 210 | }; |
209 | 211 | ||
210 | char dso__symtab_origin(const struct dso *self); | 212 | char dso__symtab_origin(const struct dso *dso); |
211 | void dso__set_long_name(struct dso *self, char *name); | 213 | void dso__set_long_name(struct dso *dso, char *name); |
212 | void dso__set_build_id(struct dso *self, void *build_id); | 214 | void dso__set_build_id(struct dso *dso, void *build_id); |
213 | void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine); | 215 | void dso__read_running_kernel_build_id(struct dso *dso, |
214 | struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); | 216 | struct machine *machine); |
215 | struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, | 217 | struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, |
218 | u64 addr); | ||
219 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, | ||
216 | const char *name); | 220 | const char *name); |
217 | 221 | ||
218 | int filename__read_build_id(const char *filename, void *bf, size_t size); | 222 | int filename__read_build_id(const char *filename, void *bf, size_t size); |
219 | int sysfs__read_build_id(const char *filename, void *bf, size_t size); | 223 | int sysfs__read_build_id(const char *filename, void *bf, size_t size); |
220 | bool __dsos__read_build_ids(struct list_head *head, bool with_hits); | 224 | bool __dsos__read_build_ids(struct list_head *head, bool with_hits); |
221 | int build_id__sprintf(const u8 *self, int len, char *bf); | 225 | int build_id__sprintf(const u8 *build_id, int len, char *bf); |
222 | int kallsyms__parse(const char *filename, void *arg, | 226 | int kallsyms__parse(const char *filename, void *arg, |
223 | int (*process_symbol)(void *arg, const char *name, | 227 | int (*process_symbol)(void *arg, const char *name, |
224 | char type, u64 start, u64 end)); | 228 | char type, u64 start, u64 end)); |
225 | 229 | ||
226 | void machine__destroy_kernel_maps(struct machine *self); | 230 | void machine__destroy_kernel_maps(struct machine *machine); |
227 | int __machine__create_kernel_maps(struct machine *self, struct dso *kernel); | 231 | int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); |
228 | int machine__create_kernel_maps(struct machine *self); | 232 | int machine__create_kernel_maps(struct machine *machine); |
229 | 233 | ||
230 | int machines__create_kernel_maps(struct rb_root *self, pid_t pid); | 234 | int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); |
231 | int machines__create_guest_kernel_maps(struct rb_root *self); | 235 | int machines__create_guest_kernel_maps(struct rb_root *machines); |
232 | void machines__destroy_guest_kernel_maps(struct rb_root *self); | 236 | void machines__destroy_guest_kernel_maps(struct rb_root *machines); |
233 | 237 | ||
234 | int symbol__init(void); | 238 | int symbol__init(void); |
235 | void symbol__exit(void); | 239 | void symbol__exit(void); |
236 | bool symbol_type__is_a(char symbol_type, enum map_type map_type); | 240 | bool symbol_type__is_a(char symbol_type, enum map_type map_type); |
237 | 241 | ||
238 | size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp); | 242 | size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); |
239 | 243 | ||
240 | #endif /* __PERF_SYMBOL */ | 244 | #endif /* __PERF_SYMBOL */ |
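The header changes mirror the .c side: every "self" in the prototypes above is renamed, and over-long declarations are re-wrapped. For readers new to the convention, perf approximates methods in C with type__method(struct type *obj, ...) names, where the first argument is the object being operated on. A tiny self-contained sketch of that convention follows; the counter type is invented for illustration and is not part of tools/perf.

/*
 * Hypothetical example of the "type__method(obj, ...)" convention used
 * throughout the header above; struct counter is made up and does not
 * exist in tools/perf.
 */
#include <stdio.h>

struct counter {
	const char *name;
	unsigned long hits;
};

static void counter__inc(struct counter *counter)
{
	counter->hits++;
}

static size_t counter__fprintf(const struct counter *counter, FILE *fp)
{
	/* Like the dso__fprintf() family, report how much was printed. */
	int printed = fprintf(fp, "%s: %lu\n", counter->name, counter->hits);

	return printed > 0 ? (size_t)printed : 0;
}

int main(void)
{
	struct counter c = { .name = "page-faults", .hits = 0 };

	counter__inc(&c);
	return counter__fprintf(&c, stdout) > 0 ? 0 : 1;
}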