274 files changed, 3759 insertions, 2419 deletions
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 88880839ece..64565aac6e4 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -520,59 +520,20 @@ Support for power domains is provided through the pwr_domain field of struct
 device. This field is a pointer to an object of type struct dev_power_domain,
 defined in include/linux/pm.h, providing a set of power management callbacks
 analogous to the subsystem-level and device driver callbacks that are executed
-for the given device during all power transitions, in addition to the respective
-subsystem-level callbacks. Specifically, the power domain "suspend" callbacks
-(i.e. ->runtime_suspend(), ->suspend(), ->freeze(), ->poweroff(), etc.) are
-executed after the analogous subsystem-level callbacks, while the power domain
-"resume" callbacks (i.e. ->runtime_resume(), ->resume(), ->thaw(), ->restore,
-etc.) are executed before the analogous subsystem-level callbacks. Error codes
-returned by the "suspend" and "resume" power domain callbacks are ignored.
-
-Power domain ->runtime_idle() callback is executed before the subsystem-level
-->runtime_idle() callback and the result returned by it is not ignored. Namely,
-if it returns error code, the subsystem-level ->runtime_idle() callback will not
-be called and the helper function rpm_idle() executing it will return error
-code. This mechanism is intended to help platforms where saving device state
-is a time consuming operation and should only be carried out if all devices
-in the power domain are idle, before turning off the shared power resource(s).
-Namely, the power domain ->runtime_idle() callback may return error code until
-the pm_runtime_idle() helper (or its asynchronous version) has been called for
-all devices in the power domain (it is recommended that the returned error code
-be -EBUSY in those cases), preventing the subsystem-level ->runtime_idle()
-callback from being run prematurely.
-
-The support for device power domains is only relevant to platforms needing to
-use the same subsystem-level (e.g. platform bus type) and device driver power
-management callbacks in many different power domain configurations and wanting
-to avoid incorporating the support for power domains into the subsystem-level
-callbacks. The other platforms need not implement it or take it into account
-in any way.
-
-
-System Devices
---------------
-System devices (sysdevs) follow a slightly different API, which can be found in
-
-	include/linux/sysdev.h
-	drivers/base/sys.c
-
-System devices will be suspended with interrupts disabled, and after all other
-devices have been suspended. On resume, they will be resumed before any other
-devices, and also with interrupts disabled. These things occur in special
-"sysdev_driver" phases, which affect only system devices.
-
-Thus, after the suspend_noirq (or freeze_noirq or poweroff_noirq) phase, when
-the non-boot CPUs are all offline and IRQs are disabled on the remaining online
-CPU, then a sysdev_driver.suspend phase is carried out, and the system enters a
-sleep state (or a system image is created). During resume (or after the image
-has been created or loaded) a sysdev_driver.resume phase is carried out, IRQs
-are enabled on the only online CPU, the non-boot CPUs are enabled, and the
-resume_noirq (or thaw_noirq or restore_noirq) phase begins.
-
-Code to actually enter and exit the system-wide low power state sometimes
-involves hardware details that are only known to the boot firmware, and
-may leave a CPU running software (from SRAM or flash memory) that monitors
-the system and manages its wakeup sequence.
+for the given device during all power transitions, instead of the respective
+subsystem-level callbacks. Specifically, if a device's pm_domain pointer is
+not NULL, the ->suspend() callback from the object pointed to by it will be
+executed instead of its subsystem's (e.g. bus type's) ->suspend() callback and
+analogously for all of the remaining callbacks. In other words, power management
+domain callbacks, if defined for the given device, always take precedence over
+the callbacks provided by the device's subsystem (e.g. bus type).
+
+The support for device power management domains is only relevant to platforms
+needing to use the same device driver power management callbacks in many
+different power domain configurations and wanting to avoid incorporating the
+support for power domains into subsystem-level callbacks, for example by
+modifying the platform bus type. Other platforms need not implement it or take
+it into account in any way.
 
 
 Device Low Power (suspend) States
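Note: for readers following the new semantics above, a minimal sketch of how a platform might attach such a domain, assuming the 3.0-era struct dev_power_domain that wraps struct dev_pm_ops; my_domain_suspend and my_platform_add_device are hypothetical names, not from this diff:

	#include <linux/pm.h>
	#include <linux/device.h>

	static int my_domain_suspend(struct device *dev)
	{
		/* Runs instead of the subsystem's (e.g. bus type's) ->suspend(). */
		return 0;
	}

	static struct dev_power_domain my_pwr_domain = {
		.ops = {
			.suspend = my_domain_suspend,
		},
	};

	/* Hypothetical platform hook attaching the domain to a device. */
	static void my_platform_add_device(struct device *dev)
	{
		dev->pwr_domain = &my_pwr_domain;
	}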
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 654097b130b..22accb3eb40 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -566,11 +566,6 @@ to do this is:
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
 
-The PM core always increments the run-time usage counter before calling the
-->prepare() callback and decrements it after calling the ->complete() callback.
-Hence disabling run-time PM temporarily like this will not cause any run-time
-suspend callbacks to be lost.
-
 7. Generic subsystem callbacks
 
 Subsystems may wish to conserve code space by using the set of generic power
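Note: the two calls kept above typically sit at the end of a driver's system-resume path. A minimal sketch of that pattern, assuming the documented helpers; foo_resume and foo_hw_power_up are hypothetical:

	static int foo_resume(struct device *dev)
	{
		foo_hw_power_up(dev);		/* hypothetical: bring the hardware up */

		/* Tell runtime PM that the device's status now matches the hardware. */
		pm_runtime_disable(dev);	/* avoid racing with runtime PM */
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		return 0;
	}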
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Sneaky Weasel
 
 # *DOCUMENTATION*
@@ -1290,6 +1290,7 @@ help:
 	@echo  '  make O=dir [targets] Locate all output files in "dir", including .config'
 	@echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse by default)'
 	@echo  '  make C=2   [targets] Force check of all c source with $$CHECK'
+	@echo  '  make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
 	@echo  '  make W=n   [targets] Enable extra gcc checks, n=1,2,3 where'
 	@echo  '		1: warnings which may be relevant and do not occur too often'
 	@echo  '		2: warnings which occur quite often but may still be relevant'
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 8af56ce346a..445dc42e033 100644
--- a/arch/alpha/include/asm/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
@@ -56,7 +56,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
  * Given a kernel address, find the home node of the underlying memory.
  */
 #define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 
 /*
  * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
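Note: this removal (and the matching ones for m32r, parisc and powerpc later in this diff) works because include/linux/mmzone.h now carries generic definitions; reproduced here from memory as a sketch, so see the upstream header for the exact text:

	/* Generic fallback in include/linux/mmzone.h (approximate). */
	#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
	#define node_end_pfn(nid) ({\
		pg_data_t *__pgdat = NODE_DATA(nid);\
		__pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
	})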
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 90561c45e7d..8e47709160f 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	data.period = event->hw.last_period;
 
 	if (alpha_perf_event_set_period(event, hwc, idx)) {
-		if (perf_event_overflow(event, 1, &data, regs)) {
+		if (perf_event_overflow(event, &data, regs)) {
 			/* Interrupts coming too quickly; "throttle" the
 			 * counter, i.e., disable it for a little while.
 			 */
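Note: this is the first of many hunks in this pull that drop the explicit in-NMI flag from the perf overflow path; every architecture's callers stop passing 0 or 1 here. Sketched from the call sites visible in this diff, the interface changes roughly as follows:

	/* Before: callers had to say whether they ran in NMI context. */
	int perf_event_overflow(struct perf_event *event, int nmi,
				struct perf_sample_data *data, struct pt_regs *regs);
	void perf_sw_event(u32 event_id, u64 nr, int nmi,
			   struct pt_regs *regs, u64 addr);

	/* After: the flag is gone; the core determines the context itself. */
	int perf_event_overflow(struct perf_event *event,
				struct perf_sample_data *data, struct pt_regs *regs);
	void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr);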
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 818e74ed45d..f20d1b5396b 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -91,7 +91,7 @@ DEFINE_PER_CPU(u8, irq_work_pending);
 #define test_irq_work_pending()	__get_cpu_var(irq_work_pending)
 #define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
 
-void set_irq_work_pending(void)
+void arch_irq_work_raise(void)
 {
 	set_irq_work_pending_flag();
 }
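Note: the rename matters because the generic irq_work code declares a weak hook that architectures override; approximately, in kernel/irq_work.c:

	/* Generic side (approximate sketch of the upstream weak hook). */
	void __weak arch_irq_work_raise(void)
	{
		/* Architectures without a self-IPI fall back to the timer tick. */
	}

By using the hook's real name, alpha's implementation overrides the weak symbol directly instead of relying on a local alias.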
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 942fad97e44..940b2017810 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -597,6 +597,8 @@ __common_mmu_cache_on:
 		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
 #endif
 
+#define PROC_ENTRY_SIZE (4*5)
+
 /*
  * Here follow the relocatable cache support functions for the
  * various processors.  This is a generic hook for locating an
@@ -624,7 +626,7 @@ call_cache_fn:	adr	r12, proc_types
  ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
-		add	r12, r12, #4*5
+		add	r12, r12, #PROC_ENTRY_SIZE
 		b	1b
 
 /*
@@ -794,6 +796,16 @@ proc_types:
 
 		.size	proc_types, . - proc_types
 
+		/*
+		 * If you get a "non-constant expression in ".if" statement"
+		 * error from the assembler on this line, check that you have
+		 * not accidentally written a "b" instruction where you should
+		 * have written W(b).
+		 */
+		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
+		.error "The size of one or more proc_types entries is wrong."
+		.endif
+
 /*
  * Turn off the Cache and MMU.  ARMv3 does not support
  * reading the control register, but ARMv4 does.
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index bc2d2d75f70..65c3f2474f5 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -13,6 +13,9 @@
  * Do not include any C declarations in this file - it is included by
  * assembler source.
  */
+#ifndef __ASM_ASSEMBLER_H__
+#define __ASM_ASSEMBLER_H__
+
 #ifndef __ASSEMBLY__
 #error "Only include this from assembly code"
 #endif
@@ -290,3 +293,4 @@
 .macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
 	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
 .endm
+#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index ec0bbf79c71..2da8547de6d 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -1,3 +1,5 @@
+#include <asm/assembler.h>
+
 /*
  * Interrupt handling.  Preserves r7, r8, r9
  */
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index fee7c36349e..016d6a0830a 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -193,8 +193,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			offset -= 0x02000000;
 		offset += sym->st_value - loc;
 
-		/* only Thumb addresses allowed (no interworking) */
-		if (!(offset & 1) ||
+		/*
+		 * For function symbols, only Thumb addresses are
+		 * allowed (no interworking).
+		 *
+		 * For non-function symbols, the destination
+		 * has no specific ARM/Thumb disposition, so
+		 * the branch is resolved under the assumption
+		 * that interworking is not required.
+		 */
+		if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+		     !(offset & 1)) ||
 		    offset <= (s32)0xff000000 ||
 		    offset >= (s32)0x01000000) {
 			pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
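Note: the new condition only rejects even target addresses for function symbols, since on ARM a Thumb function's address has bit 0 set and a plain B/BL cannot interwork, while data symbols carry no ARM/Thumb disposition. The equivalent predicate, as a sketch (thumb_branch_ok is a hypothetical name):

	/* True when a Thumb B/BL relocation may resolve to this symbol. */
	static bool thumb_branch_ok(const Elf32_Sym *sym, s32 offset)
	{
		/* Only function symbols must stay in Thumb state (odd address). */
		return ELF32_ST_TYPE(sym->st_info) != STT_FUNC || (offset & 1);
	}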
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index f1e8dd94afe..dd7f3b9f4cb 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -173,6 +173,20 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 enum armv6mpcore_perf_types {
@@ -310,6 +324,20 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 static inline unsigned long
@@ -479,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
 
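Note: the C(NODE) rows added here (and repeated below for the other ARM, MIPS and powerpc PMUs) fill out the new node-memory row of the hardware cache map. User space reaches that row through a PERF_TYPE_HW_CACHE event whose config is encoded roughly as in this sketch:

	/* perf ABI encoding for cache events (perf_event_attr.config). */
	__u64 config = PERF_COUNT_HW_CACHE_NODE |
		       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

None of the cores touched in this pull can count node-local versus remote accesses, so every new entry is simply marked unsupported.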
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4960686afb5..e20ca9cafef 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -255,6 +255,20 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 /*
@@ -371,6 +385,20 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 /*
@@ -787,7 +815,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
 
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 39affbe4fdb..3c4397491d0 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -144,6 +144,20 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 #define	XSCALE_PMU_ENABLE	0x001
@@ -251,7 +265,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
 
@@ -583,7 +597,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 97260060bf2..5c199610719 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx)
 /*
  * Handle hitting a HW-breakpoint.
  */
-static void ptrace_hbptriggered(struct perf_event *bp, int unused,
+static void ptrace_hbptriggered(struct perf_event *bp,
 				struct perf_sample_data *data,
 				struct pt_regs *regs)
 {
@@ -479,7 +479,8 @@ static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
 	attr.bp_type	= type;
 	attr.disabled	= 1;
 
-	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
+	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
+					   tsk);
 }
 
 static int ptrace_gethbpregs(struct task_struct *tsk, long num,
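Note: the extra NULL is the new context argument that this series threads through the breakpoint registration API, later handed back to the overflow handler via the event. Sketched from the call above, the signature becomes roughly:

	/* New registration signature, as used by the hunk above (sketch). */
	struct perf_event *
	register_user_hw_breakpoint(struct perf_event_attr *attr,
				    perf_overflow_handler_t triggered,
				    void *context,	/* opaque, stored in the event */
				    struct task_struct *tsk);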
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 344e52b16c8..e7f92a4321f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -318,9 +318,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	smp_store_cpu_info(cpu);
 
 	/*
-	 * OK, now it's safe to let the boot CPU continue
+	 * OK, now it's safe to let the boot CPU continue.  Wait for
+	 * the CPU migration code to notice that the CPU is online
+	 * before we continue.
 	 */
 	set_cpu_online(cpu, true);
+	while (!cpu_active(cpu))
+		cpu_relax();
 
 	/*
 	 * OK, it's off to the idle thread for us
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 40ee7e5045e..5f452f8fde0 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
 	unsigned int address, destreg, data, type;
 	unsigned int res = 0;
 
-	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
 
 	if (current->pid != previous_pid) {
 		pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
diff --git a/arch/arm/mach-h720x/Kconfig b/arch/arm/mach-h720x/Kconfig
index 9b6982efbd2..abf356c0234 100644
--- a/arch/arm/mach-h720x/Kconfig
+++ b/arch/arm/mach-h720x/Kconfig
@@ -6,12 +6,14 @@ config ARCH_H7201
 	bool "gms30c7201"
 	depends on ARCH_H720X
 	select CPU_H7201
+	select ZONE_DMA
 	help
 	  Say Y here if you are using the Hynix GMS30C7201 Reference Board
 
 config ARCH_H7202
 	bool "hms30c7202"
 	select CPU_H7202
+	select ZONE_DMA
 	depends on ARCH_H720X
 	help
 	  Say Y here if you are using the Hynix HMS30C7202 Reference Board
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bc0e1d88fd3..9ea4f7ddd66 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	fault = __do_page_fault(mm, addr, fsr, tsk);
 	up_read(&mm->mmap_sem);
 
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 	if (fault & VM_FAULT_MAJOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
 	else if (fault & VM_FAULT_MINOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
 
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3c3867850a3..089c0b5e454 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -210,19 +210,21 @@ cpu_v7_name:
 
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl	cpu_v7_suspend_size
-.equ	cpu_v7_suspend_size, 4 * 8
+.equ	cpu_v7_suspend_size, 4 * 9
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v7_do_suspend)
 	stmfd	sp!, {r4 - r11, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mrc	p15, 0, r5, c13, c0, 1	@ Context ID
+	mrc	p15, 0, r6, c13, c0, 3	@ User r/o thread ID
+	stmia	r0!, {r4 - r6}
 	mrc	p15, 0, r6, c3, c0, 0	@ Domain ID
 	mrc	p15, 0, r7, c2, c0, 0	@ TTB 0
 	mrc	p15, 0, r8, c2, c0, 1	@ TTB 1
 	mrc	p15, 0, r9, c1, c0, 0	@ Control register
 	mrc	p15, 0, r10, c1, c0, 1	@ Auxiliary control register
 	mrc	p15, 0, r11, c1, c0, 2	@ Co-processor access control
-	stmia	r0, {r4 - r11}
+	stmia	r0, {r6 - r11}
 	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_v7_do_suspend)
 
@@ -230,9 +232,11 @@ ENTRY(cpu_v7_do_resume)
 	mov	ip, #0
 	mcr	p15, 0, ip, c8, c7, 0	@ invalidate TLBs
 	mcr	p15, 0, ip, c7, c5, 0	@ invalidate I cache
-	ldmia	r0, {r4 - r11}
+	ldmia	r0!, {r4 - r6}
 	mcr	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mcr	p15, 0, r5, c13, c0, 1	@ Context ID
+	mcr	p15, 0, r6, c13, c0, 3	@ User r/o thread ID
+	ldmia	r0, {r6 - r11}
 	mcr	p15, 0, r6, c3, c0, 0	@ Domain ID
 	mcr	p15, 0, r7, c2, c0, 0	@ TTB 0
 	mcr	p15, 0, r8, c2, c0, 1	@ TTB 1
@@ -418,9 +422,9 @@ ENTRY(v7_processor_functions)
 	.word	cpu_v7_dcache_clean_area
 	.word	cpu_v7_switch_mm
 	.word	cpu_v7_set_pte_ext
-	.word	0
-	.word	0
-	.word	0
+	.word	cpu_v7_suspend_size
+	.word	cpu_v7_do_suspend
+	.word	cpu_v7_do_resume
 	.size	v7_processor_functions, . - v7_processor_functions
 
 	.section ".rodata"
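Note: the save area grows from eight to nine words so that the user read-only thread ID register survives suspend, and the previously empty processor-function slots are wired up. Viewed from C, the layout after this change is approximately (struct name is hypothetical):

	/* One word per cp15 register saved by cpu_v7_do_suspend (sketch). */
	struct cpu_v7_suspend_regs {
		u32 fcse_pid;	/* c13, c0, 0 */
		u32 context_id;	/* c13, c0, 1 */
		u32 tpidruro;	/* c13, c0, 3 - user r/o thread ID, new here */
		u32 domain;	/* c3,  c0, 0 */
		u32 ttb0;	/* c2,  c0, 0 */
		u32 ttb1;	/* c2,  c0, 1 */
		u32 sctlr;	/* c1,  c0, 0 */
		u32 actlr;	/* c1,  c0, 1 */
		u32 cpacr;	/* c1,  c0, 2 */
	};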
diff --git a/arch/arm/plat-iop/cp6.c b/arch/arm/plat-iop/cp6.c
index 9612a87e2a8..bab73e2c79d 100644
--- a/arch/arm/plat-iop/cp6.c
+++ b/arch/arm/plat-iop/cp6.c
@@ -18,6 +18,7 @@
  */
 #include <linux/init.h>
 #include <asm/traps.h>
+#include <asm/ptrace.h>
 
 static int cp6_trap(struct pt_regs *regs, unsigned int instr)
 {
diff --git a/arch/m32r/include/asm/mmzone.h b/arch/m32r/include/asm/mmzone.h
index 9f3b5accda8..115ced33feb 100644
--- a/arch/m32r/include/asm/mmzone.h
+++ b/arch/m32r/include/asm/mmzone.h
@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)		(node_data[nid])
 
 #define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)						\
-({									\
-	pg_data_t *__pgdat = NODE_DATA(nid);				\
-	__pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1;	\
-})
 
 #define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 /*
@@ -44,7 +38,7 @@ static __inline__ int pfn_to_nid(unsigned long pfn)
 	int node;
 
 	for (node = 0 ; node < MAX_NUMNODES ; node++)
-		if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node))
+		if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node))
 			break;
 
 	return node;
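Note: the comparison flips from <= to < because the removed local node_end_pfn() was inclusive (start + spanned - 1) while the generic helper is exclusive (start + spanned). A sketch of the corrected containment test (pfn_in_node is a hypothetical name):

	static int pfn_in_node(unsigned long pfn, int nid)
	{
		/* Generic node_end_pfn() is one past the last page, hence '<'. */
		return pfn >= node_start_pfn(nid) && pfn < node_end_pfn(nid);
	}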
diff --git a/arch/mips/include/asm/stacktrace.h b/arch/mips/include/asm/stacktrace.h
index 0bf82818aa5..780ee2c2a2a 100644
--- a/arch/mips/include/asm/stacktrace.h
+++ b/arch/mips/include/asm/stacktrace.h
@@ -7,6 +7,10 @@
 extern int raw_show_trace;
 extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
 				  unsigned long pc, unsigned long *ra);
+extern unsigned long unwind_stack_by_address(unsigned long stack_page,
+					     unsigned long *sp,
+					     unsigned long pc,
+					     unsigned long *ra);
 #else
 #define raw_show_trace 1
 static inline unsigned long unwind_stack(struct task_struct *task,
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index a8244854d3d..d0deaab9ace 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc,
 	if (!mipspmu_event_set_period(event, hwc, idx))
 		return;
 
-	if (perf_event_overflow(event, 0, data, regs))
+	if (perf_event_overflow(event, data, regs))
 		mipspmu->disable_event(idx);
 }
 
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 75266ff4cc3..e5ad09a9baf 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -377,6 +377,20 @@ static const struct mips_perf_event mipsxxcore_cache_map
 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 	},
 },
+[C(NODE)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
 };
 
 /* 74K core has completely different cache event map. */
@@ -480,6 +494,20 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 	},
 },
+[C(NODE)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
 };
 
 #ifdef CONFIG_MIPS_MT_SMP
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d2112d3cf11..c28fbe6107b 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -373,18 +373,18 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 
 
 #ifdef CONFIG_KALLSYMS
-/* used by show_backtrace() */
-unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
-			   unsigned long pc, unsigned long *ra)
+/* generic stack unwinding function */
+unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
+					      unsigned long *sp,
+					      unsigned long pc,
+					      unsigned long *ra)
 {
-	unsigned long stack_page;
 	struct mips_frame_info info;
 	unsigned long size, ofs;
 	int leaf;
 	extern void ret_from_irq(void);
 	extern void ret_from_exception(void);
 
-	stack_page = (unsigned long)task_stack_page(task);
 	if (!stack_page)
 		return 0;
 
@@ -443,6 +443,15 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
 		*ra = 0;
 	return __kernel_text_address(pc) ? pc : 0;
 }
+EXPORT_SYMBOL(unwind_stack_by_address);
+
+/* used by show_backtrace() */
+unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+			   unsigned long pc, unsigned long *ra)
+{
+	unsigned long stack_page = (unsigned long)task_stack_page(task);
+	return unwind_stack_by_address(stack_page, sp, pc, ra);
+}
 #endif
 
 /*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e9b3af27d84..b7517e3abc8 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 {
 	if ((opcode & OPCODE) == LL) {
 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-				1, 0, regs, 0);
+				1, regs, 0);
 		return simulate_ll(regs, opcode);
 	}
 	if ((opcode & OPCODE) == SC) {
 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-				1, 0, regs, 0);
+				1, regs, 0);
 		return simulate_sc(regs, opcode);
 	}
 
@@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
 	int rd = (opcode & RD) >> 11;
 	int rt = (opcode & RT) >> 16;
 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-			1, 0, regs, 0);
+			1, regs, 0);
 	switch (rd) {
 	case 0:		/* CPU number */
 		regs->regs[rt] = smp_processor_id();
@@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 {
 	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-				1, 0, regs, 0);
+				1, regs, 0);
 		return 0;
 	}
 
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index cfea1adfa15..eb319b58035 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 	unsigned long value;
 	unsigned int res;
 
-	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-			1, 0, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
 	/*
 	 * This load never faults.
@@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
 	mm_segment_t seg;
 
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
-			1, 0, regs, regs->cp0_badvaddr);
+			1, regs, regs->cp0_badvaddr);
 	/*
 	 * Did we catch a fault trying to load an instruction?
 	 * Or are we running in MIPS16 mode?
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index d32cb050311..dbf2f93a509 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 	}
 
       emul:
-	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-			1, 0, xcp, 0);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
 	MIPS_FPU_EMU_INC_STATS(emulated);
 	switch (MIPSInst_OPCODE(ir)) {
 	case ldc1_op:{
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 137ee76a004..937cf336816 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -145,7 +145,7 @@ good_area:
 	 * the fault.
 	 */
 	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -154,12 +154,10 @@ good_area:
 		BUG();
 	}
 	if (fault & VM_FAULT_MAJOR) {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-				1, 0, regs, address);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
 		tsk->maj_flt++;
 	} else {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-				1, 0, regs, address);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
 		tsk->min_flt++;
 	}
 
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 4b9d7044e26..29f2f13eb31 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -8,7 +8,7 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
-oprofile-y := $(DRIVER_OBJS) common.o
+oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
 
 oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
 oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
new file mode 100644
index 00000000000..6854ed5097d
--- /dev/null
+++ b/arch/mips/oprofile/backtrace.c
@@ -0,0 +1,175 @@
+#include <linux/oprofile.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+#include <linux/stacktrace.h>
+#include <linux/kernel.h>
+#include <asm/sections.h>
+#include <asm/inst.h>
+
+struct stackframe {
+	unsigned long sp;
+	unsigned long pc;
+	unsigned long ra;
+};
+
+static inline int get_mem(unsigned long addr, unsigned long *result)
+{
+	unsigned long *address = (unsigned long *) addr;
+	if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long)))
+		return -1;
+	if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
+		return -3;
+	return 0;
+}
+
+/*
+ * These two instruction helpers were taken from process.c
+ */
+static inline int is_ra_save_ins(union mips_instruction *ip)
+{
+	/* sw / sd $ra, offset($sp) */
+	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op)
+		&& ip->i_format.rs == 29 && ip->i_format.rt == 31;
+}
+
+static inline int is_sp_move_ins(union mips_instruction *ip)
+{
+	/* addiu/daddiu sp,sp,-imm */
+	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
+		return 0;
+	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
+		return 1;
+	return 0;
+}
+
+/*
+ * Looks for specific instructions that mark the end of a function.
+ * This usually means we ran into the code area of the previous function.
+ */
+static inline int is_end_of_function_marker(union mips_instruction *ip)
+{
+	/* jr ra */
+	if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
+		return 1;
+	/* lui gp */
+	if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
+		return 1;
+	return 0;
+}
+
+/*
+ * TODO for userspace stack unwinding:
+ * - handle cases where the stack is adjusted inside a function
+ *     (generally doesn't happen)
+ * - find optimal value for max_instr_check
+ * - try to find a way to handle leaf functions
+ */
+
+static inline int unwind_user_frame(struct stackframe *old_frame,
+				    const unsigned int max_instr_check)
+{
+	struct stackframe new_frame = *old_frame;
+	off_t ra_offset = 0;
+	size_t stack_size = 0;
+	unsigned long addr;
+
+	if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
+		return -9;
+
+	for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc)
+		&& (!ra_offset || !stack_size); --addr) {
+		union mips_instruction ip;
+
+		if (get_mem(addr, (unsigned long *) &ip))
+			return -11;
+
+		if (is_sp_move_ins(&ip)) {
+			int stack_adjustment = ip.i_format.simmediate;
+			if (stack_adjustment > 0)
+				/* This marks the end of the previous function,
+				   which means we overran. */
+				break;
+			stack_size = (unsigned) stack_adjustment;
+		} else if (is_ra_save_ins(&ip)) {
+			int ra_slot = ip.i_format.simmediate;
+			if (ra_slot < 0)
+				/* This shouldn't happen. */
+				break;
+			ra_offset = ra_slot;
+		} else if (is_end_of_function_marker(&ip))
+			break;
+	}
+
+	if (!ra_offset || !stack_size)
+		return -1;
+
+	if (ra_offset) {
+		new_frame.ra = old_frame->sp + ra_offset;
+		if (get_mem(new_frame.ra, &(new_frame.ra)))
+			return -13;
+	}
+
+	if (stack_size) {
+		new_frame.sp = old_frame->sp + stack_size;
+		if (get_mem(new_frame.sp, &(new_frame.sp)))
+			return -14;
+	}
+
+	if (new_frame.sp > old_frame->sp)
+		return -2;
+
+	new_frame.pc = old_frame->ra;
+	*old_frame = new_frame;
+
+	return 0;
+}
+
+static inline void do_user_backtrace(unsigned long low_addr,
+				     struct stackframe *frame,
+				     unsigned int depth)
+{
+	const unsigned int max_instr_check = 512;
+	const unsigned long high_addr = low_addr + THREAD_SIZE;
+
+	while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
+		oprofile_add_trace(frame->ra);
+		if (frame->sp < low_addr || frame->sp > high_addr)
+			break;
+	}
+}
+
+#ifndef CONFIG_KALLSYMS
+static inline void do_kernel_backtrace(unsigned long low_addr,
+				       struct stackframe *frame,
+				       unsigned int depth) { }
+#else
+static inline void do_kernel_backtrace(unsigned long low_addr,
+				       struct stackframe *frame,
+				       unsigned int depth)
+{
+	while (depth-- && frame->pc) {
+		frame->pc = unwind_stack_by_address(low_addr,
+						    &(frame->sp),
+						    frame->pc,
+						    &(frame->ra));
+		oprofile_add_trace(frame->ra);
+	}
+}
+#endif
+
+void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
+{
+	struct stackframe frame = { .sp = regs->regs[29],
+				    .pc = regs->cp0_epc,
+				    .ra = regs->regs[31] };
+	const int userspace = user_mode(regs);
+	const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);
+
+	if (userspace)
+		do_user_backtrace(low_addr, &frame, depth);
+	else
+		do_kernel_backtrace(low_addr, &frame, depth);
+}
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index f9eb1aba634..d1f2d4c52d4 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -115,6 +115,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	ops->start		= op_mips_start;
 	ops->stop		= op_mips_stop;
 	ops->cpu_type		= lmodel->cpu_type;
+	ops->backtrace		= op_mips_backtrace;
 
 	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
 	       lmodel->cpu_type);
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
index f04b54fb37d..7c2da27ece0 100644
--- a/arch/mips/oprofile/op_impl.h
+++ b/arch/mips/oprofile/op_impl.h
@@ -36,4 +36,6 @@ struct op_mips_model {
 	unsigned char num_counters;
 };
 
+void op_mips_backtrace(struct pt_regs * const regs, unsigned int depth);
+
 #endif
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 3d6e60dad9d..780560b330d 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -15,6 +15,7 @@
  * User space memory access functions
  */
 #include <linux/thread_info.h>
+#include <linux/kernel.h>
 #include <asm/page.h>
 #include <asm/errno.h>
 
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h
index 9608d2cf214..e67eb9c3d1b 100644
--- a/arch/parisc/include/asm/mmzone.h
+++ b/arch/parisc/include/asm/mmzone.h
@@ -14,13 +14,6 @@ extern struct node_map_data node_data[];
 
 #define NODE_DATA(nid)		(&node_data[nid].pg_data)
 
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)						\
-({									\
-	pg_data_t *__pgdat = NODE_DATA(nid);				\
-	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
-})
-
 /* We have these possible memory map layouts:
  * Astro: 0-3.75, 67.75-68, 4-64
  * zx1: 0-1, 257-260, 4-256
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 45921672b97..2cc41c715d2 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
 #define PPC_WARN_EMULATED(type, regs)					\
 	do {								\
 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,		\
-			1, 0, regs, 0);					\
+			1, regs, 0);					\
 		__PPC_WARN_EMULATED(type);				\
 	} while (0)
 
 #define PPC_WARN_ALIGNMENT(type, regs)					\
 	do {								\
 		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,		\
-			1, 0, regs, regs->dar);				\
+			1, regs, regs->dar);				\
 		__PPC_WARN_EMULATED(type);				\
 	} while (0)
 
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 1c33ec17ca3..80fd4d2b4a6 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -57,7 +57,7 @@ void hw_breakpoint_pmu_read(struct perf_event *bp);
 extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
 
 extern struct pmu perf_ops_bp;
-extern void ptrace_triggered(struct perf_event *bp, int nmi,
+extern void ptrace_triggered(struct perf_event *bp,
 			struct perf_sample_data *data, struct pt_regs *regs);
 static inline void hw_breakpoint_disable(void)
 {
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index fd3fd58bad8..7b589178be4 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -38,13 +38,6 @@ u64 memory_hotplug_max(void);
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)	(NODE_DATA(nid)->node_end_pfn)
-
 #else
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c
index b150b510510..cb2e2949c8d 100644
--- a/arch/powerpc/kernel/e500-pmu.c
+++ b/arch/powerpc/kernel/e500-pmu.c
@@ -75,6 +75,11 @@ static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
 		[C(OP_WRITE)] = {	-1,		-1	},
 		[C(OP_PREFETCH)] = {	-1,		-1	},
 	},
+	[C(NODE)] = {		/*	RESULT_ACCESS	RESULT_MISS */
+		[C(OP_READ)] = {	-1,		-1	},
+		[C(OP_WRITE)] = {	-1,		-1	},
+		[C(OP_PREFETCH)] = {	-1,		-1	},
+	},
 };
 
 static int num_events = 128;
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
index 2cc5e0301d0..845a5847889 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -388,6 +388,11 @@ static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
 		[C(OP_WRITE)] = {	-1,		-1	},
 		[C(OP_PREFETCH)] = {	-1,		-1	},
 	},
+	[C(NODE)] = {		/*	RESULT_ACCESS	RESULT_MISS */
+		[C(OP_READ)] = {	-1,		-1	},
+		[C(OP_WRITE)] = {	-1,		-1	},
+		[C(OP_PREFETCH)] = {	-1,		-1	},
+	},
 };
 
 struct power_pmu mpc7450_pmu = {
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 822f63008ae..14967de9887 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
@@ -1207,7 +1207,7 @@ struct pmu power_pmu = { | |||
1207 | * here so there is no possibility of being interrupted. | 1207 | * here so there is no possibility of being interrupted. |
1208 | */ | 1208 | */ |
1209 | static void record_and_restart(struct perf_event *event, unsigned long val, | 1209 | static void record_and_restart(struct perf_event *event, unsigned long val, |
1210 | struct pt_regs *regs, int nmi) | 1210 | struct pt_regs *regs) |
1211 | { | 1211 | { |
1212 | u64 period = event->hw.sample_period; | 1212 | u64 period = event->hw.sample_period; |
1213 | s64 prev, delta, left; | 1213 | s64 prev, delta, left; |
@@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1258 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) | 1258 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) |
1259 | perf_get_data_addr(regs, &data.addr); | 1259 | perf_get_data_addr(regs, &data.addr); |
1260 | 1260 | ||
1261 | if (perf_event_overflow(event, nmi, &data, regs)) | 1261 | if (perf_event_overflow(event, &data, regs)) |
1262 | power_pmu_stop(event, 0); | 1262 | power_pmu_stop(event, 0); |
1263 | } | 1263 | } |
1264 | } | 1264 | } |
@@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs) | |||
1346 | if ((int)val < 0) { | 1346 | if ((int)val < 0) { |
1347 | /* event has overflowed */ | 1347 | /* event has overflowed */ |
1348 | found = 1; | 1348 | found = 1; |
1349 | record_and_restart(event, val, regs, nmi); | 1349 | record_and_restart(event, val, regs); |
1350 | } | 1350 | } |
1351 | } | 1351 | } |
1352 | 1352 | ||
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index b0dc8f7069c..0a6d2a9d569 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c | |||
@@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = { | |||
568 | * here so there is no possibility of being interrupted. | 568 | * here so there is no possibility of being interrupted. |
569 | */ | 569 | */ |
570 | static void record_and_restart(struct perf_event *event, unsigned long val, | 570 | static void record_and_restart(struct perf_event *event, unsigned long val, |
571 | struct pt_regs *regs, int nmi) | 571 | struct pt_regs *regs) |
572 | { | 572 | { |
573 | u64 period = event->hw.sample_period; | 573 | u64 period = event->hw.sample_period; |
574 | s64 prev, delta, left; | 574 | s64 prev, delta, left; |
@@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
616 | perf_sample_data_init(&data, 0); | 616 | perf_sample_data_init(&data, 0); |
617 | data.period = event->hw.last_period; | 617 | data.period = event->hw.last_period; |
618 | 618 | ||
619 | if (perf_event_overflow(event, nmi, &data, regs)) | 619 | if (perf_event_overflow(event, &data, regs)) |
620 | fsl_emb_pmu_stop(event, 0); | 620 | fsl_emb_pmu_stop(event, 0); |
621 | } | 621 | } |
622 | } | 622 | } |
@@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs) | |||
644 | if (event) { | 644 | if (event) { |
645 | /* event has overflowed */ | 645 | /* event has overflowed */ |
646 | found = 1; | 646 | found = 1; |
647 | record_and_restart(event, val, regs, nmi); | 647 | record_and_restart(event, val, regs); |
648 | } else { | 648 | } else { |
649 | /* | 649 | /* |
650 | * Disabled counter is negative, | 650 | * Disabled counter is negative, |
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index ead8b3c2649..e9dbc2d35c9 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c | |||
@@ -587,6 +587,11 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
587 | [C(OP_WRITE)] = { -1, -1 }, | 587 | [C(OP_WRITE)] = { -1, -1 }, |
588 | [C(OP_PREFETCH)] = { -1, -1 }, | 588 | [C(OP_PREFETCH)] = { -1, -1 }, |
589 | }, | 589 | }, |
590 | [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
591 | [C(OP_READ)] = { -1, -1 }, | ||
592 | [C(OP_WRITE)] = { -1, -1 }, | ||
593 | [C(OP_PREFETCH)] = { -1, -1 }, | ||
594 | }, | ||
590 | }; | 595 | }; |
591 | 596 | ||
592 | static struct power_pmu power4_pmu = { | 597 | static struct power_pmu power4_pmu = { |
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index eca0ac595cb..f58a2bd41b5 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c | |||
@@ -653,6 +653,11 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
653 | [C(OP_WRITE)] = { -1, -1 }, | 653 | [C(OP_WRITE)] = { -1, -1 }, |
654 | [C(OP_PREFETCH)] = { -1, -1 }, | 654 | [C(OP_PREFETCH)] = { -1, -1 }, |
655 | }, | 655 | }, |
656 | [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
657 | [C(OP_READ)] = { -1, -1 }, | ||
658 | [C(OP_WRITE)] = { -1, -1 }, | ||
659 | [C(OP_PREFETCH)] = { -1, -1 }, | ||
660 | }, | ||
656 | }; | 661 | }; |
657 | 662 | ||
658 | static struct power_pmu power5p_pmu = { | 663 | static struct power_pmu power5p_pmu = { |
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index d5ff0f64a5e..b1acab68414 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c | |||
@@ -595,6 +595,11 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
595 | [C(OP_WRITE)] = { -1, -1 }, | 595 | [C(OP_WRITE)] = { -1, -1 }, |
596 | [C(OP_PREFETCH)] = { -1, -1 }, | 596 | [C(OP_PREFETCH)] = { -1, -1 }, |
597 | }, | 597 | }, |
598 | [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
599 | [C(OP_READ)] = { -1, -1 }, | ||
600 | [C(OP_WRITE)] = { -1, -1 }, | ||
601 | [C(OP_PREFETCH)] = { -1, -1 }, | ||
602 | }, | ||
598 | }; | 603 | }; |
599 | 604 | ||
600 | static struct power_pmu power5_pmu = { | 605 | static struct power_pmu power5_pmu = { |
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index 31603927e37..b24a3a23d07 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c | |||
@@ -516,6 +516,11 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
516 | [C(OP_WRITE)] = { -1, -1 }, | 516 | [C(OP_WRITE)] = { -1, -1 }, |
517 | [C(OP_PREFETCH)] = { -1, -1 }, | 517 | [C(OP_PREFETCH)] = { -1, -1 }, |
518 | }, | 518 | }, |
519 | [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
520 | [C(OP_READ)] = { -1, -1 }, | ||
521 | [C(OP_WRITE)] = { -1, -1 }, | ||
522 | [C(OP_PREFETCH)] = { -1, -1 }, | ||
523 | }, | ||
519 | }; | 524 | }; |
520 | 525 | ||
521 | static struct power_pmu power6_pmu = { | 526 | static struct power_pmu power6_pmu = { |
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 593740fcb79..6d9dccb2ea5 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c | |||
@@ -342,6 +342,11 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
342 | [C(OP_WRITE)] = { -1, -1 }, | 342 | [C(OP_WRITE)] = { -1, -1 }, |
343 | [C(OP_PREFETCH)] = { -1, -1 }, | 343 | [C(OP_PREFETCH)] = { -1, -1 }, |
344 | }, | 344 | }, |
345 | [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
346 | [C(OP_READ)] = { -1, -1 }, | ||
347 | [C(OP_WRITE)] = { -1, -1 }, | ||
348 | [C(OP_PREFETCH)] = { -1, -1 }, | ||
349 | }, | ||
345 | }; | 350 | }; |
346 | 351 | ||
347 | static struct power_pmu power7_pmu = { | 352 | static struct power_pmu power7_pmu = { |
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 9a6e093858f..b121de9658e 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c | |||
@@ -467,6 +467,11 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
467 | [C(OP_WRITE)] = { -1, -1 }, | 467 | [C(OP_WRITE)] = { -1, -1 }, |
468 | [C(OP_PREFETCH)] = { -1, -1 }, | 468 | [C(OP_PREFETCH)] = { -1, -1 }, |
469 | }, | 469 | }, |
470 | [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
471 | [C(OP_READ)] = { -1, -1 }, | ||
472 | [C(OP_WRITE)] = { -1, -1 }, | ||
473 | [C(OP_PREFETCH)] = { -1, -1 }, | ||
474 | }, | ||
470 | }; | 475 | }; |
471 | 476 | ||
472 | static struct power_pmu ppc970_pmu = { | 477 | static struct power_pmu ppc970_pmu = { |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index cb22024f2b4..05b7dd217f6 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task) | |||
882 | } | 882 | } |
883 | 883 | ||
884 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 884 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
885 | void ptrace_triggered(struct perf_event *bp, int nmi, | 885 | void ptrace_triggered(struct perf_event *bp, |
886 | struct perf_sample_data *data, struct pt_regs *regs) | 886 | struct perf_sample_data *data, struct pt_regs *regs) |
887 | { | 887 | { |
888 | struct perf_event_attr attr; | 888 | struct perf_event_attr attr; |
@@ -973,7 +973,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
973 | &attr.bp_type); | 973 | &attr.bp_type); |
974 | 974 | ||
975 | thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, | 975 | thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, |
976 | ptrace_triggered, task); | 976 | ptrace_triggered, NULL, task); |
977 | if (IS_ERR(bp)) { | 977 | if (IS_ERR(bp)) { |
978 | thread->ptrace_bps[0] = NULL; | 978 | thread->ptrace_bps[0] = NULL; |
979 | ptrace_put_breakpoints(task); | 979 | ptrace_put_breakpoints(task); |
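
Note: the extra NULL above is a new context cookie that register_user_hw_breakpoint() passes back to the overflow handler; ptrace carries no private state, hence NULL. The updated prototype, as implied by the call sites in this diff:

	struct perf_event *
	register_user_hw_breakpoint(struct perf_event_attr *attr,
				    perf_overflow_handler_t triggered,
				    void *context,	/* new: handler cookie */
				    struct task_struct *tsk);
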
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index f33acfd872a..03b29a6759a 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -544,7 +544,7 @@ DEFINE_PER_CPU(u8, irq_work_pending); | |||
544 | 544 | ||
545 | #endif /* 32 vs 64 bit */ | 545 | #endif /* 32 vs 64 bit */ |
546 | 546 | ||
547 | void set_irq_work_pending(void) | 547 | void arch_irq_work_raise(void) |
548 | { | 548 | { |
549 | preempt_disable(); | 549 | preempt_disable(); |
550 | set_irq_work_pending_flag(); | 550 | set_irq_work_pending_flag(); |
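
Note: this rename is a bug fix, not cosmetics. kernel/irq_work.c raises pending work through a weak arch_irq_work_raise() hook; a powerpc function named set_irq_work_pending() was simply never called, so queued work only ran at the next timer tick. Paraphrased sketch of the generic fallback the rename now overrides:

	/* kernel/irq_work.c (paraphrased): weak no-op unless the arch overrides it */
	void __weak arch_irq_work_raise(void)
	{
		/* no arch hook: work is only processed on the next timer interrupt */
	}
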
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 54f4fb994e9..dbc48254c6c 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -173,7 +173,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
173 | die("Weird page fault", regs, SIGSEGV); | 173 | die("Weird page fault", regs, SIGSEGV); |
174 | } | 174 | } |
175 | 175 | ||
176 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 176 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
177 | 177 | ||
178 | /* When running in the kernel we expect faults to occur only to | 178 | /* When running in the kernel we expect faults to occur only to |
179 | * addresses in user space. All other faults represent errors in the | 179 | * addresses in user space. All other faults represent errors in the |
@@ -319,7 +319,7 @@ good_area: | |||
319 | } | 319 | } |
320 | if (ret & VM_FAULT_MAJOR) { | 320 | if (ret & VM_FAULT_MAJOR) { |
321 | current->maj_flt++; | 321 | current->maj_flt++; |
322 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 322 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
323 | regs, address); | 323 | regs, address); |
324 | #ifdef CONFIG_PPC_SMLPAR | 324 | #ifdef CONFIG_PPC_SMLPAR |
325 | if (firmware_has_feature(FW_FEATURE_CMO)) { | 325 | if (firmware_has_feature(FW_FEATURE_CMO)) { |
@@ -330,7 +330,7 @@ good_area: | |||
330 | #endif | 330 | #endif |
331 | } else { | 331 | } else { |
332 | current->min_flt++; | 332 | current->min_flt++; |
333 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 333 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, |
334 | regs, address); | 334 | regs, address); |
335 | } | 335 | } |
336 | up_read(&mm->mmap_sem); | 336 | up_read(&mm->mmap_sem); |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 90d77bd078f..c03fef7a9c2 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -579,6 +579,7 @@ config S390_GUEST | |||
579 | def_bool y | 579 | def_bool y |
580 | prompt "s390 guest support for KVM (EXPERIMENTAL)" | 580 | prompt "s390 guest support for KVM (EXPERIMENTAL)" |
581 | depends on 64BIT && EXPERIMENTAL | 581 | depends on 64BIT && EXPERIMENTAL |
582 | select VIRTUALIZATION | ||
582 | select VIRTIO | 583 | select VIRTIO |
583 | select VIRTIO_RING | 584 | select VIRTIO_RING |
584 | select VIRTIO_CONSOLE | 585 | select VIRTIO_CONSOLE |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 52420d2785b..1d55c95f617 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -262,7 +262,7 @@ void smp_ctl_set_bit(int cr, int bit) | |||
262 | 262 | ||
263 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | 263 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
264 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | 264 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); |
265 | parms.orvals[cr] = 1 << bit; | 265 | parms.orvals[cr] = 1UL << bit; |
266 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); | 266 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
267 | } | 267 | } |
268 | EXPORT_SYMBOL(smp_ctl_set_bit); | 268 | EXPORT_SYMBOL(smp_ctl_set_bit); |
@@ -276,7 +276,7 @@ void smp_ctl_clear_bit(int cr, int bit) | |||
276 | 276 | ||
277 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | 277 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
278 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | 278 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); |
279 | parms.andvals[cr] = ~(1L << bit); | 279 | parms.andvals[cr] = ~(1UL << bit); |
280 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); | 280 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
281 | } | 281 | } |
282 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 282 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
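
Note: the UL suffix matters because 1 << bit is evaluated in 32-bit int. On 64-bit s390 the control registers are 64 bits wide, so shifting by bit >= 32 is undefined and in practice touched the wrong bit or none at all. A minimal sketch of the corrected pattern:

	int bit = 33;
	unsigned long mask = 1UL << bit;	/* full 64-bit shift: bit 33 set */
	/* (1 << bit) would shift a 32-bit int, undefined for bit >= 32 */
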
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index fe103e891e7..095f782a551 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -299,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access, | |||
299 | goto out; | 299 | goto out; |
300 | 300 | ||
301 | address = trans_exc_code & __FAIL_ADDR_MASK; | 301 | address = trans_exc_code & __FAIL_ADDR_MASK; |
302 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 302 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
303 | flags = FAULT_FLAG_ALLOW_RETRY; | 303 | flags = FAULT_FLAG_ALLOW_RETRY; |
304 | if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) | 304 | if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) |
305 | flags |= FAULT_FLAG_WRITE; | 305 | flags |= FAULT_FLAG_WRITE; |
@@ -345,11 +345,11 @@ retry: | |||
345 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | 345 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
346 | if (fault & VM_FAULT_MAJOR) { | 346 | if (fault & VM_FAULT_MAJOR) { |
347 | tsk->maj_flt++; | 347 | tsk->maj_flt++; |
348 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 348 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
349 | regs, address); | 349 | regs, address); |
350 | } else { | 350 | } else { |
351 | tsk->min_flt++; | 351 | tsk->min_flt++; |
352 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 352 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, |
353 | regs, address); | 353 | regs, address); |
354 | } | 354 | } |
355 | if (fault & VM_FAULT_RETRY) { | 355 | if (fault & VM_FAULT_RETRY) { |
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 5995e9bc72d..0e358c2cffe 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c | |||
@@ -25,7 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); | |||
25 | 25 | ||
26 | #include "hwsampler.h" | 26 | #include "hwsampler.h" |
27 | 27 | ||
28 | #define DEFAULT_INTERVAL 4096 | 28 | #define DEFAULT_INTERVAL 4127518 |
29 | 29 | ||
30 | #define DEFAULT_SDBT_BLOCKS 1 | 30 | #define DEFAULT_SDBT_BLOCKS 1 |
31 | #define DEFAULT_SDB_BLOCKS 511 | 31 | #define DEFAULT_SDB_BLOCKS 511 |
@@ -151,6 +151,12 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) | |||
151 | if (oprofile_max_interval == 0) | 151 | if (oprofile_max_interval == 0) |
152 | return -ENODEV; | 152 | return -ENODEV; |
153 | 153 | ||
154 | /* The initial value should be sane */ | ||
155 | if (oprofile_hw_interval < oprofile_min_interval) | ||
156 | oprofile_hw_interval = oprofile_min_interval; | ||
157 | if (oprofile_hw_interval > oprofile_max_interval) | ||
158 | oprofile_hw_interval = oprofile_max_interval; | ||
159 | |||
154 | if (oprofile_timer_init(ops)) | 160 | if (oprofile_timer_init(ops)) |
155 | return -ENODEV; | 161 | return -ENODEV; |
156 | 162 | ||
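
Note: two related fixes above: the default sampling interval is raised from 4096 (far too aggressive for the s390 hardware sampler) to 4127518, and the initial value is clamped into the [min, max] range the hardware reports. The two added range checks are equivalent to the kernel's clamp() helper; a stylistic alternative, not what the patch does:

	/* equivalent to the two range checks added above */
	oprofile_hw_interval = clamp(oprofile_hw_interval,
				     oprofile_min_interval,
				     oprofile_max_interval);
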
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h index 8887baff5ef..15a8496960e 100644 --- a/arch/sh/include/asm/mmzone.h +++ b/arch/sh/include/asm/mmzone.h | |||
@@ -9,10 +9,6 @@ | |||
9 | extern struct pglist_data *node_data[]; | 9 | extern struct pglist_data *node_data[]; |
10 | #define NODE_DATA(nid) (node_data[nid]) | 10 | #define NODE_DATA(nid) (node_data[nid]) |
11 | 11 | ||
12 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
13 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ | ||
14 | NODE_DATA(nid)->node_spanned_pages) | ||
15 | |||
16 | static inline int pfn_to_nid(unsigned long pfn) | 12 | static inline int pfn_to_nid(unsigned long pfn) |
17 | { | 13 | { |
18 | int nid; | 14 | int nid; |
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c index 748955df018..fa4f724b295 100644 --- a/arch/sh/kernel/cpu/sh4/perf_event.c +++ b/arch/sh/kernel/cpu/sh4/perf_event.c | |||
@@ -180,6 +180,21 @@ static const int sh7750_cache_events | |||
180 | [ C(RESULT_MISS) ] = -1, | 180 | [ C(RESULT_MISS) ] = -1, |
181 | }, | 181 | }, |
182 | }, | 182 | }, |
183 | |||
184 | [ C(NODE) ] = { | ||
185 | [ C(OP_READ) ] = { | ||
186 | [ C(RESULT_ACCESS) ] = -1, | ||
187 | [ C(RESULT_MISS) ] = -1, | ||
188 | }, | ||
189 | [ C(OP_WRITE) ] = { | ||
190 | [ C(RESULT_ACCESS) ] = -1, | ||
191 | [ C(RESULT_MISS) ] = -1, | ||
192 | }, | ||
193 | [ C(OP_PREFETCH) ] = { | ||
194 | [ C(RESULT_ACCESS) ] = -1, | ||
195 | [ C(RESULT_MISS) ] = -1, | ||
196 | }, | ||
197 | }, | ||
183 | }; | 198 | }; |
184 | 199 | ||
185 | static int sh7750_event_map(int event) | 200 | static int sh7750_event_map(int event) |
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c index 17e6bebfede..84a2c396cee 100644 --- a/arch/sh/kernel/cpu/sh4a/perf_event.c +++ b/arch/sh/kernel/cpu/sh4a/perf_event.c | |||
@@ -205,6 +205,21 @@ static const int sh4a_cache_events | |||
205 | [ C(RESULT_MISS) ] = -1, | 205 | [ C(RESULT_MISS) ] = -1, |
206 | }, | 206 | }, |
207 | }, | 207 | }, |
208 | |||
209 | [ C(NODE) ] = { | ||
210 | [ C(OP_READ) ] = { | ||
211 | [ C(RESULT_ACCESS) ] = -1, | ||
212 | [ C(RESULT_MISS) ] = -1, | ||
213 | }, | ||
214 | [ C(OP_WRITE) ] = { | ||
215 | [ C(RESULT_ACCESS) ] = -1, | ||
216 | [ C(RESULT_MISS) ] = -1, | ||
217 | }, | ||
218 | [ C(OP_PREFETCH) ] = { | ||
219 | [ C(RESULT_ACCESS) ] = -1, | ||
220 | [ C(RESULT_MISS) ] = -1, | ||
221 | }, | ||
222 | }, | ||
208 | }; | 223 | }; |
209 | 224 | ||
210 | static int sh4a_event_map(int event) | 225 | static int sh4a_event_map(int event) |
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 3d7b209b217..92b3c276339 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c | |||
@@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset, | |||
63 | return 0; | 63 | return 0; |
64 | } | 64 | } |
65 | 65 | ||
66 | void ptrace_triggered(struct perf_event *bp, int nmi, | 66 | void ptrace_triggered(struct perf_event *bp, |
67 | struct perf_sample_data *data, struct pt_regs *regs) | 67 | struct perf_sample_data *data, struct pt_regs *regs) |
68 | { | 68 | { |
69 | struct perf_event_attr attr; | 69 | struct perf_event_attr attr; |
@@ -91,7 +91,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr) | |||
91 | attr.bp_len = HW_BREAKPOINT_LEN_2; | 91 | attr.bp_len = HW_BREAKPOINT_LEN_2; |
92 | attr.bp_type = HW_BREAKPOINT_R; | 92 | attr.bp_type = HW_BREAKPOINT_R; |
93 | 93 | ||
94 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); | 94 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, |
95 | NULL, tsk); | ||
95 | if (IS_ERR(bp)) | 96 | if (IS_ERR(bp)) |
96 | return PTR_ERR(bp); | 97 | return PTR_ERR(bp); |
97 | 98 | ||
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index b51a17104b5..d9006f8ffc1 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c | |||
@@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, | |||
393 | */ | 393 | */ |
394 | if (!expected) { | 394 | if (!expected) { |
395 | unaligned_fixups_notify(current, instruction, regs); | 395 | unaligned_fixups_notify(current, instruction, regs); |
396 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, | 396 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, |
397 | regs, address); | 397 | regs, address); |
398 | } | 398 | } |
399 | 399 | ||
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index 6713ca97e55..67110be83fd 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c | |||
@@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs, | |||
434 | return error; | 434 | return error; |
435 | } | 435 | } |
436 | 436 | ||
437 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); | 437 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); |
438 | 438 | ||
439 | destreg = (opcode >> 4) & 0x3f; | 439 | destreg = (opcode >> 4) & 0x3f; |
440 | if (user_mode(regs)) { | 440 | if (user_mode(regs)) { |
@@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs, | |||
512 | return error; | 512 | return error; |
513 | } | 513 | } |
514 | 514 | ||
515 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); | 515 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); |
516 | 516 | ||
517 | srcreg = (opcode >> 4) & 0x3f; | 517 | srcreg = (opcode >> 4) & 0x3f; |
518 | if (user_mode(regs)) { | 518 | if (user_mode(regs)) { |
@@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs, | |||
588 | return error; | 588 | return error; |
589 | } | 589 | } |
590 | 590 | ||
591 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); | 591 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address); |
592 | 592 | ||
593 | destreg = (opcode >> 4) & 0x3f; | 593 | destreg = (opcode >> 4) & 0x3f; |
594 | if (user_mode(regs)) { | 594 | if (user_mode(regs)) { |
@@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs, | |||
665 | return error; | 665 | return error; |
666 | } | 666 | } |
667 | 667 | ||
668 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); | 668 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address); |
669 | 669 | ||
670 | srcreg = (opcode >> 4) & 0x3f; | 670 | srcreg = (opcode >> 4) & 0x3f; |
671 | if (user_mode(regs)) { | 671 | if (user_mode(regs)) { |
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c index f76a5090d5d..97719521065 100644 --- a/arch/sh/math-emu/math.c +++ b/arch/sh/math-emu/math.c | |||
@@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs) | |||
620 | struct task_struct *tsk = current; | 620 | struct task_struct *tsk = current; |
621 | struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu); | 621 | struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu); |
622 | 622 | ||
623 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | 623 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |
624 | 624 | ||
625 | if (!(task_thread_info(tsk)->status & TS_USEDFPU)) { | 625 | if (!(task_thread_info(tsk)->status & TS_USEDFPU)) { |
626 | /* initialize once. */ | 626 | /* initialize once. */ |
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index d4c34d757f0..7bebd044f2a 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c | |||
@@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
160 | if ((regs->sr & SR_IMASK) != SR_IMASK) | 160 | if ((regs->sr & SR_IMASK) != SR_IMASK) |
161 | local_irq_enable(); | 161 | local_irq_enable(); |
162 | 162 | ||
163 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 163 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
164 | 164 | ||
165 | /* | 165 | /* |
166 | * If we're in an interrupt, have no user context or are running | 166 | * If we're in an interrupt, have no user context or are running |
@@ -210,11 +210,11 @@ good_area: | |||
210 | } | 210 | } |
211 | if (fault & VM_FAULT_MAJOR) { | 211 | if (fault & VM_FAULT_MAJOR) { |
212 | tsk->maj_flt++; | 212 | tsk->maj_flt++; |
213 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 213 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
214 | regs, address); | 214 | regs, address); |
215 | } else { | 215 | } else { |
216 | tsk->min_flt++; | 216 | tsk->min_flt++; |
217 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 217 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, |
218 | regs, address); | 218 | regs, address); |
219 | } | 219 | } |
220 | 220 | ||
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index 7f5810f5dfd..e3430e093d4 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c | |||
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | |||
116 | /* Not an IO address, so reenable interrupts */ | 116 | /* Not an IO address, so reenable interrupts */ |
117 | local_irq_enable(); | 117 | local_irq_enable(); |
118 | 118 | ||
119 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 119 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
120 | 120 | ||
121 | /* | 121 | /* |
122 | * If we're in an interrupt or have no user | 122 | * If we're in an interrupt or have no user |
@@ -200,11 +200,11 @@ good_area: | |||
200 | 200 | ||
201 | if (fault & VM_FAULT_MAJOR) { | 201 | if (fault & VM_FAULT_MAJOR) { |
202 | tsk->maj_flt++; | 202 | tsk->maj_flt++; |
203 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 203 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
204 | regs, address); | 204 | regs, address); |
205 | } else { | 205 | } else { |
206 | tsk->min_flt++; | 206 | tsk->min_flt++; |
207 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 207 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, |
208 | regs, address); | 208 | regs, address); |
209 | } | 209 | } |
210 | 210 | ||
diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h index e8c648741ed..99d9b9f577b 100644 --- a/arch/sparc/include/asm/mmzone.h +++ b/arch/sparc/include/asm/mmzone.h | |||
@@ -8,8 +8,6 @@ | |||
8 | extern struct pglist_data *node_data[]; | 8 | extern struct pglist_data *node_data[]; |
9 | 9 | ||
10 | #define NODE_DATA(nid) (node_data[nid]) | 10 | #define NODE_DATA(nid) (node_data[nid]) |
11 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
12 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn) | ||
13 | 11 | ||
14 | extern int numa_cpu_lookup_table[]; | 12 | extern int numa_cpu_lookup_table[]; |
15 | extern cpumask_t numa_cpumask_lookup_table[]; | 13 | extern cpumask_t numa_cpumask_lookup_table[]; |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2cb0e1c001e..62a034318b1 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
@@ -246,6 +246,20 @@ static const cache_map_t ultra3_cache_map = { | |||
246 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | 246 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, |
247 | }, | 247 | }, |
248 | }, | 248 | }, |
249 | [C(NODE)] = { | ||
250 | [C(OP_READ)] = { | ||
251 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
252 | [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
253 | }, | ||
254 | [ C(OP_WRITE) ] = { | ||
255 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
256 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
257 | }, | ||
258 | [ C(OP_PREFETCH) ] = { | ||
259 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
260 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
261 | }, | ||
262 | }, | ||
249 | }; | 263 | }; |
250 | 264 | ||
251 | static const struct sparc_pmu ultra3_pmu = { | 265 | static const struct sparc_pmu ultra3_pmu = { |
@@ -361,6 +375,20 @@ static const cache_map_t niagara1_cache_map = { | |||
361 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | 375 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, |
362 | }, | 376 | }, |
363 | }, | 377 | }, |
378 | [C(NODE)] = { | ||
379 | [C(OP_READ)] = { | ||
380 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
381 | [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
382 | }, | ||
383 | [ C(OP_WRITE) ] = { | ||
384 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
385 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
386 | }, | ||
387 | [ C(OP_PREFETCH) ] = { | ||
388 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
389 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
390 | }, | ||
391 | }, | ||
364 | }; | 392 | }; |
365 | 393 | ||
366 | static const struct sparc_pmu niagara1_pmu = { | 394 | static const struct sparc_pmu niagara1_pmu = { |
@@ -473,6 +501,20 @@ static const cache_map_t niagara2_cache_map = { | |||
473 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | 501 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, |
474 | }, | 502 | }, |
475 | }, | 503 | }, |
504 | [C(NODE)] = { | ||
505 | [C(OP_READ)] = { | ||
506 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | ||
507 | [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
508 | }, | ||
509 | [ C(OP_WRITE) ] = { | ||
510 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
511 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
512 | }, | ||
513 | [ C(OP_PREFETCH) ] = { | ||
514 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | ||
515 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | ||
516 | }, | ||
517 | }, | ||
476 | }; | 518 | }; |
477 | 519 | ||
478 | static const struct sparc_pmu niagara2_pmu = { | 520 | static const struct sparc_pmu niagara2_pmu = { |
@@ -1277,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | |||
1277 | if (!sparc_perf_event_set_period(event, hwc, idx)) | 1319 | if (!sparc_perf_event_set_period(event, hwc, idx)) |
1278 | continue; | 1320 | continue; |
1279 | 1321 | ||
1280 | if (perf_event_overflow(event, 1, &data, regs)) | 1322 | if (perf_event_overflow(event, &data, regs)) |
1281 | sparc_pmu_stop(event, 0); | 1323 | sparc_pmu_stop(event, 0); |
1282 | } | 1324 | } |
1283 | 1325 | ||
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 4491f4cb269..7efbb2f9e77 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c | |||
@@ -247,7 +247,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) | |||
247 | unsigned long addr = compute_effective_address(regs, insn); | 247 | unsigned long addr = compute_effective_address(regs, insn); |
248 | int err; | 248 | int err; |
249 | 249 | ||
250 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); | 250 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); |
251 | switch (dir) { | 251 | switch (dir) { |
252 | case load: | 252 | case load: |
253 | err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), | 253 | err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), |
@@ -338,7 +338,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) | |||
338 | } | 338 | } |
339 | 339 | ||
340 | addr = compute_effective_address(regs, insn); | 340 | addr = compute_effective_address(regs, insn); |
341 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); | 341 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); |
342 | switch(dir) { | 342 | switch(dir) { |
343 | case load: | 343 | case load: |
344 | err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), | 344 | err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), |
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index b2b019ea8ca..35cff1673aa 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c | |||
@@ -317,7 +317,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) | |||
317 | 317 | ||
318 | addr = compute_effective_address(regs, insn, | 318 | addr = compute_effective_address(regs, insn, |
319 | ((insn >> 25) & 0x1f)); | 319 | ((insn >> 25) & 0x1f)); |
320 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); | 320 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); |
321 | switch (asi) { | 321 | switch (asi) { |
322 | case ASI_NL: | 322 | case ASI_NL: |
323 | case ASI_AIUPL: | 323 | case ASI_AIUPL: |
@@ -384,7 +384,7 @@ int handle_popc(u32 insn, struct pt_regs *regs) | |||
384 | int ret, i, rd = ((insn >> 25) & 0x1f); | 384 | int ret, i, rd = ((insn >> 25) & 0x1f); |
385 | int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; | 385 | int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; |
386 | 386 | ||
387 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | 387 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |
388 | if (insn & 0x2000) { | 388 | if (insn & 0x2000) { |
389 | maybe_flush_windows(0, 0, rd, from_kernel); | 389 | maybe_flush_windows(0, 0, rd, from_kernel); |
390 | value = sign_extend_imm13(insn); | 390 | value = sign_extend_imm13(insn); |
@@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
431 | int asi = decode_asi(insn, regs); | 431 | int asi = decode_asi(insn, regs); |
432 | int flag = (freg < 32) ? FPRS_DL : FPRS_DU; | 432 | int flag = (freg < 32) ? FPRS_DL : FPRS_DU; |
433 | 433 | ||
434 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | 434 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |
435 | 435 | ||
436 | save_and_clear_fpu(); | 436 | save_and_clear_fpu(); |
437 | current_thread_info()->xfsr[0] &= ~0x1c000; | 437 | current_thread_info()->xfsr[0] &= ~0x1c000; |
@@ -554,7 +554,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) | |||
554 | int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; | 554 | int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; |
555 | unsigned long *reg; | 555 | unsigned long *reg; |
556 | 556 | ||
557 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | 557 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |
558 | 558 | ||
559 | maybe_flush_windows(0, 0, rd, from_kernel); | 559 | maybe_flush_windows(0, 0, rd, from_kernel); |
560 | reg = fetch_reg_addr(rd, regs); | 560 | reg = fetch_reg_addr(rd, regs); |
@@ -586,7 +586,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
586 | 586 | ||
587 | if (tstate & TSTATE_PRIV) | 587 | if (tstate & TSTATE_PRIV) |
588 | die_if_kernel("lddfmna from kernel", regs); | 588 | die_if_kernel("lddfmna from kernel", regs); |
589 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); | 589 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); |
590 | if (test_thread_flag(TIF_32BIT)) | 590 | if (test_thread_flag(TIF_32BIT)) |
591 | pc = (u32)pc; | 591 | pc = (u32)pc; |
592 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { | 592 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { |
@@ -647,7 +647,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
647 | 647 | ||
648 | if (tstate & TSTATE_PRIV) | 648 | if (tstate & TSTATE_PRIV) |
649 | die_if_kernel("stdfmna from kernel", regs); | 649 | die_if_kernel("stdfmna from kernel", regs); |
650 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); | 650 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); |
651 | if (test_thread_flag(TIF_32BIT)) | 651 | if (test_thread_flag(TIF_32BIT)) |
652 | pc = (u32)pc; | 652 | pc = (u32)pc; |
653 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { | 653 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { |
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index 36357717d69..32b626c9d81 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c | |||
@@ -802,7 +802,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn) | |||
802 | 802 | ||
803 | BUG_ON(regs->tstate & TSTATE_PRIV); | 803 | BUG_ON(regs->tstate & TSTATE_PRIV); |
804 | 804 | ||
805 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | 805 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |
806 | 806 | ||
807 | if (test_thread_flag(TIF_32BIT)) | 807 | if (test_thread_flag(TIF_32BIT)) |
808 | pc = (u32)pc; | 808 | pc = (u32)pc; |
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c index a3fccde894e..aa4d55b0bdf 100644 --- a/arch/sparc/math-emu/math_32.c +++ b/arch/sparc/math-emu/math_32.c | |||
@@ -164,7 +164,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt) | |||
164 | int retcode = 0; /* assume all succeed */ | 164 | int retcode = 0; /* assume all succeed */ |
165 | unsigned long insn; | 165 | unsigned long insn; |
166 | 166 | ||
167 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | 167 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |
168 | 168 | ||
169 | #ifdef DEBUG_MATHEMU | 169 | #ifdef DEBUG_MATHEMU |
170 | printk("In do_mathemu()... pc is %08lx\n", regs->pc); | 170 | printk("In do_mathemu()... pc is %08lx\n", regs->pc); |
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c index 56d2c44747b..e575bd2fe38 100644 --- a/arch/sparc/math-emu/math_64.c +++ b/arch/sparc/math-emu/math_64.c | |||
@@ -184,7 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) | |||
184 | 184 | ||
185 | if (tstate & TSTATE_PRIV) | 185 | if (tstate & TSTATE_PRIV) |
186 | die_if_kernel("unfinished/unimplemented FPop from kernel", regs); | 186 | die_if_kernel("unfinished/unimplemented FPop from kernel", regs); |
187 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | 187 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |
188 | if (test_thread_flag(TIF_32BIT)) | 188 | if (test_thread_flag(TIF_32BIT)) |
189 | pc = (u32)pc; | 189 | pc = (u32)pc; |
190 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { | 190 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { |
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 7543ddbdadb..aa1c1b1ce5c 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c | |||
@@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, | |||
251 | if (in_atomic() || !mm) | 251 | if (in_atomic() || !mm) |
252 | goto no_context; | 252 | goto no_context; |
253 | 253 | ||
254 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 254 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
255 | 255 | ||
256 | down_read(&mm->mmap_sem); | 256 | down_read(&mm->mmap_sem); |
257 | 257 | ||
@@ -301,12 +301,10 @@ good_area: | |||
301 | } | 301 | } |
302 | if (fault & VM_FAULT_MAJOR) { | 302 | if (fault & VM_FAULT_MAJOR) { |
303 | current->maj_flt++; | 303 | current->maj_flt++; |
304 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 304 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); |
305 | regs, address); | ||
306 | } else { | 305 | } else { |
307 | current->min_flt++; | 306 | current->min_flt++; |
308 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 307 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); |
309 | regs, address); | ||
310 | } | 308 | } |
311 | up_read(&mm->mmap_sem); | 309 | up_read(&mm->mmap_sem); |
312 | return; | 310 | return; |
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index f92ce56a8b2..504c0622f72 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c | |||
@@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | |||
325 | if (in_atomic() || !mm) | 325 | if (in_atomic() || !mm) |
326 | goto intr_or_no_mm; | 326 | goto intr_or_no_mm; |
327 | 327 | ||
328 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 328 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
329 | 329 | ||
330 | if (!down_read_trylock(&mm->mmap_sem)) { | 330 | if (!down_read_trylock(&mm->mmap_sem)) { |
331 | if ((regs->tstate & TSTATE_PRIV) && | 331 | if ((regs->tstate & TSTATE_PRIV) && |
@@ -433,12 +433,10 @@ good_area: | |||
433 | } | 433 | } |
434 | if (fault & VM_FAULT_MAJOR) { | 434 | if (fault & VM_FAULT_MAJOR) { |
435 | current->maj_flt++; | 435 | current->maj_flt++; |
436 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 436 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); |
437 | regs, address); | ||
438 | } else { | 437 | } else { |
439 | current->min_flt++; | 438 | current->min_flt++; |
440 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 439 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); |
441 | regs, address); | ||
442 | } | 440 | } |
443 | up_read(&mm->mmap_sem); | 441 | up_read(&mm->mmap_sem); |
444 | 442 | ||
diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h index c6344c4f32a..9d3dbce8f95 100644 --- a/arch/tile/include/asm/mmzone.h +++ b/arch/tile/include/asm/mmzone.h | |||
@@ -40,17 +40,6 @@ static inline int pfn_to_nid(unsigned long pfn) | |||
40 | return highbits_to_node[__pfn_to_highbits(pfn)]; | 40 | return highbits_to_node[__pfn_to_highbits(pfn)]; |
41 | } | 41 | } |
42 | 42 | ||
43 | /* | ||
44 | * Following are macros that each numa implmentation must define. | ||
45 | */ | ||
46 | |||
47 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
48 | #define node_end_pfn(nid) \ | ||
49 | ({ \ | ||
50 | pg_data_t *__pgdat = NODE_DATA(nid); \ | ||
51 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ | ||
52 | }) | ||
53 | |||
54 | #define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr) | 43 | #define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr) |
55 | 44 | ||
56 | static inline int pfn_valid(int pfn) | 45 | static inline int pfn_valid(int pfn) |
diff --git a/arch/um/include/asm/percpu.h b/arch/um/include/asm/percpu.h new file mode 100644 index 00000000000..efe7508d8ab --- /dev/null +++ b/arch/um/include/asm/percpu.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __UM_PERCPU_H | ||
2 | #define __UM_PERCPU_H | ||
3 | |||
4 | #include <asm-generic/percpu.h> | ||
5 | |||
6 | #endif /* __UM_PERCPU_H */ | ||
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 5745ce8bf10..bba3cf88e62 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h | |||
@@ -60,23 +60,24 @@ static inline void native_halt(void) | |||
60 | #include <asm/paravirt.h> | 60 | #include <asm/paravirt.h> |
61 | #else | 61 | #else |
62 | #ifndef __ASSEMBLY__ | 62 | #ifndef __ASSEMBLY__ |
63 | #include <linux/types.h> | ||
63 | 64 | ||
64 | static inline unsigned long arch_local_save_flags(void) | 65 | static inline notrace unsigned long arch_local_save_flags(void) |
65 | { | 66 | { |
66 | return native_save_fl(); | 67 | return native_save_fl(); |
67 | } | 68 | } |
68 | 69 | ||
69 | static inline void arch_local_irq_restore(unsigned long flags) | 70 | static inline notrace void arch_local_irq_restore(unsigned long flags) |
70 | { | 71 | { |
71 | native_restore_fl(flags); | 72 | native_restore_fl(flags); |
72 | } | 73 | } |
73 | 74 | ||
74 | static inline void arch_local_irq_disable(void) | 75 | static inline notrace void arch_local_irq_disable(void) |
75 | { | 76 | { |
76 | native_irq_disable(); | 77 | native_irq_disable(); |
77 | } | 78 | } |
78 | 79 | ||
79 | static inline void arch_local_irq_enable(void) | 80 | static inline notrace void arch_local_irq_enable(void) |
80 | { | 81 | { |
81 | native_irq_enable(); | 82 | native_irq_enable(); |
82 | } | 83 | } |
@@ -102,7 +103,7 @@ static inline void halt(void) | |||
102 | /* | 103 | /* |
103 | * For spinlocks, etc: | 104 | * For spinlocks, etc: |
104 | */ | 105 | */ |
105 | static inline unsigned long arch_local_irq_save(void) | 106 | static inline notrace unsigned long arch_local_irq_save(void) |
106 | { | 107 | { |
107 | unsigned long flags = arch_local_save_flags(); | 108 | unsigned long flags = arch_local_save_flags(); |
108 | arch_local_irq_disable(); | 109 | arch_local_irq_disable(); |
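
Note: notrace keeps ftrace instrumentation out of the raw IRQ-flag helpers. The tracer itself manipulates interrupt state, so tracing these functions can recurse or record misleading flag transitions. The annotation is defined in include/linux/compiler.h, abridged:

	#define notrace __attribute__((no_instrument_function))
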
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index 5e83a416eca..224e8c5eb30 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h | |||
@@ -48,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn) | |||
48 | #endif | 48 | #endif |
49 | } | 49 | } |
50 | 50 | ||
51 | /* | ||
52 | * Following are macros that each numa implmentation must define. | ||
53 | */ | ||
54 | |||
55 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
56 | #define node_end_pfn(nid) \ | ||
57 | ({ \ | ||
58 | pg_data_t *__pgdat = NODE_DATA(nid); \ | ||
59 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ | ||
60 | }) | ||
61 | |||
62 | static inline int pfn_valid(int pfn) | 51 | static inline int pfn_valid(int pfn) |
63 | { | 52 | { |
64 | int nid = pfn_to_nid(pfn); | 53 | int nid = pfn_to_nid(pfn); |
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h index b3f88d7867c..129d9aa3ceb 100644 --- a/arch/x86/include/asm/mmzone_64.h +++ b/arch/x86/include/asm/mmzone_64.h | |||
@@ -13,8 +13,5 @@ extern struct pglist_data *node_data[]; | |||
13 | 13 | ||
14 | #define NODE_DATA(nid) (node_data[nid]) | 14 | #define NODE_DATA(nid) (node_data[nid]) |
15 | 15 | ||
16 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
17 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ | ||
18 | NODE_DATA(nid)->node_spanned_pages) | ||
19 | #endif | 16 | #endif |
20 | #endif /* _ASM_X86_MMZONE_64_H */ | 17 | #endif /* _ASM_X86_MMZONE_64_H */ |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index d9d4dae305f..094fb30817a 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -152,6 +152,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); | |||
152 | (regs)->bp = caller_frame_pointer(); \ | 152 | (regs)->bp = caller_frame_pointer(); \ |
153 | (regs)->cs = __KERNEL_CS; \ | 153 | (regs)->cs = __KERNEL_CS; \ |
154 | regs->flags = 0; \ | 154 | regs->flags = 0; \ |
155 | asm volatile( \ | ||
156 | _ASM_MOV "%%"_ASM_SP ", %0\n" \ | ||
157 | : "=m" ((regs)->sp) \ | ||
158 | :: "memory" \ | ||
159 | ); \ | ||
155 | } | 160 | } |
156 | 161 | ||
157 | #else | 162 | #else |
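
Note: the added asm fills in regs->sp, which perf_fetch_caller_regs() previously left zeroed, breaking stack-based unwinds for samples taken from software events. On 64-bit, the _ASM_MOV/_ASM_SP macros expand roughly to (shown for illustration):

	asm volatile("mov %%rsp, %0" : "=m" ((regs)->sp) :: "memory");
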
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3a0338b4b17..c53d433c3dd 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -45,6 +45,29 @@ do { \ | |||
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * | NHM/WSM | SNB | | ||
49 | * register ------------------------------- | ||
50 | * | HT | no HT | HT | no HT | | ||
51 | *----------------------------------------- | ||
52 | * offcore | core | core | cpu | core | | ||
53 | * lbr_sel | core | core | cpu | core | | ||
54 | * ld_lat | cpu | core | cpu | core | | ||
55 | *----------------------------------------- | ||
56 | * | ||
57 | * Given that there is a small number of shared regs, | ||
58 | * we can pre-allocate their slot in the per-cpu | ||
59 | * per-core reg tables. | ||
60 | */ | ||
61 | enum extra_reg_type { | ||
62 | EXTRA_REG_NONE = -1, /* not used */ | ||
63 | |||
64 | EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ | ||
65 | EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ | ||
66 | |||
67 | EXTRA_REG_MAX /* number of entries needed */ | ||
68 | }; | ||
69 | |||
70 | /* | ||
48 | * best effort, GUP based copy_from_user() that assumes IRQ or NMI context | 71 | * best effort, GUP based copy_from_user() that assumes IRQ or NMI context |
49 | */ | 72 | */ |
50 | static unsigned long | 73 | static unsigned long |
@@ -132,11 +155,10 @@ struct cpu_hw_events { | |||
132 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; | 155 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; |
133 | 156 | ||
134 | /* | 157 | /* |
135 | * Intel percore register state. | 158 | * manage shared (per-core, per-cpu) registers |
136 | * Coordinate shared resources between HT threads. | 159 | * used on Intel NHM/WSM/SNB |
137 | */ | 160 | */ |
138 | int percore_used; /* Used by this CPU? */ | 161 | struct intel_shared_regs *shared_regs; |
139 | struct intel_percore *per_core; | ||
140 | 162 | ||
141 | /* | 163 | /* |
142 | * AMD specific bits | 164 | * AMD specific bits |
@@ -187,26 +209,45 @@ struct cpu_hw_events { | |||
187 | for ((e) = (c); (e)->weight; (e)++) | 209 | for ((e) = (c); (e)->weight; (e)++) |
188 | 210 | ||
189 | /* | 211 | /* |
212 | * Per register state. | ||
213 | */ | ||
214 | struct er_account { | ||
215 | raw_spinlock_t lock; /* per-core: protect structure */ | ||
216 | u64 config; /* extra MSR config */ | ||
217 | u64 reg; /* extra MSR number */ | ||
218 | atomic_t ref; /* reference count */ | ||
219 | }; | ||
220 | |||
221 | /* | ||
190 | * Extra registers for specific events. | 222 | * Extra registers for specific events. |
223 | * | ||
191 | * Some events need large masks and require external MSRs. | 224 | * Some events need large masks and require external MSRs. |
192 | * Define a mapping to these extra registers. | 225 | * Those extra MSRs end up being shared for all events on |
226 | * a PMU and sometimes between PMU of sibling HT threads. | ||
227 | * In either case, the kernel needs to handle conflicting | ||
228 | * accesses to those extra, shared, regs. The data structure | ||
229 | * to manage those registers is stored in cpu_hw_event. | ||
193 | */ | 230 | */ |
194 | struct extra_reg { | 231 | struct extra_reg { |
195 | unsigned int event; | 232 | unsigned int event; |
196 | unsigned int msr; | 233 | unsigned int msr; |
197 | u64 config_mask; | 234 | u64 config_mask; |
198 | u64 valid_mask; | 235 | u64 valid_mask; |
236 | int idx; /* per_xxx->regs[] reg index */ | ||
199 | }; | 237 | }; |
200 | 238 | ||
201 | #define EVENT_EXTRA_REG(e, ms, m, vm) { \ | 239 | #define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ |
202 | .event = (e), \ | 240 | .event = (e), \ |
203 | .msr = (ms), \ | 241 | .msr = (ms), \ |
204 | .config_mask = (m), \ | 242 | .config_mask = (m), \ |
205 | .valid_mask = (vm), \ | 243 | .valid_mask = (vm), \ |
244 | .idx = EXTRA_REG_##i \ | ||
206 | } | 245 | } |
207 | #define INTEL_EVENT_EXTRA_REG(event, msr, vm) \ | 246 | |
208 | EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm) | 247 | #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ |
209 | #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0) | 248 | EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) |
249 | |||
250 | #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0) | ||
210 | 251 | ||
211 | union perf_capabilities { | 252 | union perf_capabilities { |
212 | struct { | 253 | struct { |
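
Note: struct er_account is the bookkeeping for one shared extra MSR: a raw spinlock serializing updates, the last programmed config, and an atomic reference count. A hypothetical sketch of the sharing rule those fields imply; the actual constraint logic lives in the Intel-specific code, which is not part of this hunk:

	/* sketch only: a slot may be (re)used when free, or when it is
	 * already programmed with the same config as the new event */
	static bool try_claim_extra_reg(struct er_account *era, u64 config)
	{
		bool ok;

		raw_spin_lock(&era->lock);
		ok = !atomic_read(&era->ref) || era->config == config;
		if (ok) {
			era->config = config;
			atomic_inc(&era->ref);
		}
		raw_spin_unlock(&era->lock);
		return ok;
	}
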
@@ -233,6 +274,7 @@ struct x86_pmu { | |||
233 | void (*enable_all)(int added); | 274 | void (*enable_all)(int added); |
234 | void (*enable)(struct perf_event *); | 275 | void (*enable)(struct perf_event *); |
235 | void (*disable)(struct perf_event *); | 276 | void (*disable)(struct perf_event *); |
277 | void (*hw_watchdog_set_attr)(struct perf_event_attr *attr); | ||
236 | int (*hw_config)(struct perf_event *event); | 278 | int (*hw_config)(struct perf_event *event); |
237 | int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); | 279 | int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); |
238 | unsigned eventsel; | 280 | unsigned eventsel; |
@@ -252,7 +294,6 @@ struct x86_pmu { | |||
252 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, | 294 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, |
253 | struct perf_event *event); | 295 | struct perf_event *event); |
254 | struct event_constraint *event_constraints; | 296 | struct event_constraint *event_constraints; |
255 | struct event_constraint *percore_constraints; | ||
256 | void (*quirks)(void); | 297 | void (*quirks)(void); |
257 | int perfctr_second_write; | 298 | int perfctr_second_write; |
258 | 299 | ||
@@ -286,8 +327,12 @@ struct x86_pmu { | |||
286 | * Extra registers for events | 327 | * Extra registers for events |
287 | */ | 328 | */ |
288 | struct extra_reg *extra_regs; | 329 | struct extra_reg *extra_regs; |
330 | unsigned int er_flags; | ||
289 | }; | 331 | }; |
290 | 332 | ||
333 | #define ERF_NO_HT_SHARING 1 | ||
334 | #define ERF_HAS_RSP_1 2 | ||
335 | |||
291 | static struct x86_pmu x86_pmu __read_mostly; | 336 | static struct x86_pmu x86_pmu __read_mostly; |
292 | 337 | ||
293 | static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { | 338 | static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { |
@@ -315,6 +360,12 @@ static u64 __read_mostly hw_cache_extra_regs | |||
315 | [PERF_COUNT_HW_CACHE_OP_MAX] | 360 | [PERF_COUNT_HW_CACHE_OP_MAX] |
316 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | 361 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
317 | 362 | ||
363 | void hw_nmi_watchdog_set_attr(struct perf_event_attr *wd_attr) | ||
364 | { | ||
365 | if (x86_pmu.hw_watchdog_set_attr) | ||
366 | x86_pmu.hw_watchdog_set_attr(wd_attr); | ||
367 | } | ||
368 | |||
318 | /* | 369 | /* |
319 | * Propagate event elapsed time into the generic event. | 370 | * Propagate event elapsed time into the generic event. |
320 | * Can only be executed on the CPU where the event is active. | 371 | * Can only be executed on the CPU where the event is active. |
@@ -393,10 +444,10 @@ static inline unsigned int x86_pmu_event_addr(int index) | |||
393 | */ | 444 | */ |
394 | static int x86_pmu_extra_regs(u64 config, struct perf_event *event) | 445 | static int x86_pmu_extra_regs(u64 config, struct perf_event *event) |
395 | { | 446 | { |
447 | struct hw_perf_event_extra *reg; | ||
396 | struct extra_reg *er; | 448 | struct extra_reg *er; |
397 | 449 | ||
398 | event->hw.extra_reg = 0; | 450 | reg = &event->hw.extra_reg; |
399 | event->hw.extra_config = 0; | ||
400 | 451 | ||
401 | if (!x86_pmu.extra_regs) | 452 | if (!x86_pmu.extra_regs) |
402 | return 0; | 453 | return 0; |
@@ -406,8 +457,10 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event) | |||
406 | continue; | 457 | continue; |
407 | if (event->attr.config1 & ~er->valid_mask) | 458 | if (event->attr.config1 & ~er->valid_mask) |
408 | return -EINVAL; | 459 | return -EINVAL; |
409 | event->hw.extra_reg = er->msr; | 460 | |
410 | event->hw.extra_config = event->attr.config1; | 461 | reg->idx = er->idx; |
462 | reg->config = event->attr.config1; | ||
463 | reg->reg = er->msr; | ||
411 | break; | 464 | break; |
412 | } | 465 | } |
413 | return 0; | 466 | return 0; |
@@ -706,6 +759,9 @@ static int __x86_pmu_event_init(struct perf_event *event) | |||
706 | event->hw.last_cpu = -1; | 759 | event->hw.last_cpu = -1; |
707 | event->hw.last_tag = ~0ULL; | 760 | event->hw.last_tag = ~0ULL; |
708 | 761 | ||
762 | /* mark unused */ | ||
763 | event->hw.extra_reg.idx = EXTRA_REG_NONE; | ||
764 | |||
709 | return x86_pmu.hw_config(event); | 765 | return x86_pmu.hw_config(event); |
710 | } | 766 | } |
711 | 767 | ||
@@ -747,8 +803,8 @@ static void x86_pmu_disable(struct pmu *pmu) | |||
747 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | 803 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, |
748 | u64 enable_mask) | 804 | u64 enable_mask) |
749 | { | 805 | { |
750 | if (hwc->extra_reg) | 806 | if (hwc->extra_reg.reg) |
751 | wrmsrl(hwc->extra_reg, hwc->extra_config); | 807 | wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); |
752 | wrmsrl(hwc->config_base, hwc->config | enable_mask); | 808 | wrmsrl(hwc->config_base, hwc->config | enable_mask); |
753 | } | 809 | } |
754 | 810 | ||
@@ -1332,7 +1388,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1332 | if (!x86_perf_event_set_period(event)) | 1388 | if (!x86_perf_event_set_period(event)) |
1333 | continue; | 1389 | continue; |
1334 | 1390 | ||
1335 | if (perf_event_overflow(event, 1, &data, regs)) | 1391 | if (perf_event_overflow(event, &data, regs)) |
1336 | x86_pmu_stop(event, 0); | 1392 | x86_pmu_stop(event, 0); |
1337 | } | 1393 | } |
1338 | 1394 | ||
@@ -1637,6 +1693,40 @@ static int x86_pmu_commit_txn(struct pmu *pmu) | |||
1637 | perf_pmu_enable(pmu); | 1693 | perf_pmu_enable(pmu); |
1638 | return 0; | 1694 | return 0; |
1639 | } | 1695 | } |
1696 | /* | ||
1697 | * A fake_cpuc is used to validate event groups. Due to | ||
1698 | * the extra reg logic, we also need to allocate fake | ||
1699 | * per_core and per_cpu structures. Otherwise, group events | ||
1700 | * using an extra reg may conflict without the kernel being | ||
1701 | * able to catch this when the last event gets added to | ||
1702 | * the group. | ||
1703 | */ | ||
1704 | static void free_fake_cpuc(struct cpu_hw_events *cpuc) | ||
1705 | { | ||
1706 | kfree(cpuc->shared_regs); | ||
1707 | kfree(cpuc); | ||
1708 | } | ||
1709 | |||
1710 | static struct cpu_hw_events *allocate_fake_cpuc(void) | ||
1711 | { | ||
1712 | struct cpu_hw_events *cpuc; | ||
1713 | int cpu = raw_smp_processor_id(); | ||
1714 | |||
1715 | cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); | ||
1716 | if (!cpuc) | ||
1717 | return ERR_PTR(-ENOMEM); | ||
1718 | |||
1719 | /* only needed if we have extra_regs */ | ||
1720 | if (x86_pmu.extra_regs) { | ||
1721 | cpuc->shared_regs = allocate_shared_regs(cpu); | ||
1722 | if (!cpuc->shared_regs) | ||
1723 | goto error; | ||
1724 | } | ||
1725 | return cpuc; | ||
1726 | error: | ||
1727 | free_fake_cpuc(cpuc); | ||
1728 | return ERR_PTR(-ENOMEM); | ||
1729 | } | ||
1640 | 1730 | ||
1641 | /* | 1731 | /* |
1642 | * validate that we can schedule this event | 1732 | * validate that we can schedule this event |
@@ -1647,9 +1737,9 @@ static int validate_event(struct perf_event *event) | |||
1647 | struct event_constraint *c; | 1737 | struct event_constraint *c; |
1648 | int ret = 0; | 1738 | int ret = 0; |
1649 | 1739 | ||
1650 | fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); | 1740 | fake_cpuc = allocate_fake_cpuc(); |
1651 | if (!fake_cpuc) | 1741 | if (IS_ERR(fake_cpuc)) |
1652 | return -ENOMEM; | 1742 | return PTR_ERR(fake_cpuc); |
1653 | 1743 | ||
1654 | c = x86_pmu.get_event_constraints(fake_cpuc, event); | 1744 | c = x86_pmu.get_event_constraints(fake_cpuc, event); |
1655 | 1745 | ||
@@ -1659,7 +1749,7 @@ static int validate_event(struct perf_event *event) | |||
1659 | if (x86_pmu.put_event_constraints) | 1749 | if (x86_pmu.put_event_constraints) |
1660 | x86_pmu.put_event_constraints(fake_cpuc, event); | 1750 | x86_pmu.put_event_constraints(fake_cpuc, event); |
1661 | 1751 | ||
1662 | kfree(fake_cpuc); | 1752 | free_fake_cpuc(fake_cpuc); |
1663 | 1753 | ||
1664 | return ret; | 1754 | return ret; |
1665 | } | 1755 | } |
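allocate_fake_cpuc() reports failure as ERR_PTR(-ENOMEM) rather than NULL, which is what lets validate_event() forward the precise errno via PTR_ERR(). The idiom in isolation, as a generic sketch with a placeholder type (not code from this patch):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int dummy; };      /* placeholder payload */

static struct foo *alloc_foo(void)
{
        struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return ERR_PTR(-ENOMEM);        /* errno encoded in the pointer */
        return p;
}

static int use_foo(void)
{
        struct foo *p = alloc_foo();

        if (IS_ERR(p))
                return PTR_ERR(p);              /* decodes back to -ENOMEM */
        kfree(p);
        return 0;
}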
@@ -1679,36 +1769,32 @@ static int validate_group(struct perf_event *event) | |||
1679 | { | 1769 | { |
1680 | struct perf_event *leader = event->group_leader; | 1770 | struct perf_event *leader = event->group_leader; |
1681 | struct cpu_hw_events *fake_cpuc; | 1771 | struct cpu_hw_events *fake_cpuc; |
1682 | int ret, n; | 1772 | int ret = -ENOSPC, n; |
1683 | |||
1684 | ret = -ENOMEM; | ||
1685 | fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); | ||
1686 | if (!fake_cpuc) | ||
1687 | goto out; | ||
1688 | 1773 | ||
1774 | fake_cpuc = allocate_fake_cpuc(); | ||
1775 | if (IS_ERR(fake_cpuc)) | ||
1776 | return PTR_ERR(fake_cpuc); | ||
1689 | /* | 1777 | /* |
1690 | * the event is not yet connected with its | 1778 | * the event is not yet connected with its |
1691 | * siblings therefore we must first collect | 1779 | * siblings therefore we must first collect |
1692 | * existing siblings, then add the new event | 1780 | * existing siblings, then add the new event |
1693 | * before we can simulate the scheduling | 1781 | * before we can simulate the scheduling |
1694 | */ | 1782 | */ |
1695 | ret = -ENOSPC; | ||
1696 | n = collect_events(fake_cpuc, leader, true); | 1783 | n = collect_events(fake_cpuc, leader, true); |
1697 | if (n < 0) | 1784 | if (n < 0) |
1698 | goto out_free; | 1785 | goto out; |
1699 | 1786 | ||
1700 | fake_cpuc->n_events = n; | 1787 | fake_cpuc->n_events = n; |
1701 | n = collect_events(fake_cpuc, event, false); | 1788 | n = collect_events(fake_cpuc, event, false); |
1702 | if (n < 0) | 1789 | if (n < 0) |
1703 | goto out_free; | 1790 | goto out; |
1704 | 1791 | ||
1705 | fake_cpuc->n_events = n; | 1792 | fake_cpuc->n_events = n; |
1706 | 1793 | ||
1707 | ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); | 1794 | ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); |
1708 | 1795 | ||
1709 | out_free: | ||
1710 | kfree(fake_cpuc); | ||
1711 | out: | 1796 | out: |
1797 | free_fake_cpuc(fake_cpuc); | ||
1712 | return ret; | 1798 | return ret; |
1713 | } | 1799 | } |
1714 | 1800 | ||
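After the rework, validate_group() funnels every exit through the single out: label, so free_fake_cpuc() runs exactly once whether scheduling succeeded or not. A condensed sketch of the resulting single-exit shape (step_one() and step_two() are hypothetical stand-ins for the two collect_events() calls):

static int validate_sketch(void)
{
        struct cpu_hw_events *fake_cpuc;
        int ret = -ENOSPC;                      /* pessimistic default, as above */

        fake_cpuc = allocate_fake_cpuc();
        if (IS_ERR(fake_cpuc))
                return PTR_ERR(fake_cpuc);      /* nothing to clean up yet */

        if (step_one(fake_cpuc) < 0)
                goto out;
        if (step_two(fake_cpuc) < 0)
                goto out;
        ret = 0;
out:
        free_fake_cpuc(fake_cpuc);              /* single cleanup point */
        return ret;
}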
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index fe29c1d2219..941caa2e449 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -89,6 +89,20 @@ static __initconst const u64 amd_hw_cache_event_ids | |||
89 | [ C(RESULT_MISS) ] = -1, | 89 | [ C(RESULT_MISS) ] = -1, |
90 | }, | 90 | }, |
91 | }, | 91 | }, |
92 | [ C(NODE) ] = { | ||
93 | [ C(OP_READ) ] = { | ||
94 | [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */ | ||
95 | [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */ | ||
96 | }, | ||
97 | [ C(OP_WRITE) ] = { | ||
98 | [ C(RESULT_ACCESS) ] = -1, | ||
99 | [ C(RESULT_MISS) ] = -1, | ||
100 | }, | ||
101 | [ C(OP_PREFETCH) ] = { | ||
102 | [ C(RESULT_ACCESS) ] = -1, | ||
103 | [ C(RESULT_MISS) ] = -1, | ||
104 | }, | ||
105 | }, | ||
92 | }; | 106 | }; |
93 | 107 | ||
94 | /* | 108 | /* |
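The new C(NODE) rows plug into the generic cache-event encoding, where the perf ABI packs attr.config as (cache id) | (op << 8) | (result << 16). A user-space sketch counting node read accesses, assuming the PERF_COUNT_HW_CACHE_NODE id this series introduces (on AMD it resolves to the 0xb8e9 encoding above):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_node_reads(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size   = sizeof(attr);
        attr.type   = PERF_TYPE_HW_CACHE;
        attr.config = PERF_COUNT_HW_CACHE_NODE |
                      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                      (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16);

        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}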
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 41178c826c4..45fbb8f7f54 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -1,25 +1,15 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_INTEL | 1 | #ifdef CONFIG_CPU_SUP_INTEL |
2 | 2 | ||
3 | #define MAX_EXTRA_REGS 2 | ||
4 | |||
5 | /* | ||
6 | * Per register state. | ||
7 | */ | ||
8 | struct er_account { | ||
9 | int ref; /* reference count */ | ||
10 | unsigned int extra_reg; /* extra MSR number */ | ||
11 | u64 extra_config; /* extra MSR config */ | ||
12 | }; | ||
13 | |||
14 | /* | 3 | /* |
15 | * Per core state | 4 | * Per core/cpu state |
16 | * This used to coordinate shared registers for HT threads. | 5 | * |
6 | * Used to coordinate shared registers between HT threads or | ||
7 | * among events on a single PMU. | ||
17 | */ | 8 | */ |
18 | struct intel_percore { | 9 | struct intel_shared_regs { |
19 | raw_spinlock_t lock; /* protect structure */ | 10 | struct er_account regs[EXTRA_REG_MAX]; |
20 | struct er_account regs[MAX_EXTRA_REGS]; | 11 | int refcnt; /* per-core: #HT threads */ |
21 | int refcnt; /* number of threads */ | 12 | unsigned core_id; /* per-core: core id */ |
22 | unsigned core_id; | ||
23 | }; | 13 | }; |
24 | 14 | ||
25 | /* | 15 | /* |
@@ -88,16 +78,10 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = | |||
88 | 78 | ||
89 | static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = | 79 | static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = |
90 | { | 80 | { |
91 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), | 81 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), |
92 | EVENT_EXTRA_END | 82 | EVENT_EXTRA_END |
93 | }; | 83 | }; |
94 | 84 | ||
95 | static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly = | ||
96 | { | ||
97 | INTEL_EVENT_CONSTRAINT(0xb7, 0), | ||
98 | EVENT_CONSTRAINT_END | ||
99 | }; | ||
100 | |||
101 | static struct event_constraint intel_westmere_event_constraints[] __read_mostly = | 85 | static struct event_constraint intel_westmere_event_constraints[] __read_mostly = |
102 | { | 86 | { |
103 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 87 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
@@ -116,8 +100,6 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = | |||
116 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 100 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
117 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | 101 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ |
118 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ | 102 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ |
119 | INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */ | ||
120 | INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */ | ||
121 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 103 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ |
122 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | 104 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ |
123 | EVENT_CONSTRAINT_END | 105 | EVENT_CONSTRAINT_END |
@@ -125,15 +107,13 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = | |||
125 | 107 | ||
126 | static struct extra_reg intel_westmere_extra_regs[] __read_mostly = | 108 | static struct extra_reg intel_westmere_extra_regs[] __read_mostly = |
127 | { | 109 | { |
128 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), | 110 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), |
129 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), | 111 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), |
130 | EVENT_EXTRA_END | 112 | EVENT_EXTRA_END |
131 | }; | 113 | }; |
132 | 114 | ||
133 | static struct event_constraint intel_westmere_percore_constraints[] __read_mostly = | 115 | static struct event_constraint intel_v1_event_constraints[] __read_mostly = |
134 | { | 116 | { |
135 | INTEL_EVENT_CONSTRAINT(0xb7, 0), | ||
136 | INTEL_EVENT_CONSTRAINT(0xbb, 0), | ||
137 | EVENT_CONSTRAINT_END | 117 | EVENT_CONSTRAINT_END |
138 | }; | 118 | }; |
139 | 119 | ||
@@ -145,6 +125,12 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = | |||
145 | EVENT_CONSTRAINT_END | 125 | EVENT_CONSTRAINT_END |
146 | }; | 126 | }; |
147 | 127 | ||
128 | static struct extra_reg intel_snb_extra_regs[] __read_mostly = { | ||
129 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), | ||
130 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), | ||
131 | EVENT_EXTRA_END | ||
132 | }; | ||
133 | |||
148 | static u64 intel_pmu_event_map(int hw_event) | 134 | static u64 intel_pmu_event_map(int hw_event) |
149 | { | 135 | { |
150 | return intel_perfmon_event_map[hw_event]; | 136 | return intel_perfmon_event_map[hw_event]; |
@@ -245,6 +231,21 @@ static __initconst const u64 snb_hw_cache_event_ids | |||
245 | [ C(RESULT_MISS) ] = -1, | 231 | [ C(RESULT_MISS) ] = -1, |
246 | }, | 232 | }, |
247 | }, | 233 | }, |
234 | [ C(NODE) ] = { | ||
235 | [ C(OP_READ) ] = { | ||
236 | [ C(RESULT_ACCESS) ] = -1, | ||
237 | [ C(RESULT_MISS) ] = -1, | ||
238 | }, | ||
239 | [ C(OP_WRITE) ] = { | ||
240 | [ C(RESULT_ACCESS) ] = -1, | ||
241 | [ C(RESULT_MISS) ] = -1, | ||
242 | }, | ||
243 | [ C(OP_PREFETCH) ] = { | ||
244 | [ C(RESULT_ACCESS) ] = -1, | ||
245 | [ C(RESULT_MISS) ] = -1, | ||
246 | }, | ||
247 | }, | ||
248 | |||
248 | }; | 249 | }; |
249 | 250 | ||
250 | static __initconst const u64 westmere_hw_cache_event_ids | 251 | static __initconst const u64 westmere_hw_cache_event_ids |
@@ -346,6 +347,20 @@ static __initconst const u64 westmere_hw_cache_event_ids | |||
346 | [ C(RESULT_MISS) ] = -1, | 347 | [ C(RESULT_MISS) ] = -1, |
347 | }, | 348 | }, |
348 | }, | 349 | }, |
350 | [ C(NODE) ] = { | ||
351 | [ C(OP_READ) ] = { | ||
352 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
353 | [ C(RESULT_MISS) ] = 0x01b7, | ||
354 | }, | ||
355 | [ C(OP_WRITE) ] = { | ||
356 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
357 | [ C(RESULT_MISS) ] = 0x01b7, | ||
358 | }, | ||
359 | [ C(OP_PREFETCH) ] = { | ||
360 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
361 | [ C(RESULT_MISS) ] = 0x01b7, | ||
362 | }, | ||
363 | }, | ||
349 | }; | 364 | }; |
350 | 365 | ||
351 | /* | 366 | /* |
@@ -398,7 +413,21 @@ static __initconst const u64 nehalem_hw_cache_extra_regs | |||
398 | [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, | 413 | [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, |
399 | [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, | 414 | [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, |
400 | }, | 415 | }, |
401 | } | 416 | }, |
417 | [ C(NODE) ] = { | ||
418 | [ C(OP_READ) ] = { | ||
419 | [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM, | ||
420 | [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE_DRAM, | ||
421 | }, | ||
422 | [ C(OP_WRITE) ] = { | ||
423 | [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM, | ||
424 | [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM, | ||
425 | }, | ||
426 | [ C(OP_PREFETCH) ] = { | ||
427 | [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM, | ||
428 | [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM, | ||
429 | }, | ||
430 | }, | ||
402 | }; | 431 | }; |
403 | 432 | ||
404 | static __initconst const u64 nehalem_hw_cache_event_ids | 433 | static __initconst const u64 nehalem_hw_cache_event_ids |
@@ -500,6 +529,20 @@ static __initconst const u64 nehalem_hw_cache_event_ids | |||
500 | [ C(RESULT_MISS) ] = -1, | 529 | [ C(RESULT_MISS) ] = -1, |
501 | }, | 530 | }, |
502 | }, | 531 | }, |
532 | [ C(NODE) ] = { | ||
533 | [ C(OP_READ) ] = { | ||
534 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
535 | [ C(RESULT_MISS) ] = 0x01b7, | ||
536 | }, | ||
537 | [ C(OP_WRITE) ] = { | ||
538 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
539 | [ C(RESULT_MISS) ] = 0x01b7, | ||
540 | }, | ||
541 | [ C(OP_PREFETCH) ] = { | ||
542 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
543 | [ C(RESULT_MISS) ] = 0x01b7, | ||
544 | }, | ||
545 | }, | ||
503 | }; | 546 | }; |
504 | 547 | ||
505 | static __initconst const u64 core2_hw_cache_event_ids | 548 | static __initconst const u64 core2_hw_cache_event_ids |
@@ -1003,7 +1046,7 @@ again: | |||
1003 | 1046 | ||
1004 | data.period = event->hw.last_period; | 1047 | data.period = event->hw.last_period; |
1005 | 1048 | ||
1006 | if (perf_event_overflow(event, 1, &data, regs)) | 1049 | if (perf_event_overflow(event, &data, regs)) |
1007 | x86_pmu_stop(event, 0); | 1050 | x86_pmu_stop(event, 0); |
1008 | } | 1051 | } |
1009 | 1052 | ||
@@ -1037,65 +1080,121 @@ intel_bts_constraints(struct perf_event *event) | |||
1037 | return NULL; | 1080 | return NULL; |
1038 | } | 1081 | } |
1039 | 1082 | ||
1083 | static bool intel_try_alt_er(struct perf_event *event, int orig_idx) | ||
1084 | { | ||
1085 | if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) | ||
1086 | return false; | ||
1087 | |||
1088 | if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { | ||
1089 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | ||
1090 | event->hw.config |= 0x01bb; | ||
1091 | event->hw.extra_reg.idx = EXTRA_REG_RSP_1; | ||
1092 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; | ||
1093 | } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) { | ||
1094 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | ||
1095 | event->hw.config |= 0x01b7; | ||
1096 | event->hw.extra_reg.idx = EXTRA_REG_RSP_0; | ||
1097 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; | ||
1098 | } | ||
1099 | |||
1100 | if (event->hw.extra_reg.idx == orig_idx) | ||
1101 | return false; | ||
1102 | |||
1103 | return true; | ||
1104 | } | ||
1105 | |||
1106 | /* | ||
1107 | * manage allocation of shared extra msr for certain events | ||
1108 | * | ||
1109 | * sharing can be: | ||
1110 | * per-cpu: to be shared between the various events on a single PMU | ||
1111 | * per-core: per-cpu + shared by HT threads | ||
1112 | */ | ||
1040 | static struct event_constraint * | 1113 | static struct event_constraint * |
1041 | intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | 1114 | __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, |
1115 | struct perf_event *event) | ||
1042 | { | 1116 | { |
1043 | struct hw_perf_event *hwc = &event->hw; | 1117 | struct event_constraint *c = &emptyconstraint; |
1044 | unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT; | 1118 | struct hw_perf_event_extra *reg = &event->hw.extra_reg; |
1045 | struct event_constraint *c; | ||
1046 | struct intel_percore *pc; | ||
1047 | struct er_account *era; | 1119 | struct er_account *era; |
1048 | int i; | 1120 | unsigned long flags; |
1049 | int free_slot; | 1121 | int orig_idx = reg->idx; |
1050 | int found; | ||
1051 | 1122 | ||
1052 | if (!x86_pmu.percore_constraints || hwc->extra_alloc) | 1123 | /* already allocated shared msr */ |
1053 | return NULL; | 1124 | if (reg->alloc) |
1125 | return &unconstrained; | ||
1054 | 1126 | ||
1055 | for (c = x86_pmu.percore_constraints; c->cmask; c++) { | 1127 | again: |
1056 | if (e != c->code) | 1128 | era = &cpuc->shared_regs->regs[reg->idx]; |
1057 | continue; | 1129 | /* |
1130 | * we use spin_lock_irqsave() to avoid lockdep issues when | ||
1131 | * passing a fake cpuc | ||
1132 | */ | ||
1133 | raw_spin_lock_irqsave(&era->lock, flags); | ||
1134 | |||
1135 | if (!atomic_read(&era->ref) || era->config == reg->config) { | ||
1136 | |||
1137 | /* lock in msr value */ | ||
1138 | era->config = reg->config; | ||
1139 | era->reg = reg->reg; | ||
1140 | |||
1141 | /* one more user */ | ||
1142 | atomic_inc(&era->ref); | ||
1143 | |||
1144 | /* no need to reallocate during incremental event scheduling */ | ||
1145 | reg->alloc = 1; | ||
1058 | 1146 | ||
1059 | /* | 1147 | /* |
1060 | * Allocate resource per core. | 1148 | * All events using extra_reg are unconstrained. |
1149 | * Avoids calling x86_get_event_constraints() | ||
1150 | * | ||
1151 | * Must revisit if extra_reg controlling events | ||
1152 | * ever have constraints. Worst case we go through | ||
1153 | * the regular event constraint table. | ||
1061 | */ | 1154 | */ |
1062 | pc = cpuc->per_core; | 1155 | c = &unconstrained; |
1063 | if (!pc) | 1156 | } else if (intel_try_alt_er(event, orig_idx)) { |
1064 | break; | 1157 | raw_spin_unlock(&era->lock); |
1065 | c = &emptyconstraint; | 1158 | goto again; |
1066 | raw_spin_lock(&pc->lock); | ||
1067 | free_slot = -1; | ||
1068 | found = 0; | ||
1069 | for (i = 0; i < MAX_EXTRA_REGS; i++) { | ||
1070 | era = &pc->regs[i]; | ||
1071 | if (era->ref > 0 && hwc->extra_reg == era->extra_reg) { | ||
1072 | /* Allow sharing same config */ | ||
1073 | if (hwc->extra_config == era->extra_config) { | ||
1074 | era->ref++; | ||
1075 | cpuc->percore_used = 1; | ||
1076 | hwc->extra_alloc = 1; | ||
1077 | c = NULL; | ||
1078 | } | ||
1079 | /* else conflict */ | ||
1080 | found = 1; | ||
1081 | break; | ||
1082 | } else if (era->ref == 0 && free_slot == -1) | ||
1083 | free_slot = i; | ||
1084 | } | ||
1085 | if (!found && free_slot != -1) { | ||
1086 | era = &pc->regs[free_slot]; | ||
1087 | era->ref = 1; | ||
1088 | era->extra_reg = hwc->extra_reg; | ||
1089 | era->extra_config = hwc->extra_config; | ||
1090 | cpuc->percore_used = 1; | ||
1091 | hwc->extra_alloc = 1; | ||
1092 | c = NULL; | ||
1093 | } | ||
1094 | raw_spin_unlock(&pc->lock); | ||
1095 | return c; | ||
1096 | } | 1159 | } |
1160 | raw_spin_unlock_irqrestore(&era->lock, flags); | ||
1097 | 1161 | ||
1098 | return NULL; | 1162 | return c; |
1163 | } | ||
1164 | |||
1165 | static void | ||
1166 | __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, | ||
1167 | struct hw_perf_event_extra *reg) | ||
1168 | { | ||
1169 | struct er_account *era; | ||
1170 | |||
1171 | /* | ||
1172 | * only put the constraint if the extra reg was actually | ||
1173 | * allocated. This also takes care of events which do | ||
1174 | * not use an extra shared reg. | ||
1175 | */ | ||
1176 | if (!reg->alloc) | ||
1177 | return; | ||
1178 | |||
1179 | era = &cpuc->shared_regs->regs[reg->idx]; | ||
1180 | |||
1181 | /* one fewer user */ | ||
1182 | atomic_dec(&era->ref); | ||
1183 | |||
1184 | /* allocate again next time */ | ||
1185 | reg->alloc = 0; | ||
1186 | } | ||
1187 | |||
1188 | static struct event_constraint * | ||
1189 | intel_shared_regs_constraints(struct cpu_hw_events *cpuc, | ||
1190 | struct perf_event *event) | ||
1191 | { | ||
1192 | struct event_constraint *c = NULL; | ||
1193 | |||
1194 | if (event->hw.extra_reg.idx != EXTRA_REG_NONE) | ||
1195 | c = __intel_shared_reg_get_constraints(cpuc, event); | ||
1196 | |||
1197 | return c; | ||
1099 | } | 1198 | } |
1100 | 1199 | ||
1101 | static struct event_constraint * | 1200 | static struct event_constraint * |
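The whole allocation dance above is a refcounted claim on a shared MSR slot: the first user locks in the MSR value, later users may join only with an identical value, and intel_try_alt_er() retries on the sibling OFFCORE_RSP MSR on conflict. A condensed sketch of the claim step; the er_account fields are inferred from this hunk, since the struct definition itself moved out of this file:

struct er_account_sketch {              /* presumed layout */
        raw_spinlock_t  lock;           /* protects this slot */
        u64             config;         /* locked-in MSR value */
        u64             reg;            /* MSR number */
        atomic_t        ref;            /* events currently sharing it */
};

static bool try_claim(struct er_account_sketch *era, u64 config, u64 msr)
{
        unsigned long flags;
        bool ok = false;

        raw_spin_lock_irqsave(&era->lock, flags);
        if (!atomic_read(&era->ref) || era->config == config) {
                era->config = config;   /* first user locks in the value */
                era->reg = msr;
                atomic_inc(&era->ref);  /* one more user */
                ok = true;
        }
        raw_spin_unlock_irqrestore(&era->lock, flags);
        return ok;                      /* on false: try the alternate MSR */
}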
@@ -1111,49 +1210,28 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event | |||
1111 | if (c) | 1210 | if (c) |
1112 | return c; | 1211 | return c; |
1113 | 1212 | ||
1114 | c = intel_percore_constraints(cpuc, event); | 1213 | c = intel_shared_regs_constraints(cpuc, event); |
1115 | if (c) | 1214 | if (c) |
1116 | return c; | 1215 | return c; |
1117 | 1216 | ||
1118 | return x86_get_event_constraints(cpuc, event); | 1217 | return x86_get_event_constraints(cpuc, event); |
1119 | } | 1218 | } |
1120 | 1219 | ||
1121 | static void intel_put_event_constraints(struct cpu_hw_events *cpuc, | 1220 | static void |
1221 | intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, | ||
1122 | struct perf_event *event) | 1222 | struct perf_event *event) |
1123 | { | 1223 | { |
1124 | struct extra_reg *er; | 1224 | struct hw_perf_event_extra *reg; |
1125 | struct intel_percore *pc; | ||
1126 | struct er_account *era; | ||
1127 | struct hw_perf_event *hwc = &event->hw; | ||
1128 | int i, allref; | ||
1129 | 1225 | ||
1130 | if (!cpuc->percore_used) | 1226 | reg = &event->hw.extra_reg; |
1131 | return; | 1227 | if (reg->idx != EXTRA_REG_NONE) |
1132 | 1228 | __intel_shared_reg_put_constraints(cpuc, reg); | |
1133 | for (er = x86_pmu.extra_regs; er->msr; er++) { | 1229 | } |
1134 | if (er->event != (hwc->config & er->config_mask)) | ||
1135 | continue; | ||
1136 | 1230 | ||
1137 | pc = cpuc->per_core; | 1231 | static void intel_put_event_constraints(struct cpu_hw_events *cpuc, |
1138 | raw_spin_lock(&pc->lock); | 1232 | struct perf_event *event) |
1139 | for (i = 0; i < MAX_EXTRA_REGS; i++) { | 1233 | { |
1140 | era = &pc->regs[i]; | 1234 | intel_put_shared_regs_event_constraints(cpuc, event); |
1141 | if (era->ref > 0 && | ||
1142 | era->extra_config == hwc->extra_config && | ||
1143 | era->extra_reg == er->msr) { | ||
1144 | era->ref--; | ||
1145 | hwc->extra_alloc = 0; | ||
1146 | break; | ||
1147 | } | ||
1148 | } | ||
1149 | allref = 0; | ||
1150 | for (i = 0; i < MAX_EXTRA_REGS; i++) | ||
1151 | allref += pc->regs[i].ref; | ||
1152 | if (allref == 0) | ||
1153 | cpuc->percore_used = 0; | ||
1154 | raw_spin_unlock(&pc->lock); | ||
1155 | break; | ||
1156 | } | ||
1157 | } | 1235 | } |
1158 | 1236 | ||
1159 | static int intel_pmu_hw_config(struct perf_event *event) | 1237 | static int intel_pmu_hw_config(struct perf_event *event) |
@@ -1231,20 +1309,36 @@ static __initconst const struct x86_pmu core_pmu = { | |||
1231 | .event_constraints = intel_core_event_constraints, | 1309 | .event_constraints = intel_core_event_constraints, |
1232 | }; | 1310 | }; |
1233 | 1311 | ||
1312 | static struct intel_shared_regs *allocate_shared_regs(int cpu) | ||
1313 | { | ||
1314 | struct intel_shared_regs *regs; | ||
1315 | int i; | ||
1316 | |||
1317 | regs = kzalloc_node(sizeof(struct intel_shared_regs), | ||
1318 | GFP_KERNEL, cpu_to_node(cpu)); | ||
1319 | if (regs) { | ||
1320 | /* | ||
1321 | * initialize the locks to keep lockdep happy | ||
1322 | */ | ||
1323 | for (i = 0; i < EXTRA_REG_MAX; i++) | ||
1324 | raw_spin_lock_init(®s->regs[i].lock); | ||
1325 | |||
1326 | regs->core_id = -1; | ||
1327 | } | ||
1328 | return regs; | ||
1329 | } | ||
1330 | |||
1234 | static int intel_pmu_cpu_prepare(int cpu) | 1331 | static int intel_pmu_cpu_prepare(int cpu) |
1235 | { | 1332 | { |
1236 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 1333 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
1237 | 1334 | ||
1238 | if (!cpu_has_ht_siblings()) | 1335 | if (!x86_pmu.extra_regs) |
1239 | return NOTIFY_OK; | 1336 | return NOTIFY_OK; |
1240 | 1337 | ||
1241 | cpuc->per_core = kzalloc_node(sizeof(struct intel_percore), | 1338 | cpuc->shared_regs = allocate_shared_regs(cpu); |
1242 | GFP_KERNEL, cpu_to_node(cpu)); | 1339 | if (!cpuc->shared_regs) |
1243 | if (!cpuc->per_core) | ||
1244 | return NOTIFY_BAD; | 1340 | return NOTIFY_BAD; |
1245 | 1341 | ||
1246 | raw_spin_lock_init(&cpuc->per_core->lock); | ||
1247 | cpuc->per_core->core_id = -1; | ||
1248 | return NOTIFY_OK; | 1342 | return NOTIFY_OK; |
1249 | } | 1343 | } |
1250 | 1344 | ||
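allocate_shared_regs() uses kzalloc_node() so each CPU's shared-register state is placed on that CPU's own NUMA node instead of wherever the hotplug notifier happens to run. The pattern in isolation (struct thing is a placeholder):

#include <linux/slab.h>
#include <linux/topology.h>

struct thing { int val; };      /* placeholder payload */

static struct thing *alloc_thing_on(int cpu)
{
        /* zeroed allocation from the memory node backing @cpu */
        return kzalloc_node(sizeof(struct thing), GFP_KERNEL,
                            cpu_to_node(cpu));
}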
@@ -1260,32 +1354,34 @@ static void intel_pmu_cpu_starting(int cpu) | |||
1260 | */ | 1354 | */ |
1261 | intel_pmu_lbr_reset(); | 1355 | intel_pmu_lbr_reset(); |
1262 | 1356 | ||
1263 | if (!cpu_has_ht_siblings()) | 1357 | if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING)) |
1264 | return; | 1358 | return; |
1265 | 1359 | ||
1266 | for_each_cpu(i, topology_thread_cpumask(cpu)) { | 1360 | for_each_cpu(i, topology_thread_cpumask(cpu)) { |
1267 | struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core; | 1361 | struct intel_shared_regs *pc; |
1268 | 1362 | ||
1363 | pc = per_cpu(cpu_hw_events, i).shared_regs; | ||
1269 | if (pc && pc->core_id == core_id) { | 1364 | if (pc && pc->core_id == core_id) { |
1270 | kfree(cpuc->per_core); | 1365 | kfree(cpuc->shared_regs); |
1271 | cpuc->per_core = pc; | 1366 | cpuc->shared_regs = pc; |
1272 | break; | 1367 | break; |
1273 | } | 1368 | } |
1274 | } | 1369 | } |
1275 | 1370 | ||
1276 | cpuc->per_core->core_id = core_id; | 1371 | cpuc->shared_regs->core_id = core_id; |
1277 | cpuc->per_core->refcnt++; | 1372 | cpuc->shared_regs->refcnt++; |
1278 | } | 1373 | } |
1279 | 1374 | ||
1280 | static void intel_pmu_cpu_dying(int cpu) | 1375 | static void intel_pmu_cpu_dying(int cpu) |
1281 | { | 1376 | { |
1282 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 1377 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
1283 | struct intel_percore *pc = cpuc->per_core; | 1378 | struct intel_shared_regs *pc; |
1284 | 1379 | ||
1380 | pc = cpuc->shared_regs; | ||
1285 | if (pc) { | 1381 | if (pc) { |
1286 | if (pc->core_id == -1 || --pc->refcnt == 0) | 1382 | if (pc->core_id == -1 || --pc->refcnt == 0) |
1287 | kfree(pc); | 1383 | kfree(pc); |
1288 | cpuc->per_core = NULL; | 1384 | cpuc->shared_regs = NULL; |
1289 | } | 1385 | } |
1290 | 1386 | ||
1291 | fini_debug_store_on_cpu(cpu); | 1387 | fini_debug_store_on_cpu(cpu); |
@@ -1436,7 +1532,6 @@ static __init int intel_pmu_init(void) | |||
1436 | 1532 | ||
1437 | x86_pmu.event_constraints = intel_nehalem_event_constraints; | 1533 | x86_pmu.event_constraints = intel_nehalem_event_constraints; |
1438 | x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; | 1534 | x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; |
1439 | x86_pmu.percore_constraints = intel_nehalem_percore_constraints; | ||
1440 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; | 1535 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; |
1441 | x86_pmu.extra_regs = intel_nehalem_extra_regs; | 1536 | x86_pmu.extra_regs = intel_nehalem_extra_regs; |
1442 | 1537 | ||
@@ -1481,10 +1576,10 @@ static __init int intel_pmu_init(void) | |||
1481 | intel_pmu_lbr_init_nhm(); | 1576 | intel_pmu_lbr_init_nhm(); |
1482 | 1577 | ||
1483 | x86_pmu.event_constraints = intel_westmere_event_constraints; | 1578 | x86_pmu.event_constraints = intel_westmere_event_constraints; |
1484 | x86_pmu.percore_constraints = intel_westmere_percore_constraints; | ||
1485 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; | 1579 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; |
1486 | x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; | 1580 | x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; |
1487 | x86_pmu.extra_regs = intel_westmere_extra_regs; | 1581 | x86_pmu.extra_regs = intel_westmere_extra_regs; |
1582 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | ||
1488 | 1583 | ||
1489 | /* UOPS_ISSUED.STALLED_CYCLES */ | 1584 | /* UOPS_ISSUED.STALLED_CYCLES */ |
1490 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | 1585 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; |
@@ -1502,6 +1597,10 @@ static __init int intel_pmu_init(void) | |||
1502 | 1597 | ||
1503 | x86_pmu.event_constraints = intel_snb_event_constraints; | 1598 | x86_pmu.event_constraints = intel_snb_event_constraints; |
1504 | x86_pmu.pebs_constraints = intel_snb_pebs_events; | 1599 | x86_pmu.pebs_constraints = intel_snb_pebs_events; |
1600 | x86_pmu.extra_regs = intel_snb_extra_regs; | ||
1601 | /* all extra regs are per-cpu when HT is on */ | ||
1602 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | ||
1603 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; | ||
1505 | 1604 | ||
1506 | /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ | 1605 | /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ |
1507 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | 1606 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; |
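The er_flags word composes as an ordinary bitmask: Westmere sets only ERF_HAS_RSP_1, while Sandy Bridge also sets ERF_NO_HT_SHARING because its OFFCORE_RSP MSRs are per thread. Testing the bits is plain bit arithmetic (a trivial sketch; these helpers do not exist in the patch):

static inline bool er_has_rsp_1(void)
{
        return !!(x86_pmu.er_flags & ERF_HAS_RSP_1);
}

static inline bool er_no_ht_sharing(void)
{
        return !!(x86_pmu.er_flags & ERF_NO_HT_SHARING);
}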
@@ -1512,11 +1611,19 @@ static __init int intel_pmu_init(void) | |||
1512 | break; | 1611 | break; |
1513 | 1612 | ||
1514 | default: | 1613 | default: |
1515 | /* | 1614 | switch (x86_pmu.version) { |
1516 | * default constraints for v2 and up | 1615 | case 1: |
1517 | */ | 1616 | x86_pmu.event_constraints = intel_v1_event_constraints; |
1518 | x86_pmu.event_constraints = intel_gen_event_constraints; | 1617 | pr_cont("generic architected perfmon v1, "); |
1519 | pr_cont("generic architected perfmon, "); | 1618 | break; |
1619 | default: | ||
1620 | /* | ||
1621 | * default constraints for v2 and up | ||
1622 | */ | ||
1623 | x86_pmu.event_constraints = intel_gen_event_constraints; | ||
1624 | pr_cont("generic architected perfmon, "); | ||
1625 | break; | ||
1626 | } | ||
1520 | } | 1627 | } |
1521 | return 0; | 1628 | return 0; |
1522 | } | 1629 | } |
@@ -1528,4 +1635,8 @@ static int intel_pmu_init(void) | |||
1528 | return 0; | 1635 | return 0; |
1529 | } | 1636 | } |
1530 | 1637 | ||
1638 | static struct intel_shared_regs *allocate_shared_regs(int cpu) | ||
1639 | { | ||
1640 | return NULL; | ||
1641 | } | ||
1531 | #endif /* CONFIG_CPU_SUP_INTEL */ | 1642 | #endif /* CONFIG_CPU_SUP_INTEL */ |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index bab491b8ee2..1b1ef3addcf 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -340,7 +340,7 @@ static int intel_pmu_drain_bts_buffer(void) | |||
340 | */ | 340 | */ |
341 | perf_prepare_sample(&header, &data, event, ®s); | 341 | perf_prepare_sample(&header, &data, event, ®s); |
342 | 342 | ||
343 | if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) | 343 | if (perf_output_begin(&handle, event, header.size * (top - at))) |
344 | return 1; | 344 | return 1; |
345 | 345 | ||
346 | for (; at < top; at++) { | 346 | for (; at < top; at++) { |
@@ -616,7 +616,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
616 | else | 616 | else |
617 | regs.flags &= ~PERF_EFLAGS_EXACT; | 617 | regs.flags &= ~PERF_EFLAGS_EXACT; |
618 | 618 | ||
619 | if (perf_event_overflow(event, 1, &data, ®s)) | 619 | if (perf_event_overflow(event, &data, ®s)) |
620 | x86_pmu_stop(event, 0); | 620 | x86_pmu_stop(event, 0); |
621 | } | 621 | } |
622 | 622 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index ead584fb6a7..fb901c5080f 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -554,6 +554,20 @@ static __initconst const u64 p4_hw_cache_event_ids | |||
554 | [ C(RESULT_MISS) ] = -1, | 554 | [ C(RESULT_MISS) ] = -1, |
555 | }, | 555 | }, |
556 | }, | 556 | }, |
557 | [ C(NODE) ] = { | ||
558 | [ C(OP_READ) ] = { | ||
559 | [ C(RESULT_ACCESS) ] = -1, | ||
560 | [ C(RESULT_MISS) ] = -1, | ||
561 | }, | ||
562 | [ C(OP_WRITE) ] = { | ||
563 | [ C(RESULT_ACCESS) ] = -1, | ||
564 | [ C(RESULT_MISS) ] = -1, | ||
565 | }, | ||
566 | [ C(OP_PREFETCH) ] = { | ||
567 | [ C(RESULT_ACCESS) ] = -1, | ||
568 | [ C(RESULT_MISS) ] = -1, | ||
569 | }, | ||
570 | }, | ||
557 | }; | 571 | }; |
558 | 572 | ||
559 | static u64 p4_general_events[PERF_COUNT_HW_MAX] = { | 573 | static u64 p4_general_events[PERF_COUNT_HW_MAX] = { |
@@ -705,6 +719,31 @@ static int p4_validate_raw_event(struct perf_event *event) | |||
705 | return 0; | 719 | return 0; |
706 | } | 720 | } |
707 | 721 | ||
722 | static void p4_hw_watchdog_set_attr(struct perf_event_attr *wd_attr) | ||
723 | { | ||
724 | /* | ||
725 | * Watchdog ticks are special on Netburst; we use | ||
726 | * the so-called "non-sleeping" ticks, as recommended | ||
727 | * by Intel SDM Vol3b. | ||
728 | */ | ||
729 | WARN_ON_ONCE(wd_attr->type != PERF_TYPE_HARDWARE || | ||
730 | wd_attr->config != PERF_COUNT_HW_CPU_CYCLES); | ||
731 | |||
732 | wd_attr->type = PERF_TYPE_RAW; | ||
733 | wd_attr->config = | ||
734 | p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT) | | ||
735 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) | | ||
736 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) | | ||
737 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) | | ||
738 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) | | ||
739 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) | | ||
740 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) | | ||
741 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) | | ||
742 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3)) | | ||
743 | p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT | | ||
744 | P4_CCCR_COMPARE); | ||
745 | } | ||
746 | |||
708 | static int p4_hw_config(struct perf_event *event) | 747 | static int p4_hw_config(struct perf_event *event) |
709 | { | 748 | { |
710 | int cpu = get_cpu(); | 749 | int cpu = get_cpu(); |
@@ -945,7 +984,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) | |||
945 | 984 | ||
946 | if (!x86_perf_event_set_period(event)) | 985 | if (!x86_perf_event_set_period(event)) |
947 | continue; | 986 | continue; |
948 | if (perf_event_overflow(event, 1, &data, regs)) | 987 | if (perf_event_overflow(event, &data, regs)) |
949 | x86_pmu_stop(event, 0); | 988 | x86_pmu_stop(event, 0); |
950 | } | 989 | } |
951 | 990 | ||
@@ -1179,6 +1218,7 @@ static __initconst const struct x86_pmu p4_pmu = { | |||
1179 | .cntval_bits = ARCH_P4_CNTRVAL_BITS, | 1218 | .cntval_bits = ARCH_P4_CNTRVAL_BITS, |
1180 | .cntval_mask = ARCH_P4_CNTRVAL_MASK, | 1219 | .cntval_mask = ARCH_P4_CNTRVAL_MASK, |
1181 | .max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1, | 1220 | .max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1, |
1221 | .hw_watchdog_set_attr = p4_hw_watchdog_set_attr, | ||
1182 | .hw_config = p4_hw_config, | 1222 | .hw_config = p4_hw_config, |
1183 | .schedule_events = p4_pmu_schedule_events, | 1223 | .schedule_events = p4_pmu_schedule_events, |
1184 | /* | 1224 | /* |
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index e71c98d3c0d..19853ad8afc 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c | |||
@@ -105,34 +105,6 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack, | |||
105 | } | 105 | } |
106 | 106 | ||
107 | /* | 107 | /* |
108 | * We are returning from the irq stack and go to the previous one. | ||
109 | * If the previous stack is also in the irq stack, then bp in the first | ||
110 | * frame of the irq stack points to the previous, interrupted one. | ||
111 | * Otherwise we have another level of indirection: We first save | ||
112 | * the bp of the previous stack, then we switch the stack to the irq one | ||
113 | * and save a new bp that links to the previous one. | ||
114 | * (See save_args()) | ||
115 | */ | ||
116 | static inline unsigned long | ||
117 | fixup_bp_irq_link(unsigned long bp, unsigned long *stack, | ||
118 | unsigned long *irq_stack, unsigned long *irq_stack_end) | ||
119 | { | ||
120 | #ifdef CONFIG_FRAME_POINTER | ||
121 | struct stack_frame *frame = (struct stack_frame *)bp; | ||
122 | unsigned long next; | ||
123 | |||
124 | if (!in_irq_stack(stack, irq_stack, irq_stack_end)) { | ||
125 | if (!probe_kernel_address(&frame->next_frame, next)) | ||
126 | return next; | ||
127 | else | ||
128 | WARN_ONCE(1, "Perf: bad frame pointer = %p in " | ||
129 | "callchain\n", &frame->next_frame); | ||
130 | } | ||
131 | #endif | ||
132 | return bp; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * x86-64 can have up to three kernel stacks: | 108 | * x86-64 can have up to three kernel stacks: |
137 | * process stack | 109 | * process stack |
138 | * interrupt stack | 110 | * interrupt stack |
@@ -155,9 +127,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
155 | task = current; | 127 | task = current; |
156 | 128 | ||
157 | if (!stack) { | 129 | if (!stack) { |
158 | stack = &dummy; | 130 | if (regs) |
159 | if (task && task != current) | 131 | stack = (unsigned long *)regs->sp; |
132 | else if (task && task != current) | ||
160 | stack = (unsigned long *)task->thread.sp; | 133 | stack = (unsigned long *)task->thread.sp; |
134 | else | ||
135 | stack = &dummy; | ||
161 | } | 136 | } |
162 | 137 | ||
163 | if (!bp) | 138 | if (!bp) |
@@ -205,8 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
205 | * pointer (index -1 to end) in the IRQ stack: | 180 | * pointer (index -1 to end) in the IRQ stack: |
206 | */ | 181 | */ |
207 | stack = (unsigned long *) (irq_stack_end[-1]); | 182 | stack = (unsigned long *) (irq_stack_end[-1]); |
208 | bp = fixup_bp_irq_link(bp, stack, irq_stack, | ||
209 | irq_stack_end); | ||
210 | irq_stack_end = NULL; | 183 | irq_stack_end = NULL; |
211 | ops->stack(data, "EOI"); | 184 | ops->stack(data, "EOI"); |
212 | continue; | 185 | continue; |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 8a445a0c989..d656f68371a 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -297,27 +297,26 @@ ENDPROC(native_usergs_sysret64) | |||
297 | .endm | 297 | .endm |
298 | 298 | ||
299 | /* save partial stack frame */ | 299 | /* save partial stack frame */ |
300 | .pushsection .kprobes.text, "ax" | 300 | .macro SAVE_ARGS_IRQ |
301 | ENTRY(save_args) | ||
302 | XCPT_FRAME | ||
303 | cld | 301 | cld |
304 | /* | 302 | /* start from rbp in pt_regs and jump over */ |
305 | * start from rbp in pt_regs and jump over | 303 | movq_cfi rdi, RDI-RBP |
306 | * return address. | 304 | movq_cfi rsi, RSI-RBP |
307 | */ | 305 | movq_cfi rdx, RDX-RBP |
308 | movq_cfi rdi, RDI+8-RBP | 306 | movq_cfi rcx, RCX-RBP |
309 | movq_cfi rsi, RSI+8-RBP | 307 | movq_cfi rax, RAX-RBP |
310 | movq_cfi rdx, RDX+8-RBP | 308 | movq_cfi r8, R8-RBP |
311 | movq_cfi rcx, RCX+8-RBP | 309 | movq_cfi r9, R9-RBP |
312 | movq_cfi rax, RAX+8-RBP | 310 | movq_cfi r10, R10-RBP |
313 | movq_cfi r8, R8+8-RBP | 311 | movq_cfi r11, R11-RBP |
314 | movq_cfi r9, R9+8-RBP | 312 | |
315 | movq_cfi r10, R10+8-RBP | 313 | /* Save rbp so that we can unwind from get_irq_regs() */ |
316 | movq_cfi r11, R11+8-RBP | 314 | movq_cfi rbp, 0 |
317 | 315 | ||
318 | leaq -RBP+8(%rsp),%rdi /* arg1 for handler */ | 316 | /* Save previous stack value */ |
319 | movq_cfi rbp, 8 /* push %rbp */ | 317 | movq %rsp, %rsi |
320 | leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ | 318 | |
319 | leaq -RBP(%rsp),%rdi /* arg1 for handler */ | ||
321 | testl $3, CS(%rdi) | 320 | testl $3, CS(%rdi) |
322 | je 1f | 321 | je 1f |
323 | SWAPGS | 322 | SWAPGS |
@@ -329,19 +328,14 @@ ENTRY(save_args) | |||
329 | */ | 328 | */ |
330 | 1: incl PER_CPU_VAR(irq_count) | 329 | 1: incl PER_CPU_VAR(irq_count) |
331 | jne 2f | 330 | jne 2f |
332 | popq_cfi %rax /* move return address... */ | ||
333 | mov PER_CPU_VAR(irq_stack_ptr),%rsp | 331 | mov PER_CPU_VAR(irq_stack_ptr),%rsp |
334 | EMPTY_FRAME 0 | 332 | EMPTY_FRAME 0 |
335 | pushq_cfi %rbp /* backlink for unwinder */ | 333 | |
336 | pushq_cfi %rax /* ... to the new stack */ | 334 | 2: /* Store previous stack value */ |
337 | /* | 335 | pushq %rsi |
338 | * We entered an interrupt context - irqs are off: | 336 | /* We entered an interrupt context - irqs are off: */ |
339 | */ | 337 | TRACE_IRQS_OFF |
340 | 2: TRACE_IRQS_OFF | 338 | .endm |
341 | ret | ||
342 | CFI_ENDPROC | ||
343 | END(save_args) | ||
344 | .popsection | ||
345 | 339 | ||
346 | ENTRY(save_rest) | 340 | ENTRY(save_rest) |
347 | PARTIAL_FRAME 1 REST_SKIP+8 | 341 | PARTIAL_FRAME 1 REST_SKIP+8 |
@@ -791,7 +785,7 @@ END(interrupt) | |||
791 | /* reserve pt_regs for scratch regs and rbp */ | 785 | /* reserve pt_regs for scratch regs and rbp */ |
792 | subq $ORIG_RAX-RBP, %rsp | 786 | subq $ORIG_RAX-RBP, %rsp |
793 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP | 787 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP |
794 | call save_args | 788 | SAVE_ARGS_IRQ |
795 | PARTIAL_FRAME 0 | 789 | PARTIAL_FRAME 0 |
796 | call \func | 790 | call \func |
797 | .endm | 791 | .endm |
@@ -814,15 +808,14 @@ ret_from_intr: | |||
814 | DISABLE_INTERRUPTS(CLBR_NONE) | 808 | DISABLE_INTERRUPTS(CLBR_NONE) |
815 | TRACE_IRQS_OFF | 809 | TRACE_IRQS_OFF |
816 | decl PER_CPU_VAR(irq_count) | 810 | decl PER_CPU_VAR(irq_count) |
817 | leaveq | ||
818 | 811 | ||
819 | CFI_RESTORE rbp | 812 | /* Restore saved previous stack */ |
813 | popq %rsi | ||
814 | leaq 16(%rsi), %rsp | ||
815 | |||
820 | CFI_DEF_CFA_REGISTER rsp | 816 | CFI_DEF_CFA_REGISTER rsp |
821 | CFI_ADJUST_CFA_OFFSET -8 | 817 | CFI_ADJUST_CFA_OFFSET -16 |
822 | 818 | ||
823 | /* we did not save rbx, restore only from ARGOFFSET */ | ||
824 | addq $8, %rsp | ||
825 | CFI_ADJUST_CFA_OFFSET -8 | ||
826 | exit_intr: | 819 | exit_intr: |
827 | GET_THREAD_INFO(%rcx) | 820 | GET_THREAD_INFO(%rcx) |
828 | testl $3,CS-ARGOFFSET(%rsp) | 821 | testl $3,CS-ARGOFFSET(%rsp) |
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 5f9ecff328b..00354d4919a 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -608,7 +608,7 @@ int kgdb_arch_init(void) | |||
608 | return register_die_notifier(&kgdb_notifier); | 608 | return register_die_notifier(&kgdb_notifier); |
609 | } | 609 | } |
610 | 610 | ||
611 | static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi, | 611 | static void kgdb_hw_overflow_handler(struct perf_event *event, |
612 | struct perf_sample_data *data, struct pt_regs *regs) | 612 | struct perf_sample_data *data, struct pt_regs *regs) |
613 | { | 613 | { |
614 | struct task_struct *tsk = current; | 614 | struct task_struct *tsk = current; |
@@ -638,7 +638,7 @@ void kgdb_arch_late(void) | |||
638 | for (i = 0; i < HBP_NUM; i++) { | 638 | for (i = 0; i < HBP_NUM; i++) { |
639 | if (breakinfo[i].pev) | 639 | if (breakinfo[i].pev) |
640 | continue; | 640 | continue; |
641 | breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL); | 641 | breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); |
642 | if (IS_ERR((void * __force)breakinfo[i].pev)) { | 642 | if (IS_ERR((void * __force)breakinfo[i].pev)) { |
643 | printk(KERN_ERR "kgdb: Could not allocate hw" | 643 | printk(KERN_ERR "kgdb: Could not allocate hw" |
644 | "breakpoints\nDisabling the kernel debugger\n"); | 644 | "breakpoints\nDisabling the kernel debugger\n"); |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 807c2a2b80f..82528799c5d 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -528,7 +528,7 @@ static int genregs_set(struct task_struct *target, | |||
528 | return ret; | 528 | return ret; |
529 | } | 529 | } |
530 | 530 | ||
531 | static void ptrace_triggered(struct perf_event *bp, int nmi, | 531 | static void ptrace_triggered(struct perf_event *bp, |
532 | struct perf_sample_data *data, | 532 | struct perf_sample_data *data, |
533 | struct pt_regs *regs) | 533 | struct pt_regs *regs) |
534 | { | 534 | { |
@@ -715,7 +715,8 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, | |||
715 | attr.bp_type = HW_BREAKPOINT_W; | 715 | attr.bp_type = HW_BREAKPOINT_W; |
716 | attr.disabled = 1; | 716 | attr.disabled = 1; |
717 | 717 | ||
718 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); | 718 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, |
719 | NULL, tsk); | ||
719 | 720 | ||
720 | /* | 721 | /* |
721 | * CHECKME: the previous code returned -EIO if the addr wasn't | 722 | * CHECKME: the previous code returned -EIO if the addr wasn't |
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 55d9bc03f69..fdd0c6430e5 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c | |||
@@ -66,7 +66,7 @@ void save_stack_trace(struct stack_trace *trace) | |||
66 | } | 66 | } |
67 | EXPORT_SYMBOL_GPL(save_stack_trace); | 67 | EXPORT_SYMBOL_GPL(save_stack_trace); |
68 | 68 | ||
69 | void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs) | 69 | void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) |
70 | { | 70 | { |
71 | dump_trace(current, regs, NULL, 0, &save_stack_ops, trace); | 71 | dump_trace(current, regs, NULL, 0, &save_stack_ops, trace); |
72 | if (trace->nr_entries < trace->max_entries) | 72 | if (trace->nr_entries < trace->max_entries) |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2dbf6bf4c7e..4d09df054e3 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -1059,7 +1059,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
1059 | if (unlikely(error_code & PF_RSVD)) | 1059 | if (unlikely(error_code & PF_RSVD)) |
1060 | pgtable_bad(regs, error_code, address); | 1060 | pgtable_bad(regs, error_code, address); |
1061 | 1061 | ||
1062 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 1062 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
1063 | 1063 | ||
1064 | /* | 1064 | /* |
1065 | * If we're in an interrupt, have no user context or are running | 1065 | * If we're in an interrupt, have no user context or are running |
@@ -1161,11 +1161,11 @@ good_area: | |||
1161 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | 1161 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
1162 | if (fault & VM_FAULT_MAJOR) { | 1162 | if (fault & VM_FAULT_MAJOR) { |
1163 | tsk->maj_flt++; | 1163 | tsk->maj_flt++; |
1164 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 1164 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
1165 | regs, address); | 1165 | regs, address); |
1166 | } else { | 1166 | } else { |
1167 | tsk->min_flt++; | 1167 | tsk->min_flt++; |
1168 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 1168 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, |
1169 | regs, address); | 1169 | regs, address); |
1170 | } | 1170 | } |
1171 | if (fault & VM_FAULT_RETRY) { | 1171 | if (fault & VM_FAULT_RETRY) { |
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c index 704a37ceddd..dab41876cdd 100644 --- a/arch/x86/mm/kmemcheck/error.c +++ b/arch/x86/mm/kmemcheck/error.c | |||
@@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state, | |||
185 | e->trace.entries = e->trace_entries; | 185 | e->trace.entries = e->trace_entries; |
186 | e->trace.max_entries = ARRAY_SIZE(e->trace_entries); | 186 | e->trace.max_entries = ARRAY_SIZE(e->trace_entries); |
187 | e->trace.skip = 0; | 187 | e->trace.skip = 0; |
188 | save_stack_trace_regs(&e->trace, regs); | 188 | save_stack_trace_regs(regs, &e->trace); |
189 | 189 | ||
190 | /* Round address down to nearest 16 bytes */ | 190 | /* Round address down to nearest 16 bytes */ |
191 | shadow_copy = kmemcheck_shadow_lookup(address | 191 | shadow_copy = kmemcheck_shadow_lookup(address |
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c index a5b64ab4cd6..32f78eb4674 100644 --- a/arch/x86/oprofile/backtrace.c +++ b/arch/x86/oprofile/backtrace.c | |||
@@ -11,10 +11,12 @@ | |||
11 | #include <linux/oprofile.h> | 11 | #include <linux/oprofile.h> |
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <linux/compat.h> | ||
15 | #include <linux/highmem.h> | ||
16 | |||
14 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
15 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
16 | #include <asm/stacktrace.h> | 19 | #include <asm/stacktrace.h> |
17 | #include <linux/compat.h> | ||
18 | 20 | ||
19 | static int backtrace_stack(void *data, char *name) | 21 | static int backtrace_stack(void *data, char *name) |
20 | { | 22 | { |
@@ -36,17 +38,53 @@ static struct stacktrace_ops backtrace_ops = { | |||
36 | .walk_stack = print_context_stack, | 38 | .walk_stack = print_context_stack, |
37 | }; | 39 | }; |
38 | 40 | ||
41 | /* from arch/x86/kernel/cpu/perf_event.c: */ | ||
42 | |||
43 | /* | ||
44 | * best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context | ||
45 | */ | ||
46 | static unsigned long | ||
47 | copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | ||
48 | { | ||
49 | unsigned long offset, addr = (unsigned long)from; | ||
50 | unsigned long size, len = 0; | ||
51 | struct page *page; | ||
52 | void *map; | ||
53 | int ret; | ||
54 | |||
55 | do { | ||
56 | ret = __get_user_pages_fast(addr, 1, 0, &page); | ||
57 | if (!ret) | ||
58 | break; | ||
59 | |||
60 | offset = addr & (PAGE_SIZE - 1); | ||
61 | size = min(PAGE_SIZE - offset, n - len); | ||
62 | |||
63 | map = kmap_atomic(page); | ||
64 | memcpy(to, map+offset, size); | ||
65 | kunmap_atomic(map); | ||
66 | put_page(page); | ||
67 | |||
68 | len += size; | ||
69 | to += size; | ||
70 | addr += size; | ||
71 | |||
72 | } while (len < n); | ||
73 | |||
74 | return len; | ||
75 | } | ||
76 | |||
39 | #ifdef CONFIG_COMPAT | 77 | #ifdef CONFIG_COMPAT |
40 | static struct stack_frame_ia32 * | 78 | static struct stack_frame_ia32 * |
41 | dump_user_backtrace_32(struct stack_frame_ia32 *head) | 79 | dump_user_backtrace_32(struct stack_frame_ia32 *head) |
42 | { | 80 | { |
81 | /* Also check accessibility of one struct stack_frame_ia32 beyond: */ | ||
43 | struct stack_frame_ia32 bufhead[2]; | 82 | struct stack_frame_ia32 bufhead[2]; |
44 | struct stack_frame_ia32 *fp; | 83 | struct stack_frame_ia32 *fp; |
84 | unsigned long bytes; | ||
45 | 85 | ||
46 | /* Also check accessibility of one struct frame_head beyond */ | 86 | bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); |
47 | if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) | 87 | if (bytes != sizeof(bufhead)) |
48 | return NULL; | ||
49 | if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) | ||
50 | return NULL; | 88 | return NULL; |
51 | 89 | ||
52 | fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); | 90 | fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); |
@@ -87,12 +125,12 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth) | |||
87 | 125 | ||
88 | static struct stack_frame *dump_user_backtrace(struct stack_frame *head) | 126 | static struct stack_frame *dump_user_backtrace(struct stack_frame *head) |
89 | { | 127 | { |
128 | /* Also check accessibility of one struct stack_frame beyond: */ | ||
90 | struct stack_frame bufhead[2]; | 129 | struct stack_frame bufhead[2]; |
130 | unsigned long bytes; | ||
91 | 131 | ||
92 | /* Also check accessibility of one struct stack_frame beyond */ | 132 | bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); |
93 | if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) | 133 | if (bytes != sizeof(bufhead)) |
94 | return NULL; | ||
95 | if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) | ||
96 | return NULL; | 134 | return NULL; |
97 | 135 | ||
98 | oprofile_add_trace(bufhead[0].return_address); | 136 | oprofile_add_trace(bufhead[0].return_address); |
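Switching to copy_from_user_nmi() lets the backtrace code drop the access_ok()/__copy_from_user_inatomic() pair: the GUP-based copy is safe in NMI context and a single length check replaces both tests. A condensed sketch of the resulting walk; the strictly-increasing frame-pointer guard is an extra assumption added here for illustration, since the real callers bound the walk by depth:

static void user_backtrace_sketch(struct stack_frame *head, unsigned int depth)
{
        struct stack_frame bufhead[2];

        while (depth-- && head) {
                if (copy_from_user_nmi(bufhead, head, sizeof(bufhead)) !=
                    sizeof(bufhead))
                        break;                  /* unreadable frame */

                oprofile_add_trace(bufhead[0].return_address);

                /* frame pointers must move up the stack, or we might loop */
                if (bufhead[0].next_frame <= head)
                        break;
                head = bufhead[0].next_frame;
        }
}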
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 0972315c386..68c3c139520 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point) | |||
188 | return false; | 188 | return false; |
189 | } | 189 | } |
190 | 190 | ||
191 | static void coalesce_windows(struct pci_root_info *info, int type) | 191 | static void coalesce_windows(struct pci_root_info *info, unsigned long type) |
192 | { | 192 | { |
193 | int i, j; | 193 | int i, j; |
194 | struct resource *res1, *res2; | 194 | struct resource *res1, *res2; |
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a62be8d0dc1..3689f833afd 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -927,7 +927,7 @@ static int throtl_dispatch(struct request_queue *q) | |||
927 | 927 | ||
928 | bio_list_init(&bio_list_on_stack); | 928 | bio_list_init(&bio_list_on_stack); |
929 | 929 | ||
930 | throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u", | 930 | throtl_log(td, "dispatch nr_queued=%d read=%u write=%u", |
931 | total_nr_queued(td), td->nr_queued[READ], | 931 | total_nr_queued(td), td->nr_queued[READ], |
932 | td->nr_queued[WRITE]); | 932 | td->nr_queued[WRITE]); |
933 | 933 | ||
@@ -1204,7 +1204,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) | |||
1204 | } | 1204 | } |
1205 | 1205 | ||
1206 | queue_bio: | 1206 | queue_bio: |
1207 | throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu" | 1207 | throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu" |
1208 | " iodisp=%u iops=%u queued=%d/%d", | 1208 | " iodisp=%u iops=%u queued=%d/%d", |
1209 | rw == READ ? 'R' : 'W', | 1209 | rw == READ ? 'R' : 'W', |
1210 | tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], | 1210 | tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 3c7b537bf90..f3799432676 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -988,9 +988,10 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, | |||
988 | 988 | ||
989 | cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, | 989 | cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, |
990 | st->min_vdisktime); | 990 | st->min_vdisktime); |
991 | cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u" | 991 | cfq_log_cfqq(cfqq->cfqd, cfqq, |
992 | " sect=%u", used_sl, cfqq->slice_dispatch, charge, | 992 | "sl_used=%u disp=%u charge=%u iops=%u sect=%lu", |
993 | iops_mode(cfqd), cfqq->nr_sectors); | 993 | used_sl, cfqq->slice_dispatch, charge, |
994 | iops_mode(cfqd), cfqq->nr_sectors); | ||
994 | cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl, | 995 | cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl, |
995 | unaccounted_sl); | 996 | unaccounted_sl); |
996 | cfq_blkiocg_set_start_empty_time(&cfqg->blkg); | 997 | cfq_blkiocg_set_start_empty_time(&cfqg->blkg); |
@@ -2023,8 +2024,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
2023 | */ | 2024 | */ |
2024 | if (sample_valid(cic->ttime_samples) && | 2025 | if (sample_valid(cic->ttime_samples) && |
2025 | (cfqq->slice_end - jiffies < cic->ttime_mean)) { | 2026 | (cfqq->slice_end - jiffies < cic->ttime_mean)) { |
2026 | cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d", | 2027 | cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu", |
2027 | cic->ttime_mean); | 2028 | cic->ttime_mean); |
2028 | return; | 2029 | return; |
2029 | } | 2030 | } |
2030 | 2031 | ||
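The blk-throttle and cfq hunks above are pure format-string fixes: bytes_disp is u64, total_nr_queued() evidently returns int, and nr_sectors and ttime_mean are unsigned long, so each specifier has to follow its type. The rule in miniature, as a runnable user-space example with made-up values:

#include <stdio.h>

int main(void)
{
        unsigned long long bytes_disp = 123456789ULL;   /* u64   -> %llu */
        int nr_queued = 7;                              /* int   -> %d   */
        unsigned long nr_sectors = 4096;                /* ulong -> %lu  */

        printf("bdisp=%llu nr_queued=%d sect=%lu\n",
               bytes_disp, nr_queued, nr_sectors);
        return 0;
}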
@@ -2772,8 +2773,11 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd, | |||
2772 | smp_wmb(); | 2773 | smp_wmb(); |
2773 | cic->key = cfqd_dead_key(cfqd); | 2774 | cic->key = cfqd_dead_key(cfqd); |
2774 | 2775 | ||
2775 | if (ioc->ioc_data == cic) | 2776 | if (rcu_dereference(ioc->ioc_data) == cic) { |
2777 | spin_lock(&ioc->lock); | ||
2776 | rcu_assign_pointer(ioc->ioc_data, NULL); | 2778 | rcu_assign_pointer(ioc->ioc_data, NULL); |
2779 | spin_unlock(&ioc->lock); | ||
2780 | } | ||
2777 | 2781 | ||
2778 | if (cic->cfqq[BLK_RW_ASYNC]) { | 2782 | if (cic->cfqq[BLK_RW_ASYNC]) { |
2779 | cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); | 2783 | cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); |
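The cfq hunk closes a race on ioc->ioc_data: the cached pointer is peeked with rcu_dereference() and, only on a match, cleared under ioc->lock with rcu_assign_pointer(). The same shape with placeholder types (a sketch mirroring the hunk, not the cfq code itself):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct obj { int id; };         /* placeholder */

struct holder {
        spinlock_t      lock;
        struct obj __rcu *cached;
};

static void invalidate_cached(struct holder *h, struct obj *victim)
{
        /* unlocked peek; the lock is taken only on a match */
        if (rcu_dereference(h->cached) == victim) {
                spin_lock(&h->lock);
                rcu_assign_pointer(h->cached, NULL);
                spin_unlock(&h->lock);
        }
}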
diff --git a/block/genhd.c b/block/genhd.c index 95822ae25cf..3608289c8ec 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -1371,6 +1371,7 @@ struct disk_events { | |||
1371 | struct gendisk *disk; /* the associated disk */ | 1371 | struct gendisk *disk; /* the associated disk */ |
1372 | spinlock_t lock; | 1372 | spinlock_t lock; |
1373 | 1373 | ||
1374 | struct mutex block_mutex; /* protects blocking */ | ||
1374 | int block; /* event blocking depth */ | 1375 | int block; /* event blocking depth */ |
1375 | unsigned int pending; /* events already sent out */ | 1376 | unsigned int pending; /* events already sent out */ |
1376 | unsigned int clearing; /* events being cleared */ | 1377 | unsigned int clearing; /* events being cleared */ |
@@ -1414,22 +1415,44 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk) | |||
1414 | return msecs_to_jiffies(intv_msecs); | 1415 | return msecs_to_jiffies(intv_msecs); |
1415 | } | 1416 | } |
1416 | 1417 | ||
1417 | static void __disk_block_events(struct gendisk *disk, bool sync) | 1418 | /** |
1419 | * disk_block_events - block and flush disk event checking | ||
1420 | * @disk: disk to block events for | ||
1421 | * | ||
1422 | * On return from this function, it is guaranteed that event checking | ||
1423 | * isn't in progress and won't happen until unblocked by | ||
1424 | * disk_unblock_events(). Events blocking is counted and the actual | ||
1425 | * unblocking happens after the matching number of unblocks are done. | ||
1426 | * | ||
1427 | * Note that this intentionally does not block event checking from | ||
1428 | * disk_clear_events(). | ||
1429 | * | ||
1430 | * CONTEXT: | ||
1431 | * Might sleep. | ||
1432 | */ | ||
1433 | void disk_block_events(struct gendisk *disk) | ||
1418 | { | 1434 | { |
1419 | struct disk_events *ev = disk->ev; | 1435 | struct disk_events *ev = disk->ev; |
1420 | unsigned long flags; | 1436 | unsigned long flags; |
1421 | bool cancel; | 1437 | bool cancel; |
1422 | 1438 | ||
1439 | if (!ev) | ||
1440 | return; | ||
1441 | |||
1442 | /* | ||
1443 | * Outer mutex ensures that the first blocker completes canceling | ||
1444 | * the event work before further blockers are allowed to finish. | ||
1445 | */ | ||
1446 | mutex_lock(&ev->block_mutex); | ||
1447 | |||
1423 | spin_lock_irqsave(&ev->lock, flags); | 1448 | spin_lock_irqsave(&ev->lock, flags); |
1424 | cancel = !ev->block++; | 1449 | cancel = !ev->block++; |
1425 | spin_unlock_irqrestore(&ev->lock, flags); | 1450 | spin_unlock_irqrestore(&ev->lock, flags); |
1426 | 1451 | ||
1427 | if (cancel) { | 1452 | if (cancel) |
1428 | if (sync) | 1453 | cancel_delayed_work_sync(&disk->ev->dwork); |
1429 | cancel_delayed_work_sync(&disk->ev->dwork); | 1454 | |
1430 | else | 1455 | mutex_unlock(&ev->block_mutex); |
1431 | cancel_delayed_work(&disk->ev->dwork); | ||
1432 | } | ||
1433 | } | 1456 | } |
1434 | 1457 | ||
1435 | static void __disk_unblock_events(struct gendisk *disk, bool check_now) | 1458 | static void __disk_unblock_events(struct gendisk *disk, bool check_now) |
@@ -1461,27 +1484,6 @@ out_unlock: | |||
1461 | } | 1484 | } |
1462 | 1485 | ||
1463 | /** | 1486 | /** |
1464 | * disk_block_events - block and flush disk event checking | ||
1465 | * @disk: disk to block events for | ||
1466 | * | ||
1467 | * On return from this function, it is guaranteed that event checking | ||
1468 | * isn't in progress and won't happen until unblocked by | ||
1469 | * disk_unblock_events(). Events blocking is counted and the actual | ||
1470 | * unblocking happens after the matching number of unblocks are done. | ||
1471 | * | ||
1472 | * Note that this intentionally does not block event checking from | ||
1473 | * disk_clear_events(). | ||
1474 | * | ||
1475 | * CONTEXT: | ||
1476 | * Might sleep. | ||
1477 | */ | ||
1478 | void disk_block_events(struct gendisk *disk) | ||
1479 | { | ||
1480 | if (disk->ev) | ||
1481 | __disk_block_events(disk, true); | ||
1482 | } | ||
1483 | |||
1484 | /** | ||
1485 | * disk_unblock_events - unblock disk event checking | 1487 | * disk_unblock_events - unblock disk event checking |
1486 | * @disk: disk to unblock events for | 1488 | * @disk: disk to unblock events for |
1487 | * | 1489 | * |
@@ -1508,10 +1510,18 @@ void disk_unblock_events(struct gendisk *disk) | |||
1508 | */ | 1510 | */ |
1509 | void disk_check_events(struct gendisk *disk) | 1511 | void disk_check_events(struct gendisk *disk) |
1510 | { | 1512 | { |
1511 | if (disk->ev) { | 1513 | struct disk_events *ev = disk->ev; |
1512 | __disk_block_events(disk, false); | 1514 | unsigned long flags; |
1513 | __disk_unblock_events(disk, true); | 1515 | |
1516 | if (!ev) | ||
1517 | return; | ||
1518 | |||
1519 | spin_lock_irqsave(&ev->lock, flags); | ||
1520 | if (!ev->block) { | ||
1521 | cancel_delayed_work(&ev->dwork); | ||
1522 | queue_delayed_work(system_nrt_wq, &ev->dwork, 0); | ||
1514 | } | 1523 | } |
1524 | spin_unlock_irqrestore(&ev->lock, flags); | ||
1515 | } | 1525 | } |
1516 | EXPORT_SYMBOL_GPL(disk_check_events); | 1526 | EXPORT_SYMBOL_GPL(disk_check_events); |
1517 | 1527 | ||
@@ -1546,7 +1556,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) | |||
1546 | spin_unlock_irq(&ev->lock); | 1556 | spin_unlock_irq(&ev->lock); |
1547 | 1557 | ||
1548 | /* unconditionally schedule event check and wait for it to finish */ | 1558 | /* unconditionally schedule event check and wait for it to finish */ |
1549 | __disk_block_events(disk, true); | 1559 | disk_block_events(disk); |
1550 | queue_delayed_work(system_nrt_wq, &ev->dwork, 0); | 1560 | queue_delayed_work(system_nrt_wq, &ev->dwork, 0); |
1551 | flush_delayed_work(&ev->dwork); | 1561 | flush_delayed_work(&ev->dwork); |
1552 | __disk_unblock_events(disk, false); | 1562 | __disk_unblock_events(disk, false); |
@@ -1664,7 +1674,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev, | |||
1664 | if (intv < 0 && intv != -1) | 1674 | if (intv < 0 && intv != -1) |
1665 | return -EINVAL; | 1675 | return -EINVAL; |
1666 | 1676 | ||
1667 | __disk_block_events(disk, true); | 1677 | disk_block_events(disk); |
1668 | disk->ev->poll_msecs = intv; | 1678 | disk->ev->poll_msecs = intv; |
1669 | __disk_unblock_events(disk, true); | 1679 | __disk_unblock_events(disk, true); |
1670 | 1680 | ||
@@ -1750,6 +1760,7 @@ static void disk_add_events(struct gendisk *disk) | |||
1750 | INIT_LIST_HEAD(&ev->node); | 1760 | INIT_LIST_HEAD(&ev->node); |
1751 | ev->disk = disk; | 1761 | ev->disk = disk; |
1752 | spin_lock_init(&ev->lock); | 1762 | spin_lock_init(&ev->lock); |
1763 | mutex_init(&ev->block_mutex); | ||
1753 | ev->block = 1; | 1764 | ev->block = 1; |
1754 | ev->poll_msecs = -1; | 1765 | ev->poll_msecs = -1; |
1755 | INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn); | 1766 | INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn); |
@@ -1770,7 +1781,7 @@ static void disk_del_events(struct gendisk *disk) | |||
1770 | if (!disk->ev) | 1781 | if (!disk->ev) |
1771 | return; | 1782 | return; |
1772 | 1783 | ||
1773 | __disk_block_events(disk, true); | 1784 | disk_block_events(disk); |
1774 | 1785 | ||
1775 | mutex_lock(&disk_events_mutex); | 1786 | mutex_lock(&disk_events_mutex); |
1776 | list_del_init(&disk->ev->node); | 1787 | list_del_init(&disk->ev->node); |
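The genhd changes fold __disk_block_events() into disk_block_events() and add ev->block_mutex so that the first blocker finishes cancelling the work item before any later blocker returns; disk_check_events() now pokes the workqueue directly under ev->lock instead of doing a block/unblock dance. The core idea, counted blocking behind an outer mutex, in a reduced sketch (types and names are illustrative):

    #include <linux/mutex.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct events {
            struct mutex block_mutex;       /* serializes blockers */
            spinlock_t lock;                /* protects @block     */
            int block;                      /* blocking depth      */
            struct delayed_work dwork;      /* the periodic check  */
    };

    static void events_block(struct events *ev)
    {
            unsigned long flags;
            bool cancel;

            /* Later blockers wait here until the first one has flushed
             * the work, so "blocked" really means "not running". */
            mutex_lock(&ev->block_mutex);

            spin_lock_irqsave(&ev->lock, flags);
            cancel = !ev->block++;          /* only the 0 -> 1 step cancels */
            spin_unlock_irqrestore(&ev->lock, flags);

            if (cancel)
                    cancel_delayed_work_sync(&ev->dwork);

            mutex_unlock(&ev->block_mutex);
    }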
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 736bee5dafe..000d03ae665 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4143,9 +4143,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4143 | * Devices which choke on SETXFER. Applies only if both the | 4143 | * Devices which choke on SETXFER. Applies only if both the |
4144 | * device and controller are SATA. | 4144 | * device and controller are SATA. |
4145 | */ | 4145 | */ |
4146 | { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, | 4146 | { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, |
4147 | { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER }, | 4147 | { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, |
4148 | { "PIONEER DVD-RW DVR-216D", "1.08", ATA_HORKAGE_NOSETXFER }, | 4148 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, |
4149 | 4149 | ||
4150 | /* End Marker */ | 4150 | /* End Marker */ |
4151 | { } | 4151 | { } |
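Replacing the firmware revision strings with NULL widens the three Pioneer NOSETXFER entries to cover every revision of those drives, since the blacklist treats a NULL field as a wildcard. Roughly how such a match works, as a simplified sketch (the real ata_device_blacklist matcher also supports glob patterns):

    #include <stdbool.h>
    #include <string.h>

    struct blacklist_entry {
            const char *model;              /* NULL matches any model    */
            const char *rev;                /* NULL matches any revision */
            unsigned int horkage;
    };

    static bool entry_matches(const struct blacklist_entry *e,
                              const char *model, const char *rev)
    {
            if (e->model && strcmp(e->model, model) != 0)
                    return false;
            /* A NULL rev is a wildcard: every firmware revision matches. */
            if (e->rev && strcmp(e->rev, rev) != 0)
                    return false;
            return true;
    }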
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index d51f9795c06..927f968e99d 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -3797,6 +3797,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc); | |||
3797 | */ | 3797 | */ |
3798 | int ata_sas_port_start(struct ata_port *ap) | 3798 | int ata_sas_port_start(struct ata_port *ap) |
3799 | { | 3799 | { |
3800 | /* | ||
3801 | * the port is marked as frozen at allocation time, but if we don't | ||
3802 | * have new eh, we won't thaw it | ||
3803 | */ | ||
3804 | if (!ap->ops->error_handler) | ||
3805 | ap->pflags &= ~ATA_PFLAG_FROZEN; | ||
3800 | return 0; | 3806 | return 0; |
3801 | } | 3807 | } |
3802 | EXPORT_SYMBOL_GPL(ata_sas_port_start); | 3808 | EXPORT_SYMBOL_GPL(ata_sas_port_start); |
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index 75a6a0c0094..5d7f58a7e34 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c | |||
@@ -161,6 +161,9 @@ static const struct pci_device_id marvell_pci_tbl[] = { | |||
161 | { PCI_DEVICE(0x11AB, 0x6121), }, | 161 | { PCI_DEVICE(0x11AB, 0x6121), }, |
162 | { PCI_DEVICE(0x11AB, 0x6123), }, | 162 | { PCI_DEVICE(0x11AB, 0x6123), }, |
163 | { PCI_DEVICE(0x11AB, 0x6145), }, | 163 | { PCI_DEVICE(0x11AB, 0x6145), }, |
164 | { PCI_DEVICE(0x1B4B, 0x91A0), }, | ||
165 | { PCI_DEVICE(0x1B4B, 0x91A4), }, | ||
166 | |||
164 | { } /* terminate list */ | 167 | { } /* terminate list */ |
165 | }; | 168 | }; |
166 | 169 | ||
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 1c4b3aa4c7c..dc88a39e7db 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c | |||
@@ -389,7 +389,7 @@ static void sata_dwc_tf_dump(struct ata_taskfile *tf) | |||
389 | /* | 389 | /* |
390 | * Function: get_burst_length_encode | 390 | * Function: get_burst_length_encode |
391 | * arguments: datalength: length in bytes of data | 391 | * arguments: datalength: length in bytes of data |
392 | * returns value to be programmed in register corrresponding to data length | 392 | * returns value to be programmed in register corresponding to data length |
393 | * This value is effectively the log(base 2) of the length | 393 | * This value is effectively the log(base 2) of the length |
394 | */ | 394 | */ |
395 | static int get_burst_length_encode(int datalength) | 395 | static int get_burst_length_encode(int datalength) |
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index eaa8a854af0..ad367c4139b 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
@@ -387,7 +387,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb, | |||
387 | clknb = container_of(nb, struct pm_clk_notifier_block, nb); | 387 | clknb = container_of(nb, struct pm_clk_notifier_block, nb); |
388 | 388 | ||
389 | switch (action) { | 389 | switch (action) { |
390 | case BUS_NOTIFY_ADD_DEVICE: | 390 | case BUS_NOTIFY_BIND_DRIVER: |
391 | if (clknb->con_ids[0]) { | 391 | if (clknb->con_ids[0]) { |
392 | for (con_id = clknb->con_ids; *con_id; con_id++) | 392 | for (con_id = clknb->con_ids; *con_id; con_id++) |
393 | enable_clock(dev, *con_id); | 393 | enable_clock(dev, *con_id); |
@@ -395,7 +395,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb, | |||
395 | enable_clock(dev, NULL); | 395 | enable_clock(dev, NULL); |
396 | } | 396 | } |
397 | break; | 397 | break; |
398 | case BUS_NOTIFY_DEL_DEVICE: | 398 | case BUS_NOTIFY_UNBOUND_DRIVER: |
399 | if (clknb->con_ids[0]) { | 399 | if (clknb->con_ids[0]) { |
400 | for (con_id = clknb->con_ids; *con_id; con_id++) | 400 | for (con_id = clknb->con_ids; *con_id; con_id++) |
401 | disable_clock(dev, *con_id); | 401 | disable_clock(dev, *con_id); |
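The clock_ops change moves clock handling from device add/remove time to driver bind/unbind time: clocks are switched on when a driver is about to bind (BUS_NOTIFY_BIND_DRIVER) and off after it unbinds (BUS_NOTIFY_UNBOUND_DRIVER), so devices without a driver no longer keep their clocks running. A minimal bus-notifier skeleton following the same shape (the dev_info() calls stand in for enable_clock()/disable_clock()):

    #include <linux/device.h>
    #include <linux/notifier.h>

    static int my_clk_notify(struct notifier_block *nb,
                             unsigned long action, void *data)
    {
            struct device *dev = data;

            switch (action) {
            case BUS_NOTIFY_BIND_DRIVER:
                    /* about to bind: power up the device's clocks */
                    dev_info(dev, "enabling clocks before driver bind\n");
                    break;
            case BUS_NOTIFY_UNBOUND_DRIVER:
                    /* driver gone: clocks can be gated again */
                    dev_info(dev, "disabling clocks after driver unbind\n");
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block my_clk_nb = {
            .notifier_call = my_clk_notify,
    };
    /* registered via bus_register_notifier(&platform_bus_type, &my_clk_nb) */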
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index aa632020774..06f09bf89cb 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -57,7 +57,8 @@ static int async_error; | |||
57 | */ | 57 | */ |
58 | void device_pm_init(struct device *dev) | 58 | void device_pm_init(struct device *dev) |
59 | { | 59 | { |
60 | dev->power.in_suspend = false; | 60 | dev->power.is_prepared = false; |
61 | dev->power.is_suspended = false; | ||
61 | init_completion(&dev->power.completion); | 62 | init_completion(&dev->power.completion); |
62 | complete_all(&dev->power.completion); | 63 | complete_all(&dev->power.completion); |
63 | dev->power.wakeup = NULL; | 64 | dev->power.wakeup = NULL; |
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev) | |||
91 | pr_debug("PM: Adding info for %s:%s\n", | 92 | pr_debug("PM: Adding info for %s:%s\n", |
92 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); | 93 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
93 | mutex_lock(&dpm_list_mtx); | 94 | mutex_lock(&dpm_list_mtx); |
94 | if (dev->parent && dev->parent->power.in_suspend) | 95 | if (dev->parent && dev->parent->power.is_prepared) |
95 | dev_warn(dev, "parent %s should not be sleeping\n", | 96 | dev_warn(dev, "parent %s should not be sleeping\n", |
96 | dev_name(dev->parent)); | 97 | dev_name(dev->parent)); |
97 | list_add_tail(&dev->power.entry, &dpm_list); | 98 | list_add_tail(&dev->power.entry, &dpm_list); |
@@ -511,7 +512,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
511 | dpm_wait(dev->parent, async); | 512 | dpm_wait(dev->parent, async); |
512 | device_lock(dev); | 513 | device_lock(dev); |
513 | 514 | ||
514 | dev->power.in_suspend = false; | 515 | /* |
516 | * This is a fib. But we'll allow new children to be added below | ||
517 | * a resumed device, even if the device hasn't been completed yet. | ||
518 | */ | ||
519 | dev->power.is_prepared = false; | ||
520 | |||
521 | if (!dev->power.is_suspended) | ||
522 | goto Unlock; | ||
515 | 523 | ||
516 | if (dev->pwr_domain) { | 524 | if (dev->pwr_domain) { |
517 | pm_dev_dbg(dev, state, "power domain "); | 525 | pm_dev_dbg(dev, state, "power domain "); |
@@ -548,6 +556,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
548 | } | 556 | } |
549 | 557 | ||
550 | End: | 558 | End: |
559 | dev->power.is_suspended = false; | ||
560 | |||
561 | Unlock: | ||
551 | device_unlock(dev); | 562 | device_unlock(dev); |
552 | complete_all(&dev->power.completion); | 563 | complete_all(&dev->power.completion); |
553 | 564 | ||
@@ -670,7 +681,7 @@ void dpm_complete(pm_message_t state) | |||
670 | struct device *dev = to_device(dpm_prepared_list.prev); | 681 | struct device *dev = to_device(dpm_prepared_list.prev); |
671 | 682 | ||
672 | get_device(dev); | 683 | get_device(dev); |
673 | dev->power.in_suspend = false; | 684 | dev->power.is_prepared = false; |
674 | list_move(&dev->power.entry, &list); | 685 | list_move(&dev->power.entry, &list); |
675 | mutex_unlock(&dpm_list_mtx); | 686 | mutex_unlock(&dpm_list_mtx); |
676 | 687 | ||
@@ -835,11 +846,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
835 | device_lock(dev); | 846 | device_lock(dev); |
836 | 847 | ||
837 | if (async_error) | 848 | if (async_error) |
838 | goto End; | 849 | goto Unlock; |
839 | 850 | ||
840 | if (pm_wakeup_pending()) { | 851 | if (pm_wakeup_pending()) { |
841 | async_error = -EBUSY; | 852 | async_error = -EBUSY; |
842 | goto End; | 853 | goto Unlock; |
843 | } | 854 | } |
844 | 855 | ||
845 | if (dev->pwr_domain) { | 856 | if (dev->pwr_domain) { |
@@ -877,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
877 | } | 888 | } |
878 | 889 | ||
879 | End: | 890 | End: |
891 | dev->power.is_suspended = !error; | ||
892 | |||
893 | Unlock: | ||
880 | device_unlock(dev); | 894 | device_unlock(dev); |
881 | complete_all(&dev->power.completion); | 895 | complete_all(&dev->power.completion); |
882 | 896 | ||
@@ -1042,7 +1056,7 @@ int dpm_prepare(pm_message_t state) | |||
1042 | put_device(dev); | 1056 | put_device(dev); |
1043 | break; | 1057 | break; |
1044 | } | 1058 | } |
1045 | dev->power.in_suspend = true; | 1059 | dev->power.is_prepared = true; |
1046 | if (!list_empty(&dev->power.entry)) | 1060 | if (!list_empty(&dev->power.entry)) |
1047 | list_move_tail(&dev->power.entry, &dpm_prepared_list); | 1061 | list_move_tail(&dev->power.entry, &dpm_prepared_list); |
1048 | put_device(dev); | 1062 | put_device(dev); |
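The power/main.c changes split the old in_suspend flag into is_prepared and is_suspended, so the resume path can tell "went through dpm_prepare()" apart from "actually suspended": device_resume() now skips its callbacks for devices whose suspend failed or never ran, and __device_suspend() records success in is_suspended. The control flow, reduced to a skeleton (callbacks and error handling elided; not the full device_resume()):

    #include <linux/device.h>

    static int device_resume_skeleton(struct device *dev)
    {
            int error = 0;

            device_lock(dev);

            /* New children may appear under a resuming device before its
             * ->complete() runs, hence is_prepared is cleared early. */
            dev->power.is_prepared = false;

            if (!dev->power.is_suspended)
                    goto unlock;            /* suspend failed or never ran */

            /* ... power domain / subsystem / driver resume callbacks ... */

            dev->power.is_suspended = false;
    unlock:
            device_unlock(dev);
            return error;
    }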
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 74e4ff57801..4012fe42346 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
35 | #include <linux/mman.h> | 35 | #include <linux/mman.h> |
36 | #include <linux/pagemap.h> | 36 | #include <linux/pagemap.h> |
37 | #include <linux/shmem_fs.h> | ||
37 | #include "drmP.h" | 38 | #include "drmP.h" |
38 | 39 | ||
39 | /** @file drm_gem.c | 40 | /** @file drm_gem.c |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 0239e9974bf..2b79588541e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -2182,9 +2182,8 @@ int i915_driver_unload(struct drm_device *dev) | |||
2182 | /* Flush any outstanding unpin_work. */ | 2182 | /* Flush any outstanding unpin_work. */ |
2183 | flush_workqueue(dev_priv->wq); | 2183 | flush_workqueue(dev_priv->wq); |
2184 | 2184 | ||
2185 | i915_gem_free_all_phys_object(dev); | ||
2186 | |||
2187 | mutex_lock(&dev->struct_mutex); | 2185 | mutex_lock(&dev->struct_mutex); |
2186 | i915_gem_free_all_phys_object(dev); | ||
2188 | i915_gem_cleanup_ringbuffer(dev); | 2187 | i915_gem_cleanup_ringbuffer(dev); |
2189 | mutex_unlock(&dev->struct_mutex); | 2188 | mutex_unlock(&dev->struct_mutex); |
2190 | if (I915_HAS_FBC(dev) && i915_powersave) | 2189 | if (I915_HAS_FBC(dev) && i915_powersave) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 94c84d74410..85f713746a1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include "i915_trace.h" | 32 | #include "i915_trace.h" |
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include <linux/shmem_fs.h> | ||
34 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
35 | #include <linux/swap.h> | 36 | #include <linux/swap.h> |
36 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, | |||
359 | if ((page_offset + remain) > PAGE_SIZE) | 360 | if ((page_offset + remain) > PAGE_SIZE) |
360 | page_length = PAGE_SIZE - page_offset; | 361 | page_length = PAGE_SIZE - page_offset; |
361 | 362 | ||
362 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | 363 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); |
363 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
364 | if (IS_ERR(page)) | 364 | if (IS_ERR(page)) |
365 | return PTR_ERR(page); | 365 | return PTR_ERR(page); |
366 | 366 | ||
@@ -463,8 +463,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, | |||
463 | if ((data_page_offset + page_length) > PAGE_SIZE) | 463 | if ((data_page_offset + page_length) > PAGE_SIZE) |
464 | page_length = PAGE_SIZE - data_page_offset; | 464 | page_length = PAGE_SIZE - data_page_offset; |
465 | 465 | ||
466 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | 466 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); |
467 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
468 | if (IS_ERR(page)) { | 467 | if (IS_ERR(page)) { |
469 | ret = PTR_ERR(page); | 468 | ret = PTR_ERR(page); |
470 | goto out; | 469 | goto out; |
@@ -797,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, | |||
797 | if ((page_offset + remain) > PAGE_SIZE) | 796 | if ((page_offset + remain) > PAGE_SIZE) |
798 | page_length = PAGE_SIZE - page_offset; | 797 | page_length = PAGE_SIZE - page_offset; |
799 | 798 | ||
800 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | 799 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); |
801 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
802 | if (IS_ERR(page)) | 800 | if (IS_ERR(page)) |
803 | return PTR_ERR(page); | 801 | return PTR_ERR(page); |
804 | 802 | ||
@@ -907,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, | |||
907 | if ((data_page_offset + page_length) > PAGE_SIZE) | 905 | if ((data_page_offset + page_length) > PAGE_SIZE) |
908 | page_length = PAGE_SIZE - data_page_offset; | 906 | page_length = PAGE_SIZE - data_page_offset; |
909 | 907 | ||
910 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | 908 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); |
911 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
912 | if (IS_ERR(page)) { | 909 | if (IS_ERR(page)) { |
913 | ret = PTR_ERR(page); | 910 | ret = PTR_ERR(page); |
914 | goto out; | 911 | goto out; |
@@ -1219,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1219 | ret = i915_gem_object_bind_to_gtt(obj, 0, true); | 1216 | ret = i915_gem_object_bind_to_gtt(obj, 0, true); |
1220 | if (ret) | 1217 | if (ret) |
1221 | goto unlock; | 1218 | goto unlock; |
1222 | } | ||
1223 | 1219 | ||
1224 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | 1220 | ret = i915_gem_object_set_to_gtt_domain(obj, write); |
1225 | if (ret) | 1221 | if (ret) |
1226 | goto unlock; | 1222 | goto unlock; |
1223 | } | ||
1227 | 1224 | ||
1228 | if (obj->tiling_mode == I915_TILING_NONE) | 1225 | if (obj->tiling_mode == I915_TILING_NONE) |
1229 | ret = i915_gem_object_put_fence(obj); | 1226 | ret = i915_gem_object_put_fence(obj); |
@@ -1558,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, | |||
1558 | 1555 | ||
1559 | inode = obj->base.filp->f_path.dentry->d_inode; | 1556 | inode = obj->base.filp->f_path.dentry->d_inode; |
1560 | mapping = inode->i_mapping; | 1557 | mapping = inode->i_mapping; |
1558 | gfpmask |= mapping_gfp_mask(mapping); | ||
1559 | |||
1561 | for (i = 0; i < page_count; i++) { | 1560 | for (i = 0; i < page_count; i++) { |
1562 | page = read_cache_page_gfp(mapping, i, | 1561 | page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); |
1563 | GFP_HIGHUSER | | ||
1564 | __GFP_COLD | | ||
1565 | __GFP_RECLAIMABLE | | ||
1566 | gfpmask); | ||
1567 | if (IS_ERR(page)) | 1562 | if (IS_ERR(page)) |
1568 | goto err_pages; | 1563 | goto err_pages; |
1569 | 1564 | ||
@@ -1701,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) | |||
1701 | /* Our goal here is to return as much of the memory as | 1696 | /* Our goal here is to return as much of the memory as |
1702 | * is possible back to the system as we are called from OOM. | 1697 | * is possible back to the system as we are called from OOM. |
1703 | * To do this we must instruct the shmfs to drop all of its | 1698 | * To do this we must instruct the shmfs to drop all of its |
1704 | * backing pages, *now*. Here we mirror the actions taken | 1699 | * backing pages, *now*. |
1705 | * when by shmem_delete_inode() to release the backing store. | ||
1706 | */ | 1700 | */ |
1707 | inode = obj->base.filp->f_path.dentry->d_inode; | 1701 | inode = obj->base.filp->f_path.dentry->d_inode; |
1708 | truncate_inode_pages(inode->i_mapping, 0); | 1702 | shmem_truncate_range(inode, 0, (loff_t)-1); |
1709 | if (inode->i_op->truncate_range) | ||
1710 | inode->i_op->truncate_range(inode, 0, (loff_t)-1); | ||
1711 | 1703 | ||
1712 | obj->madv = __I915_MADV_PURGED; | 1704 | obj->madv = __I915_MADV_PURGED; |
1713 | } | 1705 | } |
@@ -2926,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) | |||
2926 | */ | 2918 | */ |
2927 | wmb(); | 2919 | wmb(); |
2928 | 2920 | ||
2929 | i915_gem_release_mmap(obj); | ||
2930 | |||
2931 | old_write_domain = obj->base.write_domain; | 2921 | old_write_domain = obj->base.write_domain; |
2932 | obj->base.write_domain = 0; | 2922 | obj->base.write_domain = 0; |
2933 | 2923 | ||
@@ -3567,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | |||
3567 | { | 3557 | { |
3568 | struct drm_i915_private *dev_priv = dev->dev_private; | 3558 | struct drm_i915_private *dev_priv = dev->dev_private; |
3569 | struct drm_i915_gem_object *obj; | 3559 | struct drm_i915_gem_object *obj; |
3560 | struct address_space *mapping; | ||
3570 | 3561 | ||
3571 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | 3562 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
3572 | if (obj == NULL) | 3563 | if (obj == NULL) |
@@ -3577,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | |||
3577 | return NULL; | 3568 | return NULL; |
3578 | } | 3569 | } |
3579 | 3570 | ||
3571 | mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | ||
3572 | mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
3573 | |||
3580 | i915_gem_info_add_obj(dev_priv, size); | 3574 | i915_gem_info_add_obj(dev_priv, size); |
3581 | 3575 | ||
3582 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 3576 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
@@ -3952,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
3952 | 3946 | ||
3953 | page_count = obj->base.size / PAGE_SIZE; | 3947 | page_count = obj->base.size / PAGE_SIZE; |
3954 | for (i = 0; i < page_count; i++) { | 3948 | for (i = 0; i < page_count; i++) { |
3955 | struct page *page = read_cache_page_gfp(mapping, i, | 3949 | struct page *page = shmem_read_mapping_page(mapping, i); |
3956 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
3957 | if (!IS_ERR(page)) { | 3950 | if (!IS_ERR(page)) { |
3958 | char *dst = kmap_atomic(page); | 3951 | char *dst = kmap_atomic(page); |
3959 | memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); | 3952 | memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); |
@@ -4014,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4014 | struct page *page; | 4007 | struct page *page; |
4015 | char *dst, *src; | 4008 | char *dst, *src; |
4016 | 4009 | ||
4017 | page = read_cache_page_gfp(mapping, i, | 4010 | page = shmem_read_mapping_page(mapping, i); |
4018 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
4019 | if (IS_ERR(page)) | 4011 | if (IS_ERR(page)) |
4020 | return PTR_ERR(page); | 4012 | return PTR_ERR(page); |
4021 | 4013 | ||
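All the i915_gem.c hunks follow one theme: instead of passing GFP_HIGHUSER | __GFP_RECLAIMABLE to read_cache_page_gfp() at every call site, the gfp mask is set once on the object's mapping at allocation time, and pages are then fetched with the new shmem_read_mapping_page() helper (shmem_read_mapping_page_gfp() where an extra mask is still wanted). The resulting pattern, sketched for a shmem-backed mapping as GEM uses:

    #include <linux/pagemap.h>
    #include <linux/shmem_fs.h>

    /* At object creation: record the desired gfp flags on the mapping. */
    static void obj_init_mapping(struct address_space *mapping)
    {
            mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
    }

    /* At fault/pread/pwrite time: the helper picks the mask up itself. */
    static struct page *obj_get_page(struct address_space *mapping, pgoff_t n)
    {
            return shmem_read_mapping_page(mapping, n); /* may be ERR_PTR */
    }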
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 20a4cc5b818..4934cf84c32 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -187,10 +187,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, | |||
187 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) | 187 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) |
188 | i915_gem_clflush_object(obj); | 188 | i915_gem_clflush_object(obj); |
189 | 189 | ||
190 | /* blow away mappings if mapped through GTT */ | ||
191 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) | ||
192 | i915_gem_release_mmap(obj); | ||
193 | |||
194 | if (obj->base.pending_write_domain) | 190 | if (obj->base.pending_write_domain) |
195 | cd->flips |= atomic_read(&obj->pending_flip); | 191 | cd->flips |= atomic_read(&obj->pending_flip); |
196 | 192 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9e34a1abeb6..ae2b49969b9 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -1749,6 +1749,7 @@ void ironlake_irq_preinstall(struct drm_device *dev) | |||
1749 | * happens. | 1749 | * happens. |
1750 | */ | 1750 | */ |
1751 | I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT); | 1751 | I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT); |
1752 | I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT); | ||
1752 | } | 1753 | } |
1753 | 1754 | ||
1754 | /* XXX hotplug from PCH */ | 1755 | /* XXX hotplug from PCH */ |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2f967af8e62..5d5def756c9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -531,6 +531,7 @@ | |||
531 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0 | 531 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0 |
532 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) | 532 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) |
533 | 533 | ||
534 | #define GEN6_BSD_HWSTAM 0x12098 | ||
534 | #define GEN6_BSD_IMR 0x120a8 | 535 | #define GEN6_BSD_IMR 0x120a8 |
535 | #define GEN6_BSD_USER_INTERRUPT (1 << 12) | 536 | #define GEN6_BSD_USER_INTERRUPT (1 << 12) |
536 | 537 | ||
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 60a94d2b526..e8152d23d5b 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -678,6 +678,7 @@ void i915_save_display(struct drm_device *dev) | |||
678 | } | 678 | } |
679 | 679 | ||
680 | /* VGA state */ | 680 | /* VGA state */ |
681 | mutex_lock(&dev->struct_mutex); | ||
681 | dev_priv->saveVGA0 = I915_READ(VGA0); | 682 | dev_priv->saveVGA0 = I915_READ(VGA0); |
682 | dev_priv->saveVGA1 = I915_READ(VGA1); | 683 | dev_priv->saveVGA1 = I915_READ(VGA1); |
683 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); | 684 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); |
@@ -687,6 +688,7 @@ void i915_save_display(struct drm_device *dev) | |||
687 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); | 688 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); |
688 | 689 | ||
689 | i915_save_vga(dev); | 690 | i915_save_vga(dev); |
691 | mutex_unlock(&dev->struct_mutex); | ||
690 | } | 692 | } |
691 | 693 | ||
692 | void i915_restore_display(struct drm_device *dev) | 694 | void i915_restore_display(struct drm_device *dev) |
@@ -780,6 +782,8 @@ void i915_restore_display(struct drm_device *dev) | |||
780 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); | 782 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); |
781 | else | 783 | else |
782 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); | 784 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); |
785 | |||
786 | mutex_lock(&dev->struct_mutex); | ||
783 | I915_WRITE(VGA0, dev_priv->saveVGA0); | 787 | I915_WRITE(VGA0, dev_priv->saveVGA0); |
784 | I915_WRITE(VGA1, dev_priv->saveVGA1); | 788 | I915_WRITE(VGA1, dev_priv->saveVGA1); |
785 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); | 789 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); |
@@ -787,6 +791,7 @@ void i915_restore_display(struct drm_device *dev) | |||
787 | udelay(150); | 791 | udelay(150); |
788 | 792 | ||
789 | i915_restore_vga(dev); | 793 | i915_restore_vga(dev); |
794 | mutex_unlock(&dev->struct_mutex); | ||
790 | } | 795 | } |
791 | 796 | ||
792 | int i915_save_state(struct drm_device *dev) | 797 | int i915_save_state(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 81a9059b6a9..aa43e7be605 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -4687,6 +4687,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
4687 | 4687 | ||
4688 | I915_WRITE(DSPCNTR(plane), dspcntr); | 4688 | I915_WRITE(DSPCNTR(plane), dspcntr); |
4689 | POSTING_READ(DSPCNTR(plane)); | 4689 | POSTING_READ(DSPCNTR(plane)); |
4690 | intel_enable_plane(dev_priv, plane, pipe); | ||
4690 | 4691 | ||
4691 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 4692 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
4692 | 4693 | ||
@@ -5217,8 +5218,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5217 | 5218 | ||
5218 | I915_WRITE(DSPCNTR(plane), dspcntr); | 5219 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5219 | POSTING_READ(DSPCNTR(plane)); | 5220 | POSTING_READ(DSPCNTR(plane)); |
5220 | if (!HAS_PCH_SPLIT(dev)) | ||
5221 | intel_enable_plane(dev_priv, plane, pipe); | ||
5222 | 5221 | ||
5223 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 5222 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
5224 | 5223 | ||
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index a670c006982..56a8e2aea19 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -1416,6 +1416,8 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1416 | goto out_free; | 1416 | goto out_free; |
1417 | overlay->reg_bo = reg_bo; | 1417 | overlay->reg_bo = reg_bo; |
1418 | 1418 | ||
1419 | mutex_lock(&dev->struct_mutex); | ||
1420 | |||
1419 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { | 1421 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { |
1420 | ret = i915_gem_attach_phys_object(dev, reg_bo, | 1422 | ret = i915_gem_attach_phys_object(dev, reg_bo, |
1421 | I915_GEM_PHYS_OVERLAY_REGS, | 1423 | I915_GEM_PHYS_OVERLAY_REGS, |
@@ -1440,6 +1442,8 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1440 | } | 1442 | } |
1441 | } | 1443 | } |
1442 | 1444 | ||
1445 | mutex_unlock(&dev->struct_mutex); | ||
1446 | |||
1443 | /* init all values */ | 1447 | /* init all values */ |
1444 | overlay->color_key = 0x0101fe; | 1448 | overlay->color_key = 0x0101fe; |
1445 | overlay->brightness = -19; | 1449 | overlay->brightness = -19; |
@@ -1464,6 +1468,7 @@ out_unpin_bo: | |||
1464 | i915_gem_object_unpin(reg_bo); | 1468 | i915_gem_object_unpin(reg_bo); |
1465 | out_free_bo: | 1469 | out_free_bo: |
1466 | drm_gem_object_unreference(®_bo->base); | 1470 | drm_gem_object_unreference(®_bo->base); |
1471 | mutex_unlock(&dev->struct_mutex); | ||
1467 | out_free: | 1472 | out_free: |
1468 | kfree(overlay); | 1473 | kfree(overlay); |
1469 | return; | 1474 | return; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 445af798163..12d2fdc5241 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -2013,9 +2013,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
2013 | rdev->config.evergreen.tile_config |= (3 << 0); | 2013 | rdev->config.evergreen.tile_config |= (3 << 0); |
2014 | break; | 2014 | break; |
2015 | } | 2015 | } |
2016 | /* num banks is 8 on all fusion asics */ | 2016 | /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ |
2017 | if (rdev->flags & RADEON_IS_IGP) | 2017 | if (rdev->flags & RADEON_IS_IGP) |
2018 | rdev->config.evergreen.tile_config |= 8 << 4; | 2018 | rdev->config.evergreen.tile_config |= 1 << 4; |
2019 | else | 2019 | else |
2020 | rdev->config.evergreen.tile_config |= | 2020 | rdev->config.evergreen.tile_config |= |
2021 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; | 2021 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; |
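The evergreen fix corrects an encoding bug rather than a policy change: bits 5:4 of tile_config hold an encoded bank count (0 = 4 banks, 1 = 8, 2 = 16), so writing the literal 8 for the fusion parts' eight banks was wrong; the encoded value 1 is what userspace expects to decode. Spelled out as a small helper (a sketch, not kernel code):

    #include <stdint.h>

    /* tile_config bits 5:4 encode the bank count: 0 = 4, 1 = 8, 2 = 16. */
    static uint32_t encode_banks(unsigned int nbanks)
    {
            switch (nbanks) {
            case 4:  return 0u << 4;
            case 8:  return 1u << 4;        /* the fusion case fixed above */
            case 16: return 2u << 4;
            default: return 0u << 4;        /* fall back to 4 banks */
            }
    }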
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 27f45579e64..ef0e0e01691 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -179,6 +179,7 @@ void radeon_pm_resume(struct radeon_device *rdev); | |||
179 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 179 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
180 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 180 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
181 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); | 181 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); |
182 | int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage); | ||
182 | void rs690_pm_info(struct radeon_device *rdev); | 183 | void rs690_pm_info(struct radeon_device *rdev); |
183 | extern int rv6xx_get_temp(struct radeon_device *rdev); | 184 | extern int rv6xx_get_temp(struct radeon_device *rdev); |
184 | extern int rv770_get_temp(struct radeon_device *rdev); | 185 | extern int rv770_get_temp(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1e725d9f767..bf2b61584cd 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -2320,6 +2320,14 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, | |||
2320 | le16_to_cpu(clock_info->r600.usVDDC); | 2320 | le16_to_cpu(clock_info->r600.usVDDC); |
2321 | } | 2321 | } |
2322 | 2322 | ||
2323 | /* patch up vddc if necessary */ | ||
2324 | if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) { | ||
2325 | u16 vddc; | ||
2326 | |||
2327 | if (radeon_atom_get_max_vddc(rdev, &vddc) == 0) | ||
2328 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc; | ||
2329 | } | ||
2330 | |||
2323 | if (rdev->flags & RADEON_IS_IGP) { | 2331 | if (rdev->flags & RADEON_IS_IGP) { |
2324 | /* skip invalid modes */ | 2332 | /* skip invalid modes */ |
2325 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) | 2333 | if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) |
@@ -2630,7 +2638,35 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v | |||
2630 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2638 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2631 | } | 2639 | } |
2632 | 2640 | ||
2641 | int radeon_atom_get_max_vddc(struct radeon_device *rdev, | ||
2642 | u16 *voltage) | ||
2643 | { | ||
2644 | union set_voltage args; | ||
2645 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); | ||
2646 | u8 frev, crev; | ||
2647 | |||
2648 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
2649 | return -EINVAL; | ||
2650 | |||
2651 | switch (crev) { | ||
2652 | case 1: | ||
2653 | return -EINVAL; | ||
2654 | case 2: | ||
2655 | args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE; | ||
2656 | args.v2.ucVoltageMode = 0; | ||
2657 | args.v2.usVoltageLevel = 0; | ||
2658 | |||
2659 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
2660 | |||
2661 | *voltage = le16_to_cpu(args.v2.usVoltageLevel); | ||
2662 | break; | ||
2663 | default: | ||
2664 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
2665 | return -EINVAL; | ||
2666 | } | ||
2633 | 2667 | ||
2668 | return 0; | ||
2669 | } | ||
2634 | 2670 | ||
2635 | void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) | 2671 | void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) |
2636 | { | 2672 | { |
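The new radeon_atom_get_max_vddc() drives the vddc patch-up earlier in this diff: a clock_info voltage of 0xff01 is a sentinel meaning "use the board's maximum VDDC", so the power-state parser now asks the ATOM SetVoltage table (crev >= 2) for that maximum and substitutes it. The call-site shape, condensed into a sketch (the macro name is illustrative; the value comes from the diff):

    #define VDDC_USE_MAX_SENTINEL 0xff01

    static void patch_voltage(struct radeon_device *rdev, u16 *voltage)
    {
            u16 vddc;

            if (*voltage != VDDC_USE_MAX_SENTINEL)
                    return;
            /* Ask the SetVoltage table for the board maximum; on
             * failure the sentinel is simply left in place. */
            if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
                    *voltage = vddc;
    }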
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 90e23e0bfad..58c271ebc0f 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
32 | #include <linux/highmem.h> | 32 | #include <linux/highmem.h> |
33 | #include <linux/pagemap.h> | 33 | #include <linux/pagemap.h> |
34 | #include <linux/shmem_fs.h> | ||
34 | #include <linux/file.h> | 35 | #include <linux/file.h> |
35 | #include <linux/swap.h> | 36 | #include <linux/swap.h> |
36 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
@@ -484,7 +485,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) | |||
484 | swap_space = swap_storage->f_path.dentry->d_inode->i_mapping; | 485 | swap_space = swap_storage->f_path.dentry->d_inode->i_mapping; |
485 | 486 | ||
486 | for (i = 0; i < ttm->num_pages; ++i) { | 487 | for (i = 0; i < ttm->num_pages; ++i) { |
487 | from_page = read_mapping_page(swap_space, i, NULL); | 488 | from_page = shmem_read_mapping_page(swap_space, i); |
488 | if (IS_ERR(from_page)) { | 489 | if (IS_ERR(from_page)) { |
489 | ret = PTR_ERR(from_page); | 490 | ret = PTR_ERR(from_page); |
490 | goto out_err; | 491 | goto out_err; |
@@ -557,7 +558,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) | |||
557 | from_page = ttm->pages[i]; | 558 | from_page = ttm->pages[i]; |
558 | if (unlikely(from_page == NULL)) | 559 | if (unlikely(from_page == NULL)) |
559 | continue; | 560 | continue; |
560 | to_page = read_mapping_page(swap_space, i, NULL); | 561 | to_page = shmem_read_mapping_page(swap_space, i); |
561 | if (unlikely(IS_ERR(to_page))) { | 562 | if (unlikely(IS_ERR(to_page))) { |
562 | ret = PTR_ERR(to_page); | 563 | ret = PTR_ERR(to_page); |
563 | goto out_err; | 564 | goto out_err; |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index f7440e8ce3e..6f3289a5788 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1423,6 +1423,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1423 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, | 1423 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, |
1424 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, | 1424 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, |
1425 | { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) }, | 1425 | { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) }, |
1426 | { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) }, | ||
1426 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, | 1427 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, |
1427 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, | 1428 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, |
1428 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, | 1429 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index aecb5a4b8d6..a756ee6c7df 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -449,6 +449,7 @@ | |||
449 | 449 | ||
450 | #define USB_VENDOR_ID_LUMIO 0x202e | 450 | #define USB_VENDOR_ID_LUMIO 0x202e |
451 | #define USB_DEVICE_ID_CRYSTALTOUCH 0x0006 | 451 | #define USB_DEVICE_ID_CRYSTALTOUCH 0x0006 |
452 | #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007 | ||
452 | 453 | ||
453 | #define USB_VENDOR_ID_MCC 0x09db | 454 | #define USB_VENDOR_ID_MCC 0x09db |
454 | #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 | 455 | #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 |
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 0b2dcd0ee59..62cac4dc3b6 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c | |||
@@ -271,6 +271,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
271 | } | 271 | } |
272 | return 1; | 272 | return 1; |
273 | case HID_DG_CONTACTID: | 273 | case HID_DG_CONTACTID: |
274 | if (!td->maxcontacts) | ||
275 | td->maxcontacts = MT_DEFAULT_MAXCONTACT; | ||
274 | input_mt_init_slots(hi->input, td->maxcontacts); | 276 | input_mt_init_slots(hi->input, td->maxcontacts); |
275 | td->last_slot_field = usage->hid; | 277 | td->last_slot_field = usage->hid; |
276 | td->last_field_index = field->index; | 278 | td->last_field_index = field->index; |
@@ -547,9 +549,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
547 | if (ret) | 549 | if (ret) |
548 | goto fail; | 550 | goto fail; |
549 | 551 | ||
550 | if (!td->maxcontacts) | ||
551 | td->maxcontacts = MT_DEFAULT_MAXCONTACT; | ||
552 | |||
553 | td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot), | 552 | td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot), |
554 | GFP_KERNEL); | 553 | GFP_KERNEL); |
555 | if (!td->slots) { | 554 | if (!td->slots) { |
@@ -677,6 +676,9 @@ static const struct hid_device_id mt_devices[] = { | |||
677 | { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, | 676 | { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, |
678 | HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, | 677 | HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, |
679 | USB_DEVICE_ID_CRYSTALTOUCH) }, | 678 | USB_DEVICE_ID_CRYSTALTOUCH) }, |
679 | { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, | ||
680 | HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, | ||
681 | USB_DEVICE_ID_CRYSTALTOUCH_DUAL) }, | ||
680 | 682 | ||
681 | /* MosArt panels */ | 683 | /* MosArt panels */ |
682 | { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, | 684 | { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, |
@@ -707,10 +709,10 @@ static const struct hid_device_id mt_devices[] = { | |||
707 | HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, | 709 | HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, |
708 | USB_DEVICE_ID_MTP)}, | 710 | USB_DEVICE_ID_MTP)}, |
709 | { .driver_data = MT_CLS_CONFIDENCE, | 711 | { .driver_data = MT_CLS_CONFIDENCE, |
710 | HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, | 712 | HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, |
711 | USB_DEVICE_ID_MTP_STM)}, | 713 | USB_DEVICE_ID_MTP_STM)}, |
712 | { .driver_data = MT_CLS_CONFIDENCE, | 714 | { .driver_data = MT_CLS_CONFIDENCE, |
713 | HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, | 715 | HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX, |
714 | USB_DEVICE_ID_MTP_SITRONIX)}, | 716 | USB_DEVICE_ID_MTP_SITRONIX)}, |
715 | 717 | ||
716 | /* Touch International panels */ | 718 | /* Touch International panels */ |
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index c0cff64a1ae..cc1dc4817fa 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c | |||
@@ -593,7 +593,7 @@ static void lp5521_unregister_sysfs(struct i2c_client *client) | |||
593 | &lp5521_led_attribute_group); | 593 | &lp5521_led_attribute_group); |
594 | } | 594 | } |
595 | 595 | ||
596 | static int __init lp5521_init_led(struct lp5521_led *led, | 596 | static int __devinit lp5521_init_led(struct lp5521_led *led, |
597 | struct i2c_client *client, | 597 | struct i2c_client *client, |
598 | int chan, struct lp5521_platform_data *pdata) | 598 | int chan, struct lp5521_platform_data *pdata) |
599 | { | 599 | { |
@@ -637,7 +637,7 @@ static int __init lp5521_init_led(struct lp5521_led *led, | |||
637 | return 0; | 637 | return 0; |
638 | } | 638 | } |
639 | 639 | ||
640 | static int lp5521_probe(struct i2c_client *client, | 640 | static int __devinit lp5521_probe(struct i2c_client *client, |
641 | const struct i2c_device_id *id) | 641 | const struct i2c_device_id *id) |
642 | { | 642 | { |
643 | struct lp5521_chip *chip; | 643 | struct lp5521_chip *chip; |
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index e19fed25f13..5971e309b23 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c | |||
@@ -826,7 +826,7 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id) | |||
826 | return 0; | 826 | return 0; |
827 | } | 827 | } |
828 | 828 | ||
829 | static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev, | 829 | static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev, |
830 | int chan, struct lp5523_platform_data *pdata) | 830 | int chan, struct lp5523_platform_data *pdata) |
831 | { | 831 | { |
832 | char name[32]; | 832 | char name[32]; |
@@ -872,7 +872,7 @@ static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev, | |||
872 | 872 | ||
873 | static struct i2c_driver lp5523_driver; | 873 | static struct i2c_driver lp5523_driver; |
874 | 874 | ||
875 | static int lp5523_probe(struct i2c_client *client, | 875 | static int __devinit lp5523_probe(struct i2c_client *client, |
876 | const struct i2c_device_id *id) | 876 | const struct i2c_device_id *id) |
877 | { | 877 | { |
878 | struct lp5523_chip *chip; | 878 | struct lp5523_chip *chip; |
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c index d019746551f..2a40d0efdff 100644 --- a/drivers/misc/cb710/sgbuf2.c +++ b/drivers/misc/cb710/sgbuf2.c | |||
@@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter) | |||
47 | 47 | ||
48 | static inline bool needs_unaligned_copy(const void *ptr) | 48 | static inline bool needs_unaligned_copy(const void *ptr) |
49 | { | 49 | { |
50 | #ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS | 50 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
51 | return false; | 51 | return false; |
52 | #else | 52 | #else |
53 | return ((ptr - NULL) & 3) != 0; | 53 | return ((ptr - NULL) & 3) != 0; |
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c index 668d41e594a..df03dd3bd0e 100644 --- a/drivers/misc/ioc4.c +++ b/drivers/misc/ioc4.c | |||
@@ -270,7 +270,7 @@ ioc4_variant(struct ioc4_driver_data *idd) | |||
270 | return IOC4_VARIANT_PCI_RT; | 270 | return IOC4_VARIANT_PCI_RT; |
271 | } | 271 | } |
272 | 272 | ||
273 | static void __devinit | 273 | static void |
274 | ioc4_load_modules(struct work_struct *work) | 274 | ioc4_load_modules(struct work_struct *work) |
275 | { | 275 | { |
276 | request_module("sgiioc4"); | 276 | request_module("sgiioc4"); |
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index 81d7fa4ec0d..150cd7061b8 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c | |||
@@ -120,6 +120,7 @@ static int recur_count = REC_NUM_DEFAULT; | |||
120 | static enum cname cpoint = CN_INVALID; | 120 | static enum cname cpoint = CN_INVALID; |
121 | static enum ctype cptype = CT_NONE; | 121 | static enum ctype cptype = CT_NONE; |
122 | static int count = DEFAULT_COUNT; | 122 | static int count = DEFAULT_COUNT; |
123 | static DEFINE_SPINLOCK(count_lock); | ||
123 | 124 | ||
124 | module_param(recur_count, int, 0644); | 125 | module_param(recur_count, int, 0644); |
125 | MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\ | 126 | MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\ |
@@ -230,11 +231,14 @@ static const char *cp_name_to_str(enum cname name) | |||
230 | static int lkdtm_parse_commandline(void) | 231 | static int lkdtm_parse_commandline(void) |
231 | { | 232 | { |
232 | int i; | 233 | int i; |
234 | unsigned long flags; | ||
233 | 235 | ||
234 | if (cpoint_count < 1 || recur_count < 1) | 236 | if (cpoint_count < 1 || recur_count < 1) |
235 | return -EINVAL; | 237 | return -EINVAL; |
236 | 238 | ||
239 | spin_lock_irqsave(&count_lock, flags); | ||
237 | count = cpoint_count; | 240 | count = cpoint_count; |
241 | spin_unlock_irqrestore(&count_lock, flags); | ||
238 | 242 | ||
239 | /* No special parameters */ | 243 | /* No special parameters */ |
240 | if (!cpoint_type && !cpoint_name) | 244 | if (!cpoint_type && !cpoint_name) |
@@ -349,6 +353,9 @@ static void lkdtm_do_action(enum ctype which) | |||
349 | 353 | ||
350 | static void lkdtm_handler(void) | 354 | static void lkdtm_handler(void) |
351 | { | 355 | { |
356 | unsigned long flags; | ||
357 | |||
358 | spin_lock_irqsave(&count_lock, flags); | ||
352 | count--; | 359 | count--; |
353 | printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n", | 360 | printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n", |
354 | cp_name_to_str(cpoint), cp_type_to_str(cptype), count); | 361 | cp_name_to_str(cpoint), cp_type_to_str(cptype), count); |
@@ -357,6 +364,7 @@ static void lkdtm_handler(void) | |||
357 | lkdtm_do_action(cptype); | 364 | lkdtm_do_action(cptype); |
358 | count = cpoint_count; | 365 | count = cpoint_count; |
359 | } | 366 | } |
367 | spin_unlock_irqrestore(&count_lock, flags); | ||
360 | } | 368 | } |
361 | 369 | ||
362 | static int lkdtm_register_cpoint(enum cname which) | 370 | static int lkdtm_register_cpoint(enum cname which) |
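The lkdtm change puts the module-global count under a spinlock: lkdtm_handler() fires from arbitrary crash-point contexts, including interrupt context, so the decrement/reset sequence and the writer in lkdtm_parse_commandline() now both run under count_lock with interrupts disabled. The canonical pattern, in a reduced sketch:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(count_lock);
    static int count;

    /* Safe from any context: saving and restoring the irq state lets the
     * same code run from process and interrupt context alike. */
    static void count_tick(int reset_to)
    {
            unsigned long flags;

            spin_lock_irqsave(&count_lock, flags);
            if (--count == 0)
                    count = reset_to;
            spin_unlock_irqrestore(&count_lock, flags);
    }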
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 71da5641e25..f85e4222455 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -1024,7 +1024,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, | |||
1024 | INIT_LIST_HEAD(&md->part); | 1024 | INIT_LIST_HEAD(&md->part); |
1025 | md->usage = 1; | 1025 | md->usage = 1; |
1026 | 1026 | ||
1027 | ret = mmc_init_queue(&md->queue, card, &md->lock); | 1027 | ret = mmc_init_queue(&md->queue, card, &md->lock, subname); |
1028 | if (ret) | 1028 | if (ret) |
1029 | goto err_putdisk; | 1029 | goto err_putdisk; |
1030 | 1030 | ||
@@ -1297,6 +1297,9 @@ static void mmc_blk_remove(struct mmc_card *card) | |||
1297 | struct mmc_blk_data *md = mmc_get_drvdata(card); | 1297 | struct mmc_blk_data *md = mmc_get_drvdata(card); |
1298 | 1298 | ||
1299 | mmc_blk_remove_parts(card, md); | 1299 | mmc_blk_remove_parts(card, md); |
1300 | mmc_claim_host(card->host); | ||
1301 | mmc_blk_part_switch(card, md); | ||
1302 | mmc_release_host(card->host); | ||
1300 | mmc_blk_remove_req(md); | 1303 | mmc_blk_remove_req(md); |
1301 | mmc_set_drvdata(card, NULL); | 1304 | mmc_set_drvdata(card, NULL); |
1302 | } | 1305 | } |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index c07322c2658..6413afa318d 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -106,10 +106,12 @@ static void mmc_request(struct request_queue *q) | |||
106 | * @mq: mmc queue | 106 | * @mq: mmc queue |
107 | * @card: mmc card to attach this queue | 107 | * @card: mmc card to attach this queue |
108 | * @lock: queue lock | 108 | * @lock: queue lock |
109 | * @subname: partition subname | ||
109 | * | 110 | * |
110 | * Initialise a MMC card request queue. | 111 | * Initialise a MMC card request queue. |
111 | */ | 112 | */ |
112 | int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) | 113 | int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, |
114 | spinlock_t *lock, const char *subname) | ||
113 | { | 115 | { |
114 | struct mmc_host *host = card->host; | 116 | struct mmc_host *host = card->host; |
115 | u64 limit = BLK_BOUNCE_HIGH; | 117 | u64 limit = BLK_BOUNCE_HIGH; |
@@ -133,12 +135,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock | |||
133 | mq->queue->limits.max_discard_sectors = UINT_MAX; | 135 | mq->queue->limits.max_discard_sectors = UINT_MAX; |
134 | if (card->erased_byte == 0) | 136 | if (card->erased_byte == 0) |
135 | mq->queue->limits.discard_zeroes_data = 1; | 137 | mq->queue->limits.discard_zeroes_data = 1; |
136 | if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) { | 138 | mq->queue->limits.discard_granularity = card->pref_erase << 9; |
137 | mq->queue->limits.discard_granularity = | ||
138 | card->erase_size << 9; | ||
139 | mq->queue->limits.discard_alignment = | ||
140 | card->erase_size << 9; | ||
141 | } | ||
142 | if (mmc_can_secure_erase_trim(card)) | 139 | if (mmc_can_secure_erase_trim(card)) |
143 | queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, | 140 | queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, |
144 | mq->queue); | 141 | mq->queue); |
@@ -209,8 +206,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock | |||
209 | 206 | ||
210 | sema_init(&mq->thread_sem, 1); | 207 | sema_init(&mq->thread_sem, 1); |
211 | 208 | ||
212 | mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d", | 209 | mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s", |
213 | host->index); | 210 | host->index, subname ? subname : ""); |
214 | 211 | ||
215 | if (IS_ERR(mq->thread)) { | 212 | if (IS_ERR(mq->thread)) { |
216 | ret = PTR_ERR(mq->thread); | 213 | ret = PTR_ERR(mq->thread); |
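Threading the new subname parameter through mmc_init_queue() gives each partition's queue thread a distinguishable name ("mmcqd/0boot0" rather than several identical "mmcqd/0" threads); the same hunk also simplifies discard_granularity to card->pref_erase << 9. The naming idiom, sketched (the wrapper function is illustrative):

    #include <linux/kthread.h>

    static int mmc_queue_thread(void *data);    /* as in queue.c */

    /* One thread per queue, named "mmcqd/<host><subname>"; a NULL
     * subname degrades to the old "mmcqd/<host>" form. */
    static struct task_struct *start_queue_thread(void *mq, int host_index,
                                                  const char *subname)
    {
            return kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                               host_index, subname ? subname : "");
    }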
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 64e66e0d499..6223ef8dc9c 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h | |||
@@ -19,7 +19,8 @@ struct mmc_queue { | |||
19 | unsigned int bounce_sg_len; | 19 | unsigned int bounce_sg_len; |
20 | }; | 20 | }; |
21 | 21 | ||
22 | extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *); | 22 | extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, |
23 | const char *); | ||
23 | extern void mmc_cleanup_queue(struct mmc_queue *); | 24 | extern void mmc_cleanup_queue(struct mmc_queue *); |
24 | extern void mmc_queue_suspend(struct mmc_queue *); | 25 | extern void mmc_queue_suspend(struct mmc_queue *); |
25 | extern void mmc_queue_resume(struct mmc_queue *); | 26 | extern void mmc_queue_resume(struct mmc_queue *); |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 68091dda3f3..7843efe2235 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
@@ -1245,7 +1245,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, | |||
1245 | */ | 1245 | */ |
1246 | timeout_clks <<= 1; | 1246 | timeout_clks <<= 1; |
1247 | timeout_us += (timeout_clks * 1000) / | 1247 | timeout_us += (timeout_clks * 1000) / |
1248 | (card->host->ios.clock / 1000); | 1248 | (mmc_host_clk_rate(card->host) / 1000); |
1249 | 1249 | ||
1250 | erase_timeout = timeout_us / 1000; | 1250 | erase_timeout = timeout_us / 1000; |
1251 | 1251 | ||
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 4d0c15bfa51..262fff01917 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c | |||
@@ -691,15 +691,54 @@ static int mmc_sdio_resume(struct mmc_host *host) | |||
691 | static int mmc_sdio_power_restore(struct mmc_host *host) | 691 | static int mmc_sdio_power_restore(struct mmc_host *host) |
692 | { | 692 | { |
693 | int ret; | 693 | int ret; |
694 | u32 ocr; | ||
694 | 695 | ||
695 | BUG_ON(!host); | 696 | BUG_ON(!host); |
696 | BUG_ON(!host->card); | 697 | BUG_ON(!host->card); |
697 | 698 | ||
698 | mmc_claim_host(host); | 699 | mmc_claim_host(host); |
700 | |||
701 | /* | ||
702 | * Reset the card by performing the same steps that are taken by | ||
703 | * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe. | ||
704 | * | ||
705 | * sdio_reset() is technically not needed. Having just powered up the | ||
706 | * hardware, it should already be in reset state. However, some | ||
707 | * platforms (such as SD8686 on OLPC) do not instantly cut power, | ||
708 | * meaning that a reset is required when restoring power soon after | ||
709 | * powering off. It is harmless in other cases. | ||
710 | * | ||
711 | * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec, | ||
712 | * is not necessary for non-removable cards. However, it is required | ||
713 | * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and | ||
714 | * harmless in other situations. | ||
715 | * | ||
716 | * With these steps taken, mmc_select_voltage() is also required to | ||
717 | * restore the correct voltage setting of the card. | ||
718 | */ | ||
719 | sdio_reset(host); | ||
720 | mmc_go_idle(host); | ||
721 | mmc_send_if_cond(host, host->ocr_avail); | ||
722 | |||
723 | ret = mmc_send_io_op_cond(host, 0, &ocr); | ||
724 | if (ret) | ||
725 | goto out; | ||
726 | |||
727 | if (host->ocr_avail_sdio) | ||
728 | host->ocr_avail = host->ocr_avail_sdio; | ||
729 | |||
730 | host->ocr = mmc_select_voltage(host, ocr & ~0x7F); | ||
731 | if (!host->ocr) { | ||
732 | ret = -EINVAL; | ||
733 | goto out; | ||
734 | } | ||
735 | |||
699 | ret = mmc_sdio_init_card(host, host->ocr, host->card, | 736 | ret = mmc_sdio_init_card(host, host->ocr, host->card, |
700 | mmc_card_keep_power(host)); | 737 | mmc_card_keep_power(host)); |
701 | if (!ret && host->sdio_irqs) | 738 | if (!ret && host->sdio_irqs) |
702 | mmc_signal_sdio_irq(host); | 739 | mmc_signal_sdio_irq(host); |
740 | |||
741 | out: | ||
703 | mmc_release_host(host); | 742 | mmc_release_host(host); |
704 | 743 | ||
705 | return ret; | 744 | return ret; |
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index d29b9c36919..d2565df8a7f 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c | |||
@@ -189,7 +189,7 @@ static int sdio_bus_remove(struct device *dev) | |||
189 | 189 | ||
190 | /* Then undo the runtime PM settings in sdio_bus_probe() */ | 190 | /* Then undo the runtime PM settings in sdio_bus_probe() */ |
191 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) | 191 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) |
192 | pm_runtime_put_noidle(dev); | 192 | pm_runtime_put_sync(dev); |
193 | 193 | ||
194 | out: | 194 | out: |
195 | return ret; | 195 | return ret; |
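The one-line change in sdio_bus_remove() leans on the difference between the two put variants: pm_runtime_put_noidle() only drops the usage count, while pm_runtime_put_sync() also runs the idle path synchronously, so MMC_CAP_POWER_OFF_CARD hosts can actually power the card down at unbind. A sketch of the pairing, assuming the probe side takes its reference with pm_runtime_get_sync():

        /* probe (assumed): pin and resume the card for the bound driver */
        pm_runtime_get_sync(dev);

        /* remove: drop the reference and synchronously run the idle
         * path, allowing the card to be powered off now */
        pm_runtime_put_sync(dev);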
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index e2aecb7f1d5..ab66f2454dc 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c | |||
@@ -25,6 +25,11 @@ | |||
25 | #include <linux/mmc/core.h> | 25 | #include <linux/mmc/core.h> |
26 | #include <linux/mmc/host.h> | 26 | #include <linux/mmc/host.h> |
27 | 27 | ||
28 | /* For archs that don't support NO_IRQ (such as mips), provide a dummy value */ | ||
29 | #ifndef NO_IRQ | ||
30 | #define NO_IRQ 0 | ||
31 | #endif | ||
32 | |||
28 | MODULE_LICENSE("GPL"); | 33 | MODULE_LICENSE("GPL"); |
29 | 34 | ||
30 | enum { | 35 | enum { |
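The fallback keeps interrupt comparisons portable: on architectures that never define NO_IRQ (mips, for one), 0 is the conventional "no interrupt" value. An illustrative consumer, not taken from this file:

        unsigned int irq = irq_of_parse_and_map(np, 0); /* 0 on failure */
        if (irq == NO_IRQ) {
                /* no card-detect line in the device tree: poll instead */
        }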
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 5b2e2155b41..dedf3dab8a3 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
@@ -429,7 +429,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) | |||
429 | return -EINVAL; | 429 | return -EINVAL; |
430 | } | 430 | } |
431 | } | 431 | } |
432 | mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg); | ||
433 | 432 | ||
434 | /* Allow an aux regulator */ | 433 | /* Allow an aux regulator */ |
435 | reg = regulator_get(host->dev, "vmmc_aux"); | 434 | reg = regulator_get(host->dev, "vmmc_aux"); |
@@ -962,7 +961,8 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) | |||
962 | spin_unlock(&host->irq_lock); | 961 | spin_unlock(&host->irq_lock); |
963 | 962 | ||
964 | if (host->use_dma && dma_ch != -1) { | 963 | if (host->use_dma && dma_ch != -1) { |
965 | dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, | 964 | dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, |
965 | host->data->sg_len, | ||
966 | omap_hsmmc_get_dma_dir(host, host->data)); | 966 | omap_hsmmc_get_dma_dir(host, host->data)); |
967 | omap_free_dma(dma_ch); | 967 | omap_free_dma(dma_ch); |
968 | } | 968 | } |
@@ -1346,7 +1346,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) | |||
1346 | return; | 1346 | return; |
1347 | } | 1347 | } |
1348 | 1348 | ||
1349 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, | 1349 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
1350 | omap_hsmmc_get_dma_dir(host, data)); | 1350 | omap_hsmmc_get_dma_dir(host, data)); |
1351 | 1351 | ||
1352 | req_in_progress = host->req_in_progress; | 1352 | req_in_progress = host->req_in_progress; |
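Both omap_hsmmc hunks enforce the same DMA API rule: dma_unmap_sg() must be given the original nents that was passed to dma_map_sg() (data->sg_len), never the possibly smaller merged count that dma_map_sg() returned (which is what host->dma_len holds). A minimal sketch with generic names:

        int mapped = dma_map_sg(dev, data->sg, data->sg_len, dir);
        if (!mapped)
                return -EINVAL;
        /* program the DMA engine using the 'mapped' entries ... */
        dma_unmap_sg(dev, data->sg, data->sg_len, dir); /* original nents */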
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index b3654293017..ce500f03df8 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c | |||
@@ -92,7 +92,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
92 | mmc_data->ocr_mask = p->tmio_ocr_mask; | 92 | mmc_data->ocr_mask = p->tmio_ocr_mask; |
93 | mmc_data->capabilities |= p->tmio_caps; | 93 | mmc_data->capabilities |= p->tmio_caps; |
94 | 94 | ||
95 | if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { | 95 | if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) { |
96 | priv->param_tx.slave_id = p->dma_slave_tx; | 96 | priv->param_tx.slave_id = p->dma_slave_tx; |
97 | priv->param_rx.slave_id = p->dma_slave_rx; | 97 | priv->param_rx.slave_id = p->dma_slave_rx; |
98 | priv->dma_priv.chan_priv_tx = &priv->param_tx; | 98 | priv->dma_priv.chan_priv_tx = &priv->param_tx; |
@@ -165,13 +165,14 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev) | |||
165 | 165 | ||
166 | p->pdata = NULL; | 166 | p->pdata = NULL; |
167 | 167 | ||
168 | tmio_mmc_host_remove(host); | ||
169 | |||
168 | for (i = 0; i < 3; i++) { | 170 | for (i = 0; i < 3; i++) { |
169 | irq = platform_get_irq(pdev, i); | 171 | irq = platform_get_irq(pdev, i); |
170 | if (irq >= 0) | 172 | if (irq >= 0) |
171 | free_irq(irq, host); | 173 | free_irq(irq, host); |
172 | } | 174 | } |
173 | 175 | ||
174 | tmio_mmc_host_remove(host); | ||
175 | clk_disable(priv->clk); | 176 | clk_disable(priv->clk); |
176 | clk_put(priv->clk); | 177 | clk_put(priv->clk); |
177 | kfree(priv); | 178 | kfree(priv); |
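The reorder in sh_mobile_sdhi_remove() assumes that tearing down the MMC host can still require interrupt service (removal-time I/O), so the IRQ lines are released only after the host is gone:

        tmio_mmc_host_remove(host);       /* may still rely on the IRQs */
        for (i = 0; i < 3; i++) {
                irq = platform_get_irq(pdev, i);
                if (irq >= 0)
                        free_irq(irq, host);
        }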
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index ad6347bb02d..0b09e8239aa 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c | |||
@@ -824,8 +824,8 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc) | |||
824 | struct tmio_mmc_host *host = mmc_priv(mmc); | 824 | struct tmio_mmc_host *host = mmc_priv(mmc); |
825 | struct tmio_mmc_data *pdata = host->pdata; | 825 | struct tmio_mmc_data *pdata = host->pdata; |
826 | 826 | ||
827 | return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || | 827 | return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || |
828 | !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); | 828 | (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); |
829 | } | 829 | } |
830 | 830 | ||
831 | static int tmio_mmc_get_cd(struct mmc_host *mmc) | 831 | static int tmio_mmc_get_cd(struct mmc_host *mmc) |
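The inversion restores the ->get_ro() contract, which is to return nonzero only when the card is write-protected. Assuming TMIO_STAT_WRPROTECT is set while the card is writable (the bit reflects an active-low write-protect signal), the fixed expression reads:

        /* writable when write-protect handling is disabled or the
         * "not protected" status bit is set; read-only otherwise */
        return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
                 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));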
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index cbb03305b77..d4455ffbefd 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c | |||
@@ -2096,7 +2096,7 @@ static struct mmc_host_ops vub300_mmc_ops = { | |||
2096 | static int vub300_probe(struct usb_interface *interface, | 2096 | static int vub300_probe(struct usb_interface *interface, |
2097 | const struct usb_device_id *id) | 2097 | const struct usb_device_id *id) |
2098 | { /* NOT irq */ | 2098 | { /* NOT irq */ |
2099 | struct vub300_mmc_host *vub300 = NULL; | 2099 | struct vub300_mmc_host *vub300; |
2100 | struct usb_host_interface *iface_desc; | 2100 | struct usb_host_interface *iface_desc; |
2101 | struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); | 2101 | struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); |
2102 | int i; | 2102 | int i; |
@@ -2118,23 +2118,20 @@ static int vub300_probe(struct usb_interface *interface, | |||
2118 | command_out_urb = usb_alloc_urb(0, GFP_KERNEL); | 2118 | command_out_urb = usb_alloc_urb(0, GFP_KERNEL); |
2119 | if (!command_out_urb) { | 2119 | if (!command_out_urb) { |
2120 | retval = -ENOMEM; | 2120 | retval = -ENOMEM; |
2121 | dev_err(&vub300->udev->dev, | 2121 | dev_err(&udev->dev, "not enough memory for command_out_urb\n"); |
2122 | "not enough memory for the command_out_urb\n"); | ||
2123 | goto error0; | 2122 | goto error0; |
2124 | } | 2123 | } |
2125 | command_res_urb = usb_alloc_urb(0, GFP_KERNEL); | 2124 | command_res_urb = usb_alloc_urb(0, GFP_KERNEL); |
2126 | if (!command_res_urb) { | 2125 | if (!command_res_urb) { |
2127 | retval = -ENOMEM; | 2126 | retval = -ENOMEM; |
2128 | dev_err(&vub300->udev->dev, | 2127 | dev_err(&udev->dev, "not enough memory for command_res_urb\n"); |
2129 | "not enough memory for the command_res_urb\n"); | ||
2130 | goto error1; | 2128 | goto error1; |
2131 | } | 2129 | } |
2132 | /* this also allocates memory for our VUB300 mmc host device */ | 2130 | /* this also allocates memory for our VUB300 mmc host device */ |
2133 | mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev); | 2131 | mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev); |
2134 | if (!mmc) { | 2132 | if (!mmc) { |
2135 | retval = -ENOMEM; | 2133 | retval = -ENOMEM; |
2136 | dev_err(&vub300->udev->dev, | 2134 | dev_err(&udev->dev, "not enough memory for the mmc_host\n"); |
2137 | "not enough memory for the mmc_host\n"); | ||
2138 | goto error4; | 2135 | goto error4; |
2139 | } | 2136 | } |
2140 | /* MMC core transfer sizes tunable parameters */ | 2137 | /* MMC core transfer sizes tunable parameters */ |
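Beyond the message tidy-up, these hunks fix a use-before-allocation: vub300 only becomes valid once mmc_alloc_host() succeeds, so the earlier error paths must log against a pointer that already exists. The shape of the bug:

        struct vub300_mmc_host *vub300;          /* not yet allocated */

        command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!command_out_urb) {
                dev_err(&udev->dev, "no memory\n");  /* udev is valid */
                /* dev_err(&vub300->udev->dev, ...) would dereference
                 * an uninitialized pointer here */
                return -ENOMEM;
        }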
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c index 9046f7b2ed7..59acf9ef78a 100644 --- a/drivers/oprofile/oprofile_perf.c +++ b/drivers/oprofile/oprofile_perf.c | |||
@@ -79,7 +79,7 @@ static int op_create_counter(int cpu, int event) | |||
79 | 79 | ||
80 | pevent = perf_event_create_kernel_counter(&counter_config[event].attr, | 80 | pevent = perf_event_create_kernel_counter(&counter_config[event].attr, |
81 | cpu, NULL, | 81 | cpu, NULL, |
82 | op_overflow_handler); | 82 | op_overflow_handler, NULL); |
83 | 83 | ||
84 | if (IS_ERR(pevent)) | 84 | if (IS_ERR(pevent)) |
85 | return PTR_ERR(pevent); | 85 | return PTR_ERR(pevent); |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 135df164a4c..46767c53917 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -624,7 +624,7 @@ static int pci_pm_prepare(struct device *dev) | |||
624 | * system from the sleep state, we'll have to prevent it from signaling | 624 | * system from the sleep state, we'll have to prevent it from signaling |
625 | * wake-up. | 625 | * wake-up. |
626 | */ | 626 | */ |
627 | pm_runtime_resume(dev); | 627 | pm_runtime_get_sync(dev); |
628 | 628 | ||
629 | if (drv && drv->pm && drv->pm->prepare) | 629 | if (drv && drv->pm && drv->pm->prepare) |
630 | error = drv->pm->prepare(dev); | 630 | error = drv->pm->prepare(dev); |
@@ -638,6 +638,8 @@ static void pci_pm_complete(struct device *dev) | |||
638 | 638 | ||
639 | if (drv && drv->pm && drv->pm->complete) | 639 | if (drv && drv->pm && drv->pm->complete) |
640 | drv->pm->complete(dev); | 640 | drv->pm->complete(dev); |
641 | |||
642 | pm_runtime_put_sync(dev); | ||
641 | } | 643 | } |
642 | 644 | ||
643 | #else /* !CONFIG_PM_SLEEP */ | 645 | #else /* !CONFIG_PM_SLEEP */ |
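pci_pm_prepare() and pci_pm_complete() now bracket the whole system sleep transition with a runtime PM reference, so the device cannot runtime-suspend halfway through. A minimal sketch of the pairing with generic names, not the PCI core code itself:

        static int foo_prepare(struct device *dev)
        {
                pm_runtime_get_sync(dev);  /* resume and pin active */
                return 0;
        }

        static void foo_complete(struct device *dev)
        {
                pm_runtime_put_sync(dev);  /* balance foo_prepare() */
        }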
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 5f10c23dff9..2c5b9b99127 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -3284,7 +3284,7 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, | |||
3284 | * @dev: the PCI device | 3284 | * @dev: the PCI device |
3285 | * @decode: true = enable decoding, false = disable decoding | 3285 | * @decode: true = enable decoding, false = disable decoding |
3286 | * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY | 3286 | * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY |
3287 | * @change_bridge_flags: traverse ancestors and change bridges | 3287 | * @flags: traverse ancestors and change bridges |
3288 | * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE | 3288 | * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE |
3289 | */ | 3289 | */ |
3290 | int pci_set_vga_state(struct pci_dev *dev, bool decode, | 3290 | int pci_set_vga_state(struct pci_dev *dev, bool decode, |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 48849ffdd67..bafb3c3d4a8 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -168,7 +168,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
168 | res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; | 168 | res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; |
169 | if (type == pci_bar_io) { | 169 | if (type == pci_bar_io) { |
170 | l &= PCI_BASE_ADDRESS_IO_MASK; | 170 | l &= PCI_BASE_ADDRESS_IO_MASK; |
171 | mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT; | 171 | mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT; |
172 | } else { | 172 | } else { |
173 | l &= PCI_BASE_ADDRESS_MEM_MASK; | 173 | l &= PCI_BASE_ADDRESS_MEM_MASK; |
174 | mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; | 174 | mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e8a140669f9..02145e9697a 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -2761,6 +2761,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) | |||
2761 | } | 2761 | } |
2762 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); | 2762 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); |
2763 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); | 2763 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); |
2764 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); | ||
2765 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); | ||
2764 | #endif /*CONFIG_MMC_RICOH_MMC*/ | 2766 | #endif /*CONFIG_MMC_RICOH_MMC*/ |
2765 | 2767 | ||
2766 | #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) | 2768 | #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) |
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 4724ba3acf1..b2005b44e4f 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
@@ -149,6 +149,7 @@ static const struct i2c_device_id ds1307_id[] = { | |||
149 | { "ds1340", ds_1340 }, | 149 | { "ds1340", ds_1340 }, |
150 | { "ds3231", ds_3231 }, | 150 | { "ds3231", ds_3231 }, |
151 | { "m41t00", m41t00 }, | 151 | { "m41t00", m41t00 }, |
152 | { "pt7c4338", ds_1307 }, | ||
152 | { "rx8025", rx_8025 }, | 153 | { "rx8025", rx_8025 }, |
153 | { } | 154 | { } |
154 | }; | 155 | }; |
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c index b8bc862903a..efd6066b5cd 100644 --- a/drivers/rtc/rtc-vt8500.c +++ b/drivers/rtc/rtc-vt8500.c | |||
@@ -78,7 +78,6 @@ struct vt8500_rtc { | |||
78 | void __iomem *regbase; | 78 | void __iomem *regbase; |
79 | struct resource *res; | 79 | struct resource *res; |
80 | int irq_alarm; | 80 | int irq_alarm; |
81 | int irq_hz; | ||
82 | struct rtc_device *rtc; | 81 | struct rtc_device *rtc; |
83 | spinlock_t lock; /* Protects this structure */ | 82 | spinlock_t lock; /* Protects this structure */ |
84 | }; | 83 | }; |
@@ -100,10 +99,6 @@ static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id) | |||
100 | if (isr & 1) | 99 | if (isr & 1) |
101 | events |= RTC_AF | RTC_IRQF; | 100 | events |= RTC_AF | RTC_IRQF; |
102 | 101 | ||
103 | /* Only second/minute interrupts are supported */ | ||
104 | if (isr & 2) | ||
105 | events |= RTC_UF | RTC_IRQF; | ||
106 | |||
107 | rtc_update_irq(vt8500_rtc->rtc, 1, events); | 102 | rtc_update_irq(vt8500_rtc->rtc, 1, events); |
108 | 103 | ||
109 | return IRQ_HANDLED; | 104 | return IRQ_HANDLED; |
@@ -199,27 +194,12 @@ static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
199 | return 0; | 194 | return 0; |
200 | } | 195 | } |
201 | 196 | ||
202 | static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled) | ||
203 | { | ||
204 | struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); | ||
205 | unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR); | ||
206 | |||
207 | if (enabled) | ||
208 | tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE; | ||
209 | else | ||
210 | tmp &= ~VT8500_RTC_CR_SM_ENABLE; | ||
211 | |||
212 | writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR); | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static const struct rtc_class_ops vt8500_rtc_ops = { | 197 | static const struct rtc_class_ops vt8500_rtc_ops = { |
217 | .read_time = vt8500_rtc_read_time, | 198 | .read_time = vt8500_rtc_read_time, |
218 | .set_time = vt8500_rtc_set_time, | 199 | .set_time = vt8500_rtc_set_time, |
219 | .read_alarm = vt8500_rtc_read_alarm, | 200 | .read_alarm = vt8500_rtc_read_alarm, |
220 | .set_alarm = vt8500_rtc_set_alarm, | 201 | .set_alarm = vt8500_rtc_set_alarm, |
221 | .alarm_irq_enable = vt8500_alarm_irq_enable, | 202 | .alarm_irq_enable = vt8500_alarm_irq_enable, |
222 | .update_irq_enable = vt8500_update_irq_enable, | ||
223 | }; | 203 | }; |
224 | 204 | ||
225 | static int __devinit vt8500_rtc_probe(struct platform_device *pdev) | 205 | static int __devinit vt8500_rtc_probe(struct platform_device *pdev) |
@@ -248,13 +228,6 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev) | |||
248 | goto err_free; | 228 | goto err_free; |
249 | } | 229 | } |
250 | 230 | ||
251 | vt8500_rtc->irq_hz = platform_get_irq(pdev, 1); | ||
252 | if (vt8500_rtc->irq_hz < 0) { | ||
253 | dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n"); | ||
254 | ret = -ENXIO; | ||
255 | goto err_free; | ||
256 | } | ||
257 | |||
258 | vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start, | 231 | vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start, |
259 | resource_size(vt8500_rtc->res), | 232 | resource_size(vt8500_rtc->res), |
260 | "vt8500-rtc"); | 233 | "vt8500-rtc"); |
@@ -272,9 +245,8 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev) | |||
272 | goto err_release; | 245 | goto err_release; |
273 | } | 246 | } |
274 | 247 | ||
275 | /* Enable the second/minute interrupt generation and enable RTC */ | 248 | /* Enable RTC and set it to 24-hour mode */ |
276 | writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H | 249 | writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H, |
277 | | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC, | ||
278 | vt8500_rtc->regbase + VT8500_RTC_CR); | 250 | vt8500_rtc->regbase + VT8500_RTC_CR); |
279 | 251 | ||
280 | vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev, | 252 | vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev, |
@@ -286,26 +258,16 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev) | |||
286 | goto err_unmap; | 258 | goto err_unmap; |
287 | } | 259 | } |
288 | 260 | ||
289 | ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0, | ||
290 | "rtc 1Hz", vt8500_rtc); | ||
291 | if (ret < 0) { | ||
292 | dev_err(&pdev->dev, "can't get irq %i, err %d\n", | ||
293 | vt8500_rtc->irq_hz, ret); | ||
294 | goto err_unreg; | ||
295 | } | ||
296 | |||
297 | ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0, | 261 | ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0, |
298 | "rtc alarm", vt8500_rtc); | 262 | "rtc alarm", vt8500_rtc); |
299 | if (ret < 0) { | 263 | if (ret < 0) { |
300 | dev_err(&pdev->dev, "can't get irq %i, err %d\n", | 264 | dev_err(&pdev->dev, "can't get irq %i, err %d\n", |
301 | vt8500_rtc->irq_alarm, ret); | 265 | vt8500_rtc->irq_alarm, ret); |
302 | goto err_free_hz; | 266 | goto err_unreg; |
303 | } | 267 | } |
304 | 268 | ||
305 | return 0; | 269 | return 0; |
306 | 270 | ||
307 | err_free_hz: | ||
308 | free_irq(vt8500_rtc->irq_hz, vt8500_rtc); | ||
309 | err_unreg: | 271 | err_unreg: |
310 | rtc_device_unregister(vt8500_rtc->rtc); | 272 | rtc_device_unregister(vt8500_rtc->rtc); |
311 | err_unmap: | 273 | err_unmap: |
@@ -323,7 +285,6 @@ static int __devexit vt8500_rtc_remove(struct platform_device *pdev) | |||
323 | struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev); | 285 | struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev); |
324 | 286 | ||
325 | free_irq(vt8500_rtc->irq_alarm, vt8500_rtc); | 287 | free_irq(vt8500_rtc->irq_alarm, vt8500_rtc); |
326 | free_irq(vt8500_rtc->irq_hz, vt8500_rtc); | ||
327 | 288 | ||
328 | rtc_device_unregister(vt8500_rtc->rtc); | 289 | rtc_device_unregister(vt8500_rtc->rtc); |
329 | 290 | ||
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index dee2a2c909f..70c2e7fa666 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -386,7 +386,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) | |||
386 | */ | 386 | */ |
387 | se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr, | 387 | se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr, |
388 | TMR_LUN_RESET); | 388 | TMR_LUN_RESET); |
389 | if (!se_cmd->se_tmr_req) | 389 | if (IS_ERR(se_cmd->se_tmr_req)) |
390 | goto release; | 390 | goto release; |
391 | /* | 391 | /* |
392 | * Locate the underlying TCM struct se_lun from sc->device->lun | 392 | * Locate the underlying TCM struct se_lun from sc->device->lun |
@@ -1017,6 +1017,7 @@ static int tcm_loop_make_nexus( | |||
1017 | struct se_portal_group *se_tpg; | 1017 | struct se_portal_group *se_tpg; |
1018 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | 1018 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; |
1019 | struct tcm_loop_nexus *tl_nexus; | 1019 | struct tcm_loop_nexus *tl_nexus; |
1020 | int ret = -ENOMEM; | ||
1020 | 1021 | ||
1021 | if (tl_tpg->tl_hba->tl_nexus) { | 1022 | if (tl_tpg->tl_hba->tl_nexus) { |
1022 | printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n"); | 1023 | printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n"); |
@@ -1033,8 +1034,10 @@ static int tcm_loop_make_nexus( | |||
1033 | * Initialize the struct se_session pointer | 1034 | * Initialize the struct se_session pointer |
1034 | */ | 1035 | */ |
1035 | tl_nexus->se_sess = transport_init_session(); | 1036 | tl_nexus->se_sess = transport_init_session(); |
1036 | if (!tl_nexus->se_sess) | 1037 | if (IS_ERR(tl_nexus->se_sess)) { |
1038 | ret = PTR_ERR(tl_nexus->se_sess); | ||
1037 | goto out; | 1039 | goto out; |
1040 | } | ||
1038 | /* | 1041 | /* |
1039 | * Since we are running in 'demo mode' this call will generate a | 1042 | * Since we are running in 'demo mode' this call will generate a |
1040 | * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI | 1043 | * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI |
@@ -1060,7 +1063,7 @@ static int tcm_loop_make_nexus( | |||
1060 | 1063 | ||
1061 | out: | 1064 | out: |
1062 | kfree(tl_nexus); | 1065 | kfree(tl_nexus); |
1063 | return -ENOMEM; | 1066 | return ret; |
1064 | } | 1067 | } |
1065 | 1068 | ||
1066 | static int tcm_loop_drop_nexus( | 1069 | static int tcm_loop_drop_nexus( |
@@ -1140,7 +1143,7 @@ static ssize_t tcm_loop_tpg_store_nexus( | |||
1140 | * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call | 1143 | * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call |
1141 | * tcm_loop_make_nexus() | 1144 | * tcm_loop_make_nexus() |
1142 | */ | 1145 | */ |
1143 | if (strlen(page) > TL_WWN_ADDR_LEN) { | 1146 | if (strlen(page) >= TL_WWN_ADDR_LEN) { |
1144 | printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds" | 1147 | printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds" |
1145 | " max: %d\n", page, TL_WWN_ADDR_LEN); | 1148 | " max: %d\n", page, TL_WWN_ADDR_LEN); |
1146 | return -EINVAL; | 1149 | return -EINVAL; |
@@ -1321,7 +1324,7 @@ struct se_wwn *tcm_loop_make_scsi_hba( | |||
1321 | return ERR_PTR(-EINVAL); | 1324 | return ERR_PTR(-EINVAL); |
1322 | 1325 | ||
1323 | check_len: | 1326 | check_len: |
1324 | if (strlen(name) > TL_WWN_ADDR_LEN) { | 1327 | if (strlen(name) >= TL_WWN_ADDR_LEN) { |
1325 | printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds" | 1328 | printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds" |
1326 | " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), | 1329 | " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), |
1327 | TL_WWN_ADDR_LEN); | 1330 | TL_WWN_ADDR_LEN); |
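The two IS_ERR() conversions align tcm_loop with the ERR_PTR convention: per this series, core_tmr_alloc_req() and transport_init_session() report failure as ERR_PTR(-errno), never NULL, so a NULL test lets an error pointer through. The canonical caller shape:

        sess = transport_init_session();
        if (IS_ERR(sess))
                return PTR_ERR(sess);     /* propagate -ENOMEM etc. */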
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index ee6fad979b5..25c1f49a7d8 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -304,7 +304,7 @@ struct target_fabric_configfs *target_fabric_configfs_init( | |||
304 | printk(KERN_ERR "Unable to locate passed fabric name\n"); | 304 | printk(KERN_ERR "Unable to locate passed fabric name\n"); |
305 | return NULL; | 305 | return NULL; |
306 | } | 306 | } |
307 | if (strlen(name) > TARGET_FABRIC_NAME_SIZE) { | 307 | if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { |
308 | printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" | 308 | printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" |
309 | "_NAME_SIZE\n", name); | 309 | "_NAME_SIZE\n", name); |
310 | return NULL; | 310 | return NULL; |
@@ -312,7 +312,7 @@ struct target_fabric_configfs *target_fabric_configfs_init( | |||
312 | 312 | ||
313 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); | 313 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); |
314 | if (!(tf)) | 314 | if (!(tf)) |
315 | return ERR_PTR(-ENOMEM); | 315 | return NULL; |
316 | 316 | ||
317 | INIT_LIST_HEAD(&tf->tf_list); | 317 | INIT_LIST_HEAD(&tf->tf_list); |
318 | atomic_set(&tf->tf_access_cnt, 0); | 318 | atomic_set(&tf->tf_access_cnt, 0); |
@@ -851,7 +851,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( | |||
851 | return -EOPNOTSUPP; | 851 | return -EOPNOTSUPP; |
852 | } | 852 | } |
853 | 853 | ||
854 | if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) { | 854 | if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { |
855 | printk(KERN_ERR "Emulated VPD Unit Serial exceeds" | 855 | printk(KERN_ERR "Emulated VPD Unit Serial exceeds" |
856 | " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); | 856 | " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); |
857 | return -EOVERFLOW; | 857 | return -EOVERFLOW; |
@@ -917,7 +917,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier( | |||
917 | 917 | ||
918 | transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); | 918 | transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); |
919 | 919 | ||
920 | if ((len + strlen(buf) > PAGE_SIZE)) | 920 | if ((len + strlen(buf) >= PAGE_SIZE)) |
921 | break; | 921 | break; |
922 | 922 | ||
923 | len += sprintf(page+len, "%s", buf); | 923 | len += sprintf(page+len, "%s", buf); |
@@ -962,19 +962,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \ | |||
962 | \ | 962 | \ |
963 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | 963 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ |
964 | transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ | 964 | transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ |
965 | if ((len + strlen(buf) > PAGE_SIZE)) \ | 965 | if ((len + strlen(buf) >= PAGE_SIZE)) \ |
966 | break; \ | 966 | break; \ |
967 | len += sprintf(page+len, "%s", buf); \ | 967 | len += sprintf(page+len, "%s", buf); \ |
968 | \ | 968 | \ |
969 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | 969 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ |
970 | transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ | 970 | transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ |
971 | if ((len + strlen(buf) > PAGE_SIZE)) \ | 971 | if ((len + strlen(buf) >= PAGE_SIZE)) \ |
972 | break; \ | 972 | break; \ |
973 | len += sprintf(page+len, "%s", buf); \ | 973 | len += sprintf(page+len, "%s", buf); \ |
974 | \ | 974 | \ |
975 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | 975 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ |
976 | transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ | 976 | transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ |
977 | if ((len + strlen(buf) > PAGE_SIZE)) \ | 977 | if ((len + strlen(buf) >= PAGE_SIZE)) \ |
978 | break; \ | 978 | break; \ |
979 | len += sprintf(page+len, "%s", buf); \ | 979 | len += sprintf(page+len, "%s", buf); \ |
980 | } \ | 980 | } \ |
@@ -1299,7 +1299,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( | |||
1299 | &i_buf[0] : "", pr_reg->pr_res_key, | 1299 | &i_buf[0] : "", pr_reg->pr_res_key, |
1300 | pr_reg->pr_res_generation); | 1300 | pr_reg->pr_res_generation); |
1301 | 1301 | ||
1302 | if ((len + strlen(buf) > PAGE_SIZE)) | 1302 | if ((len + strlen(buf) >= PAGE_SIZE)) |
1303 | break; | 1303 | break; |
1304 | 1304 | ||
1305 | len += sprintf(page+len, "%s", buf); | 1305 | len += sprintf(page+len, "%s", buf); |
@@ -1496,7 +1496,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1496 | ret = -ENOMEM; | 1496 | ret = -ENOMEM; |
1497 | goto out; | 1497 | goto out; |
1498 | } | 1498 | } |
1499 | if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) { | 1499 | if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { |
1500 | printk(KERN_ERR "APTPL metadata initiator_node=" | 1500 | printk(KERN_ERR "APTPL metadata initiator_node=" |
1501 | " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", | 1501 | " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", |
1502 | PR_APTPL_MAX_IPORT_LEN); | 1502 | PR_APTPL_MAX_IPORT_LEN); |
@@ -1510,7 +1510,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1510 | ret = -ENOMEM; | 1510 | ret = -ENOMEM; |
1511 | goto out; | 1511 | goto out; |
1512 | } | 1512 | } |
1513 | if (strlen(isid) > PR_REG_ISID_LEN) { | 1513 | if (strlen(isid) >= PR_REG_ISID_LEN) { |
1514 | printk(KERN_ERR "APTPL metadata initiator_isid" | 1514 | printk(KERN_ERR "APTPL metadata initiator_isid" |
1515 | "= exceeds PR_REG_ISID_LEN: %d\n", | 1515 | "= exceeds PR_REG_ISID_LEN: %d\n", |
1516 | PR_REG_ISID_LEN); | 1516 | PR_REG_ISID_LEN); |
@@ -1571,7 +1571,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1571 | ret = -ENOMEM; | 1571 | ret = -ENOMEM; |
1572 | goto out; | 1572 | goto out; |
1573 | } | 1573 | } |
1574 | if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) { | 1574 | if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { |
1575 | printk(KERN_ERR "APTPL metadata target_node=" | 1575 | printk(KERN_ERR "APTPL metadata target_node=" |
1576 | " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", | 1576 | " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", |
1577 | PR_APTPL_MAX_TPORT_LEN); | 1577 | PR_APTPL_MAX_TPORT_LEN); |
@@ -3052,7 +3052,7 @@ static struct config_group *target_core_call_addhbatotarget( | |||
3052 | int ret; | 3052 | int ret; |
3053 | 3053 | ||
3054 | memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); | 3054 | memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); |
3055 | if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) { | 3055 | if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { |
3056 | printk(KERN_ERR "Passed *name strlen(): %d exceeds" | 3056 | printk(KERN_ERR "Passed *name strlen(): %d exceeds" |
3057 | " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), | 3057 | " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), |
3058 | TARGET_CORE_NAME_MAX_LEN); | 3058 | TARGET_CORE_NAME_MAX_LEN); |
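Every strlen() bound in this file is tightened the same way: an N-byte buffer holds at most N - 1 characters plus the terminating NUL, so the overflow check has to reject strlen(s) >= N; with '>' the exact-fit case left no room for the terminator. A sketch:

        char buf[TARGET_CORE_NAME_MAX_LEN];

        if (strlen(name) >= sizeof(buf))
                return -EOVERFLOW;         /* name + NUL would not fit */
        snprintf(buf, sizeof(buf), "%s", name);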
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 8407f9ca2b3..ba698ea62bb 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -192,7 +192,7 @@ int transport_get_lun_for_tmr( | |||
192 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | 192 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; |
193 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 193 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
194 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; | 194 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; |
195 | dev = se_tmr->tmr_dev = se_lun->lun_se_dev; | 195 | dev = se_lun->lun_se_dev; |
196 | se_cmd->pr_res_key = deve->pr_res_key; | 196 | se_cmd->pr_res_key = deve->pr_res_key; |
197 | se_cmd->orig_fe_lun = unpacked_lun; | 197 | se_cmd->orig_fe_lun = unpacked_lun; |
198 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 198 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; |
@@ -216,6 +216,7 @@ int transport_get_lun_for_tmr( | |||
216 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 216 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
217 | return -1; | 217 | return -1; |
218 | } | 218 | } |
219 | se_tmr->tmr_dev = dev; | ||
219 | 220 | ||
220 | spin_lock(&dev->se_tmr_lock); | 221 | spin_lock(&dev->se_tmr_lock); |
221 | list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); | 222 | list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); |
@@ -1430,7 +1431,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | |||
1430 | struct se_lun_acl *lacl; | 1431 | struct se_lun_acl *lacl; |
1431 | struct se_node_acl *nacl; | 1432 | struct se_node_acl *nacl; |
1432 | 1433 | ||
1433 | if (strlen(initiatorname) > TRANSPORT_IQN_LEN) { | 1434 | if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { |
1434 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", | 1435 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", |
1435 | TPG_TFO(tpg)->get_fabric_name()); | 1436 | TPG_TFO(tpg)->get_fabric_name()); |
1436 | *ret = -EOVERFLOW; | 1437 | *ret = -EOVERFLOW; |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index a79f518ca6e..b662db3a320 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -1916,7 +1916,7 @@ static int __core_scsi3_update_aptpl_buf( | |||
1916 | pr_reg->pr_res_mapped_lun); | 1916 | pr_reg->pr_res_mapped_lun); |
1917 | } | 1917 | } |
1918 | 1918 | ||
1919 | if ((len + strlen(tmp) > pr_aptpl_buf_len)) { | 1919 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { |
1920 | printk(KERN_ERR "Unable to update renaming" | 1920 | printk(KERN_ERR "Unable to update renaming" |
1921 | " APTPL metadata\n"); | 1921 | " APTPL metadata\n"); |
1922 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 1922 | spin_unlock(&T10_RES(su_dev)->registration_lock); |
@@ -1934,7 +1934,7 @@ static int __core_scsi3_update_aptpl_buf( | |||
1934 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 1934 | TPG_TFO(tpg)->tpg_get_tag(tpg), |
1935 | lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); | 1935 | lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); |
1936 | 1936 | ||
1937 | if ((len + strlen(tmp) > pr_aptpl_buf_len)) { | 1937 | if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { |
1938 | printk(KERN_ERR "Unable to update renaming" | 1938 | printk(KERN_ERR "Unable to update renaming" |
1939 | " APTPL metadata\n"); | 1939 | " APTPL metadata\n"); |
1940 | spin_unlock(&T10_RES(su_dev)->registration_lock); | 1940 | spin_unlock(&T10_RES(su_dev)->registration_lock); |
@@ -1986,7 +1986,7 @@ static int __core_scsi3_write_aptpl_to_file( | |||
1986 | memset(iov, 0, sizeof(struct iovec)); | 1986 | memset(iov, 0, sizeof(struct iovec)); |
1987 | memset(path, 0, 512); | 1987 | memset(path, 0, 512); |
1988 | 1988 | ||
1989 | if (strlen(&wwn->unit_serial[0]) > 512) { | 1989 | if (strlen(&wwn->unit_serial[0]) >= 512) { |
1990 | printk(KERN_ERR "WWN value for struct se_device does not fit" | 1990 | printk(KERN_ERR "WWN value for struct se_device does not fit" |
1991 | " into path buffer\n"); | 1991 | " into path buffer\n"); |
1992 | return -1; | 1992 | return -1; |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 59b8b9c5ad7..179063d81cd 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -75,10 +75,16 @@ void core_tmr_release_req( | |||
75 | { | 75 | { |
76 | struct se_device *dev = tmr->tmr_dev; | 76 | struct se_device *dev = tmr->tmr_dev; |
77 | 77 | ||
78 | if (!dev) { | ||
79 | kmem_cache_free(se_tmr_req_cache, tmr); | ||
80 | return; | ||
81 | } | ||
82 | |||
78 | spin_lock(&dev->se_tmr_lock); | 83 | spin_lock(&dev->se_tmr_lock); |
79 | list_del(&tmr->tmr_list); | 84 | list_del(&tmr->tmr_list); |
80 | kmem_cache_free(se_tmr_req_cache, tmr); | ||
81 | spin_unlock(&dev->se_tmr_lock); | 85 | spin_unlock(&dev->se_tmr_lock); |
86 | |||
87 | kmem_cache_free(se_tmr_req_cache, tmr); | ||
82 | } | 88 | } |
83 | 89 | ||
84 | static void core_tmr_handle_tas_abort( | 90 | static void core_tmr_handle_tas_abort( |
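This pairs with the target_core_device change above: tmr_dev is now set only after the LUN lookup succeeds, so the release path has to tolerate a TMR that was never queued on a device, and the free moves out of the critical section to keep the lock hold time minimal. The fixed flow, annotated:

        if (!dev) {                        /* lookup failed: tmr was
                                            * never put on a list */
                kmem_cache_free(se_tmr_req_cache, tmr);
                return;
        }
        spin_lock(&dev->se_tmr_lock);
        list_del(&tmr->tmr_list);
        spin_unlock(&dev->se_tmr_lock);
        kmem_cache_free(se_tmr_req_cache, tmr);   /* free after unlock */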
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4dafeb8b563..4b9b7169bdd 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -536,13 +536,13 @@ EXPORT_SYMBOL(transport_register_session); | |||
536 | void transport_deregister_session_configfs(struct se_session *se_sess) | 536 | void transport_deregister_session_configfs(struct se_session *se_sess) |
537 | { | 537 | { |
538 | struct se_node_acl *se_nacl; | 538 | struct se_node_acl *se_nacl; |
539 | 539 | unsigned long flags; | |
540 | /* | 540 | /* |
541 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | 541 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session |
542 | */ | 542 | */ |
543 | se_nacl = se_sess->se_node_acl; | 543 | se_nacl = se_sess->se_node_acl; |
544 | if ((se_nacl)) { | 544 | if ((se_nacl)) { |
545 | spin_lock_irq(&se_nacl->nacl_sess_lock); | 545 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); |
546 | list_del(&se_sess->sess_acl_list); | 546 | list_del(&se_sess->sess_acl_list); |
547 | /* | 547 | /* |
548 | * If the session list is empty, then clear the pointer. | 548 | * If the session list is empty, then clear the pointer. |
@@ -556,7 +556,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess) | |||
556 | se_nacl->acl_sess_list.prev, | 556 | se_nacl->acl_sess_list.prev, |
557 | struct se_session, sess_acl_list); | 557 | struct se_session, sess_acl_list); |
558 | } | 558 | } |
559 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | 559 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); |
560 | } | 560 | } |
561 | } | 561 | } |
562 | EXPORT_SYMBOL(transport_deregister_session_configfs); | 562 | EXPORT_SYMBOL(transport_deregister_session_configfs); |
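The irqsave conversion follows the standard rule: a function reachable both with and without local interrupts disabled must save and restore the caller's interrupt state, whereas spin_unlock_irq() would force interrupts back on unconditionally. The annotated pattern:

        unsigned long flags;

        spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
        list_del(&se_sess->sess_acl_list);
        spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);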
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index defff32b788..7b82f1b7fef 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h | |||
@@ -144,7 +144,7 @@ enum ft_cmd_state { | |||
144 | */ | 144 | */ |
145 | struct ft_cmd { | 145 | struct ft_cmd { |
146 | enum ft_cmd_state state; | 146 | enum ft_cmd_state state; |
147 | u16 lun; /* LUN from request */ | 147 | u32 lun; /* LUN from request */ |
148 | struct ft_sess *sess; /* session held for cmd */ | 148 | struct ft_sess *sess; /* session held for cmd */ |
149 | struct fc_seq *seq; /* sequence in exchange mgr */ | 149 | struct fc_seq *seq; /* sequence in exchange mgr */ |
150 | struct se_cmd se_cmd; /* Local TCM I/O descriptor */ | 150 | struct se_cmd se_cmd; /* Local TCM I/O descriptor */ |
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index c056a1132ae..b2a106729d4 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -94,29 +94,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) | |||
94 | 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); | 94 | 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); |
95 | } | 95 | } |
96 | 96 | ||
97 | /* | ||
98 | * Get LUN from CDB. | ||
99 | */ | ||
100 | static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp) | ||
101 | { | ||
102 | u64 lun; | ||
103 | |||
104 | lun = lunp[1]; | ||
105 | switch (lunp[0] >> 6) { | ||
106 | case 0: | ||
107 | break; | ||
108 | case 1: | ||
109 | lun |= (lunp[0] & 0x3f) << 8; | ||
110 | break; | ||
111 | default: | ||
112 | return -1; | ||
113 | } | ||
114 | if (lun >= TRANSPORT_MAX_LUNS_PER_TPG) | ||
115 | return -1; | ||
116 | cmd->lun = lun; | ||
117 | return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun); | ||
118 | } | ||
119 | |||
120 | static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) | 97 | static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) |
121 | { | 98 | { |
122 | struct se_queue_obj *qobj; | 99 | struct se_queue_obj *qobj; |
@@ -418,6 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd) | |||
418 | { | 395 | { |
419 | struct se_tmr_req *tmr; | 396 | struct se_tmr_req *tmr; |
420 | struct fcp_cmnd *fcp; | 397 | struct fcp_cmnd *fcp; |
398 | struct ft_sess *sess; | ||
421 | u8 tm_func; | 399 | u8 tm_func; |
422 | 400 | ||
423 | fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); | 401 | fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); |
@@ -425,13 +403,6 @@ static void ft_send_tm(struct ft_cmd *cmd) | |||
425 | switch (fcp->fc_tm_flags) { | 403 | switch (fcp->fc_tm_flags) { |
426 | case FCP_TMF_LUN_RESET: | 404 | case FCP_TMF_LUN_RESET: |
427 | tm_func = TMR_LUN_RESET; | 405 | tm_func = TMR_LUN_RESET; |
428 | if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) { | ||
429 | ft_dump_cmd(cmd, __func__); | ||
430 | transport_send_check_condition_and_sense(&cmd->se_cmd, | ||
431 | cmd->se_cmd.scsi_sense_reason, 0); | ||
432 | ft_sess_put(cmd->sess); | ||
433 | return; | ||
434 | } | ||
435 | break; | 406 | break; |
436 | case FCP_TMF_TGT_RESET: | 407 | case FCP_TMF_TGT_RESET: |
437 | tm_func = TMR_TARGET_WARM_RESET; | 408 | tm_func = TMR_TARGET_WARM_RESET; |
@@ -463,6 +434,36 @@ static void ft_send_tm(struct ft_cmd *cmd) | |||
463 | return; | 434 | return; |
464 | } | 435 | } |
465 | cmd->se_cmd.se_tmr_req = tmr; | 436 | cmd->se_cmd.se_tmr_req = tmr; |
437 | |||
438 | switch (fcp->fc_tm_flags) { | ||
439 | case FCP_TMF_LUN_RESET: | ||
440 | cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); | ||
441 | if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) { | ||
442 | /* | ||
443 | * Clean up the newly allocated TMR request, since we | ||
444 | * were unable to look up the LUN for it and cannot | ||
445 | * handle the TMR request | ||
446 | */ | ||
447 | FT_TM_DBG("Failed to get LUN for TMR func %d, " | ||
448 | "se_cmd %p, unpacked_lun %d\n", | ||
449 | tm_func, &cmd->se_cmd, cmd->lun); | ||
450 | ft_dump_cmd(cmd, __func__); | ||
451 | sess = cmd->sess; | ||
452 | transport_send_check_condition_and_sense(&cmd->se_cmd, | ||
453 | cmd->se_cmd.scsi_sense_reason, 0); | ||
454 | transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); | ||
455 | ft_sess_put(sess); | ||
456 | return; | ||
457 | } | ||
458 | break; | ||
459 | case FCP_TMF_TGT_RESET: | ||
460 | case FCP_TMF_CLR_TASK_SET: | ||
461 | case FCP_TMF_ABT_TASK_SET: | ||
462 | case FCP_TMF_CLR_ACA: | ||
463 | break; | ||
464 | default: | ||
465 | return; | ||
466 | } | ||
466 | transport_generic_handle_tmr(&cmd->se_cmd); | 467 | transport_generic_handle_tmr(&cmd->se_cmd); |
467 | } | 468 | } |
468 | 469 | ||
@@ -635,7 +636,8 @@ static void ft_send_cmd(struct ft_cmd *cmd) | |||
635 | 636 | ||
636 | fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); | 637 | fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); |
637 | 638 | ||
638 | ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun); | 639 | cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); |
640 | ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun); | ||
639 | if (ret < 0) { | 641 | if (ret < 0) { |
640 | ft_dump_cmd(cmd, __func__); | 642 | ft_dump_cmd(cmd, __func__); |
641 | transport_send_check_condition_and_sense(&cmd->se_cmd, | 643 | transport_send_check_condition_and_sense(&cmd->se_cmd, |
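The hand-rolled two-method LUN decoder is dropped in favour of the SCSI core helper, which handles the full 8-byte LUN structure (hence widening ft_cmd.lun from u16 to u32). Both the command and TMR paths now reduce to:

        cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
        ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);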
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 4c3c0efbe13..8c4a24077d9 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -203,7 +203,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) | |||
203 | /* XXX For now, initiator will retry */ | 203 | /* XXX For now, initiator will retry */ |
204 | if (printk_ratelimit()) | 204 | if (printk_ratelimit()) |
205 | printk(KERN_ERR "%s: Failed to send frame %p, " | 205 | printk(KERN_ERR "%s: Failed to send frame %p, " |
206 | "xid <0x%x>, remaining <0x%x>, " | 206 | "xid <0x%x>, remaining %zu, " |
207 | "lso_max <0x%x>\n", | 207 | "lso_max <0x%x>\n", |
208 | __func__, fp, ep->xid, | 208 | __func__, fp, ep->xid, |
209 | remaining, lport->lso_max); | 209 | remaining, lport->lso_max); |
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index a3bd57f2ea3..7491e21cc6a 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c | |||
@@ -229,7 +229,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, | |||
229 | return NULL; | 229 | return NULL; |
230 | 230 | ||
231 | sess->se_sess = transport_init_session(); | 231 | sess->se_sess = transport_init_session(); |
232 | if (!sess->se_sess) { | 232 | if (IS_ERR(sess->se_sess)) { |
233 | kfree(sess); | 233 | kfree(sess); |
234 | return NULL; | 234 | return NULL; |
235 | } | 235 | } |
@@ -332,7 +332,7 @@ void ft_sess_close(struct se_session *se_sess) | |||
332 | lport = sess->tport->lport; | 332 | lport = sess->tport->lport; |
333 | port_id = sess->port_id; | 333 | port_id = sess->port_id; |
334 | if (port_id == -1) { | 334 | if (port_id == -1) { |
335 | mutex_lock(&ft_lport_lock); | 335 | mutex_unlock(&ft_lport_lock); |
336 | return; | 336 | return; |
337 | } | 337 | } |
338 | FT_SESS_DBG("port_id %x\n", port_id); | 338 | FT_SESS_DBG("port_id %x\n", port_id); |
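A one-character deadlock: the early-return path inside the locked region re-acquired ft_lport_lock instead of releasing it, so the next taker would block forever. The corrected shape:

        mutex_lock(&ft_lport_lock);
        /* ... look up the session ... */
        if (port_id == -1) {
                mutex_unlock(&ft_lport_lock); /* was mutex_lock() */
                return;
        }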
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c index 4b4968a294b..78e98a5cef9 100644 --- a/drivers/tty/serial/8250_pci.c +++ b/drivers/tty/serial/8250_pci.c | |||
@@ -973,7 +973,7 @@ ce4100_serial_setup(struct serial_private *priv, | |||
973 | 973 | ||
974 | static int | 974 | static int |
975 | pci_omegapci_setup(struct serial_private *priv, | 975 | pci_omegapci_setup(struct serial_private *priv, |
976 | struct pciserial_board *board, | 976 | const struct pciserial_board *board, |
977 | struct uart_port *port, int idx) | 977 | struct uart_port *port, int idx) |
978 | { | 978 | { |
979 | return setup_port(priv, port, 2, idx * 8, 0); | 979 | return setup_port(priv, port, 2, idx * 8, 0); |
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c index 1bd28450ca4..a764bf99743 100644 --- a/drivers/tty/serial/mrst_max3110.c +++ b/drivers/tty/serial/mrst_max3110.c | |||
@@ -421,7 +421,6 @@ static int max3110_main_thread(void *_max) | |||
421 | int ret = 0; | 421 | int ret = 0; |
422 | struct circ_buf *xmit = &max->con_xmit; | 422 | struct circ_buf *xmit = &max->con_xmit; |
423 | 423 | ||
424 | init_waitqueue_head(wq); | ||
425 | pr_info(PR_FMT "start main thread\n"); | 424 | pr_info(PR_FMT "start main thread\n"); |
426 | 425 | ||
427 | do { | 426 | do { |
@@ -823,7 +822,7 @@ static int __devinit serial_m3110_probe(struct spi_device *spi) | |||
823 | res = RC_TAG; | 822 | res = RC_TAG; |
824 | ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0); | 823 | ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0); |
825 | if (ret < 0 || res == 0 || res == 0xffff) { | 824 | if (ret < 0 || res == 0 || res == 0xffff) { |
826 | printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)", | 825 | dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)", |
827 | res); | 826 | res); |
828 | ret = -ENODEV; | 827 | ret = -ENODEV; |
829 | goto err_get_page; | 828 | goto err_get_page; |
@@ -838,6 +837,8 @@ static int __devinit serial_m3110_probe(struct spi_device *spi) | |||
838 | max->con_xmit.head = 0; | 837 | max->con_xmit.head = 0; |
839 | max->con_xmit.tail = 0; | 838 | max->con_xmit.tail = 0; |
840 | 839 | ||
840 | init_waitqueue_head(&max->wq); | ||
841 | |||
841 | max->main_thread = kthread_run(max3110_main_thread, | 842 | max->main_thread = kthread_run(max3110_main_thread, |
842 | max, "max3110_main"); | 843 | max, "max3110_main"); |
843 | if (IS_ERR(max->main_thread)) { | 844 | if (IS_ERR(max->main_thread)) { |
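The move closes a startup race: max3110_main_thread() sleeps on max->wq, so the wait queue head must be initialized before kthread_run() lets that thread execute. Probe now does, in order:

        init_waitqueue_head(&max->wq);     /* before any waiter exists */
        max->main_thread = kthread_run(max3110_main_thread,
                                       max, "max3110_main");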
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index e35a17687c0..aa3cc465a60 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev) | |||
375 | * Just re-enable it without affecting the endpoint toggles. | 375 | * Just re-enable it without affecting the endpoint toggles. |
376 | */ | 376 | */ |
377 | usb_enable_interface(udev, intf, false); | 377 | usb_enable_interface(udev, intf, false); |
378 | } else if (!error && !intf->dev.power.in_suspend) { | 378 | } else if (!error && !intf->dev.power.is_prepared) { |
379 | r = usb_set_interface(udev, intf->altsetting[0]. | 379 | r = usb_set_interface(udev, intf->altsetting[0]. |
380 | desc.bInterfaceNumber, 0); | 380 | desc.bInterfaceNumber, 0); |
381 | if (r < 0) | 381 | if (r < 0) |
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf) | |||
960 | } | 960 | } |
961 | 961 | ||
962 | /* Try to rebind the interface */ | 962 | /* Try to rebind the interface */ |
963 | if (!intf->dev.power.in_suspend) { | 963 | if (!intf->dev.power.is_prepared) { |
964 | intf->needs_binding = 0; | 964 | intf->needs_binding = 0; |
965 | rc = device_attach(&intf->dev); | 965 | rc = device_attach(&intf->dev); |
966 | if (rc < 0) | 966 | if (rc < 0) |
@@ -1107,7 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev, | |||
1107 | if (intf->condition == USB_INTERFACE_UNBOUND) { | 1107 | if (intf->condition == USB_INTERFACE_UNBOUND) { |
1108 | 1108 | ||
1109 | /* Carry out a deferred switch to altsetting 0 */ | 1109 | /* Carry out a deferred switch to altsetting 0 */ |
1110 | if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) { | 1110 | if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) { |
1111 | usb_set_interface(udev, intf->altsetting[0]. | 1111 | usb_set_interface(udev, intf->altsetting[0]. |
1112 | desc.bInterfaceNumber, 0); | 1112 | desc.bInterfaceNumber, 0); |
1113 | intf->needs_altsetting0 = 0; | 1113 | intf->needs_altsetting0 = 0; |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 1a2421f908f..610e8e0b04b 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -762,7 +762,19 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, | |||
762 | if (!disk) | 762 | if (!disk) |
763 | return ERR_PTR(-ENXIO); | 763 | return ERR_PTR(-ENXIO); |
764 | 764 | ||
765 | whole = bdget_disk(disk, 0); | 765 | /* |
766 | * Normally, @bdev should equal what's returned from bdget_disk() | ||
767 | * if partno is 0; however, some drivers (floppy) use multiple | ||
768 | * bdev's for the same physical device and @bdev may be one of the | ||
769 | * aliases. Keep @bdev if partno is 0. This means claimer | ||
770 | * tracking is broken for those devices but it has always been that | ||
771 | * way. | ||
772 | */ | ||
773 | if (partno) | ||
774 | whole = bdget_disk(disk, 0); | ||
775 | else | ||
776 | whole = bdgrab(bdev); | ||
777 | |||
766 | module_put(disk->fops->owner); | 778 | module_put(disk->fops->owner); |
767 | put_disk(disk); | 779 | put_disk(disk); |
768 | if (!whole) | 780 | if (!whole) |
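The aliasing rule from the comment, condensed: for partno == 0 the claimed bdev itself stands for the whole device, so bd_start_claiming() just takes a reference on it with bdgrab() instead of looking up a possibly different alias:

        whole = partno ? bdget_disk(disk, 0)  /* look up the real disk */
                       : bdgrab(bdev);        /* bdev IS the whole device */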
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 300628795fd..f30ac05dbda 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -19,7 +19,6 @@ | |||
19 | #ifndef __BTRFS_CTREE__ | 19 | #ifndef __BTRFS_CTREE__ |
20 | #define __BTRFS_CTREE__ | 20 | #define __BTRFS_CTREE__ |
21 | 21 | ||
22 | #include <linux/version.h> | ||
23 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
24 | #include <linux/highmem.h> | 23 | #include <linux/highmem.h> |
25 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index f1cbd028f7b..98c68e658a9 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
@@ -82,19 +82,16 @@ static inline struct btrfs_delayed_root *btrfs_get_delayed_root( | |||
82 | return root->fs_info->delayed_root; | 82 | return root->fs_info->delayed_root; |
83 | } | 83 | } |
84 | 84 | ||
85 | static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( | 85 | static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode) |
86 | struct inode *inode) | ||
87 | { | 86 | { |
88 | struct btrfs_delayed_node *node; | ||
89 | struct btrfs_inode *btrfs_inode = BTRFS_I(inode); | 87 | struct btrfs_inode *btrfs_inode = BTRFS_I(inode); |
90 | struct btrfs_root *root = btrfs_inode->root; | 88 | struct btrfs_root *root = btrfs_inode->root; |
91 | u64 ino = btrfs_ino(inode); | 89 | u64 ino = btrfs_ino(inode); |
92 | int ret; | 90 | struct btrfs_delayed_node *node; |
93 | 91 | ||
94 | again: | ||
95 | node = ACCESS_ONCE(btrfs_inode->delayed_node); | 92 | node = ACCESS_ONCE(btrfs_inode->delayed_node); |
96 | if (node) { | 93 | if (node) { |
97 | atomic_inc(&node->refs); /* can be accessed */ | 94 | atomic_inc(&node->refs); |
98 | return node; | 95 | return node; |
99 | } | 96 | } |
100 | 97 | ||
@@ -102,8 +99,10 @@ again: | |||
102 | node = radix_tree_lookup(&root->delayed_nodes_tree, ino); | 99 | node = radix_tree_lookup(&root->delayed_nodes_tree, ino); |
103 | if (node) { | 100 | if (node) { |
104 | if (btrfs_inode->delayed_node) { | 101 | if (btrfs_inode->delayed_node) { |
102 | atomic_inc(&node->refs); /* can be accessed */ | ||
103 | BUG_ON(btrfs_inode->delayed_node != node); | ||
105 | spin_unlock(&root->inode_lock); | 104 | spin_unlock(&root->inode_lock); |
106 | goto again; | 105 | return node; |
107 | } | 106 | } |
108 | btrfs_inode->delayed_node = node; | 107 | btrfs_inode->delayed_node = node; |
109 | atomic_inc(&node->refs); /* can be accessed */ | 108 | atomic_inc(&node->refs); /* can be accessed */ |
@@ -113,6 +112,23 @@ again: | |||
113 | } | 112 | } |
114 | spin_unlock(&root->inode_lock); | 113 | spin_unlock(&root->inode_lock); |
115 | 114 | ||
115 | return NULL; | ||
116 | } | ||
117 | |||
118 | static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( | ||
119 | struct inode *inode) | ||
120 | { | ||
121 | struct btrfs_delayed_node *node; | ||
122 | struct btrfs_inode *btrfs_inode = BTRFS_I(inode); | ||
123 | struct btrfs_root *root = btrfs_inode->root; | ||
124 | u64 ino = btrfs_ino(inode); | ||
125 | int ret; | ||
126 | |||
127 | again: | ||
128 | node = btrfs_get_delayed_node(inode); | ||
129 | if (node) | ||
130 | return node; | ||
131 | |||
116 | node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); | 132 | node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); |
117 | if (!node) | 133 | if (!node) |
118 | return ERR_PTR(-ENOMEM); | 134 | return ERR_PTR(-ENOMEM); |
@@ -548,19 +564,6 @@ struct btrfs_delayed_item *__btrfs_next_delayed_item( | |||
548 | return next; | 564 | return next; |
549 | } | 565 | } |
550 | 566 | ||
551 | static inline struct btrfs_delayed_node *btrfs_get_delayed_node( | ||
552 | struct inode *inode) | ||
553 | { | ||
554 | struct btrfs_inode *btrfs_inode = BTRFS_I(inode); | ||
555 | struct btrfs_delayed_node *delayed_node; | ||
556 | |||
557 | delayed_node = btrfs_inode->delayed_node; | ||
558 | if (delayed_node) | ||
559 | atomic_inc(&delayed_node->refs); | ||
560 | |||
561 | return delayed_node; | ||
562 | } | ||
563 | |||
564 | static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, | 567 | static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, |
565 | u64 root_id) | 568 | u64 root_id) |
566 | { | 569 | { |
@@ -1404,8 +1407,7 @@ end: | |||
1404 | 1407 | ||
1405 | int btrfs_inode_delayed_dir_index_count(struct inode *inode) | 1408 | int btrfs_inode_delayed_dir_index_count(struct inode *inode) |
1406 | { | 1409 | { |
1407 | struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node; | 1410 | struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); |
1408 | int ret = 0; | ||
1409 | 1411 | ||
1410 | if (!delayed_node) | 1412 | if (!delayed_node) |
1411 | return -ENOENT; | 1413 | return -ENOENT; |
@@ -1415,11 +1417,14 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode) | |||
1415 | * a new directory index is added into the delayed node and index_cnt | 1417 | * a new directory index is added into the delayed node and index_cnt |
1416 | * is updated now. So we needn't lock the delayed node. | 1418 | * is updated now. So we needn't lock the delayed node. |
1417 | */ | 1419 | */ |
1418 | if (!delayed_node->index_cnt) | 1420 | if (!delayed_node->index_cnt) { |
1421 | btrfs_release_delayed_node(delayed_node); | ||
1419 | return -EINVAL; | 1422 | return -EINVAL; |
1423 | } | ||
1420 | 1424 | ||
1421 | BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; | 1425 | BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; |
1422 | return ret; | 1426 | btrfs_release_delayed_node(delayed_node); |
1427 | return 0; | ||
1423 | } | 1428 | } |
1424 | 1429 | ||
1425 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | 1430 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, |
@@ -1613,6 +1618,57 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, | |||
1613 | inode->i_ctime.tv_nsec); | 1618 | inode->i_ctime.tv_nsec); |
1614 | } | 1619 | } |
1615 | 1620 | ||
1621 | int btrfs_fill_inode(struct inode *inode, u32 *rdev) | ||
1622 | { | ||
1623 | struct btrfs_delayed_node *delayed_node; | ||
1624 | struct btrfs_inode_item *inode_item; | ||
1625 | struct btrfs_timespec *tspec; | ||
1626 | |||
1627 | delayed_node = btrfs_get_delayed_node(inode); | ||
1628 | if (!delayed_node) | ||
1629 | return -ENOENT; | ||
1630 | |||
1631 | mutex_lock(&delayed_node->mutex); | ||
1632 | if (!delayed_node->inode_dirty) { | ||
1633 | mutex_unlock(&delayed_node->mutex); | ||
1634 | btrfs_release_delayed_node(delayed_node); | ||
1635 | return -ENOENT; | ||
1636 | } | ||
1637 | |||
1638 | inode_item = &delayed_node->inode_item; | ||
1639 | |||
1640 | inode->i_uid = btrfs_stack_inode_uid(inode_item); | ||
1641 | inode->i_gid = btrfs_stack_inode_gid(inode_item); | ||
1642 | btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); | ||
1643 | inode->i_mode = btrfs_stack_inode_mode(inode_item); | ||
1644 | inode->i_nlink = btrfs_stack_inode_nlink(inode_item); | ||
1645 | inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); | ||
1646 | BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item); | ||
1647 | BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item); | ||
1648 | inode->i_rdev = 0; | ||
1649 | *rdev = btrfs_stack_inode_rdev(inode_item); | ||
1650 | BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item); | ||
1651 | |||
1652 | tspec = btrfs_inode_atime(inode_item); | ||
1653 | inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec); | ||
1654 | inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec); | ||
1655 | |||
1656 | tspec = btrfs_inode_mtime(inode_item); | ||
1657 | inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec); | ||
1658 | inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec); | ||
1659 | |||
1660 | tspec = btrfs_inode_ctime(inode_item); | ||
1661 | inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec); | ||
1662 | inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec); | ||
1663 | |||
1664 | inode->i_generation = BTRFS_I(inode)->generation; | ||
1665 | BTRFS_I(inode)->index_cnt = (u64)-1; | ||
1666 | |||
1667 | mutex_unlock(&delayed_node->mutex); | ||
1668 | btrfs_release_delayed_node(delayed_node); | ||
1669 | return 0; | ||
1670 | } | ||
1671 | |||
1616 | int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, | 1672 | int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, |
1617 | struct btrfs_root *root, struct inode *inode) | 1673 | struct btrfs_root *root, struct inode *inode) |
1618 | { | 1674 | { |
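[Note on the delayed-inode.c hunks above] The old combined helper is split in two: btrfs_get_delayed_node() is now a standalone lookup that checks the inode's cached pointer first, then the per-root radix tree under inode_lock, taking a reference on whichever node it finds; btrfs_get_or_create_delayed_node() simply loops over that lookup before allocating. The practical effect shows in btrfs_inode_delayed_dir_index_count(), which previously read BTRFS_I(inode)->delayed_node without holding a reference; every caller of the lookup must now pair it with btrfs_release_delayed_node() on each exit path. A minimal sketch of that pairing, condensed from the hunk (not a drop-in copy):

    int index_count_sketch(struct inode *inode)
    {
            struct btrfs_delayed_node *dn = btrfs_get_delayed_node(inode);

            if (!dn)
                    return -ENOENT;                 /* no delayed state cached */
            if (!dn->index_cnt) {
                    btrfs_release_delayed_node(dn); /* drop ref on *every* path */
                    return -EINVAL;
            }
            BTRFS_I(inode)->index_cnt = dn->index_cnt;
            btrfs_release_delayed_node(dn);
            return 0;
    }

The new btrfs_fill_inode() uses the same lookup to copy a still-dirty in-memory inode_item into the VFS inode, which is what lets btrfs_read_locked_inode() further below skip parsing the on-disk item.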
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index d1a6a2915c6..8d27af4bd8b 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h | |||
@@ -119,6 +119,7 @@ void btrfs_kill_delayed_inode_items(struct inode *inode); | |||
119 | 119 | ||
120 | int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, | 120 | int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, |
121 | struct btrfs_root *root, struct inode *inode); | 121 | struct btrfs_root *root, struct inode *inode); |
122 | int btrfs_fill_inode(struct inode *inode, u32 *rdev); | ||
122 | 123 | ||
123 | /* Used for drop dead root */ | 124 | /* Used for drop dead root */ |
124 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); | 125 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1f61bf5b496..71cd456fdb6 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -4842,7 +4842,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |||
4842 | u64 num_bytes, u64 empty_size, | 4842 | u64 num_bytes, u64 empty_size, |
4843 | u64 search_start, u64 search_end, | 4843 | u64 search_start, u64 search_end, |
4844 | u64 hint_byte, struct btrfs_key *ins, | 4844 | u64 hint_byte, struct btrfs_key *ins, |
4845 | int data) | 4845 | u64 data) |
4846 | { | 4846 | { |
4847 | int ret = 0; | 4847 | int ret = 0; |
4848 | struct btrfs_root *root = orig_root->fs_info->extent_root; | 4848 | struct btrfs_root *root = orig_root->fs_info->extent_root; |
@@ -4869,7 +4869,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |||
4869 | 4869 | ||
4870 | space_info = __find_space_info(root->fs_info, data); | 4870 | space_info = __find_space_info(root->fs_info, data); |
4871 | if (!space_info) { | 4871 | if (!space_info) { |
4872 | printk(KERN_ERR "No space info for %d\n", data); | 4872 | printk(KERN_ERR "No space info for %llu\n", data); |
4873 | return -ENOSPC; | 4873 | return -ENOSPC; |
4874 | } | 4874 | } |
4875 | 4875 | ||
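The find_free_extent() hunk widens the allocation-profile argument from int to u64: the profile is a mask of BTRFS_BLOCK_GROUP_* flags, which is 64 bits wide, so an int parameter could silently truncate it. The printk specifier is updated in the same stroke, since a format/argument size mismatch is undefined behaviour. A hedged user-space illustration of the same rule:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t flags = 1ULL << 36;    /* this bit would vanish in an int */

            /* printf("%d", flags) passes a 64-bit value where a 32-bit one
             * is expected: undefined behaviour. Match the specifier instead: */
            printf("No space info for %llu\n", (unsigned long long)flags);
            return 0;
    }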
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 9f985a42987..bf0d61567f3 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -1893,9 +1893,12 @@ void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl) | |||
1893 | 1893 | ||
1894 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { | 1894 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { |
1895 | info = rb_entry(node, struct btrfs_free_space, offset_index); | 1895 | info = rb_entry(node, struct btrfs_free_space, offset_index); |
1896 | unlink_free_space(ctl, info); | 1896 | if (!info->bitmap) { |
1897 | kfree(info->bitmap); | 1897 | unlink_free_space(ctl, info); |
1898 | kmem_cache_free(btrfs_free_space_cachep, info); | 1898 | kmem_cache_free(btrfs_free_space_cachep, info); |
1899 | } else { | ||
1900 | free_bitmap(ctl, info); | ||
1901 | } | ||
1899 | if (need_resched()) { | 1902 | if (need_resched()) { |
1900 | spin_unlock(&ctl->tree_lock); | 1903 | spin_unlock(&ctl->tree_lock); |
1901 | cond_resched(); | 1904 | cond_resched(); |
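In __btrfs_remove_free_space_cache_locked() the old loop tore every entry down the same way, unlink_free_space() plus a bare kfree() of the bitmap, which bypassed free_bitmap() and the separate accounting the free-space ctl keeps for bitmap entries. The fix dispatches on the entry kind so each variant goes through its matching destructor; the shape, condensed from the hunk:

    if (!info->bitmap) {
            unlink_free_space(ctl, info);                   /* extent entry */
            kmem_cache_free(btrfs_free_space_cachep, info);
    } else {
            free_bitmap(ctl, info);   /* unlinks and also updates the
                                         ctl's bitmap bookkeeping        */
    }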
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0a9b10c5b0a..d340f63d8f0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -2509,6 +2509,11 @@ static void btrfs_read_locked_inode(struct inode *inode) | |||
2509 | int maybe_acls; | 2509 | int maybe_acls; |
2510 | u32 rdev; | 2510 | u32 rdev; |
2511 | int ret; | 2511 | int ret; |
2512 | bool filled = false; | ||
2513 | |||
2514 | ret = btrfs_fill_inode(inode, &rdev); | ||
2515 | if (!ret) | ||
2516 | filled = true; | ||
2512 | 2517 | ||
2513 | path = btrfs_alloc_path(); | 2518 | path = btrfs_alloc_path(); |
2514 | BUG_ON(!path); | 2519 | BUG_ON(!path); |
@@ -2520,6 +2525,10 @@ static void btrfs_read_locked_inode(struct inode *inode) | |||
2520 | goto make_bad; | 2525 | goto make_bad; |
2521 | 2526 | ||
2522 | leaf = path->nodes[0]; | 2527 | leaf = path->nodes[0]; |
2528 | |||
2529 | if (filled) | ||
2530 | goto cache_acl; | ||
2531 | |||
2523 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | 2532 | inode_item = btrfs_item_ptr(leaf, path->slots[0], |
2524 | struct btrfs_inode_item); | 2533 | struct btrfs_inode_item); |
2525 | if (!leaf->map_token) | 2534 | if (!leaf->map_token) |
@@ -2556,7 +2565,7 @@ static void btrfs_read_locked_inode(struct inode *inode) | |||
2556 | 2565 | ||
2557 | BTRFS_I(inode)->index_cnt = (u64)-1; | 2566 | BTRFS_I(inode)->index_cnt = (u64)-1; |
2558 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); | 2567 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); |
2559 | 2568 | cache_acl: | |
2560 | /* | 2569 | /* |
2561 | * try to precache a NULL acl entry for files that don't have | 2570 | * try to precache a NULL acl entry for files that don't have |
2562 | * any xattrs or acls | 2571 | * any xattrs or acls |
@@ -2572,7 +2581,6 @@ static void btrfs_read_locked_inode(struct inode *inode) | |||
2572 | } | 2581 | } |
2573 | 2582 | ||
2574 | btrfs_free_path(path); | 2583 | btrfs_free_path(path); |
2575 | inode_item = NULL; | ||
2576 | 2584 | ||
2577 | switch (inode->i_mode & S_IFMT) { | 2585 | switch (inode->i_mode & S_IFMT) { |
2578 | case S_IFREG: | 2586 | case S_IFREG: |
@@ -4520,6 +4528,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
4520 | inode_tree_add(inode); | 4528 | inode_tree_add(inode); |
4521 | 4529 | ||
4522 | trace_btrfs_inode_new(inode); | 4530 | trace_btrfs_inode_new(inode); |
4531 | btrfs_set_inode_last_trans(trans, inode); | ||
4523 | 4532 | ||
4524 | return inode; | 4533 | return inode; |
4525 | fail: | 4534 | fail: |
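btrfs_read_locked_inode() now tries the delayed node first: if btrfs_fill_inode() succeeds, the inode's fields came from the still-dirty in-memory copy and the on-disk inode_item parse is skipped via the new cache_acl label (the tree search still runs, since the leaf is needed for the ACL probe). The control flow, sketched from the hunks:

    bool filled = false;

    if (btrfs_fill_inode(inode, &rdev) == 0)
            filled = true;          /* delayed node had a dirty copy    */

    /* ... btrfs_lookup_inode() and leaf setup as before ... */
    if (filled)
            goto cache_acl;         /* skip re-parsing the on-disk item */

The btrfs_new_inode() hunk looks like related bookkeeping: recording the creating transaction in last_trans keeps the freshly created inode consistent with, among other things, the fsync/log path's staleness checks.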
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 53ed1ad2c11..f66cc162515 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -156,6 +156,6 @@ config CIFS_ACL | |||
156 | 156 | ||
157 | config CIFS_NFSD_EXPORT | 157 | config CIFS_NFSD_EXPORT |
158 | bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)" | 158 | bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)" |
159 | depends on CIFS && EXPERIMENTAL | 159 | depends on CIFS && EXPERIMENTAL && BROKEN |
160 | help | 160 | help |
161 | Allows NFS server to export a CIFS mounted share (nfsd over cifs) | 161 | Allows NFS server to export a CIFS mounted share (nfsd over cifs) |
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index ffb1459dc6e..7260e11e21f 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h | |||
@@ -42,6 +42,7 @@ | |||
42 | #define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ | 42 | #define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ |
43 | #define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */ | 43 | #define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */ |
44 | #define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */ | 44 | #define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */ |
45 | #define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */ | ||
45 | 46 | ||
46 | struct cifs_sb_info { | 47 | struct cifs_sb_info { |
47 | struct rb_root tlink_tree; | 48 | struct rb_root tlink_tree; |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 2f0c58646c1..35f9154615f 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -104,8 +104,7 @@ cifs_sb_deactive(struct super_block *sb) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | static int | 106 | static int |
107 | cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, | 107 | cifs_read_super(struct super_block *sb) |
108 | const char *devname, int silent) | ||
109 | { | 108 | { |
110 | struct inode *inode; | 109 | struct inode *inode; |
111 | struct cifs_sb_info *cifs_sb; | 110 | struct cifs_sb_info *cifs_sb; |
@@ -113,22 +112,16 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, | |||
113 | 112 | ||
114 | cifs_sb = CIFS_SB(sb); | 113 | cifs_sb = CIFS_SB(sb); |
115 | 114 | ||
116 | spin_lock_init(&cifs_sb->tlink_tree_lock); | 115 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL) |
117 | cifs_sb->tlink_tree = RB_ROOT; | 116 | sb->s_flags |= MS_POSIXACL; |
118 | 117 | ||
119 | rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); | 118 | if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES) |
120 | if (rc) | 119 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
121 | return rc; | 120 | else |
122 | 121 | sb->s_maxbytes = MAX_NON_LFS; | |
123 | cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; | ||
124 | 122 | ||
125 | rc = cifs_mount(sb, cifs_sb, volume_info, devname); | 123 | /* BB FIXME fix time_gran to be larger for LANMAN sessions */ |
126 | 124 | sb->s_time_gran = 100; | |
127 | if (rc) { | ||
128 | if (!silent) | ||
129 | cERROR(1, "cifs_mount failed w/return code = %d", rc); | ||
130 | goto out_mount_failed; | ||
131 | } | ||
132 | 125 | ||
133 | sb->s_magic = CIFS_MAGIC_NUMBER; | 126 | sb->s_magic = CIFS_MAGIC_NUMBER; |
134 | sb->s_op = &cifs_super_ops; | 127 | sb->s_op = &cifs_super_ops; |
@@ -170,37 +163,14 @@ out_no_root: | |||
170 | if (inode) | 163 | if (inode) |
171 | iput(inode); | 164 | iput(inode); |
172 | 165 | ||
173 | cifs_umount(sb, cifs_sb); | ||
174 | |||
175 | out_mount_failed: | ||
176 | bdi_destroy(&cifs_sb->bdi); | ||
177 | return rc; | 166 | return rc; |
178 | } | 167 | } |
179 | 168 | ||
180 | static void | 169 | static void cifs_kill_sb(struct super_block *sb) |
181 | cifs_put_super(struct super_block *sb) | ||
182 | { | 170 | { |
183 | int rc = 0; | 171 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
184 | struct cifs_sb_info *cifs_sb; | 172 | kill_anon_super(sb); |
185 | 173 | cifs_umount(cifs_sb); | |
186 | cFYI(1, "In cifs_put_super"); | ||
187 | cifs_sb = CIFS_SB(sb); | ||
188 | if (cifs_sb == NULL) { | ||
189 | cFYI(1, "Empty cifs superblock info passed to unmount"); | ||
190 | return; | ||
191 | } | ||
192 | |||
193 | rc = cifs_umount(sb, cifs_sb); | ||
194 | if (rc) | ||
195 | cERROR(1, "cifs_umount failed with return code %d", rc); | ||
196 | if (cifs_sb->mountdata) { | ||
197 | kfree(cifs_sb->mountdata); | ||
198 | cifs_sb->mountdata = NULL; | ||
199 | } | ||
200 | |||
201 | unload_nls(cifs_sb->local_nls); | ||
202 | bdi_destroy(&cifs_sb->bdi); | ||
203 | kfree(cifs_sb); | ||
204 | } | 174 | } |
205 | 175 | ||
206 | static int | 176 | static int |
@@ -548,7 +518,6 @@ static int cifs_drop_inode(struct inode *inode) | |||
548 | } | 518 | } |
549 | 519 | ||
550 | static const struct super_operations cifs_super_ops = { | 520 | static const struct super_operations cifs_super_ops = { |
551 | .put_super = cifs_put_super, | ||
552 | .statfs = cifs_statfs, | 521 | .statfs = cifs_statfs, |
553 | .alloc_inode = cifs_alloc_inode, | 522 | .alloc_inode = cifs_alloc_inode, |
554 | .destroy_inode = cifs_destroy_inode, | 523 | .destroy_inode = cifs_destroy_inode, |
@@ -585,7 +554,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) | |||
585 | full_path = cifs_build_path_to_root(vol, cifs_sb, | 554 | full_path = cifs_build_path_to_root(vol, cifs_sb, |
586 | cifs_sb_master_tcon(cifs_sb)); | 555 | cifs_sb_master_tcon(cifs_sb)); |
587 | if (full_path == NULL) | 556 | if (full_path == NULL) |
588 | return NULL; | 557 | return ERR_PTR(-ENOMEM); |
589 | 558 | ||
590 | cFYI(1, "Get root dentry for %s", full_path); | 559 | cFYI(1, "Get root dentry for %s", full_path); |
591 | 560 | ||
@@ -614,7 +583,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) | |||
614 | dchild = d_alloc(dparent, &name); | 583 | dchild = d_alloc(dparent, &name); |
615 | if (dchild == NULL) { | 584 | if (dchild == NULL) { |
616 | dput(dparent); | 585 | dput(dparent); |
617 | dparent = NULL; | 586 | dparent = ERR_PTR(-ENOMEM); |
618 | goto out; | 587 | goto out; |
619 | } | 588 | } |
620 | } | 589 | } |
@@ -632,7 +601,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) | |||
632 | if (rc) { | 601 | if (rc) { |
633 | dput(dchild); | 602 | dput(dchild); |
634 | dput(dparent); | 603 | dput(dparent); |
635 | dparent = NULL; | 604 | dparent = ERR_PTR(rc); |
636 | goto out; | 605 | goto out; |
637 | } | 606 | } |
638 | alias = d_materialise_unique(dchild, inode); | 607 | alias = d_materialise_unique(dchild, inode); |
@@ -640,7 +609,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) | |||
640 | dput(dchild); | 609 | dput(dchild); |
641 | if (IS_ERR(alias)) { | 610 | if (IS_ERR(alias)) { |
642 | dput(dparent); | 611 | dput(dparent); |
643 | dparent = NULL; | 612 | dparent = ERR_PTR(-EINVAL); /* XXX */ |
644 | goto out; | 613 | goto out; |
645 | } | 614 | } |
646 | dchild = alias; | 615 | dchild = alias; |
@@ -660,6 +629,13 @@ out: | |||
660 | return dparent; | 629 | return dparent; |
661 | } | 630 | } |
662 | 631 | ||
632 | static int cifs_set_super(struct super_block *sb, void *data) | ||
633 | { | ||
634 | struct cifs_mnt_data *mnt_data = data; | ||
635 | sb->s_fs_info = mnt_data->cifs_sb; | ||
636 | return set_anon_super(sb, NULL); | ||
637 | } | ||
638 | |||
663 | static struct dentry * | 639 | static struct dentry * |
664 | cifs_do_mount(struct file_system_type *fs_type, | 640 | cifs_do_mount(struct file_system_type *fs_type, |
665 | int flags, const char *dev_name, void *data) | 641 | int flags, const char *dev_name, void *data) |
@@ -680,75 +656,73 @@ cifs_do_mount(struct file_system_type *fs_type, | |||
680 | cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); | 656 | cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); |
681 | if (cifs_sb == NULL) { | 657 | if (cifs_sb == NULL) { |
682 | root = ERR_PTR(-ENOMEM); | 658 | root = ERR_PTR(-ENOMEM); |
683 | goto out; | 659 | goto out_nls; |
660 | } | ||
661 | |||
662 | cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); | ||
663 | if (cifs_sb->mountdata == NULL) { | ||
664 | root = ERR_PTR(-ENOMEM); | ||
665 | goto out_cifs_sb; | ||
684 | } | 666 | } |
685 | 667 | ||
686 | cifs_setup_cifs_sb(volume_info, cifs_sb); | 668 | cifs_setup_cifs_sb(volume_info, cifs_sb); |
687 | 669 | ||
670 | rc = cifs_mount(cifs_sb, volume_info); | ||
671 | if (rc) { | ||
672 | if (!(flags & MS_SILENT)) | ||
673 | cERROR(1, "cifs_mount failed w/return code = %d", rc); | ||
674 | root = ERR_PTR(rc); | ||
675 | goto out_mountdata; | ||
676 | } | ||
677 | |||
688 | mnt_data.vol = volume_info; | 678 | mnt_data.vol = volume_info; |
689 | mnt_data.cifs_sb = cifs_sb; | 679 | mnt_data.cifs_sb = cifs_sb; |
690 | mnt_data.flags = flags; | 680 | mnt_data.flags = flags; |
691 | 681 | ||
692 | sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data); | 682 | sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data); |
693 | if (IS_ERR(sb)) { | 683 | if (IS_ERR(sb)) { |
694 | root = ERR_CAST(sb); | 684 | root = ERR_CAST(sb); |
695 | goto out_cifs_sb; | 685 | cifs_umount(cifs_sb); |
686 | goto out; | ||
696 | } | 687 | } |
697 | 688 | ||
698 | if (sb->s_fs_info) { | 689 | if (sb->s_root) { |
699 | cFYI(1, "Use existing superblock"); | 690 | cFYI(1, "Use existing superblock"); |
700 | goto out_shared; | 691 | cifs_umount(cifs_sb); |
701 | } | 692 | } else { |
702 | 693 | sb->s_flags = flags; | |
703 | /* | 694 | /* BB should we make this contingent on mount parm? */ |
704 | * Copy mount params for use in submounts. Better to do | 695 | sb->s_flags |= MS_NODIRATIME | MS_NOATIME; |
705 | * the copy here and deal with the error before cleanup gets | 696 | |
706 | * complicated post-mount. | 697 | rc = cifs_read_super(sb); |
707 | */ | 698 | if (rc) { |
708 | cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); | 699 | root = ERR_PTR(rc); |
709 | if (cifs_sb->mountdata == NULL) { | 700 | goto out_super; |
710 | root = ERR_PTR(-ENOMEM); | 701 | } |
711 | goto out_super; | ||
712 | } | ||
713 | |||
714 | sb->s_flags = flags; | ||
715 | /* BB should we make this contingent on mount parm? */ | ||
716 | sb->s_flags |= MS_NODIRATIME | MS_NOATIME; | ||
717 | sb->s_fs_info = cifs_sb; | ||
718 | 702 | ||
719 | rc = cifs_read_super(sb, volume_info, dev_name, | 703 | sb->s_flags |= MS_ACTIVE; |
720 | flags & MS_SILENT ? 1 : 0); | ||
721 | if (rc) { | ||
722 | root = ERR_PTR(rc); | ||
723 | goto out_super; | ||
724 | } | 704 | } |
725 | 705 | ||
726 | sb->s_flags |= MS_ACTIVE; | ||
727 | |||
728 | root = cifs_get_root(volume_info, sb); | 706 | root = cifs_get_root(volume_info, sb); |
729 | if (root == NULL) | 707 | if (IS_ERR(root)) |
730 | goto out_super; | 708 | goto out_super; |
731 | 709 | ||
732 | cFYI(1, "dentry root is: %p", root); | 710 | cFYI(1, "dentry root is: %p", root); |
733 | goto out; | 711 | goto out; |
734 | 712 | ||
735 | out_shared: | ||
736 | root = cifs_get_root(volume_info, sb); | ||
737 | if (root) | ||
738 | cFYI(1, "dentry root is: %p", root); | ||
739 | goto out; | ||
740 | |||
741 | out_super: | 713 | out_super: |
742 | kfree(cifs_sb->mountdata); | ||
743 | deactivate_locked_super(sb); | 714 | deactivate_locked_super(sb); |
744 | |||
745 | out_cifs_sb: | ||
746 | unload_nls(cifs_sb->local_nls); | ||
747 | kfree(cifs_sb); | ||
748 | |||
749 | out: | 715 | out: |
750 | cifs_cleanup_volume_info(&volume_info); | 716 | cifs_cleanup_volume_info(&volume_info); |
751 | return root; | 717 | return root; |
718 | |||
719 | out_mountdata: | ||
720 | kfree(cifs_sb->mountdata); | ||
721 | out_cifs_sb: | ||
722 | kfree(cifs_sb); | ||
723 | out_nls: | ||
724 | unload_nls(volume_info->local_nls); | ||
725 | goto out; | ||
752 | } | 726 | } |
753 | 727 | ||
754 | static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | 728 | static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, |
@@ -837,7 +811,7 @@ struct file_system_type cifs_fs_type = { | |||
837 | .owner = THIS_MODULE, | 811 | .owner = THIS_MODULE, |
838 | .name = "cifs", | 812 | .name = "cifs", |
839 | .mount = cifs_do_mount, | 813 | .mount = cifs_do_mount, |
840 | .kill_sb = kill_anon_super, | 814 | .kill_sb = cifs_kill_sb, |
841 | /* .fs_flags */ | 815 | /* .fs_flags */ |
842 | }; | 816 | }; |
843 | const struct inode_operations cifs_dir_inode_ops = { | 817 | const struct inode_operations cifs_dir_inode_ops = { |
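Taken together, the cifsfs.c hunks invert the mount order. cifs_mount() now runs against a bare cifs_sb_info before any superblock exists; cifs_set_super() attaches that cifs_sb as s_fs_info only when sget() allocates a fresh sb; and teardown funnels through the new cifs_kill_sb() (kill_anon_super() followed by cifs_umount(), which per the connect.c hunks below now frees the bdi, mountdata, nls table, and the cifs_sb itself). cifs_get_root() also switches from NULL to ERR_PTR() returns so a real errno propagates. A condensed sketch of the resulting cifs_do_mount() flow, assembled from the hunks with error labels elided:

    cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
    cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
    cifs_setup_cifs_sb(volume_info, cifs_sb);

    rc = cifs_mount(cifs_sb, volume_info);       /* no sb involved yet   */
    if (rc)
            return ERR_PTR(rc);

    sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data);
    if (IS_ERR(sb)) {
            cifs_umount(cifs_sb);                /* undo the early mount */
            return ERR_CAST(sb);
    }

    if (sb->s_root) {                            /* raced with another   */
            cifs_umount(cifs_sb);                /* mount: reuse its sb  */
    } else {
            rc = cifs_read_super(sb);            /* fill the new sb      */
            if (rc)
                    goto out_super;              /* deactivates the sb   */
            sb->s_flags |= MS_ACTIVE;
    }
    return cifs_get_root(volume_info, sb);       /* dentry or ERR_PTR()  */

One wrinkle worth noting: on the existing-superblock path, cifs_umount() drops the duplicate connection that the early cifs_mount() set up, so the shared sb keeps a single set of tcon references.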
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 953f84413c7..257f312ede4 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -157,9 +157,8 @@ extern int cifs_match_super(struct super_block *, void *); | |||
157 | extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info); | 157 | extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info); |
158 | extern int cifs_setup_volume_info(struct smb_vol **pvolume_info, | 158 | extern int cifs_setup_volume_info(struct smb_vol **pvolume_info, |
159 | char *mount_data, const char *devname); | 159 | char *mount_data, const char *devname); |
160 | extern int cifs_mount(struct super_block *, struct cifs_sb_info *, | 160 | extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *); |
161 | struct smb_vol *, const char *); | 161 | extern void cifs_umount(struct cifs_sb_info *); |
162 | extern int cifs_umount(struct super_block *, struct cifs_sb_info *); | ||
163 | extern void cifs_dfs_release_automount_timer(void); | 162 | extern void cifs_dfs_release_automount_timer(void); |
164 | void cifs_proc_init(void); | 163 | void cifs_proc_init(void); |
165 | void cifs_proc_clean(void); | 164 | void cifs_proc_clean(void); |
@@ -218,7 +217,8 @@ extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo, | |||
218 | struct dfs_info3_param **preferrals, | 217 | struct dfs_info3_param **preferrals, |
219 | int remap); | 218 | int remap); |
220 | extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, | 219 | extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, |
221 | struct super_block *sb, struct smb_vol *vol); | 220 | struct cifs_sb_info *cifs_sb, |
221 | struct smb_vol *vol); | ||
222 | extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, | 222 | extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, |
223 | struct kstatfs *FSData); | 223 | struct kstatfs *FSData); |
224 | extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, | 224 | extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 12cf72dd0c4..7f540df5252 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -2546,7 +2546,7 @@ ip_connect(struct TCP_Server_Info *server) | |||
2546 | } | 2546 | } |
2547 | 2547 | ||
2548 | void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, | 2548 | void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, |
2549 | struct super_block *sb, struct smb_vol *vol_info) | 2549 | struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info) |
2550 | { | 2550 | { |
2551 | /* if we are reconnecting then should we check to see if | 2551 | /* if we are reconnecting then should we check to see if |
2552 | * any requested capabilities changed locally e.g. via | 2552 | * any requested capabilities changed locally e.g. via |
@@ -2600,22 +2600,23 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, | |||
2600 | cap &= ~CIFS_UNIX_POSIX_ACL_CAP; | 2600 | cap &= ~CIFS_UNIX_POSIX_ACL_CAP; |
2601 | else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { | 2601 | else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { |
2602 | cFYI(1, "negotiated posix acl support"); | 2602 | cFYI(1, "negotiated posix acl support"); |
2603 | if (sb) | 2603 | if (cifs_sb) |
2604 | sb->s_flags |= MS_POSIXACL; | 2604 | cifs_sb->mnt_cifs_flags |= |
2605 | CIFS_MOUNT_POSIXACL; | ||
2605 | } | 2606 | } |
2606 | 2607 | ||
2607 | if (vol_info && vol_info->posix_paths == 0) | 2608 | if (vol_info && vol_info->posix_paths == 0) |
2608 | cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; | 2609 | cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; |
2609 | else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { | 2610 | else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { |
2610 | cFYI(1, "negotiate posix pathnames"); | 2611 | cFYI(1, "negotiate posix pathnames"); |
2611 | if (sb) | 2612 | if (cifs_sb) |
2612 | CIFS_SB(sb)->mnt_cifs_flags |= | 2613 | cifs_sb->mnt_cifs_flags |= |
2613 | CIFS_MOUNT_POSIX_PATHS; | 2614 | CIFS_MOUNT_POSIX_PATHS; |
2614 | } | 2615 | } |
2615 | 2616 | ||
2616 | if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { | 2617 | if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) { |
2617 | if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { | 2618 | if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { |
2618 | CIFS_SB(sb)->rsize = 127 * 1024; | 2619 | cifs_sb->rsize = 127 * 1024; |
2619 | cFYI(DBG2, "larger reads not supported by srv"); | 2620 | cFYI(DBG2, "larger reads not supported by srv"); |
2620 | } | 2621 | } |
2621 | } | 2622 | } |
@@ -2662,6 +2663,9 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2662 | { | 2663 | { |
2663 | INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); | 2664 | INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); |
2664 | 2665 | ||
2666 | spin_lock_init(&cifs_sb->tlink_tree_lock); | ||
2667 | cifs_sb->tlink_tree = RB_ROOT; | ||
2668 | |||
2665 | if (pvolume_info->rsize > CIFSMaxBufSize) { | 2669 | if (pvolume_info->rsize > CIFSMaxBufSize) { |
2666 | cERROR(1, "rsize %d too large, using MaxBufSize", | 2670 | cERROR(1, "rsize %d too large, using MaxBufSize", |
2667 | pvolume_info->rsize); | 2671 | pvolume_info->rsize); |
@@ -2750,21 +2754,21 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2750 | 2754 | ||
2751 | /* | 2755 | /* |
2752 | * When the server supports very large writes via POSIX extensions, we can | 2756 | * When the server supports very large writes via POSIX extensions, we can |
2753 | * allow up to 2^24 - PAGE_CACHE_SIZE. | 2757 | * allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including |
2758 | * the RFC1001 length. | ||
2754 | * | 2759 | * |
2755 | * Note that this might make for "interesting" allocation problems during | 2760 | * Note that this might make for "interesting" allocation problems during |
2756 | * writeback however (as we have to allocate an array of pointers for the | 2761 | * writeback however as we have to allocate an array of pointers for the |
2757 | * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. | 2762 | * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. |
2758 | */ | 2763 | */ |
2759 | #define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE) | 2764 | #define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4) |
2760 | 2765 | ||
2761 | /* | 2766 | /* |
2762 | * When the server doesn't allow large posix writes, default to a wsize of | 2767 | * When the server doesn't allow large posix writes, only allow a wsize of |
2763 | * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size | 2768 | * 128k minus the size of the WRITE_AND_X header. That allows for a write up |
2764 | * described in RFC1001. This allows space for the header without going over | 2769 | * to the maximum size described by RFC1002. |
2765 | * that by default. | ||
2766 | */ | 2770 | */ |
2767 | #define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE) | 2771 | #define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4) |
2768 | 2772 | ||
2769 | /* | 2773 | /* |
2770 | * The default wsize is 1M. find_get_pages seems to return a maximum of 256 | 2774 | * The default wsize is 1M. find_get_pages seems to return a maximum of 256 |
@@ -2783,11 +2787,18 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) | |||
2783 | 2787 | ||
2784 | /* can server support 24-bit write sizes? (via UNIX extensions) */ | 2788 | /* can server support 24-bit write sizes? (via UNIX extensions) */ |
2785 | if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) | 2789 | if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) |
2786 | wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE); | 2790 | wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE); |
2787 | 2791 | ||
2788 | /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */ | 2792 | /* |
2789 | if (!(server->capabilities & CAP_LARGE_WRITE_X)) | 2793 | * no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set? |
2790 | wsize = min_t(unsigned int, wsize, USHRT_MAX); | 2794 | * Limit it to max buffer offered by the server, minus the size of the |
2795 | * WRITEX header, not including the 4 byte RFC1001 length. | ||
2796 | */ | ||
2797 | if (!(server->capabilities & CAP_LARGE_WRITE_X) || | ||
2798 | (!(server->capabilities & CAP_UNIX) && | ||
2799 | (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)))) | ||
2800 | wsize = min_t(unsigned int, wsize, | ||
2801 | server->maxBuf - sizeof(WRITE_REQ) + 4); | ||
2791 | 2802 | ||
2792 | /* hard limit of CIFS_MAX_WSIZE */ | 2803 | /* hard limit of CIFS_MAX_WSIZE */ |
2793 | wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE); | 2804 | wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE); |
@@ -2937,7 +2948,11 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data, | |||
2937 | 2948 | ||
2938 | if (volume_info->nullauth) { | 2949 | if (volume_info->nullauth) { |
2939 | cFYI(1, "null user"); | 2950 | cFYI(1, "null user"); |
2940 | volume_info->username = ""; | 2951 | volume_info->username = kzalloc(1, GFP_KERNEL); |
2952 | if (volume_info->username == NULL) { | ||
2953 | rc = -ENOMEM; | ||
2954 | goto out; | ||
2955 | } | ||
2941 | } else if (volume_info->username) { | 2956 | } else if (volume_info->username) { |
2942 | /* BB fixme parse for domain name here */ | 2957 | /* BB fixme parse for domain name here */ |
2943 | cFYI(1, "Username: %s", volume_info->username); | 2958 | cFYI(1, "Username: %s", volume_info->username); |
@@ -2971,8 +2986,7 @@ out: | |||
2971 | } | 2986 | } |
2972 | 2987 | ||
2973 | int | 2988 | int |
2974 | cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | 2989 | cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) |
2975 | struct smb_vol *volume_info, const char *devname) | ||
2976 | { | 2990 | { |
2977 | int rc = 0; | 2991 | int rc = 0; |
2978 | int xid; | 2992 | int xid; |
@@ -2983,6 +2997,13 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
2983 | struct tcon_link *tlink; | 2997 | struct tcon_link *tlink; |
2984 | #ifdef CONFIG_CIFS_DFS_UPCALL | 2998 | #ifdef CONFIG_CIFS_DFS_UPCALL |
2985 | int referral_walks_count = 0; | 2999 | int referral_walks_count = 0; |
3000 | |||
3001 | rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); | ||
3002 | if (rc) | ||
3003 | return rc; | ||
3004 | |||
3005 | cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; | ||
3006 | |||
2986 | try_mount_again: | 3007 | try_mount_again: |
2987 | /* cleanup activities if we're chasing a referral */ | 3008 | /* cleanup activities if we're chasing a referral */ |
2988 | if (referral_walks_count) { | 3009 | if (referral_walks_count) { |
@@ -3007,6 +3028,7 @@ try_mount_again: | |||
3007 | srvTcp = cifs_get_tcp_session(volume_info); | 3028 | srvTcp = cifs_get_tcp_session(volume_info); |
3008 | if (IS_ERR(srvTcp)) { | 3029 | if (IS_ERR(srvTcp)) { |
3009 | rc = PTR_ERR(srvTcp); | 3030 | rc = PTR_ERR(srvTcp); |
3031 | bdi_destroy(&cifs_sb->bdi); | ||
3010 | goto out; | 3032 | goto out; |
3011 | } | 3033 | } |
3012 | 3034 | ||
@@ -3018,14 +3040,6 @@ try_mount_again: | |||
3018 | goto mount_fail_check; | 3040 | goto mount_fail_check; |
3019 | } | 3041 | } |
3020 | 3042 | ||
3021 | if (pSesInfo->capabilities & CAP_LARGE_FILES) | ||
3022 | sb->s_maxbytes = MAX_LFS_FILESIZE; | ||
3023 | else | ||
3024 | sb->s_maxbytes = MAX_NON_LFS; | ||
3025 | |||
3026 | /* BB FIXME fix time_gran to be larger for LANMAN sessions */ | ||
3027 | sb->s_time_gran = 100; | ||
3028 | |||
3029 | /* search for existing tcon to this server share */ | 3043 | /* search for existing tcon to this server share */ |
3030 | tcon = cifs_get_tcon(pSesInfo, volume_info); | 3044 | tcon = cifs_get_tcon(pSesInfo, volume_info); |
3031 | if (IS_ERR(tcon)) { | 3045 | if (IS_ERR(tcon)) { |
@@ -3038,7 +3052,7 @@ try_mount_again: | |||
3038 | if (tcon->ses->capabilities & CAP_UNIX) { | 3052 | if (tcon->ses->capabilities & CAP_UNIX) { |
3039 | /* reset of caps checks mount to see if unix extensions | 3053 | /* reset of caps checks mount to see if unix extensions |
3040 | disabled for just this mount */ | 3054 | disabled for just this mount */ |
3041 | reset_cifs_unix_caps(xid, tcon, sb, volume_info); | 3055 | reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info); |
3042 | if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && | 3056 | if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && |
3043 | (le64_to_cpu(tcon->fsUnixInfo.Capability) & | 3057 | (le64_to_cpu(tcon->fsUnixInfo.Capability) & |
3044 | CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { | 3058 | CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { |
@@ -3161,6 +3175,7 @@ mount_fail_check: | |||
3161 | cifs_put_smb_ses(pSesInfo); | 3175 | cifs_put_smb_ses(pSesInfo); |
3162 | else | 3176 | else |
3163 | cifs_put_tcp_session(srvTcp); | 3177 | cifs_put_tcp_session(srvTcp); |
3178 | bdi_destroy(&cifs_sb->bdi); | ||
3164 | goto out; | 3179 | goto out; |
3165 | } | 3180 | } |
3166 | 3181 | ||
@@ -3335,8 +3350,8 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses, | |||
3335 | return rc; | 3350 | return rc; |
3336 | } | 3351 | } |
3337 | 3352 | ||
3338 | int | 3353 | void |
3339 | cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) | 3354 | cifs_umount(struct cifs_sb_info *cifs_sb) |
3340 | { | 3355 | { |
3341 | struct rb_root *root = &cifs_sb->tlink_tree; | 3356 | struct rb_root *root = &cifs_sb->tlink_tree; |
3342 | struct rb_node *node; | 3357 | struct rb_node *node; |
@@ -3357,7 +3372,10 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) | |||
3357 | } | 3372 | } |
3358 | spin_unlock(&cifs_sb->tlink_tree_lock); | 3373 | spin_unlock(&cifs_sb->tlink_tree_lock); |
3359 | 3374 | ||
3360 | return 0; | 3375 | bdi_destroy(&cifs_sb->bdi); |
3376 | kfree(cifs_sb->mountdata); | ||
3377 | unload_nls(cifs_sb->local_nls); | ||
3378 | kfree(cifs_sb); | ||
3361 | } | 3379 | } |
3362 | 3380 | ||
3363 | int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses) | 3381 | int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses) |
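Two threads run through the connect.c hunks. First, reset_cifs_unix_caps() no longer needs a superblock: POSIX ACL support is latched into the new CIFS_MOUNT_POSIXACL flag (see the cifs_fs_sb.h hunk) and mirrored into sb->s_flags later, in cifs_read_super(). Second, the wsize limits are recomputed from actual protocol headroom instead of PAGE_CACHE_SIZE: the hard cap is the 24-bit SMB length budget minus the WRITE_AND_X header (the trailing "+ 4" re-adds the RFC1001 length field, which sizeof(WRITE_REQ) includes but the length budget does not), and a server without CAP_LARGE_WRITE_X, or one signing traffic without CAP_UNIX, is clamped to its advertised maxBuf on the same terms. A user-space sketch of the arithmetic; WRITE_REQ_SIZE is an assumed stand-in for sizeof(WRITE_REQ):

    #include <stdio.h>

    #define WRITE_REQ_SIZE 67u      /* assumption: the real value is the wire
                                       struct size, incl. 4-byte RFC1001 len */
    #define MAX_WSIZE      ((1u << 24) - 1 - WRITE_REQ_SIZE + 4)
    #define RFC1002_WSIZE  (128u * 1024 - WRITE_REQ_SIZE + 4)

    static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

    int main(void)
    {
            unsigned wsize = 1024u * 1024;  /* 1M default request           */
            int large_posix_writes = 0;     /* no CIFS_UNIX_LARGE_WRITE_CAP */
            int large_write_x = 0;          /* no CAP_LARGE_WRITE_X either  */
            unsigned max_buf = 16644;       /* server's advertised maxBuf   */

            if (!large_posix_writes)
                    wsize = min_u(wsize, RFC1002_WSIZE);
            if (!large_write_x)
                    wsize = min_u(wsize, max_buf - WRITE_REQ_SIZE + 4);
            wsize = min_u(wsize, MAX_WSIZE);        /* hard cap */

            printf("negotiated wsize: %u\n", wsize);
            return 0;
    }

The cifs_mount()/cifs_umount() hunks complete the ownership move started in cifsfs.c: the bdi is now set up and torn down inside the mount/umount pair, and the null-auth username becomes a kzalloc'd empty string so it can be kfree'd uniformly with user-supplied names instead of pointing at a string literal.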
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index 1525d5e662b..1c5b770c314 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c | |||
@@ -90,12 +90,10 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key) | |||
90 | sg_init_one(&sgout, out, 8); | 90 | sg_init_one(&sgout, out, 8); |
91 | 91 | ||
92 | rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8); | 92 | rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8); |
93 | if (rc) { | 93 | if (rc) |
94 | cERROR(1, "could not encrypt crypt key rc: %d\n", rc); | 94 | cERROR(1, "could not encrypt crypt key rc: %d\n", rc); |
95 | crypto_free_blkcipher(tfm_des); | ||
96 | goto smbhash_err; | ||
97 | } | ||
98 | 95 | ||
96 | crypto_free_blkcipher(tfm_des); | ||
99 | smbhash_err: | 97 | smbhash_err: |
100 | return rc; | 98 | return rc; |
101 | } | 99 | } |
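The smbhash() hunk fixes a leak rather than adding one: the old code freed the DES transform only on the encryption-failure path, so (at least as far as this hunk shows) a successful hash returned without ever calling crypto_free_blkcipher(). Hoisting the free below the error check makes it unconditional. The generic shape of the fix, as a runnable user-space analogue (alloc_ctx/use_ctx are hypothetical stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    static int *alloc_ctx(void) { return malloc(sizeof(int)); }
    static int use_ctx(int *c) { *c = 1; return 0; /* or an error code */ }

    static int do_hash(void)
    {
            int rc;
            int *ctx = alloc_ctx();

            if (!ctx)
                    return -1;
            rc = use_ctx(ctx);
            if (rc)
                    fprintf(stderr, "could not encrypt: %d\n", rc);
            free(ctx);      /* runs on success *and* failure: no leak */
            return rc;
    }

    int main(void) { return do_hash() ? 1 : 0; }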
diff --git a/fs/inode.c b/fs/inode.c index 0f7e88a7803..43566d17d1b 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -423,7 +423,14 @@ EXPORT_SYMBOL(remove_inode_hash); | |||
423 | void end_writeback(struct inode *inode) | 423 | void end_writeback(struct inode *inode) |
424 | { | 424 | { |
425 | might_sleep(); | 425 | might_sleep(); |
426 | /* | ||
427 | * We have to cycle tree_lock here because reclaim can still be in the | ||
428 | * process of removing the last page (in __delete_from_page_cache()) | ||
429 | * and we must not free mapping under it. | ||
430 | */ | ||
431 | spin_lock_irq(&inode->i_data.tree_lock); | ||
426 | BUG_ON(inode->i_data.nrpages); | 432 | BUG_ON(inode->i_data.nrpages); |
433 | spin_unlock_irq(&inode->i_data.tree_lock); | ||
427 | BUG_ON(!list_empty(&inode->i_data.private_list)); | 434 | BUG_ON(!list_empty(&inode->i_data.private_list)); |
428 | BUG_ON(!(inode->i_state & I_FREEING)); | 435 | BUG_ON(!(inode->i_state & I_FREEING)); |
429 | BUG_ON(inode->i_state & I_CLEAR); | 436 | BUG_ON(inode->i_state & I_CLEAR); |
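The end_writeback() hunk closes a subtle use-after-free window: page reclaim can have just removed the last page, dropping nrpages to zero inside __delete_from_page_cache(), while still touching the mapping under tree_lock. Observing nrpages == 0 is therefore not enough; acquiring and releasing tree_lock once guarantees the remover has left its critical section before the mapping's inode is freed. A runnable pthread toy of the same lock-cycling barrier:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int nrpages = 1;
    static int *mapping;

    static void *delete_last_page(void *arg)
    {
            pthread_mutex_lock(&tree_lock);
            atomic_store(&nrpages, 0);      /* count hits zero here...      */
            *mapping = 0;                   /* ...but mapping is still used */
            pthread_mutex_unlock(&tree_lock);
            return arg;
    }

    int main(void)
    {
            pthread_t t;

            mapping = malloc(sizeof(*mapping));
            pthread_create(&t, NULL, delete_last_page, NULL);

            while (atomic_load(&nrpages))   /* nrpages == 0 alone does not  */
                    ;                       /* mean the remover is done     */
                                            /* with "mapping"...            */
            pthread_mutex_lock(&tree_lock); /* ...cycling its lock is what  */
            pthread_mutex_unlock(&tree_lock); /* waits it out               */
            free(mapping);                  /* now safe                     */

            pthread_join(t, NULL);
            return 0;
    }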
diff --git a/fs/jfs/file.c b/fs/jfs/file.c index c5ce6c1d1ff..2f3f531f360 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c | |||
@@ -66,9 +66,9 @@ static int jfs_open(struct inode *inode, struct file *file) | |||
66 | struct jfs_inode_info *ji = JFS_IP(inode); | 66 | struct jfs_inode_info *ji = JFS_IP(inode); |
67 | spin_lock_irq(&ji->ag_lock); | 67 | spin_lock_irq(&ji->ag_lock); |
68 | if (ji->active_ag == -1) { | 68 | if (ji->active_ag == -1) { |
69 | ji->active_ag = ji->agno; | 69 | struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb); |
70 | atomic_inc( | 70 | ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb); |
71 | &JFS_SBI(inode->i_sb)->bmap->db_active[ji->agno]); | 71 | atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]); |
72 | } | 72 | } |
73 | spin_unlock_irq(&ji->ag_lock); | 73 | spin_unlock_irq(&ji->ag_lock); |
74 | } | 74 | } |
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index ed53a474016..b78b2f978f0 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c | |||
@@ -397,7 +397,7 @@ int diRead(struct inode *ip) | |||
397 | release_metapage(mp); | 397 | release_metapage(mp); |
398 | 398 | ||
399 | /* set the ag for the inode */ | 399 | /* set the ag for the inode */ |
400 | JFS_IP(ip)->agno = BLKTOAG(agstart, sbi); | 400 | JFS_IP(ip)->agstart = agstart; |
401 | JFS_IP(ip)->active_ag = -1; | 401 | JFS_IP(ip)->active_ag = -1; |
402 | 402 | ||
403 | return (rc); | 403 | return (rc); |
@@ -901,7 +901,7 @@ int diFree(struct inode *ip) | |||
901 | 901 | ||
902 | /* get the allocation group for this ino. | 902 | /* get the allocation group for this ino. |
903 | */ | 903 | */ |
904 | agno = JFS_IP(ip)->agno; | 904 | agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb)); |
905 | 905 | ||
906 | /* Lock the AG specific inode map information | 906 | /* Lock the AG specific inode map information |
907 | */ | 907 | */ |
@@ -1315,12 +1315,11 @@ int diFree(struct inode *ip) | |||
1315 | static inline void | 1315 | static inline void |
1316 | diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp) | 1316 | diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp) |
1317 | { | 1317 | { |
1318 | struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); | ||
1319 | struct jfs_inode_info *jfs_ip = JFS_IP(ip); | 1318 | struct jfs_inode_info *jfs_ip = JFS_IP(ip); |
1320 | 1319 | ||
1321 | ip->i_ino = (iagno << L2INOSPERIAG) + ino; | 1320 | ip->i_ino = (iagno << L2INOSPERIAG) + ino; |
1322 | jfs_ip->ixpxd = iagp->inoext[extno]; | 1321 | jfs_ip->ixpxd = iagp->inoext[extno]; |
1323 | jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi); | 1322 | jfs_ip->agstart = le64_to_cpu(iagp->agstart); |
1324 | jfs_ip->active_ag = -1; | 1323 | jfs_ip->active_ag = -1; |
1325 | } | 1324 | } |
1326 | 1325 | ||
@@ -1379,7 +1378,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip) | |||
1379 | */ | 1378 | */ |
1380 | 1379 | ||
1381 | /* get the ag number of this iag */ | 1380 | /* get the ag number of this iag */ |
1382 | agno = JFS_IP(pip)->agno; | 1381 | agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb)); |
1383 | 1382 | ||
1384 | if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) { | 1383 | if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) { |
1385 | /* | 1384 | /* |
@@ -2921,10 +2920,9 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap) | |||
2921 | continue; | 2920 | continue; |
2922 | } | 2921 | } |
2923 | 2922 | ||
2924 | /* agstart that computes to the same ag is treated as same; */ | ||
2925 | agstart = le64_to_cpu(iagp->agstart); | 2923 | agstart = le64_to_cpu(iagp->agstart); |
2926 | /* iagp->agstart = agstart & ~(mp->db_agsize - 1); */ | ||
2927 | n = agstart >> mp->db_agl2size; | 2924 | n = agstart >> mp->db_agl2size; |
2925 | iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size); | ||
2928 | 2926 | ||
2929 | /* compute backed inodes */ | 2927 | /* compute backed inodes */ |
2930 | numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts)) | 2928 | numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts)) |
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h index 1439f119ec8..584a4a1a6e8 100644 --- a/fs/jfs/jfs_incore.h +++ b/fs/jfs/jfs_incore.h | |||
@@ -50,8 +50,9 @@ struct jfs_inode_info { | |||
50 | short btindex; /* btpage entry index*/ | 50 | short btindex; /* btpage entry index*/ |
51 | struct inode *ipimap; /* inode map */ | 51 | struct inode *ipimap; /* inode map */ |
52 | unsigned long cflag; /* commit flags */ | 52 | unsigned long cflag; /* commit flags */ |
53 | u64 agstart; /* agstart of the containing IAG */ | ||
53 | u16 bxflag; /* xflag of pseudo buffer? */ | 54 | u16 bxflag; /* xflag of pseudo buffer? */ |
54 | unchar agno; /* ag number */ | 55 | unchar pad; |
55 | signed char active_ag; /* ag currently allocating from */ | 56 | signed char active_ag; /* ag currently allocating from */ |
56 | lid_t blid; /* lid of pseudo buffer? */ | 57 | lid_t blid; /* lid of pseudo buffer? */ |
57 | lid_t atlhead; /* anonymous tlock list head */ | 58 | lid_t atlhead; /* anonymous tlock list head */ |
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c index 8ea5efb5a34..8d0c1c7c082 100644 --- a/fs/jfs/resize.c +++ b/fs/jfs/resize.c | |||
@@ -80,7 +80,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) | |||
80 | int log_formatted = 0; | 80 | int log_formatted = 0; |
81 | struct inode *iplist[1]; | 81 | struct inode *iplist[1]; |
82 | struct jfs_superblock *j_sb, *j_sb2; | 82 | struct jfs_superblock *j_sb, *j_sb2; |
83 | uint old_agsize; | 83 | s64 old_agsize; |
84 | int agsizechanged = 0; | 84 | int agsizechanged = 0; |
85 | struct buffer_head *bh, *bh2; | 85 | struct buffer_head *bh, *bh2; |
86 | 86 | ||
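The four JFS hunks are one change: struct jfs_inode_info stops caching the allocation-group number (an unchar, capped at 255) and instead keeps the 64-bit agstart block of its IAG, deriving the AG number with BLKTOAG() at each use. That matters because jfs_extendfs() can change the AG size, instantly invalidating any cached agno; the companion resize.c hunk widens old_agsize to s64 for the same large-volume reason. A toy showing why the derived value tracks a resize while a cached one would not (BLKTOAG is roughly blkno >> l2agsize):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned blktoag(uint64_t agstart, unsigned l2agsize)
    {
            return (unsigned)(agstart >> l2agsize);
    }

    int main(void)
    {
            uint64_t agstart = 3ULL << 25;  /* IAG's starting block       */
            unsigned l2agsize = 25;         /* log2 of AG size, in blocks */

            printf("agno before resize: %u\n", blktoag(agstart, l2agsize));
            l2agsize = 26;                  /* jfs_extendfs grew the AGs  */
            printf("agno after resize:  %u\n", blktoag(agstart, l2agsize));
            return 0;
    }

Note also that the diExtendFS() hunk now writes the recomputed, AG-aligned agstart back into the IAG instead of merely commenting about it, keeping on-disk IAGs consistent with the new geometry.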
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index adb45ec9038..e374050a911 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c | |||
@@ -708,7 +708,13 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) | |||
708 | 708 | ||
709 | if (task->tk_status < 0) { | 709 | if (task->tk_status < 0) { |
710 | dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); | 710 | dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); |
711 | goto retry_rebind; | 711 | switch (task->tk_status) { |
712 | case -EACCES: | ||
713 | case -EIO: | ||
714 | goto die; | ||
715 | default: | ||
716 | goto retry_rebind; | ||
717 | } | ||
712 | } | 718 | } |
713 | if (status == NLM_LCK_DENIED_GRACE_PERIOD) { | 719 | if (status == NLM_LCK_DENIED_GRACE_PERIOD) { |
714 | rpc_delay(task, NLMCLNT_GRACE_WAIT); | 720 | rpc_delay(task, NLMCLNT_GRACE_WAIT); |
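The lockd hunk stops nlmclnt_unlock_callback() from rebinding and retrying forever when the unlock RPC fails with an error that a rebind cannot cure: -EACCES and -EIO now jump to the die label, while everything else keeps the old retry behaviour. The triage idiom as a runnable toy:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Permanent errors end the retry loop; transient ones rebind+retry. */
    static bool is_permanent(int err)
    {
            switch (err) {
            case EACCES:
            case EIO:
                    return true;
            default:
                    return false;
            }
    }

    int main(void)
    {
            int errs[] = { ETIMEDOUT, EACCES, EIO };

            for (unsigned i = 0; i < sizeof(errs) / sizeof(errs[0]); i++)
                    printf("err %d -> %s\n", errs[i],
                           is_permanent(errs[i]) ? "give up" : "retry");
            return 0;
    }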
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 144f2a3c718..6f4850deb27 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -256,7 +256,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr) | |||
256 | 256 | ||
257 | nfs_attr_check_mountpoint(sb, fattr); | 257 | nfs_attr_check_mountpoint(sb, fattr); |
258 | 258 | ||
259 | if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0 && (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) | 259 | if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) && |
260 | !nfs_attr_use_mounted_on_fileid(fattr)) | ||
260 | goto out_no_inode; | 261 | goto out_no_inode; |
261 | if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0) | 262 | if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0) |
262 | goto out_no_inode; | 263 | goto out_no_inode; |
@@ -1294,7 +1295,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1294 | if (new_isize != cur_isize) { | 1295 | if (new_isize != cur_isize) { |
1295 | /* Do we perhaps have any outstanding writes, or has | 1296 | /* Do we perhaps have any outstanding writes, or has |
1296 | * the file grown beyond our last write? */ | 1297 | * the file grown beyond our last write? */ |
1297 | if (nfsi->npages == 0 || new_isize > cur_isize) { | 1298 | if ((nfsi->npages == 0 && !test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) || |
1299 | new_isize > cur_isize) { | ||
1298 | i_size_write(inode, new_isize); | 1300 | i_size_write(inode, new_isize); |
1299 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; | 1301 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; |
1300 | } | 1302 | } |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index b9056cbe68d..2a55347a2da 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -45,6 +45,17 @@ static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct | |||
45 | fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT; | 45 | fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT; |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr) | ||
49 | { | ||
50 | if (((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) == 0) || | ||
51 | (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) && | ||
52 | ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0))) | ||
53 | return 0; | ||
54 | |||
55 | fattr->fileid = fattr->mounted_on_fileid; | ||
56 | return 1; | ||
57 | } | ||
58 | |||
48 | struct nfs_clone_mount { | 59 | struct nfs_clone_mount { |
49 | const struct super_block *sb; | 60 | const struct super_block *sb; |
50 | const struct dentry *dentry; | 61 | const struct dentry *dentry; |
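nfs_attr_use_mounted_on_fileid() formalises the fallback used in the nfs/inode.c hunk above: when the server supplied no real fileid but the object is a mountpoint or a v4 referral, the mounted_on_fileid is promoted into fattr->fileid so nfs_fhget() can still build an inode. The call pattern from nfs_fhget(), for reference:

    if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
        !nfs_attr_use_mounted_on_fileid(fattr))
            goto out_no_inode;      /* no usable fileid at all */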
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 426908809c9..0bafcc91c27 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c | |||
@@ -30,6 +30,7 @@ | |||
30 | */ | 30 | */ |
31 | 31 | ||
32 | #include <linux/nfs_fs.h> | 32 | #include <linux/nfs_fs.h> |
33 | #include <linux/nfs_page.h> | ||
33 | 34 | ||
34 | #include "internal.h" | 35 | #include "internal.h" |
35 | #include "nfs4filelayout.h" | 36 | #include "nfs4filelayout.h" |
@@ -552,13 +553,18 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, | |||
552 | __func__, nfl_util, fl->num_fh, fl->first_stripe_index, | 553 | __func__, nfl_util, fl->num_fh, fl->first_stripe_index, |
553 | fl->pattern_offset); | 554 | fl->pattern_offset); |
554 | 555 | ||
555 | if (!fl->num_fh) | 556 | /* Note that a zero value for num_fh is legal for STRIPE_SPARSE. |
557 | * Further checking is done in filelayout_check_layout */ | ||
558 | if (fl->num_fh < 0 || fl->num_fh > | ||
559 | max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT)) | ||
556 | goto out_err; | 560 | goto out_err; |
557 | 561 | ||
558 | fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), | 562 | if (fl->num_fh > 0) { |
559 | gfp_flags); | 563 | fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), |
560 | if (!fl->fh_array) | 564 | gfp_flags); |
561 | goto out_err; | 565 | if (!fl->fh_array) |
566 | goto out_err; | ||
567 | } | ||
562 | 568 | ||
563 | for (i = 0; i < fl->num_fh; i++) { | 569 | for (i = 0; i < fl->num_fh; i++) { |
564 | /* Do we want to use a mempool here? */ | 570 | /* Do we want to use a mempool here? */ |
@@ -661,8 +667,9 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, | |||
661 | u64 p_stripe, r_stripe; | 667 | u64 p_stripe, r_stripe; |
662 | u32 stripe_unit; | 668 | u32 stripe_unit; |
663 | 669 | ||
664 | if (!pnfs_generic_pg_test(pgio, prev, req)) | 670 | if (!pnfs_generic_pg_test(pgio, prev, req) || |
665 | return 0; | 671 | !nfs_generic_pg_test(pgio, prev, req)) |
672 | return false; | ||
666 | 673 | ||
667 | if (!pgio->pg_lseg) | 674 | if (!pgio->pg_lseg) |
668 | return 1; | 675 | return 1; |
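Two hardenings in nfs4filelayout.c: filelayout_decode_layout() now accepts num_fh == 0 (legal for a sparse stripe layout, which carries no per-device filehandles) while rejecting counts above the protocol maxima before they size a kzalloc() from server-controlled data, and filelayout_pg_test() additionally applies nfs_generic_pg_test() so page coalescing honours the generic I/O size limits as well as stripe boundaries. A hedged user-space sketch of validating an untrusted count before it reaches the allocator (MAX_STRIPE_CNT is an assumed stand-in for the NFS4_PNFS_* maxima):

    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_STRIPE_CNT 4096     /* assumption; see nfs4filelayout.h */

    static void **alloc_fh_array(int32_t num_fh)
    {
            if (num_fh < 0 || num_fh > MAX_STRIPE_CNT)
                    return NULL;    /* reject bogus server data        */
            if (num_fh == 0)
                    return NULL;    /* legal: sparse layout, no array;
                                       callers key off num_fh, not
                                       the pointer                     */
            return calloc((size_t)num_fh, sizeof(void *));
    }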
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d2c4b59c896..5879b23e0c9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -2265,12 +2265,14 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, | |||
2265 | return nfs4_map_errors(status); | 2265 | return nfs4_map_errors(status); |
2266 | } | 2266 | } |
2267 | 2267 | ||
2268 | static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); | ||
2268 | /* | 2269 | /* |
2269 | * Get locations and (maybe) other attributes of a referral. | 2270 | * Get locations and (maybe) other attributes of a referral. |
2270 | * Note that we'll actually follow the referral later when | 2271 | * Note that we'll actually follow the referral later when |
2271 | * we detect fsid mismatch in inode revalidation | 2272 | * we detect fsid mismatch in inode revalidation |
2272 | */ | 2273 | */ |
2273 | static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle) | 2274 | static int nfs4_get_referral(struct inode *dir, const struct qstr *name, |
2275 | struct nfs_fattr *fattr, struct nfs_fh *fhandle) | ||
2274 | { | 2276 | { |
2275 | int status = -ENOMEM; | 2277 | int status = -ENOMEM; |
2276 | struct page *page = NULL; | 2278 | struct page *page = NULL; |
@@ -2288,15 +2290,16 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct | |||
2288 | goto out; | 2290 | goto out; |
2289 | /* Make sure server returned a different fsid for the referral */ | 2291 | /* Make sure server returned a different fsid for the referral */ |
2290 | if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { | 2292 | if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { |
2291 | dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name); | 2293 | dprintk("%s: server did not return a different fsid for" |
2294 | " a referral at %s\n", __func__, name->name); | ||
2292 | status = -EIO; | 2295 | status = -EIO; |
2293 | goto out; | 2296 | goto out; |
2294 | } | 2297 | } |
2298 | /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ | ||
2299 | nfs_fixup_referral_attributes(&locations->fattr); | ||
2295 | 2300 | ||
2301 | /* replace the lookup nfs_fattr with the locations nfs_fattr */ | ||
2296 | memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); | 2302 | memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); |
2297 | fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL; | ||
2298 | if (!fattr->mode) | ||
2299 | fattr->mode = S_IFDIR; | ||
2300 | memset(fhandle, 0, sizeof(struct nfs_fh)); | 2303 | memset(fhandle, 0, sizeof(struct nfs_fh)); |
2301 | out: | 2304 | out: |
2302 | if (page) | 2305 | if (page) |
@@ -4667,11 +4670,15 @@ static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, | |||
4667 | return len; | 4670 | return len; |
4668 | } | 4671 | } |
4669 | 4672 | ||
4673 | /* | ||
4674 | * nfs_fhget will use either the mounted_on_fileid or the fileid | ||
4675 | */ | ||
4670 | static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) | 4676 | static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) |
4671 | { | 4677 | { |
4672 | if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) && | 4678 | if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || |
4673 | (fattr->valid & NFS_ATTR_FATTR_FSID) && | 4679 | (fattr->valid & NFS_ATTR_FATTR_FILEID)) && |
4674 | (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL))) | 4680 | (fattr->valid & NFS_ATTR_FATTR_FSID) && |
4681 | (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL))) | ||
4675 | return; | 4682 | return; |
4676 | 4683 | ||
4677 | fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | | 4684 | fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | |
@@ -4686,7 +4693,6 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, | |||
4686 | struct nfs_server *server = NFS_SERVER(dir); | 4693 | struct nfs_server *server = NFS_SERVER(dir); |
4687 | u32 bitmask[2] = { | 4694 | u32 bitmask[2] = { |
4688 | [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, | 4695 | [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, |
4689 | [1] = FATTR4_WORD1_MOUNTED_ON_FILEID, | ||
4690 | }; | 4696 | }; |
4691 | struct nfs4_fs_locations_arg args = { | 4697 | struct nfs4_fs_locations_arg args = { |
4692 | .dir_fh = NFS_FH(dir), | 4698 | .dir_fh = NFS_FH(dir), |
@@ -4705,11 +4711,18 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, | |||
4705 | int status; | 4711 | int status; |
4706 | 4712 | ||
4707 | dprintk("%s: start\n", __func__); | 4713 | dprintk("%s: start\n", __func__); |
4714 | |||
4715 | /* Ask for the fileid of the absent filesystem if mounted_on_fileid | ||
4716 | * is not supported */ | ||
4717 | if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) | ||
4718 | bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; | ||
4719 | else | ||
4720 | bitmask[0] |= FATTR4_WORD0_FILEID; | ||
4721 | |||
4708 | nfs_fattr_init(&fs_locations->fattr); | 4722 | nfs_fattr_init(&fs_locations->fattr); |
4709 | fs_locations->server = server; | 4723 | fs_locations->server = server; |
4710 | fs_locations->nlocations = 0; | 4724 | fs_locations->nlocations = 0; |
4711 | status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); | 4725 | status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); |
4712 | nfs_fixup_referral_attributes(&fs_locations->fattr); | ||
4713 | dprintk("%s: returned status = %d\n", __func__, status); | 4726 | dprintk("%s: returned status = %d\n", __func__, status); |
4714 | return status; | 4727 | return status; |
4715 | } | 4728 | } |
@@ -5098,7 +5111,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) | |||
5098 | if (mxresp_sz == 0) | 5111 | if (mxresp_sz == 0) |
5099 | mxresp_sz = NFS_MAX_FILE_IO_SIZE; | 5112 | mxresp_sz = NFS_MAX_FILE_IO_SIZE; |
5100 | /* Fore channel attributes */ | 5113 | /* Fore channel attributes */ |
5101 | args->fc_attrs.headerpadsz = 0; | ||
5102 | args->fc_attrs.max_rqst_sz = mxrqst_sz; | 5114 | args->fc_attrs.max_rqst_sz = mxrqst_sz; |
5103 | args->fc_attrs.max_resp_sz = mxresp_sz; | 5115 | args->fc_attrs.max_resp_sz = mxresp_sz; |
5104 | args->fc_attrs.max_ops = NFS4_MAX_OPS; | 5116 | args->fc_attrs.max_ops = NFS4_MAX_OPS; |
@@ -5111,7 +5123,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) | |||
5111 | args->fc_attrs.max_ops, args->fc_attrs.max_reqs); | 5123 | args->fc_attrs.max_ops, args->fc_attrs.max_reqs); |
5112 | 5124 | ||
5113 | /* Back channel attributes */ | 5125 | /* Back channel attributes */ |
5114 | args->bc_attrs.headerpadsz = 0; | ||
5115 | args->bc_attrs.max_rqst_sz = PAGE_SIZE; | 5126 | args->bc_attrs.max_rqst_sz = PAGE_SIZE; |
5116 | args->bc_attrs.max_resp_sz = PAGE_SIZE; | 5127 | args->bc_attrs.max_resp_sz = PAGE_SIZE; |
5117 | args->bc_attrs.max_resp_sz_cached = 0; | 5128 | args->bc_attrs.max_resp_sz_cached = 0; |
@@ -5131,8 +5142,6 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args | |||
5131 | struct nfs4_channel_attrs *sent = &args->fc_attrs; | 5142 | struct nfs4_channel_attrs *sent = &args->fc_attrs; |
5132 | struct nfs4_channel_attrs *rcvd = &session->fc_attrs; | 5143 | struct nfs4_channel_attrs *rcvd = &session->fc_attrs; |
5133 | 5144 | ||
5134 | if (rcvd->headerpadsz > sent->headerpadsz) | ||
5135 | return -EINVAL; | ||
5136 | if (rcvd->max_resp_sz > sent->max_resp_sz) | 5145 | if (rcvd->max_resp_sz > sent->max_resp_sz) |
5137 | return -EINVAL; | 5146 | return -EINVAL; |
5138 | /* | 5147 | /* |
@@ -5697,6 +5706,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) | |||
5697 | { | 5706 | { |
5698 | struct nfs4_layoutreturn *lrp = calldata; | 5707 | struct nfs4_layoutreturn *lrp = calldata; |
5699 | struct nfs_server *server; | 5708 | struct nfs_server *server; |
5709 | struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout; | ||
5700 | 5710 | ||
5701 | dprintk("--> %s\n", __func__); | 5711 | dprintk("--> %s\n", __func__); |
5702 | 5712 | ||
@@ -5708,16 +5718,15 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) | |||
5708 | nfs_restart_rpc(task, lrp->clp); | 5718 | nfs_restart_rpc(task, lrp->clp); |
5709 | return; | 5719 | return; |
5710 | } | 5720 | } |
5721 | spin_lock(&lo->plh_inode->i_lock); | ||
5711 | if (task->tk_status == 0) { | 5722 | if (task->tk_status == 0) { |
5712 | struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout; | ||
5713 | |||
5714 | if (lrp->res.lrs_present) { | 5723 | if (lrp->res.lrs_present) { |
5715 | spin_lock(&lo->plh_inode->i_lock); | ||
5716 | pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); | 5724 | pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); |
5717 | spin_unlock(&lo->plh_inode->i_lock); | ||
5718 | } else | 5725 | } else |
5719 | BUG_ON(!list_empty(&lo->plh_segs)); | 5726 | BUG_ON(!list_empty(&lo->plh_segs)); |
5720 | } | 5727 | } |
5728 | lo->plh_block_lgets--; | ||
5729 | spin_unlock(&lo->plh_inode->i_lock); | ||
5721 | dprintk("<-- %s\n", __func__); | 5730 | dprintk("<-- %s\n", __func__); |
5722 | } | 5731 | } |
5723 | 5732 | ||
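The nfs4_layoutreturn_done() rework above widens the i_lock critical section so that the stateid update and the layoutget unblock happen under a single hold. A minimal sketch of the resulting shape, using the same field names (the matching plh_block_lgets++ appears in the _pnfs_return_layout() hunk further down):

	spin_lock(&lo->plh_inode->i_lock);
	if (task->tk_status == 0 && lrp->res.lrs_present)
		pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
	lo->plh_block_lgets--;	/* paired with the ++ in _pnfs_return_layout() */
	spin_unlock(&lo->plh_inode->i_lock);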
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index d869a5e5464..6870bc61cee 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -255,7 +255,7 @@ static int nfs4_stat_to_errno(int); | |||
255 | #define decode_fs_locations_maxsz \ | 255 | #define decode_fs_locations_maxsz \ |
256 | (0) | 256 | (0) |
257 | #define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) | 257 | #define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) |
258 | #define decode_secinfo_maxsz (op_decode_hdr_maxsz + 4 + (NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN))) | 258 | #define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4)) |
259 | 259 | ||
260 | #if defined(CONFIG_NFS_V4_1) | 260 | #if defined(CONFIG_NFS_V4_1) |
261 | #define NFS4_MAX_MACHINE_NAME_LEN (64) | 261 | #define NFS4_MAX_MACHINE_NAME_LEN (64) |
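The decode_secinfo_maxsz correction is easy to misread: the *_maxsz macros in nfs4xdr.c count 4-byte XDR words, not bytes, so the per-flavor bound (16 bytes of fixed fields plus at most GSS_OID_MAX_LEN bytes of OID) must be divided by 4, with a single extra word for the flavor count. Illustrative arithmetic, kept symbolic since the constants can differ by tree:

	/* words = op header + 1 count word + total flavor bytes / 4;
	 * the old value added a byte total to a word count, over-reserving. */
	decode_secinfo_maxsz = op_decode_hdr_maxsz + 1
			       + (NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4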
@@ -1725,7 +1725,7 @@ static void encode_create_session(struct xdr_stream *xdr, | |||
1725 | *p++ = cpu_to_be32(args->flags); /*flags */ | 1725 | *p++ = cpu_to_be32(args->flags); /*flags */ |
1726 | 1726 | ||
1727 | /* Fore Channel */ | 1727 | /* Fore Channel */ |
1728 | *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ | 1728 | *p++ = cpu_to_be32(0); /* header padding size */ |
1729 | *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ | 1729 | *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ |
1730 | *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ | 1730 | *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ |
1731 | *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */ | 1731 | *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */ |
@@ -1734,7 +1734,7 @@ static void encode_create_session(struct xdr_stream *xdr, | |||
1734 | *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ | 1734 | *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ |
1735 | 1735 | ||
1736 | /* Back Channel */ | 1736 | /* Back Channel */ |
1737 | *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ | 1737 | *p++ = cpu_to_be32(0); /* header padding size */ |
1738 | *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */ | 1738 | *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */ |
1739 | *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */ | 1739 | *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */ |
1740 | *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */ | 1740 | *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */ |
@@ -3098,7 +3098,7 @@ out_overflow: | |||
3098 | return -EIO; | 3098 | return -EIO; |
3099 | } | 3099 | } |
3100 | 3100 | ||
3101 | static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap) | 3101 | static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res) |
3102 | { | 3102 | { |
3103 | __be32 *p; | 3103 | __be32 *p; |
3104 | 3104 | ||
@@ -3109,7 +3109,7 @@ static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap) | |||
3109 | if (unlikely(!p)) | 3109 | if (unlikely(!p)) |
3110 | goto out_overflow; | 3110 | goto out_overflow; |
3111 | bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR; | 3111 | bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR; |
3112 | return -be32_to_cpup(p); | 3112 | *res = -be32_to_cpup(p); |
3113 | } | 3113 | } |
3114 | return 0; | 3114 | return 0; |
3115 | out_overflow: | 3115 | out_overflow: |
@@ -4070,6 +4070,7 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, | |||
4070 | int status; | 4070 | int status; |
4071 | umode_t fmode = 0; | 4071 | umode_t fmode = 0; |
4072 | uint32_t type; | 4072 | uint32_t type; |
4073 | int32_t err; | ||
4073 | 4074 | ||
4074 | status = decode_attr_type(xdr, bitmap, &type); | 4075 | status = decode_attr_type(xdr, bitmap, &type); |
4075 | if (status < 0) | 4076 | if (status < 0) |
@@ -4095,13 +4096,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, | |||
4095 | goto xdr_error; | 4096 | goto xdr_error; |
4096 | fattr->valid |= status; | 4097 | fattr->valid |= status; |
4097 | 4098 | ||
4098 | status = decode_attr_error(xdr, bitmap); | 4099 | err = 0; |
4099 | if (status == -NFS4ERR_WRONGSEC) { | 4100 | status = decode_attr_error(xdr, bitmap, &err); |
4100 | nfs_fixup_secinfo_attributes(fattr, fh); | ||
4101 | status = 0; | ||
4102 | } | ||
4103 | if (status < 0) | 4101 | if (status < 0) |
4104 | goto xdr_error; | 4102 | goto xdr_error; |
4103 | if (err == -NFS4ERR_WRONGSEC) | ||
4104 | nfs_fixup_secinfo_attributes(fattr, fh); | ||
4105 | 4105 | ||
4106 | status = decode_attr_filehandle(xdr, bitmap, fh); | 4106 | status = decode_attr_filehandle(xdr, bitmap, fh); |
4107 | if (status < 0) | 4107 | if (status < 0) |
@@ -4997,12 +4997,14 @@ static int decode_chan_attrs(struct xdr_stream *xdr, | |||
4997 | struct nfs4_channel_attrs *attrs) | 4997 | struct nfs4_channel_attrs *attrs) |
4998 | { | 4998 | { |
4999 | __be32 *p; | 4999 | __be32 *p; |
5000 | u32 nr_attrs; | 5000 | u32 nr_attrs, val; |
5001 | 5001 | ||
5002 | p = xdr_inline_decode(xdr, 28); | 5002 | p = xdr_inline_decode(xdr, 28); |
5003 | if (unlikely(!p)) | 5003 | if (unlikely(!p)) |
5004 | goto out_overflow; | 5004 | goto out_overflow; |
5005 | attrs->headerpadsz = be32_to_cpup(p++); | 5005 | val = be32_to_cpup(p++); /* headerpadsz */ |
5006 | if (val) | ||
5007 | return -EINVAL; /* no support for header padding yet */ | ||
5006 | attrs->max_rqst_sz = be32_to_cpup(p++); | 5008 | attrs->max_rqst_sz = be32_to_cpup(p++); |
5007 | attrs->max_resp_sz = be32_to_cpup(p++); | 5009 | attrs->max_resp_sz = be32_to_cpup(p++); |
5008 | attrs->max_resp_sz_cached = be32_to_cpup(p++); | 5010 | attrs->max_resp_sz_cached = be32_to_cpup(p++); |
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 9cf208df1f2..8ff2ea3f10e 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c | |||
@@ -108,7 +108,6 @@ _dev_list_add(const struct nfs_server *nfss, | |||
108 | de = n; | 108 | de = n; |
109 | } | 109 | } |
110 | 110 | ||
111 | atomic_inc(&de->id_node.ref); | ||
112 | return de; | 111 | return de; |
113 | } | 112 | } |
114 | 113 | ||
@@ -1001,6 +1000,9 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio, | |||
1001 | if (!pnfs_generic_pg_test(pgio, prev, req)) | 1000 | if (!pnfs_generic_pg_test(pgio, prev, req)) |
1002 | return false; | 1001 | return false; |
1003 | 1002 | ||
1003 | if (pgio->pg_lseg == NULL) | ||
1004 | return true; | ||
1005 | |||
1004 | return pgio->pg_count + req->wb_bytes <= | 1006 | return pgio->pg_count + req->wb_bytes <= |
1005 | OBJIO_LSEG(pgio->pg_lseg)->max_io_size; | 1007 | OBJIO_LSEG(pgio->pg_lseg)->max_io_size; |
1006 | } | 1008 | } |
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c index dc3956c0de8..1d06f8e2ade 100644 --- a/fs/nfs/objlayout/objlayout.c +++ b/fs/nfs/objlayout/objlayout.c | |||
@@ -291,7 +291,7 @@ objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync) | |||
291 | struct nfs_read_data *rdata; | 291 | struct nfs_read_data *rdata; |
292 | 292 | ||
293 | state->status = status; | 293 | state->status = status; |
294 | dprintk("%s: Begin status=%ld eof=%d\n", __func__, status, eof); | 294 | dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof); |
295 | rdata = state->rpcdata; | 295 | rdata = state->rpcdata; |
296 | rdata->task.tk_status = status; | 296 | rdata->task.tk_status = status; |
297 | if (status >= 0) { | 297 | if (status >= 0) { |
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 7913961aff2..00985571628 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c | |||
@@ -204,7 +204,7 @@ nfs_wait_on_request(struct nfs_page *req) | |||
204 | TASK_UNINTERRUPTIBLE); | 204 | TASK_UNINTERRUPTIBLE); |
205 | } | 205 | } |
206 | 206 | ||
207 | static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req) | 207 | bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req) |
208 | { | 208 | { |
209 | /* | 209 | /* |
210 | * FIXME: ideally we should be able to coalesce all requests | 210 | * FIXME: ideally we should be able to coalesce all requests |
@@ -218,6 +218,7 @@ static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_p | |||
218 | 218 | ||
219 | return desc->pg_count + req->wb_bytes <= desc->pg_bsize; | 219 | return desc->pg_count + req->wb_bytes <= desc->pg_bsize; |
220 | } | 220 | } |
221 | EXPORT_SYMBOL_GPL(nfs_generic_pg_test); | ||
221 | 222 | ||
222 | /** | 223 | /** |
223 | * nfs_pageio_init - initialise a page io descriptor | 224 | * nfs_pageio_init - initialise a page io descriptor |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 8c1309d852a..29c0ca7fc34 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -634,14 +634,16 @@ _pnfs_return_layout(struct inode *ino) | |||
634 | 634 | ||
635 | spin_lock(&ino->i_lock); | 635 | spin_lock(&ino->i_lock); |
636 | lo = nfsi->layout; | 636 | lo = nfsi->layout; |
637 | if (!lo || !mark_matching_lsegs_invalid(lo, &tmp_list, NULL)) { | 637 | if (!lo) { |
638 | spin_unlock(&ino->i_lock); | 638 | spin_unlock(&ino->i_lock); |
639 | dprintk("%s: no layout segments to return\n", __func__); | 639 | dprintk("%s: no layout to return\n", __func__); |
640 | goto out; | 640 | return status; |
641 | } | 641 | } |
642 | stateid = nfsi->layout->plh_stateid; | 642 | stateid = nfsi->layout->plh_stateid; |
643 | /* Reference matched in nfs4_layoutreturn_release */ | 643 | /* Reference matched in nfs4_layoutreturn_release */ |
644 | get_layout_hdr(lo); | 644 | get_layout_hdr(lo); |
645 | mark_matching_lsegs_invalid(lo, &tmp_list, NULL); | ||
646 | lo->plh_block_lgets++; | ||
645 | spin_unlock(&ino->i_lock); | 647 | spin_unlock(&ino->i_lock); |
646 | pnfs_free_lseg_list(&tmp_list); | 648 | pnfs_free_lseg_list(&tmp_list); |
647 | 649 | ||
@@ -650,6 +652,9 @@ _pnfs_return_layout(struct inode *ino) | |||
650 | lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); | 652 | lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); |
651 | if (unlikely(lrp == NULL)) { | 653 | if (unlikely(lrp == NULL)) { |
652 | status = -ENOMEM; | 654 | status = -ENOMEM; |
655 | set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags); | ||
656 | set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags); | ||
657 | put_layout_hdr(lo); | ||
653 | goto out; | 658 | goto out; |
654 | } | 659 | } |
655 | 660 | ||
@@ -887,7 +892,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, | |||
887 | ret = get_lseg(lseg); | 892 | ret = get_lseg(lseg); |
888 | break; | 893 | break; |
889 | } | 894 | } |
890 | if (cmp_layout(range, &lseg->pls_range) > 0) | 895 | if (lseg->pls_range.offset > range->offset) |
891 | break; | 896 | break; |
892 | } | 897 | } |
893 | 898 | ||
@@ -1059,23 +1064,36 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, | |||
1059 | gfp_flags = GFP_NOFS; | 1064 | gfp_flags = GFP_NOFS; |
1060 | } | 1065 | } |
1061 | 1066 | ||
1062 | if (pgio->pg_count == prev->wb_bytes) { | 1067 | if (pgio->pg_lseg == NULL) { |
1068 | if (pgio->pg_count != prev->wb_bytes) | ||
1069 | return true; | ||
1063 | /* This is the first coalesce call for a series of nfs_pages */ | 1070 | /* This is the first coalesce call for a series of nfs_pages */ |
1064 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, | 1071 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, |
1065 | prev->wb_context, | 1072 | prev->wb_context, |
1066 | req_offset(req), | 1073 | req_offset(prev), |
1067 | pgio->pg_count, | 1074 | pgio->pg_count, |
1068 | access_type, | 1075 | access_type, |
1069 | gfp_flags); | 1076 | gfp_flags); |
1070 | return true; | 1077 | if (pgio->pg_lseg == NULL) |
1078 | return true; | ||
1071 | } | 1079 | } |
1072 | 1080 | ||
1073 | if (pgio->pg_lseg && | 1081 | /* |
1074 | req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset, | 1082 | * Test if an nfs_page is fully contained in the pnfs_layout_range. |
1075 | pgio->pg_lseg->pls_range.length)) | 1083 | * Note that this test makes several assumptions: |
1076 | return false; | 1084 | * - that the previous nfs_page in the struct nfs_pageio_descriptor |
1077 | 1085 | * is known to lie within the range. | |
1078 | return true; | 1086 | * - that the nfs_page being tested is known to be contiguous with the |
1087 | * previous nfs_page. | ||
1088 | * - Layout ranges are page aligned, so we only have to test the | ||
1089 | * start offset of the request. | ||
1090 | * | ||
1091 | * Please also note that 'end_offset' is actually the offset of the | ||
1092 | * first byte that lies outside the pnfs_layout_range. FIXME? | ||
1093 | * | ||
1094 | */ | ||
1095 | return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset, | ||
1096 | pgio->pg_lseg->pls_range.length); | ||
1079 | } | 1097 | } |
1080 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); | 1098 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); |
1081 | 1099 | ||
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 48d0a8e4d06..96bf4e6f45b 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
@@ -186,6 +186,7 @@ int pnfs_ld_read_done(struct nfs_read_data *); | |||
186 | /* pnfs_dev.c */ | 186 | /* pnfs_dev.c */ |
187 | struct nfs4_deviceid_node { | 187 | struct nfs4_deviceid_node { |
188 | struct hlist_node node; | 188 | struct hlist_node node; |
189 | struct hlist_node tmpnode; | ||
189 | const struct pnfs_layoutdriver_type *ld; | 190 | const struct pnfs_layoutdriver_type *ld; |
190 | const struct nfs_client *nfs_client; | 191 | const struct nfs_client *nfs_client; |
191 | struct nfs4_deviceid deviceid; | 192 | struct nfs4_deviceid deviceid; |
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c index c65e133ce9c..f0f8e1e22f6 100644 --- a/fs/nfs/pnfs_dev.c +++ b/fs/nfs/pnfs_dev.c | |||
@@ -174,6 +174,7 @@ nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, | |||
174 | const struct nfs4_deviceid *id) | 174 | const struct nfs4_deviceid *id) |
175 | { | 175 | { |
176 | INIT_HLIST_NODE(&d->node); | 176 | INIT_HLIST_NODE(&d->node); |
177 | INIT_HLIST_NODE(&d->tmpnode); | ||
177 | d->ld = ld; | 178 | d->ld = ld; |
178 | d->nfs_client = nfs_client; | 179 | d->nfs_client = nfs_client; |
179 | d->deviceid = *id; | 180 | d->deviceid = *id; |
@@ -208,6 +209,7 @@ nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new) | |||
208 | 209 | ||
209 | hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]); | 210 | hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]); |
210 | spin_unlock(&nfs4_deviceid_lock); | 211 | spin_unlock(&nfs4_deviceid_lock); |
212 | atomic_inc(&new->ref); | ||
211 | 213 | ||
212 | return new; | 214 | return new; |
213 | } | 215 | } |
@@ -238,24 +240,29 @@ static void | |||
238 | _deviceid_purge_client(const struct nfs_client *clp, long hash) | 240 | _deviceid_purge_client(const struct nfs_client *clp, long hash) |
239 | { | 241 | { |
240 | struct nfs4_deviceid_node *d; | 242 | struct nfs4_deviceid_node *d; |
241 | struct hlist_node *n, *next; | 243 | struct hlist_node *n; |
242 | HLIST_HEAD(tmp); | 244 | HLIST_HEAD(tmp); |
243 | 245 | ||
246 | spin_lock(&nfs4_deviceid_lock); | ||
244 | rcu_read_lock(); | 247 | rcu_read_lock(); |
245 | hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node) | 248 | hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node) |
246 | if (d->nfs_client == clp && atomic_read(&d->ref)) { | 249 | if (d->nfs_client == clp && atomic_read(&d->ref)) { |
247 | hlist_del_init_rcu(&d->node); | 250 | hlist_del_init_rcu(&d->node); |
248 | hlist_add_head(&d->node, &tmp); | 251 | hlist_add_head(&d->tmpnode, &tmp); |
249 | } | 252 | } |
250 | rcu_read_unlock(); | 253 | rcu_read_unlock(); |
254 | spin_unlock(&nfs4_deviceid_lock); | ||
251 | 255 | ||
252 | if (hlist_empty(&tmp)) | 256 | if (hlist_empty(&tmp)) |
253 | return; | 257 | return; |
254 | 258 | ||
255 | synchronize_rcu(); | 259 | synchronize_rcu(); |
256 | hlist_for_each_entry_safe(d, n, next, &tmp, node) | 260 | while (!hlist_empty(&tmp)) { |
261 | d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode); | ||
262 | hlist_del(&d->tmpnode); | ||
257 | if (atomic_dec_and_test(&d->ref)) | 263 | if (atomic_dec_and_test(&d->ref)) |
258 | d->ld->free_deviceid_node(d); | 264 | d->ld->free_deviceid_node(d); |
265 | } | ||
259 | } | 266 | } |
260 | 267 | ||
261 | void | 268 | void |
@@ -263,8 +270,8 @@ nfs4_deviceid_purge_client(const struct nfs_client *clp) | |||
263 | { | 270 | { |
264 | long h; | 271 | long h; |
265 | 272 | ||
266 | spin_lock(&nfs4_deviceid_lock); | 273 | if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS)) |
274 | return; | ||
267 | for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++) | 275 | for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++) |
268 | _deviceid_purge_client(clp, h); | 276 | _deviceid_purge_client(clp, h); |
269 | spin_unlock(&nfs4_deviceid_lock); | ||
270 | } | 277 | } |
diff --git a/fs/omfs/file.c b/fs/omfs/file.c index d738a7e493d..2c6d95257a4 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c | |||
@@ -4,7 +4,6 @@ | |||
4 | * Released under GPL v2. | 4 | * Released under GPL v2. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/version.h> | ||
8 | #include <linux/module.h> | 7 | #include <linux/module.h> |
9 | #include <linux/fs.h> | 8 | #include <linux/fs.h> |
10 | #include <linux/buffer_head.h> | 9 | #include <linux/buffer_head.h> |
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c index f0511e81696..eed99428f10 100644 --- a/fs/romfs/mmap-nommu.c +++ b/fs/romfs/mmap-nommu.c | |||
@@ -27,14 +27,18 @@ static unsigned long romfs_get_unmapped_area(struct file *file, | |||
27 | { | 27 | { |
28 | struct inode *inode = file->f_mapping->host; | 28 | struct inode *inode = file->f_mapping->host; |
29 | struct mtd_info *mtd = inode->i_sb->s_mtd; | 29 | struct mtd_info *mtd = inode->i_sb->s_mtd; |
30 | unsigned long isize, offset; | 30 | unsigned long isize, offset, maxpages, lpages; |
31 | 31 | ||
32 | if (!mtd) | 32 | if (!mtd) |
33 | goto cant_map_directly; | 33 | goto cant_map_directly; |
34 | 34 | ||
35 | /* the mapping mustn't extend beyond the EOF */ | ||
36 | lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
35 | isize = i_size_read(inode); | 37 | isize = i_size_read(inode); |
36 | offset = pgoff << PAGE_SHIFT; | 38 | offset = pgoff << PAGE_SHIFT; |
37 | if (offset > isize || len > isize || offset > isize - len) | 39 | |
40 | maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
41 | if ((pgoff >= maxpages) || (maxpages - pgoff < lpages)) | ||
38 | return (unsigned long) -EINVAL; | 42 | return (unsigned long) -EINVAL; |
39 | 43 | ||
40 | /* we need to call down to the MTD layer to do the actual mapping */ | 44 | /* we need to call down to the MTD layer to do the actual mapping */ |
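A quick worked case for the new page-granular EOF test (numbers are illustrative; PAGE_SIZE assumed to be 4096):

	/* isize = 5000 bytes:    maxpages = (5000 + 4095) >> PAGE_SHIFT = 2
	 * pgoff = 0, len = 8192: lpages   = (8192 + 4095) >> PAGE_SHIFT = 2
	 * old test: len > isize                           -> -EINVAL
	 * new test: maxpages - pgoff == 2, not < lpages   -> map allowed
	 * i.e. comparing page counts admits a map that covers the file's
	 * final partial page, which the byte-exact test refused. */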
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index c8637537881..01d2072fb6d 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c | |||
@@ -490,6 +490,13 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags) | |||
490 | args.whichfork = XFS_ATTR_FORK; | 490 | args.whichfork = XFS_ATTR_FORK; |
491 | 491 | ||
492 | /* | 492 | /* |
493 | * we have no control over the attribute names that userspace passes us | ||
494 | * to remove, so we have to allow the name lookup prior to attribute | ||
495 | * removal to fail. | ||
496 | */ | ||
497 | args.op_flags = XFS_DA_OP_OKNOENT; | ||
498 | |||
499 | /* | ||
493 | * Attach the dquots to the inode. | 500 | * Attach the dquots to the inode. |
494 | */ | 501 | */ |
495 | error = xfs_qm_dqattach(dp, 0); | 502 | error = xfs_qm_dqattach(dp, 0); |
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index cb9b6d1469f..3631783b2b5 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -253,16 +253,21 @@ xfs_iget_cache_hit( | |||
253 | rcu_read_lock(); | 253 | rcu_read_lock(); |
254 | spin_lock(&ip->i_flags_lock); | 254 | spin_lock(&ip->i_flags_lock); |
255 | 255 | ||
256 | ip->i_flags &= ~XFS_INEW; | 256 | ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); |
257 | ip->i_flags |= XFS_IRECLAIMABLE; | 257 | ASSERT(ip->i_flags & XFS_IRECLAIMABLE); |
258 | __xfs_inode_set_reclaim_tag(pag, ip); | ||
259 | trace_xfs_iget_reclaim_fail(ip); | 258 | trace_xfs_iget_reclaim_fail(ip); |
260 | goto out_error; | 259 | goto out_error; |
261 | } | 260 | } |
262 | 261 | ||
263 | spin_lock(&pag->pag_ici_lock); | 262 | spin_lock(&pag->pag_ici_lock); |
264 | spin_lock(&ip->i_flags_lock); | 263 | spin_lock(&ip->i_flags_lock); |
265 | ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM); | 264 | |
265 | /* | ||
266 | * Clear the per-lifetime state in the inode as we are now | ||
267 | * effectively a new inode and need to return to the initial | ||
268 | * state before reuse occurs. | ||
269 | */ | ||
270 | ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; | ||
266 | ip->i_flags |= XFS_INEW; | 271 | ip->i_flags |= XFS_INEW; |
267 | __xfs_inode_clear_reclaim_tag(mp, pag, ip); | 272 | __xfs_inode_clear_reclaim_tag(mp, pag, ip); |
268 | inode->i_state = I_NEW; | 273 | inode->i_state = I_NEW; |
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 3ae6d58e547..964cfea7768 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -384,6 +384,16 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) | |||
384 | #define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */ | 384 | #define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */ |
385 | 385 | ||
386 | /* | 386 | /* |
387 | * Per-lifetime flags need to be reset when re-using a reclaimable inode during | ||
388 | * inode lookup. Thi prevents unintended behaviour on the new inode from | ||
389 | * ocurring. | ||
390 | */ | ||
391 | #define XFS_IRECLAIM_RESET_FLAGS \ | ||
392 | (XFS_IRECLAIMABLE | XFS_IRECLAIM | \ | ||
393 | XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | \ | ||
394 | XFS_IFILESTREAM) | ||
395 | |||
396 | /* | ||
387 | * Flags for inode locking. | 397 | * Flags for inode locking. |
388 | * Bit ranges: 1<<1 - 1<<16-1 -- iolock/ilock modes (bitfield) | 398 | * Bit ranges: 1<<1 - 1<<16-1 -- iolock/ilock modes (bitfield) |
389 | * 1<<16 - 1<<32-1 -- lockdep annotation (integers) | 399 | * 1<<16 - 1<<32-1 -- lockdep annotation (integers) |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index b7a5fe7c52c..619720705bc 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -960,8 +960,11 @@ xfs_release( | |||
960 | * be exposed to that problem. | 960 | * be exposed to that problem. |
961 | */ | 961 | */ |
962 | truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); | 962 | truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); |
963 | if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) | 963 | if (truncated) { |
964 | xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE); | 964 | xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE); |
965 | if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) | ||
966 | xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE); | ||
967 | } | ||
965 | } | 968 | } |
966 | 969 | ||
967 | if (ip->i_d.di_nlink == 0) | 970 | if (ip->i_d.di_nlink == 0) |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 2a7cea53ca0..6395692b2e7 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -167,7 +167,7 @@ enum rq_flag_bits { | |||
167 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) | 167 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) |
168 | #define REQ_COMMON_MASK \ | 168 | #define REQ_COMMON_MASK \ |
169 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ | 169 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ |
170 | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) | 170 | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE) |
171 | #define REQ_CLONE_MASK REQ_COMMON_MASK | 171 | #define REQ_CLONE_MASK REQ_COMMON_MASK |
172 | 172 | ||
173 | #define REQ_RAHEAD (1 << __REQ_RAHEAD) | 173 | #define REQ_RAHEAD (1 << __REQ_RAHEAD) |
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index b22fb0d3db0..8c7c2de7631 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
@@ -169,7 +169,8 @@ extern void blk_trace_shutdown(struct request_queue *); | |||
169 | extern int do_blk_trace_setup(struct request_queue *q, char *name, | 169 | extern int do_blk_trace_setup(struct request_queue *q, char *name, |
170 | dev_t dev, struct block_device *bdev, | 170 | dev_t dev, struct block_device *bdev, |
171 | struct blk_user_trace_setup *buts); | 171 | struct blk_user_trace_setup *buts); |
172 | extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); | 172 | extern __attribute__((format(printf, 2, 3))) |
173 | void __trace_note_message(struct blk_trace *, const char *fmt, ...); | ||
173 | 174 | ||
174 | /** | 175 | /** |
175 | * blk_add_trace_msg - Add a (simple) message to the blktrace stream | 176 | * blk_add_trace_msg - Add a (simple) message to the blktrace stream |
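Adding format(printf, 2, 3) to __trace_note_message() lets the compiler check the variadic arguments against the format string (parameter 2, arguments from 3 on). A hypothetical mismatch that now draws a -Wformat warning instead of reading garbage at runtime (bt and nbytes are placeholder names):

	__trace_note_message(bt, "bytes=%d", (long)nbytes);	/* warns: %d vs long */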
diff --git a/include/linux/compat.h b/include/linux/compat.h index ddcb7db38e6..846bb179257 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
@@ -467,6 +467,8 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, | |||
467 | char __user *optval, unsigned int optlen); | 467 | char __user *optval, unsigned int optlen); |
468 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, | 468 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, |
469 | unsigned flags); | 469 | unsigned flags); |
470 | asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, | ||
471 | unsigned vlen, unsigned int flags); | ||
470 | asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, | 472 | asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, |
471 | unsigned int flags); | 473 | unsigned int flags); |
472 | asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, | 474 | asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, |
diff --git a/include/linux/device.h b/include/linux/device.h index c66111affca..e4f62d8896b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -530,7 +530,6 @@ struct device_dma_parameters { | |||
530 | * @dma_mem: Internal for coherent mem override. | 530 | * @dma_mem: Internal for coherent mem override. |
531 | * @archdata: For arch-specific additions. | 531 | * @archdata: For arch-specific additions. |
532 | * @of_node: Associated device tree node. | 532 | * @of_node: Associated device tree node. |
533 | * @of_match: Matching of_device_id from driver. | ||
534 | * @devt: For creating the sysfs "dev". | 533 | * @devt: For creating the sysfs "dev". |
535 | * @devres_lock: Spinlock to protect the resource of the device. | 534 | * @devres_lock: Spinlock to protect the resource of the device. |
536 | * @devres_head: The resources list of the device. | 535 | * @devres_head: The resources list of the device. |
@@ -654,13 +653,13 @@ static inline int device_is_registered(struct device *dev) | |||
654 | 653 | ||
655 | static inline void device_enable_async_suspend(struct device *dev) | 654 | static inline void device_enable_async_suspend(struct device *dev) |
656 | { | 655 | { |
657 | if (!dev->power.in_suspend) | 656 | if (!dev->power.is_prepared) |
658 | dev->power.async_suspend = true; | 657 | dev->power.async_suspend = true; |
659 | } | 658 | } |
660 | 659 | ||
661 | static inline void device_disable_async_suspend(struct device *dev) | 660 | static inline void device_disable_async_suspend(struct device *dev) |
662 | { | 661 | { |
663 | if (!dev->power.in_suspend) | 662 | if (!dev->power.is_prepared) |
664 | dev->power.async_suspend = false; | 663 | dev->power.async_suspend = false; |
665 | } | 664 | } |
666 | 665 | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index 6e73e2e9ae3..b5b97924786 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -639,6 +639,7 @@ struct address_space { | |||
639 | struct prio_tree_root i_mmap; /* tree of private and shared mappings */ | 639 | struct prio_tree_root i_mmap; /* tree of private and shared mappings */ |
640 | struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ | 640 | struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ |
641 | struct mutex i_mmap_mutex; /* protect tree, count, list */ | 641 | struct mutex i_mmap_mutex; /* protect tree, count, list */ |
642 | /* Protected by tree_lock together with the radix tree */ | ||
642 | unsigned long nrpages; /* number of total pages */ | 643 | unsigned long nrpages; /* number of total pages */ |
643 | pgoff_t writeback_index;/* writeback starts here */ | 644 | pgoff_t writeback_index;/* writeback starts here */ |
644 | const struct address_space_operations *a_ops; /* methods */ | 645 | const struct address_space_operations *a_ops; /* methods */ |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 59d3ef100eb..b1e69eefc20 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -129,6 +129,10 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, | |||
129 | void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, | 129 | void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, |
130 | struct ring_buffer_event *event, | 130 | struct ring_buffer_event *event, |
131 | unsigned long flags, int pc); | 131 | unsigned long flags, int pc); |
132 | void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer, | ||
133 | struct ring_buffer_event *event, | ||
134 | unsigned long flags, int pc, | ||
135 | struct pt_regs *regs); | ||
132 | void trace_current_buffer_discard_commit(struct ring_buffer *buffer, | 136 | void trace_current_buffer_discard_commit(struct ring_buffer *buffer, |
133 | struct ring_buffer_event *event); | 137 | struct ring_buffer_event *event); |
134 | 138 | ||
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 51932e5acf7..fd0dc30c9f1 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -135,6 +135,7 @@ struct hrtimer_sleeper { | |||
135 | * @cpu_base: per cpu clock base | 135 | * @cpu_base: per cpu clock base |
136 | * @index: clock type index for per_cpu support when moving a | 136 | * @index: clock type index for per_cpu support when moving a |
137 | * timer to a base on another cpu. | 137 | * timer to a base on another cpu. |
138 | * @clockid: clock id for per_cpu support | ||
138 | * @active: red black tree root node for the active timers | 139 | * @active: red black tree root node for the active timers |
139 | * @resolution: the resolution of the clock, in nanoseconds | 140 | * @resolution: the resolution of the clock, in nanoseconds |
140 | * @get_time: function to retrieve the current time of the clock | 141 | * @get_time: function to retrieve the current time of the clock |
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index d1e55fed2c7..6ae9c631a1b 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h | |||
@@ -73,6 +73,7 @@ static inline unsigned long hw_breakpoint_len(struct perf_event *bp) | |||
73 | extern struct perf_event * | 73 | extern struct perf_event * |
74 | register_user_hw_breakpoint(struct perf_event_attr *attr, | 74 | register_user_hw_breakpoint(struct perf_event_attr *attr, |
75 | perf_overflow_handler_t triggered, | 75 | perf_overflow_handler_t triggered, |
76 | void *context, | ||
76 | struct task_struct *tsk); | 77 | struct task_struct *tsk); |
77 | 78 | ||
78 | /* FIXME: only change from the attr, and don't unregister */ | 79 | /* FIXME: only change from the attr, and don't unregister */ |
@@ -85,11 +86,13 @@ modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr); | |||
85 | extern struct perf_event * | 86 | extern struct perf_event * |
86 | register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, | 87 | register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, |
87 | perf_overflow_handler_t triggered, | 88 | perf_overflow_handler_t triggered, |
89 | void *context, | ||
88 | int cpu); | 90 | int cpu); |
89 | 91 | ||
90 | extern struct perf_event * __percpu * | 92 | extern struct perf_event * __percpu * |
91 | register_wide_hw_breakpoint(struct perf_event_attr *attr, | 93 | register_wide_hw_breakpoint(struct perf_event_attr *attr, |
92 | perf_overflow_handler_t triggered); | 94 | perf_overflow_handler_t triggered, |
95 | void *context); | ||
93 | 96 | ||
94 | extern int register_perf_hw_breakpoint(struct perf_event *bp); | 97 | extern int register_perf_hw_breakpoint(struct perf_event *bp); |
95 | extern int __register_perf_hw_breakpoint(struct perf_event *bp); | 98 | extern int __register_perf_hw_breakpoint(struct perf_event *bp); |
@@ -115,6 +118,7 @@ static inline int __init init_hw_breakpoint(void) { return 0; } | |||
115 | static inline struct perf_event * | 118 | static inline struct perf_event * |
116 | register_user_hw_breakpoint(struct perf_event_attr *attr, | 119 | register_user_hw_breakpoint(struct perf_event_attr *attr, |
117 | perf_overflow_handler_t triggered, | 120 | perf_overflow_handler_t triggered, |
121 | void *context, | ||
118 | struct task_struct *tsk) { return NULL; } | 122 | struct task_struct *tsk) { return NULL; } |
119 | static inline int | 123 | static inline int |
120 | modify_user_hw_breakpoint(struct perf_event *bp, | 124 | modify_user_hw_breakpoint(struct perf_event *bp, |
@@ -122,10 +126,12 @@ modify_user_hw_breakpoint(struct perf_event *bp, | |||
122 | static inline struct perf_event * | 126 | static inline struct perf_event * |
123 | register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, | 127 | register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, |
124 | perf_overflow_handler_t triggered, | 128 | perf_overflow_handler_t triggered, |
129 | void *context, | ||
125 | int cpu) { return NULL; } | 130 | int cpu) { return NULL; } |
126 | static inline struct perf_event * __percpu * | 131 | static inline struct perf_event * __percpu * |
127 | register_wide_hw_breakpoint(struct perf_event_attr *attr, | 132 | register_wide_hw_breakpoint(struct perf_event_attr *attr, |
128 | perf_overflow_handler_t triggered) { return NULL; } | 133 | perf_overflow_handler_t triggered, |
134 | void *context) { return NULL; } | ||
129 | static inline int | 135 | static inline int |
130 | register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } | 136 | register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } |
131 | static inline int | 137 | static inline int |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c928dac6cad..9f7c3ebcbba 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -647,6 +647,13 @@ typedef struct pglist_data { | |||
647 | #endif | 647 | #endif |
648 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) | 648 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) |
649 | 649 | ||
650 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
651 | |||
652 | #define node_end_pfn(nid) ({\ | ||
653 | pg_data_t *__pgdat = NODE_DATA(nid);\ | ||
654 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\ | ||
655 | }) | ||
656 | |||
650 | #include <linux/memory_hotplug.h> | 657 | #include <linux/memory_hotplug.h> |
651 | 658 | ||
652 | extern struct mutex zonelists_mutex; | 659 | extern struct mutex zonelists_mutex; |
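The new node_start_pfn()/node_end_pfn() helpers bracket a node's spanned PFN range, with node_end_pfn() exclusive (start plus spanned pages). A minimal usage sketch, with the usual caveat that a spanned range may contain holes:

	unsigned long pfn;

	for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++) {
		if (!pfn_valid(pfn))	/* spanned != present: skip holes */
			continue;
		/* ... operate on pfn_to_page(pfn) ... */
	}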
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 3a34e80ae92..25311b3bedf 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h | |||
@@ -92,6 +92,9 @@ extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, | |||
92 | struct nfs_page *); | 92 | struct nfs_page *); |
93 | extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); | 93 | extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); |
94 | extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); | 94 | extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); |
95 | extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, | ||
96 | struct nfs_page *prev, | ||
97 | struct nfs_page *req); | ||
95 | extern int nfs_wait_on_request(struct nfs_page *); | 98 | extern int nfs_wait_on_request(struct nfs_page *); |
96 | extern void nfs_unlock_request(struct nfs_page *req); | 99 | extern void nfs_unlock_request(struct nfs_page *req); |
97 | extern int nfs_set_page_tag_locked(struct nfs_page *req); | 100 | extern int nfs_set_page_tag_locked(struct nfs_page *req); |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 5e8444a11ad..00848d86ffb 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -158,7 +158,6 @@ struct nfs_seqid; | |||
158 | 158 | ||
159 | /* nfs41 sessions channel attributes */ | 159 | /* nfs41 sessions channel attributes */ |
160 | struct nfs4_channel_attrs { | 160 | struct nfs4_channel_attrs { |
161 | u32 headerpadsz; | ||
162 | u32 max_rqst_sz; | 161 | u32 max_rqst_sz; |
163 | u32 max_resp_sz; | 162 | u32 max_resp_sz; |
164 | u32 max_resp_sz_cached; | 163 | u32 max_resp_sz_cached; |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index a311008af5e..f8910e15556 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1537,6 +1537,7 @@ | |||
1537 | #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 | 1537 | #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 |
1538 | #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 | 1538 | #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 |
1539 | #define PCI_DEVICE_ID_RICOH_R5C822 0x0822 | 1539 | #define PCI_DEVICE_ID_RICOH_R5C822 0x0822 |
1540 | #define PCI_DEVICE_ID_RICOH_R5CE823 0xe823 | ||
1540 | #define PCI_DEVICE_ID_RICOH_R5C832 0x0832 | 1541 | #define PCI_DEVICE_ID_RICOH_R5C832 0x0832 |
1541 | #define PCI_DEVICE_ID_RICOH_R5C843 0x0843 | 1542 | #define PCI_DEVICE_ID_RICOH_R5C843 0x0843 |
1542 | 1543 | ||
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e0786e35f24..3f2711ccf91 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -61,7 +61,7 @@ enum perf_hw_id { | |||
61 | /* | 61 | /* |
62 | * Generalized hardware cache events: | 62 | * Generalized hardware cache events: |
63 | * | 63 | * |
64 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x | 64 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x |
65 | * { read, write, prefetch } x | 65 | * { read, write, prefetch } x |
66 | * { accesses, misses } | 66 | * { accesses, misses } |
67 | */ | 67 | */ |
@@ -72,6 +72,7 @@ enum perf_hw_cache_id { | |||
72 | PERF_COUNT_HW_CACHE_DTLB = 3, | 72 | PERF_COUNT_HW_CACHE_DTLB = 3, |
73 | PERF_COUNT_HW_CACHE_ITLB = 4, | 73 | PERF_COUNT_HW_CACHE_ITLB = 4, |
74 | PERF_COUNT_HW_CACHE_BPU = 5, | 74 | PERF_COUNT_HW_CACHE_BPU = 5, |
75 | PERF_COUNT_HW_CACHE_NODE = 6, | ||
75 | 76 | ||
76 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ | 77 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ |
77 | }; | 78 | }; |
@@ -536,6 +537,16 @@ struct perf_branch_stack { | |||
536 | 537 | ||
537 | struct task_struct; | 538 | struct task_struct; |
538 | 539 | ||
540 | /* | ||
541 | * extra PMU register associated with an event | ||
542 | */ | ||
543 | struct hw_perf_event_extra { | ||
544 | u64 config; /* register value */ | ||
545 | unsigned int reg; /* register address or index */ | ||
546 | int alloc; /* extra register already allocated */ | ||
547 | int idx; /* index in shared_regs->regs[] */ | ||
548 | }; | ||
549 | |||
539 | /** | 550 | /** |
540 | * struct hw_perf_event - performance event hardware details: | 551 | * struct hw_perf_event - performance event hardware details: |
541 | */ | 552 | */ |
@@ -549,9 +560,7 @@ struct hw_perf_event { | |||
549 | unsigned long event_base; | 560 | unsigned long event_base; |
550 | int idx; | 561 | int idx; |
551 | int last_cpu; | 562 | int last_cpu; |
552 | unsigned int extra_reg; | 563 | struct hw_perf_event_extra extra_reg; |
553 | u64 extra_config; | ||
554 | int extra_alloc; | ||
555 | }; | 564 | }; |
556 | struct { /* software */ | 565 | struct { /* software */ |
557 | struct hrtimer hrtimer; | 566 | struct hrtimer hrtimer; |
@@ -680,36 +689,9 @@ enum perf_event_active_state { | |||
680 | }; | 689 | }; |
681 | 690 | ||
682 | struct file; | 691 | struct file; |
683 | |||
684 | #define PERF_BUFFER_WRITABLE 0x01 | ||
685 | |||
686 | struct perf_buffer { | ||
687 | atomic_t refcount; | ||
688 | struct rcu_head rcu_head; | ||
689 | #ifdef CONFIG_PERF_USE_VMALLOC | ||
690 | struct work_struct work; | ||
691 | int page_order; /* allocation order */ | ||
692 | #endif | ||
693 | int nr_pages; /* nr of data pages */ | ||
694 | int writable; /* are we writable */ | ||
695 | |||
696 | atomic_t poll; /* POLL_ for wakeups */ | ||
697 | |||
698 | local_t head; /* write position */ | ||
699 | local_t nest; /* nested writers */ | ||
700 | local_t events; /* event limit */ | ||
701 | local_t wakeup; /* wakeup stamp */ | ||
702 | local_t lost; /* nr records lost */ | ||
703 | |||
704 | long watermark; /* wakeup watermark */ | ||
705 | |||
706 | struct perf_event_mmap_page *user_page; | ||
707 | void *data_pages[0]; | ||
708 | }; | ||
709 | |||
710 | struct perf_sample_data; | 692 | struct perf_sample_data; |
711 | 693 | ||
712 | typedef void (*perf_overflow_handler_t)(struct perf_event *, int, | 694 | typedef void (*perf_overflow_handler_t)(struct perf_event *, |
713 | struct perf_sample_data *, | 695 | struct perf_sample_data *, |
714 | struct pt_regs *regs); | 696 | struct pt_regs *regs); |
715 | 697 | ||
@@ -745,6 +727,8 @@ struct perf_cgroup { | |||
745 | }; | 727 | }; |
746 | #endif | 728 | #endif |
747 | 729 | ||
730 | struct ring_buffer; | ||
731 | |||
748 | /** | 732 | /** |
749 | * struct perf_event - performance event kernel representation: | 733 | * struct perf_event - performance event kernel representation: |
750 | */ | 734 | */ |
@@ -834,7 +818,7 @@ struct perf_event { | |||
834 | atomic_t mmap_count; | 818 | atomic_t mmap_count; |
835 | int mmap_locked; | 819 | int mmap_locked; |
836 | struct user_struct *mmap_user; | 820 | struct user_struct *mmap_user; |
837 | struct perf_buffer *buffer; | 821 | struct ring_buffer *rb; |
838 | 822 | ||
839 | /* poll related */ | 823 | /* poll related */ |
840 | wait_queue_head_t waitq; | 824 | wait_queue_head_t waitq; |
@@ -855,6 +839,7 @@ struct perf_event { | |||
855 | u64 id; | 839 | u64 id; |
856 | 840 | ||
857 | perf_overflow_handler_t overflow_handler; | 841 | perf_overflow_handler_t overflow_handler; |
842 | void *overflow_handler_context; | ||
858 | 843 | ||
859 | #ifdef CONFIG_EVENT_TRACING | 844 | #ifdef CONFIG_EVENT_TRACING |
860 | struct ftrace_event_call *tp_event; | 845 | struct ftrace_event_call *tp_event; |
@@ -919,8 +904,8 @@ struct perf_event_context { | |||
919 | u64 parent_gen; | 904 | u64 parent_gen; |
920 | u64 generation; | 905 | u64 generation; |
921 | int pin_count; | 906 | int pin_count; |
922 | struct rcu_head rcu_head; | ||
923 | int nr_cgroups; /* cgroup events present */ | 907 | int nr_cgroups; /* cgroup events present */ |
908 | struct rcu_head rcu_head; | ||
924 | }; | 909 | }; |
925 | 910 | ||
926 | /* | 911 | /* |
@@ -945,13 +930,11 @@ struct perf_cpu_context { | |||
945 | 930 | ||
946 | struct perf_output_handle { | 931 | struct perf_output_handle { |
947 | struct perf_event *event; | 932 | struct perf_event *event; |
948 | struct perf_buffer *buffer; | 933 | struct ring_buffer *rb; |
949 | unsigned long wakeup; | 934 | unsigned long wakeup; |
950 | unsigned long size; | 935 | unsigned long size; |
951 | void *addr; | 936 | void *addr; |
952 | int page; | 937 | int page; |
953 | int nmi; | ||
954 | int sample; | ||
955 | }; | 938 | }; |
956 | 939 | ||
957 | #ifdef CONFIG_PERF_EVENTS | 940 | #ifdef CONFIG_PERF_EVENTS |
@@ -972,13 +955,15 @@ extern void perf_pmu_disable(struct pmu *pmu); | |||
972 | extern void perf_pmu_enable(struct pmu *pmu); | 955 | extern void perf_pmu_enable(struct pmu *pmu); |
973 | extern int perf_event_task_disable(void); | 956 | extern int perf_event_task_disable(void); |
974 | extern int perf_event_task_enable(void); | 957 | extern int perf_event_task_enable(void); |
958 | extern int perf_event_refresh(struct perf_event *event, int refresh); | ||
975 | extern void perf_event_update_userpage(struct perf_event *event); | 959 | extern void perf_event_update_userpage(struct perf_event *event); |
976 | extern int perf_event_release_kernel(struct perf_event *event); | 960 | extern int perf_event_release_kernel(struct perf_event *event); |
977 | extern struct perf_event * | 961 | extern struct perf_event * |
978 | perf_event_create_kernel_counter(struct perf_event_attr *attr, | 962 | perf_event_create_kernel_counter(struct perf_event_attr *attr, |
979 | int cpu, | 963 | int cpu, |
980 | struct task_struct *task, | 964 | struct task_struct *task, |
981 | perf_overflow_handler_t callback); | 965 | perf_overflow_handler_t callback, |
966 | void *context); | ||
982 | extern u64 perf_event_read_value(struct perf_event *event, | 967 | extern u64 perf_event_read_value(struct perf_event *event, |
983 | u64 *enabled, u64 *running); | 968 | u64 *enabled, u64 *running); |
984 | 969 | ||
@@ -1018,7 +1003,7 @@ extern void perf_prepare_sample(struct perf_event_header *header, | |||
1018 | struct perf_event *event, | 1003 | struct perf_event *event, |
1019 | struct pt_regs *regs); | 1004 | struct pt_regs *regs); |
1020 | 1005 | ||
1021 | extern int perf_event_overflow(struct perf_event *event, int nmi, | 1006 | extern int perf_event_overflow(struct perf_event *event, |
1022 | struct perf_sample_data *data, | 1007 | struct perf_sample_data *data, |
1023 | struct pt_regs *regs); | 1008 | struct pt_regs *regs); |
1024 | 1009 | ||
@@ -1037,7 +1022,7 @@ static inline int is_software_event(struct perf_event *event) | |||
1037 | 1022 | ||
1038 | extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 1023 | extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
1039 | 1024 | ||
1040 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); | 1025 | extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); |
1041 | 1026 | ||
1042 | #ifndef perf_arch_fetch_caller_regs | 1027 | #ifndef perf_arch_fetch_caller_regs |
1043 | static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } | 1028 | static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } |
@@ -1059,7 +1044,7 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs) | |||
1059 | } | 1044 | } |
1060 | 1045 | ||
1061 | static __always_inline void | 1046 | static __always_inline void |
1062 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | 1047 | perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) |
1063 | { | 1048 | { |
1064 | struct pt_regs hot_regs; | 1049 | struct pt_regs hot_regs; |
1065 | 1050 | ||
@@ -1068,7 +1053,7 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | |||
1068 | perf_fetch_caller_regs(&hot_regs); | 1053 | perf_fetch_caller_regs(&hot_regs); |
1069 | regs = &hot_regs; | 1054 | regs = &hot_regs; |
1070 | } | 1055 | } |
1071 | __perf_sw_event(event_id, nr, nmi, regs, addr); | 1056 | __perf_sw_event(event_id, nr, regs, addr); |
1072 | } | 1057 | } |
1073 | } | 1058 | } |
1074 | 1059 | ||
@@ -1082,7 +1067,7 @@ static inline void perf_event_task_sched_in(struct task_struct *task) | |||
1082 | 1067 | ||
1083 | static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) | 1068 | static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) |
1084 | { | 1069 | { |
1085 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); | 1070 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); |
1086 | 1071 | ||
1087 | __perf_event_task_sched_out(task, next); | 1072 | __perf_event_task_sched_out(task, next); |
1088 | } | 1073 | } |
@@ -1143,8 +1128,7 @@ extern void perf_bp_event(struct perf_event *event, void *data); | |||
1143 | #endif | 1128 | #endif |
1144 | 1129 | ||
1145 | extern int perf_output_begin(struct perf_output_handle *handle, | 1130 | extern int perf_output_begin(struct perf_output_handle *handle, |
1146 | struct perf_event *event, unsigned int size, | 1131 | struct perf_event *event, unsigned int size); |
1147 | int nmi, int sample); | ||
1148 | extern void perf_output_end(struct perf_output_handle *handle); | 1132 | extern void perf_output_end(struct perf_output_handle *handle); |
1149 | extern void perf_output_copy(struct perf_output_handle *handle, | 1133 | extern void perf_output_copy(struct perf_output_handle *handle, |
1150 | const void *buf, unsigned int len); | 1134 | const void *buf, unsigned int len); |
@@ -1166,10 +1150,13 @@ static inline void perf_event_delayed_put(struct task_struct *task) { } | |||
1166 | static inline void perf_event_print_debug(void) { } | 1150 | static inline void perf_event_print_debug(void) { } |
1167 | static inline int perf_event_task_disable(void) { return -EINVAL; } | 1151 | static inline int perf_event_task_disable(void) { return -EINVAL; } |
1168 | static inline int perf_event_task_enable(void) { return -EINVAL; } | 1152 | static inline int perf_event_task_enable(void) { return -EINVAL; } |
1153 | static inline int perf_event_refresh(struct perf_event *event, int refresh) | ||
1154 | { | ||
1155 | return -EINVAL; | ||
1156 | } | ||
1169 | 1157 | ||
1170 | static inline void | 1158 | static inline void |
1171 | perf_sw_event(u32 event_id, u64 nr, int nmi, | 1159 | perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } |
1172 | struct pt_regs *regs, u64 addr) { } | ||
1173 | static inline void | 1160 | static inline void |
1174 | perf_bp_event(struct perf_event *event, void *data) { } | 1161 | perf_bp_event(struct perf_event *event, void *data) { } |
1175 | 1162 | ||
diff --git a/include/linux/pm.h b/include/linux/pm.h index 3160648ccdd..411e4f4be52 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -425,7 +425,8 @@ struct dev_pm_info { | |||
425 | pm_message_t power_state; | 425 | pm_message_t power_state; |
426 | unsigned int can_wakeup:1; | 426 | unsigned int can_wakeup:1; |
427 | unsigned int async_suspend:1; | 427 | unsigned int async_suspend:1; |
428 | unsigned int in_suspend:1; /* Owned by the PM core */ | 428 | bool is_prepared:1; /* Owned by the PM core */ |
429 | bool is_suspended:1; /* Ditto */ | ||
429 | spinlock_t lock; | 430 | spinlock_t lock; |
430 | #ifdef CONFIG_PM_SLEEP | 431 | #ifdef CONFIG_PM_SLEEP |
431 | struct list_head entry; | 432 | struct list_head entry; |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index ab38ac80b0f..b891de96000 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -169,7 +169,7 @@ void ring_buffer_set_clock(struct ring_buffer *buffer, | |||
169 | size_t ring_buffer_page_len(void *page); | 169 | size_t ring_buffer_page_len(void *page); |
170 | 170 | ||
171 | 171 | ||
172 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); | 172 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu); |
173 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); | 173 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); |
174 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, | 174 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, |
175 | size_t len, int cpu, int full); | 175 | size_t len, int cpu, int full); |
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 2b7fec84051..aa08fa8fd79 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/swap.h> | 4 | #include <linux/swap.h> |
5 | #include <linux/mempolicy.h> | 5 | #include <linux/mempolicy.h> |
6 | #include <linux/pagemap.h> | ||
6 | #include <linux/percpu_counter.h> | 7 | #include <linux/percpu_counter.h> |
7 | 8 | ||
8 | /* inode in-kernel data */ | 9 | /* inode in-kernel data */ |
@@ -45,7 +46,27 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) | |||
45 | return container_of(inode, struct shmem_inode_info, vfs_inode); | 46 | return container_of(inode, struct shmem_inode_info, vfs_inode); |
46 | } | 47 | } |
47 | 48 | ||
49 | /* | ||
50 | * Functions in mm/shmem.c called directly from elsewhere: | ||
51 | */ | ||
48 | extern int init_tmpfs(void); | 52 | extern int init_tmpfs(void); |
49 | extern int shmem_fill_super(struct super_block *sb, void *data, int silent); | 53 | extern int shmem_fill_super(struct super_block *sb, void *data, int silent); |
54 | extern struct file *shmem_file_setup(const char *name, | ||
55 | loff_t size, unsigned long flags); | ||
56 | extern int shmem_zero_setup(struct vm_area_struct *); | ||
57 | extern int shmem_lock(struct file *file, int lock, struct user_struct *user); | ||
58 | extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, | ||
59 | pgoff_t index, gfp_t gfp_mask); | ||
60 | extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); | ||
61 | extern int shmem_unuse(swp_entry_t entry, struct page *page); | ||
62 | extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, | ||
63 | struct page **pagep, swp_entry_t *ent); | ||
64 | |||
65 | static inline struct page *shmem_read_mapping_page( | ||
66 | struct address_space *mapping, pgoff_t index) | ||
67 | { | ||
68 | return shmem_read_mapping_page_gfp(mapping, index, | ||
69 | mapping_gfp_mask(mapping)); | ||
70 | } | ||
50 | 71 | ||
51 | #endif | 72 | #endif |
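shmem_read_mapping_page() is a thin convenience wrapper that defaults the gfp mask to the mapping's own; callers with stricter allocation constraints use the _gfp variant directly. A hedged usage sketch (error handling abbreviated; dropping the reference with page_cache_release() matches kernels of this vintage):

	struct page *page;

	page = shmem_read_mapping_page(inode->i_mapping, index);
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... read or modify the page contents ... */
	page_cache_release(page);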
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 25310f1d7f3..115b570e3bf 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h | |||
@@ -14,8 +14,8 @@ struct stack_trace { | |||
14 | }; | 14 | }; |
15 | 15 | ||
16 | extern void save_stack_trace(struct stack_trace *trace); | 16 | extern void save_stack_trace(struct stack_trace *trace); |
17 | extern void save_stack_trace_regs(struct stack_trace *trace, | 17 | extern void save_stack_trace_regs(struct pt_regs *regs, |
18 | struct pt_regs *regs); | 18 | struct stack_trace *trace); |
19 | extern void save_stack_trace_tsk(struct task_struct *tsk, | 19 | extern void save_stack_trace_tsk(struct task_struct *tsk, |
20 | struct stack_trace *trace); | 20 | struct stack_trace *trace); |
21 | 21 | ||
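Callers of save_stack_trace_regs() must flip their arguments: regs now comes first, matching the tsk-first convention of save_stack_trace_tsk(). A sketch of an updated call site (entries is a caller-owned array; names are illustrative):

    static unsigned long entries[32];
    struct stack_trace trace = {
            .entries     = entries,
            .max_entries = ARRAY_SIZE(entries),
    };

    /* save_stack_trace_regs(&trace, regs);    old argument order */
    save_stack_trace_regs(regs, &trace);    /* new argument order */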
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index f73c482ec9c..fe2d8e6b923 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -84,7 +84,8 @@ struct rpc_task { | |||
84 | #endif | 84 | #endif |
85 | unsigned char tk_priority : 2,/* Task priority */ | 85 | unsigned char tk_priority : 2,/* Task priority */ |
86 | tk_garb_retry : 2, | 86 | tk_garb_retry : 2, |
87 | tk_cred_retry : 2; | 87 | tk_cred_retry : 2, |
88 | tk_rebind_retry : 2; | ||
88 | }; | 89 | }; |
89 | #define tk_xprt tk_client->cl_xprt | 90 | #define tk_xprt tk_client->cl_xprt |
90 | 91 | ||
diff --git a/include/linux/swap.h b/include/linux/swap.h index e7056464703..a273468f828 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -300,16 +300,6 @@ static inline void scan_unevictable_unregister_node(struct node *node) | |||
300 | extern int kswapd_run(int nid); | 300 | extern int kswapd_run(int nid); |
301 | extern void kswapd_stop(int nid); | 301 | extern void kswapd_stop(int nid); |
302 | 302 | ||
303 | #ifdef CONFIG_MMU | ||
304 | /* linux/mm/shmem.c */ | ||
305 | extern int shmem_unuse(swp_entry_t entry, struct page *page); | ||
306 | #endif /* CONFIG_MMU */ | ||
307 | |||
308 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | ||
309 | extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, | ||
310 | struct page **pagep, swp_entry_t *ent); | ||
311 | #endif | ||
312 | |||
313 | #ifdef CONFIG_SWAP | 303 | #ifdef CONFIG_SWAP |
314 | /* linux/mm/page_io.c */ | 304 | /* linux/mm/page_io.c */ |
315 | extern int swap_readpage(struct page *); | 305 | extern int swap_readpage(struct page *); |
diff --git a/include/net/sock.h b/include/net/sock.h index f2046e404a6..c0b938cb4b1 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -178,7 +178,6 @@ struct sock_common { | |||
178 | * @sk_dst_cache: destination cache | 178 | * @sk_dst_cache: destination cache |
179 | * @sk_dst_lock: destination cache lock | 179 | * @sk_dst_lock: destination cache lock |
180 | * @sk_policy: flow policy | 180 | * @sk_policy: flow policy |
181 | * @sk_rmem_alloc: receive queue bytes committed | ||
182 | * @sk_receive_queue: incoming packets | 181 | * @sk_receive_queue: incoming packets |
183 | * @sk_wmem_alloc: transmit queue bytes committed | 182 | * @sk_wmem_alloc: transmit queue bytes committed |
184 | * @sk_write_queue: Packet sending queue | 183 | * @sk_write_queue: Packet sending queue |
diff --git a/include/sound/soc.h b/include/sound/soc.h index f1de3e0c75b..3a4bd3a3c68 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h | |||
@@ -248,8 +248,7 @@ typedef int (*hw_write_t)(void *,const char* ,int); | |||
248 | extern struct snd_ac97_bus_ops soc_ac97_ops; | 248 | extern struct snd_ac97_bus_ops soc_ac97_ops; |
249 | 249 | ||
250 | enum snd_soc_control_type { | 250 | enum snd_soc_control_type { |
251 | SND_SOC_CUSTOM = 1, | 251 | SND_SOC_I2C = 1, |
252 | SND_SOC_I2C, | ||
253 | SND_SOC_SPI, | 252 | SND_SOC_SPI, |
254 | }; | 253 | }; |
255 | 254 | ||
diff --git a/init/calibrate.c b/init/calibrate.c index 2568d22a304..aae2f40fea4 100644 --- a/init/calibrate.c +++ b/init/calibrate.c | |||
@@ -245,30 +245,32 @@ recalibrate: | |||
245 | 245 | ||
246 | void __cpuinit calibrate_delay(void) | 246 | void __cpuinit calibrate_delay(void) |
247 | { | 247 | { |
248 | unsigned long lpj; | ||
248 | static bool printed; | 249 | static bool printed; |
249 | 250 | ||
250 | if (preset_lpj) { | 251 | if (preset_lpj) { |
251 | loops_per_jiffy = preset_lpj; | 252 | lpj = preset_lpj; |
252 | if (!printed) | 253 | if (!printed) |
253 | pr_info("Calibrating delay loop (skipped) " | 254 | pr_info("Calibrating delay loop (skipped) " |
254 | "preset value.. "); | 255 | "preset value.. "); |
255 | } else if ((!printed) && lpj_fine) { | 256 | } else if ((!printed) && lpj_fine) { |
256 | loops_per_jiffy = lpj_fine; | 257 | lpj = lpj_fine; |
257 | pr_info("Calibrating delay loop (skipped), " | 258 | pr_info("Calibrating delay loop (skipped), " |
258 | "value calculated using timer frequency.. "); | 259 | "value calculated using timer frequency.. "); |
259 | } else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) { | 260 | } else if ((lpj = calibrate_delay_direct()) != 0) { |
260 | if (!printed) | 261 | if (!printed) |
261 | pr_info("Calibrating delay using timer " | 262 | pr_info("Calibrating delay using timer " |
262 | "specific routine.. "); | 263 | "specific routine.. "); |
263 | } else { | 264 | } else { |
264 | if (!printed) | 265 | if (!printed) |
265 | pr_info("Calibrating delay loop... "); | 266 | pr_info("Calibrating delay loop... "); |
266 | loops_per_jiffy = calibrate_delay_converge(); | 267 | lpj = calibrate_delay_converge(); |
267 | } | 268 | } |
268 | if (!printed) | 269 | if (!printed) |
269 | pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", | 270 | pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", |
270 | loops_per_jiffy/(500000/HZ), | 271 | lpj/(500000/HZ), |
271 | (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); | 272 | (lpj/(5000/HZ)) % 100, lpj); |
272 | 273 | ||
274 | loops_per_jiffy = lpj; | ||
273 | printed = true; | 275 | printed = true; |
274 | } | 276 | } |
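All calibration paths now accumulate into a local lpj and publish loops_per_jiffy with a single store at the end, presumably so that anything reading the global concurrently never observes a half-calibrated intermediate value. The pattern, boiled down (calibrate() is a stand-in name, not a real helper):

    unsigned long lpj;

    lpj = calibrate();      /* local: may be refined over many steps */
    loops_per_jiffy = lpj;  /* global: one final store */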
diff --git a/kernel/async.c b/kernel/async.c index cd9dbb913c7..d5fe7af0de2 100644 --- a/kernel/async.c +++ b/kernel/async.c | |||
@@ -49,12 +49,13 @@ asynchronous and synchronous parts of the kernel. | |||
49 | */ | 49 | */ |
50 | 50 | ||
51 | #include <linux/async.h> | 51 | #include <linux/async.h> |
52 | #include <linux/atomic.h> | ||
53 | #include <linux/ktime.h> | ||
52 | #include <linux/module.h> | 54 | #include <linux/module.h> |
53 | #include <linux/wait.h> | 55 | #include <linux/wait.h> |
54 | #include <linux/sched.h> | 56 | #include <linux/sched.h> |
55 | #include <linux/slab.h> | 57 | #include <linux/slab.h> |
56 | #include <linux/workqueue.h> | 58 | #include <linux/workqueue.h> |
57 | #include <asm/atomic.h> | ||
58 | 59 | ||
59 | static async_cookie_t next_cookie = 1; | 60 | static async_cookie_t next_cookie = 1; |
60 | 61 | ||
@@ -128,7 +129,8 @@ static void async_run_entry_fn(struct work_struct *work) | |||
128 | 129 | ||
129 | /* 2) run (and print duration) */ | 130 | /* 2) run (and print duration) */ |
130 | if (initcall_debug && system_state == SYSTEM_BOOTING) { | 131 | if (initcall_debug && system_state == SYSTEM_BOOTING) { |
131 | printk("calling %lli_%pF @ %i\n", (long long)entry->cookie, | 132 | printk(KERN_DEBUG "calling %lli_%pF @ %i\n", |
133 | (long long)entry->cookie, | ||
132 | entry->func, task_pid_nr(current)); | 134 | entry->func, task_pid_nr(current)); |
133 | calltime = ktime_get(); | 135 | calltime = ktime_get(); |
134 | } | 136 | } |
@@ -136,7 +138,7 @@ static void async_run_entry_fn(struct work_struct *work) | |||
136 | if (initcall_debug && system_state == SYSTEM_BOOTING) { | 138 | if (initcall_debug && system_state == SYSTEM_BOOTING) { |
137 | rettime = ktime_get(); | 139 | rettime = ktime_get(); |
138 | delta = ktime_sub(rettime, calltime); | 140 | delta = ktime_sub(rettime, calltime); |
139 | printk("initcall %lli_%pF returned 0 after %lld usecs\n", | 141 | printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n", |
140 | (long long)entry->cookie, | 142 | (long long)entry->cookie, |
141 | entry->func, | 143 | entry->func, |
142 | (long long)ktime_to_ns(delta) >> 10); | 144 | (long long)ktime_to_ns(delta) >> 10); |
@@ -270,7 +272,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie, | |||
270 | ktime_t starttime, delta, endtime; | 272 | ktime_t starttime, delta, endtime; |
271 | 273 | ||
272 | if (initcall_debug && system_state == SYSTEM_BOOTING) { | 274 | if (initcall_debug && system_state == SYSTEM_BOOTING) { |
273 | printk("async_waiting @ %i\n", task_pid_nr(current)); | 275 | printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current)); |
274 | starttime = ktime_get(); | 276 | starttime = ktime_get(); |
275 | } | 277 | } |
276 | 278 | ||
@@ -280,7 +282,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie, | |||
280 | endtime = ktime_get(); | 282 | endtime = ktime_get(); |
281 | delta = ktime_sub(endtime, starttime); | 283 | delta = ktime_sub(endtime, starttime); |
282 | 284 | ||
283 | printk("async_continuing @ %i after %lli usec\n", | 285 | printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n", |
284 | task_pid_nr(current), | 286 | task_pid_nr(current), |
285 | (long long)ktime_to_ns(delta) >> 10); | 287 | (long long)ktime_to_ns(delta) >> 10); |
286 | } | 288 | } |
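Two cleanups in async.c: <asm/atomic.h> gives way to the <linux/atomic.h> wrapper, and the boot-trace printk()s gain an explicit KERN_DEBUG so they no longer land at the default console loglevel. For contrast, the usual options:

    printk("msg\n");              /* no prefix: default loglevel */
    printk(KERN_DEBUG "msg\n");   /* always built in, debug severity */
    pr_debug("msg\n");            /* compiled out unless DEBUG/dyndbg */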
diff --git a/kernel/events/Makefile b/kernel/events/Makefile index 1ce23d3d839..89e5e8aa4c3 100644 --- a/kernel/events/Makefile +++ b/kernel/events/Makefile | |||
@@ -2,5 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER | |||
2 | CFLAGS_REMOVE_core.o = -pg | 2 | CFLAGS_REMOVE_core.o = -pg |
3 | endif | 3 | endif |
4 | 4 | ||
5 | obj-y := core.o | 5 | obj-y := core.o ring_buffer.o |
6 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o | 6 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 9efe7108cca..0567e32d71a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -36,6 +36,8 @@ | |||
36 | #include <linux/ftrace_event.h> | 36 | #include <linux/ftrace_event.h> |
37 | #include <linux/hw_breakpoint.h> | 37 | #include <linux/hw_breakpoint.h> |
38 | 38 | ||
39 | #include "internal.h" | ||
40 | |||
39 | #include <asm/irq_regs.h> | 41 | #include <asm/irq_regs.h> |
40 | 42 | ||
41 | struct remote_function_call { | 43 | struct remote_function_call { |
@@ -200,6 +202,22 @@ __get_cpu_context(struct perf_event_context *ctx) | |||
200 | return this_cpu_ptr(ctx->pmu->pmu_cpu_context); | 202 | return this_cpu_ptr(ctx->pmu->pmu_cpu_context); |
201 | } | 203 | } |
202 | 204 | ||
205 | static void perf_ctx_lock(struct perf_cpu_context *cpuctx, | ||
206 | struct perf_event_context *ctx) | ||
207 | { | ||
208 | raw_spin_lock(&cpuctx->ctx.lock); | ||
209 | if (ctx) | ||
210 | raw_spin_lock(&ctx->lock); | ||
211 | } | ||
212 | |||
213 | static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, | ||
214 | struct perf_event_context *ctx) | ||
215 | { | ||
216 | if (ctx) | ||
217 | raw_spin_unlock(&ctx->lock); | ||
218 | raw_spin_unlock(&cpuctx->ctx.lock); | ||
219 | } | ||
220 | |||
203 | #ifdef CONFIG_CGROUP_PERF | 221 | #ifdef CONFIG_CGROUP_PERF |
204 | 222 | ||
205 | /* | 223 | /* |
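perf_ctx_lock()/perf_ctx_unlock() establish the lock order used throughout the rest of this patch: the per-CPU context lock is taken first, then the (possibly NULL) task context lock. Every rescheduling path below follows the same bracket, e.g.:

    perf_ctx_lock(cpuctx, cpuctx->task_ctx);
    perf_pmu_disable(cpuctx->ctx.pmu);
    /* ... move events in/out with both contexts held stable ... */
    perf_pmu_enable(cpuctx->ctx.pmu);
    perf_ctx_unlock(cpuctx, cpuctx->task_ctx);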
@@ -340,11 +358,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode) | |||
340 | rcu_read_lock(); | 358 | rcu_read_lock(); |
341 | 359 | ||
342 | list_for_each_entry_rcu(pmu, &pmus, entry) { | 360 | list_for_each_entry_rcu(pmu, &pmus, entry) { |
343 | |||
344 | cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); | 361 | cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
345 | 362 | ||
346 | perf_pmu_disable(cpuctx->ctx.pmu); | ||
347 | |||
348 | /* | 363 | /* |
349 | * perf_cgroup_events says at least one | 364 | * perf_cgroup_events says at least one |
350 | * context on this CPU has cgroup events. | 365 | * context on this CPU has cgroup events. |
@@ -353,6 +368,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode) | |||
353 | * events for a context. | 368 | * events for a context. |
354 | */ | 369 | */ |
355 | if (cpuctx->ctx.nr_cgroups > 0) { | 370 | if (cpuctx->ctx.nr_cgroups > 0) { |
371 | perf_ctx_lock(cpuctx, cpuctx->task_ctx); | ||
372 | perf_pmu_disable(cpuctx->ctx.pmu); | ||
356 | 373 | ||
357 | if (mode & PERF_CGROUP_SWOUT) { | 374 | if (mode & PERF_CGROUP_SWOUT) { |
358 | cpu_ctx_sched_out(cpuctx, EVENT_ALL); | 375 | cpu_ctx_sched_out(cpuctx, EVENT_ALL); |
@@ -372,9 +389,9 @@ void perf_cgroup_switch(struct task_struct *task, int mode) | |||
372 | cpuctx->cgrp = perf_cgroup_from_task(task); | 389 | cpuctx->cgrp = perf_cgroup_from_task(task); |
373 | cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); | 390 | cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); |
374 | } | 391 | } |
392 | perf_pmu_enable(cpuctx->ctx.pmu); | ||
393 | perf_ctx_unlock(cpuctx, cpuctx->task_ctx); | ||
375 | } | 394 | } |
376 | |||
377 | perf_pmu_enable(cpuctx->ctx.pmu); | ||
378 | } | 395 | } |
379 | 396 | ||
380 | rcu_read_unlock(); | 397 | rcu_read_unlock(); |
@@ -731,6 +748,7 @@ static u64 perf_event_time(struct perf_event *event) | |||
731 | 748 | ||
732 | /* | 749 | /* |
733 | * Update the total_time_enabled and total_time_running fields for an event. | 750 | * Update the total_time_enabled and total_time_running fields for an event. |

751 | * The caller of this function needs to hold the ctx->lock. | ||
734 | */ | 752 | */ |
735 | static void update_event_times(struct perf_event *event) | 753 | static void update_event_times(struct perf_event *event) |
736 | { | 754 | { |
@@ -1105,6 +1123,10 @@ static int __perf_remove_from_context(void *info) | |||
1105 | raw_spin_lock(&ctx->lock); | 1123 | raw_spin_lock(&ctx->lock); |
1106 | event_sched_out(event, cpuctx, ctx); | 1124 | event_sched_out(event, cpuctx, ctx); |
1107 | list_del_event(event, ctx); | 1125 | list_del_event(event, ctx); |
1126 | if (!ctx->nr_events && cpuctx->task_ctx == ctx) { | ||
1127 | ctx->is_active = 0; | ||
1128 | cpuctx->task_ctx = NULL; | ||
1129 | } | ||
1108 | raw_spin_unlock(&ctx->lock); | 1130 | raw_spin_unlock(&ctx->lock); |
1109 | 1131 | ||
1110 | return 0; | 1132 | return 0; |
@@ -1454,8 +1476,24 @@ static void add_event_to_ctx(struct perf_event *event, | |||
1454 | event->tstamp_stopped = tstamp; | 1476 | event->tstamp_stopped = tstamp; |
1455 | } | 1477 | } |
1456 | 1478 | ||
1457 | static void perf_event_context_sched_in(struct perf_event_context *ctx, | 1479 | static void task_ctx_sched_out(struct perf_event_context *ctx); |
1458 | struct task_struct *tsk); | 1480 | static void |
1481 | ctx_sched_in(struct perf_event_context *ctx, | ||
1482 | struct perf_cpu_context *cpuctx, | ||
1483 | enum event_type_t event_type, | ||
1484 | struct task_struct *task); | ||
1485 | |||
1486 | static void perf_event_sched_in(struct perf_cpu_context *cpuctx, | ||
1487 | struct perf_event_context *ctx, | ||
1488 | struct task_struct *task) | ||
1489 | { | ||
1490 | cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); | ||
1491 | if (ctx) | ||
1492 | ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); | ||
1493 | cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); | ||
1494 | if (ctx) | ||
1495 | ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); | ||
1496 | } | ||
1459 | 1497 | ||
1460 | /* | 1498 | /* |
1461 | * Cross CPU call to install and enable a performance event | 1499 | * Cross CPU call to install and enable a performance event |
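The new perf_event_sched_in() helper interleaves the CPU and task contexts so that pinned groups always get first pick of the hardware, whichever context owns them:

    /* Resulting schedule-in order:
     *   1. CPU pinned      cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task)
     *   2. task pinned     ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task)
     *   3. CPU flexible    cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task)
     *   4. task flexible   ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task)
     */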
@@ -1466,20 +1504,37 @@ static int __perf_install_in_context(void *info) | |||
1466 | { | 1504 | { |
1467 | struct perf_event *event = info; | 1505 | struct perf_event *event = info; |
1468 | struct perf_event_context *ctx = event->ctx; | 1506 | struct perf_event_context *ctx = event->ctx; |
1469 | struct perf_event *leader = event->group_leader; | ||
1470 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | 1507 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
1471 | int err; | 1508 | struct perf_event_context *task_ctx = cpuctx->task_ctx; |
1509 | struct task_struct *task = current; | ||
1510 | |||
1511 | perf_ctx_lock(cpuctx, task_ctx); | ||
1512 | perf_pmu_disable(cpuctx->ctx.pmu); | ||
1472 | 1513 | ||
1473 | /* | 1514 | /* |
1474 | * In case we're installing a new context to an already running task, | 1515 | * If there was an active task_ctx schedule it out. |
1475 | * could also happen before perf_event_task_sched_in() on architectures | ||
1476 | * which do context switches with IRQs enabled. | ||
1477 | */ | 1516 | */ |
1478 | if (ctx->task && !cpuctx->task_ctx) | 1517 | if (task_ctx) |
1479 | perf_event_context_sched_in(ctx, ctx->task); | 1518 | task_ctx_sched_out(task_ctx); |
1519 | |||
1520 | /* | ||
1521 | * If the context we're installing events in is not the | ||
1522 | * active task_ctx, flip them. | ||
1523 | */ | ||
1524 | if (ctx->task && task_ctx != ctx) { | ||
1525 | if (task_ctx) | ||
1526 | raw_spin_unlock(&task_ctx->lock); | ||
1527 | raw_spin_lock(&ctx->lock); | ||
1528 | task_ctx = ctx; | ||
1529 | } | ||
1530 | |||
1531 | if (task_ctx) { | ||
1532 | cpuctx->task_ctx = task_ctx; | ||
1533 | task = task_ctx->task; | ||
1534 | } | ||
1535 | |||
1536 | cpu_ctx_sched_out(cpuctx, EVENT_ALL); | ||
1480 | 1537 | ||
1481 | raw_spin_lock(&ctx->lock); | ||
1482 | ctx->is_active = 1; | ||
1483 | update_context_time(ctx); | 1538 | update_context_time(ctx); |
1484 | /* | 1539 | /* |
1485 | * update cgrp time only if current cgrp | 1540 | * update cgrp time only if current cgrp |
@@ -1490,43 +1545,13 @@ static int __perf_install_in_context(void *info) | |||
1490 | 1545 | ||
1491 | add_event_to_ctx(event, ctx); | 1546 | add_event_to_ctx(event, ctx); |
1492 | 1547 | ||
1493 | if (!event_filter_match(event)) | ||
1494 | goto unlock; | ||
1495 | |||
1496 | /* | 1548 | /* |
1497 | * Don't put the event on if it is disabled or if | 1549 | * Schedule everything back in |
1498 | * it is in a group and the group isn't on. | ||
1499 | */ | 1550 | */ |
1500 | if (event->state != PERF_EVENT_STATE_INACTIVE || | 1551 | perf_event_sched_in(cpuctx, task_ctx, task); |
1501 | (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)) | ||
1502 | goto unlock; | ||
1503 | 1552 | ||
1504 | /* | 1553 | perf_pmu_enable(cpuctx->ctx.pmu); |
1505 | * An exclusive event can't go on if there are already active | 1554 | perf_ctx_unlock(cpuctx, task_ctx); |
1506 | * hardware events, and no hardware event can go on if there | ||
1507 | * is already an exclusive event on. | ||
1508 | */ | ||
1509 | if (!group_can_go_on(event, cpuctx, 1)) | ||
1510 | err = -EEXIST; | ||
1511 | else | ||
1512 | err = event_sched_in(event, cpuctx, ctx); | ||
1513 | |||
1514 | if (err) { | ||
1515 | /* | ||
1516 | * This event couldn't go on. If it is in a group | ||
1517 | * then we have to pull the whole group off. | ||
1518 | * If the event group is pinned then put it in error state. | ||
1519 | */ | ||
1520 | if (leader != event) | ||
1521 | group_sched_out(leader, cpuctx, ctx); | ||
1522 | if (leader->attr.pinned) { | ||
1523 | update_group_times(leader); | ||
1524 | leader->state = PERF_EVENT_STATE_ERROR; | ||
1525 | } | ||
1526 | } | ||
1527 | |||
1528 | unlock: | ||
1529 | raw_spin_unlock(&ctx->lock); | ||
1530 | 1555 | ||
1531 | return 0; | 1556 | return 0; |
1532 | } | 1557 | } |
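The install path is rewritten wholesale: rather than slotting one event in and hand-checking whether it and its group leader may run (group_can_go_on(), the pinned-error fallback, and so on), it quiesces everything, splices the event in, and lets the ordinary sched-in logic re-evaluate the world. Condensed (a reading aid; the lock flip for a foreign task_ctx is elided):

    perf_ctx_lock(cpuctx, task_ctx);
    perf_pmu_disable(cpuctx->ctx.pmu);
    if (task_ctx)
            task_ctx_sched_out(task_ctx);        /* quiesce task events */
    cpu_ctx_sched_out(cpuctx, EVENT_ALL);        /* and CPU events      */
    add_event_to_ctx(event, ctx);                /* nothing is running  */
    perf_event_sched_in(cpuctx, task_ctx, task); /* normal rules apply  */
    perf_pmu_enable(cpuctx->ctx.pmu);
    perf_ctx_unlock(cpuctx, task_ctx);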
@@ -1739,7 +1764,7 @@ out: | |||
1739 | raw_spin_unlock_irq(&ctx->lock); | 1764 | raw_spin_unlock_irq(&ctx->lock); |
1740 | } | 1765 | } |
1741 | 1766 | ||
1742 | static int perf_event_refresh(struct perf_event *event, int refresh) | 1767 | int perf_event_refresh(struct perf_event *event, int refresh) |
1743 | { | 1768 | { |
1744 | /* | 1769 | /* |
1745 | * not supported on inherited events | 1770 | * not supported on inherited events |
@@ -1752,36 +1777,35 @@ static int perf_event_refresh(struct perf_event *event, int refresh) | |||
1752 | 1777 | ||
1753 | return 0; | 1778 | return 0; |
1754 | } | 1779 | } |
1780 | EXPORT_SYMBOL_GPL(perf_event_refresh); | ||
1755 | 1781 | ||
1756 | static void ctx_sched_out(struct perf_event_context *ctx, | 1782 | static void ctx_sched_out(struct perf_event_context *ctx, |
1757 | struct perf_cpu_context *cpuctx, | 1783 | struct perf_cpu_context *cpuctx, |
1758 | enum event_type_t event_type) | 1784 | enum event_type_t event_type) |
1759 | { | 1785 | { |
1760 | struct perf_event *event; | 1786 | struct perf_event *event; |
1787 | int is_active = ctx->is_active; | ||
1761 | 1788 | ||
1762 | raw_spin_lock(&ctx->lock); | 1789 | ctx->is_active &= ~event_type; |
1763 | perf_pmu_disable(ctx->pmu); | ||
1764 | ctx->is_active = 0; | ||
1765 | if (likely(!ctx->nr_events)) | 1790 | if (likely(!ctx->nr_events)) |
1766 | goto out; | 1791 | return; |
1792 | |||
1767 | update_context_time(ctx); | 1793 | update_context_time(ctx); |
1768 | update_cgrp_time_from_cpuctx(cpuctx); | 1794 | update_cgrp_time_from_cpuctx(cpuctx); |
1769 | |||
1770 | if (!ctx->nr_active) | 1795 | if (!ctx->nr_active) |
1771 | goto out; | 1796 | return; |
1772 | 1797 | ||
1773 | if (event_type & EVENT_PINNED) { | 1798 | perf_pmu_disable(ctx->pmu); |
1799 | if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { | ||
1774 | list_for_each_entry(event, &ctx->pinned_groups, group_entry) | 1800 | list_for_each_entry(event, &ctx->pinned_groups, group_entry) |
1775 | group_sched_out(event, cpuctx, ctx); | 1801 | group_sched_out(event, cpuctx, ctx); |
1776 | } | 1802 | } |
1777 | 1803 | ||
1778 | if (event_type & EVENT_FLEXIBLE) { | 1804 | if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { |
1779 | list_for_each_entry(event, &ctx->flexible_groups, group_entry) | 1805 | list_for_each_entry(event, &ctx->flexible_groups, group_entry) |
1780 | group_sched_out(event, cpuctx, ctx); | 1806 | group_sched_out(event, cpuctx, ctx); |
1781 | } | 1807 | } |
1782 | out: | ||
1783 | perf_pmu_enable(ctx->pmu); | 1808 | perf_pmu_enable(ctx->pmu); |
1784 | raw_spin_unlock(&ctx->lock); | ||
1785 | } | 1809 | } |
1786 | 1810 | ||
1787 | /* | 1811 | /* |
@@ -1929,8 +1953,10 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn, | |||
1929 | rcu_read_unlock(); | 1953 | rcu_read_unlock(); |
1930 | 1954 | ||
1931 | if (do_switch) { | 1955 | if (do_switch) { |
1956 | raw_spin_lock(&ctx->lock); | ||
1932 | ctx_sched_out(ctx, cpuctx, EVENT_ALL); | 1957 | ctx_sched_out(ctx, cpuctx, EVENT_ALL); |
1933 | cpuctx->task_ctx = NULL; | 1958 | cpuctx->task_ctx = NULL; |
1959 | raw_spin_unlock(&ctx->lock); | ||
1934 | } | 1960 | } |
1935 | } | 1961 | } |
1936 | 1962 | ||
@@ -1965,8 +1991,7 @@ void __perf_event_task_sched_out(struct task_struct *task, | |||
1965 | perf_cgroup_sched_out(task); | 1991 | perf_cgroup_sched_out(task); |
1966 | } | 1992 | } |
1967 | 1993 | ||
1968 | static void task_ctx_sched_out(struct perf_event_context *ctx, | 1994 | static void task_ctx_sched_out(struct perf_event_context *ctx) |
1969 | enum event_type_t event_type) | ||
1970 | { | 1995 | { |
1971 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | 1996 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
1972 | 1997 | ||
@@ -1976,7 +2001,7 @@ static void task_ctx_sched_out(struct perf_event_context *ctx, | |||
1976 | if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) | 2001 | if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) |
1977 | return; | 2002 | return; |
1978 | 2003 | ||
1979 | ctx_sched_out(ctx, cpuctx, event_type); | 2004 | ctx_sched_out(ctx, cpuctx, EVENT_ALL); |
1980 | cpuctx->task_ctx = NULL; | 2005 | cpuctx->task_ctx = NULL; |
1981 | } | 2006 | } |
1982 | 2007 | ||
@@ -2055,11 +2080,11 @@ ctx_sched_in(struct perf_event_context *ctx, | |||
2055 | struct task_struct *task) | 2080 | struct task_struct *task) |
2056 | { | 2081 | { |
2057 | u64 now; | 2082 | u64 now; |
2083 | int is_active = ctx->is_active; | ||
2058 | 2084 | ||
2059 | raw_spin_lock(&ctx->lock); | 2085 | ctx->is_active |= event_type; |
2060 | ctx->is_active = 1; | ||
2061 | if (likely(!ctx->nr_events)) | 2086 | if (likely(!ctx->nr_events)) |
2062 | goto out; | 2087 | return; |
2063 | 2088 | ||
2064 | now = perf_clock(); | 2089 | now = perf_clock(); |
2065 | ctx->timestamp = now; | 2090 | ctx->timestamp = now; |
@@ -2068,15 +2093,12 @@ ctx_sched_in(struct perf_event_context *ctx, | |||
2068 | * First go through the list and put on any pinned groups | 2093 | * First go through the list and put on any pinned groups |
2069 | * in order to give them the best chance of going on. | 2094 | * in order to give them the best chance of going on. |
2070 | */ | 2095 | */ |
2071 | if (event_type & EVENT_PINNED) | 2096 | if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) |
2072 | ctx_pinned_sched_in(ctx, cpuctx); | 2097 | ctx_pinned_sched_in(ctx, cpuctx); |
2073 | 2098 | ||
2074 | /* Then walk through the lower prio flexible groups */ | 2099 | /* Then walk through the lower prio flexible groups */ |
2075 | if (event_type & EVENT_FLEXIBLE) | 2100 | if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) |
2076 | ctx_flexible_sched_in(ctx, cpuctx); | 2101 | ctx_flexible_sched_in(ctx, cpuctx); |
2077 | |||
2078 | out: | ||
2079 | raw_spin_unlock(&ctx->lock); | ||
2080 | } | 2102 | } |
2081 | 2103 | ||
2082 | static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, | 2104 | static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, |
@@ -2088,19 +2110,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, | |||
2088 | ctx_sched_in(ctx, cpuctx, event_type, task); | 2110 | ctx_sched_in(ctx, cpuctx, event_type, task); |
2089 | } | 2111 | } |
2090 | 2112 | ||
2091 | static void task_ctx_sched_in(struct perf_event_context *ctx, | ||
2092 | enum event_type_t event_type) | ||
2093 | { | ||
2094 | struct perf_cpu_context *cpuctx; | ||
2095 | |||
2096 | cpuctx = __get_cpu_context(ctx); | ||
2097 | if (cpuctx->task_ctx == ctx) | ||
2098 | return; | ||
2099 | |||
2100 | ctx_sched_in(ctx, cpuctx, event_type, NULL); | ||
2101 | cpuctx->task_ctx = ctx; | ||
2102 | } | ||
2103 | |||
2104 | static void perf_event_context_sched_in(struct perf_event_context *ctx, | 2113 | static void perf_event_context_sched_in(struct perf_event_context *ctx, |
2105 | struct task_struct *task) | 2114 | struct task_struct *task) |
2106 | { | 2115 | { |
@@ -2110,6 +2119,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx, | |||
2110 | if (cpuctx->task_ctx == ctx) | 2119 | if (cpuctx->task_ctx == ctx) |
2111 | return; | 2120 | return; |
2112 | 2121 | ||
2122 | perf_ctx_lock(cpuctx, ctx); | ||
2113 | perf_pmu_disable(ctx->pmu); | 2123 | perf_pmu_disable(ctx->pmu); |
2114 | /* | 2124 | /* |
2115 | * We want to keep the following priority order: | 2125 | * We want to keep the following priority order: |
@@ -2118,18 +2128,18 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx, | |||
2118 | */ | 2128 | */ |
2119 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); | 2129 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
2120 | 2130 | ||
2121 | ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); | 2131 | perf_event_sched_in(cpuctx, ctx, task); |
2122 | cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); | ||
2123 | ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); | ||
2124 | 2132 | ||
2125 | cpuctx->task_ctx = ctx; | 2133 | cpuctx->task_ctx = ctx; |
2126 | 2134 | ||
2135 | perf_pmu_enable(ctx->pmu); | ||
2136 | perf_ctx_unlock(cpuctx, ctx); | ||
2137 | |||
2127 | /* | 2138 | /* |
2128 | * Since these rotations are per-cpu, we need to ensure the | 2139 | * Since these rotations are per-cpu, we need to ensure the |
2129 | * cpu-context we got scheduled on is actually rotating. | 2140 | * cpu-context we got scheduled on is actually rotating. |
2130 | */ | 2141 | */ |
2131 | perf_pmu_rotate_start(ctx->pmu); | 2142 | perf_pmu_rotate_start(ctx->pmu); |
2132 | perf_pmu_enable(ctx->pmu); | ||
2133 | } | 2143 | } |
2134 | 2144 | ||
2135 | /* | 2145 | /* |
@@ -2269,7 +2279,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) | |||
2269 | u64 interrupts, now; | 2279 | u64 interrupts, now; |
2270 | s64 delta; | 2280 | s64 delta; |
2271 | 2281 | ||
2272 | raw_spin_lock(&ctx->lock); | ||
2273 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 2282 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
2274 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 2283 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
2275 | continue; | 2284 | continue; |
@@ -2301,7 +2310,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) | |||
2301 | if (delta > 0) | 2310 | if (delta > 0) |
2302 | perf_adjust_period(event, period, delta); | 2311 | perf_adjust_period(event, period, delta); |
2303 | } | 2312 | } |
2304 | raw_spin_unlock(&ctx->lock); | ||
2305 | } | 2313 | } |
2306 | 2314 | ||
2307 | /* | 2315 | /* |
@@ -2309,16 +2317,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) | |||
2309 | */ | 2317 | */ |
2310 | static void rotate_ctx(struct perf_event_context *ctx) | 2318 | static void rotate_ctx(struct perf_event_context *ctx) |
2311 | { | 2319 | { |
2312 | raw_spin_lock(&ctx->lock); | ||
2313 | |||
2314 | /* | 2320 | /* |
2315 | * Rotate the first entry last of non-pinned groups. Rotation might be | 2321 | * Rotate the first entry last of non-pinned groups. Rotation might be |
2316 | * disabled by the inheritance code. | 2322 | * disabled by the inheritance code. |
2317 | */ | 2323 | */ |
2318 | if (!ctx->rotate_disable) | 2324 | if (!ctx->rotate_disable) |
2319 | list_rotate_left(&ctx->flexible_groups); | 2325 | list_rotate_left(&ctx->flexible_groups); |
2320 | |||
2321 | raw_spin_unlock(&ctx->lock); | ||
2322 | } | 2326 | } |
2323 | 2327 | ||
2324 | /* | 2328 | /* |
@@ -2345,6 +2349,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx) | |||
2345 | rotate = 1; | 2349 | rotate = 1; |
2346 | } | 2350 | } |
2347 | 2351 | ||
2352 | perf_ctx_lock(cpuctx, cpuctx->task_ctx); | ||
2348 | perf_pmu_disable(cpuctx->ctx.pmu); | 2353 | perf_pmu_disable(cpuctx->ctx.pmu); |
2349 | perf_ctx_adjust_freq(&cpuctx->ctx, interval); | 2354 | perf_ctx_adjust_freq(&cpuctx->ctx, interval); |
2350 | if (ctx) | 2355 | if (ctx) |
@@ -2355,21 +2360,20 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx) | |||
2355 | 2360 | ||
2356 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); | 2361 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
2357 | if (ctx) | 2362 | if (ctx) |
2358 | task_ctx_sched_out(ctx, EVENT_FLEXIBLE); | 2363 | ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); |
2359 | 2364 | ||
2360 | rotate_ctx(&cpuctx->ctx); | 2365 | rotate_ctx(&cpuctx->ctx); |
2361 | if (ctx) | 2366 | if (ctx) |
2362 | rotate_ctx(ctx); | 2367 | rotate_ctx(ctx); |
2363 | 2368 | ||
2364 | cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current); | 2369 | perf_event_sched_in(cpuctx, ctx, current); |
2365 | if (ctx) | ||
2366 | task_ctx_sched_in(ctx, EVENT_FLEXIBLE); | ||
2367 | 2370 | ||
2368 | done: | 2371 | done: |
2369 | if (remove) | 2372 | if (remove) |
2370 | list_del_init(&cpuctx->rotation_list); | 2373 | list_del_init(&cpuctx->rotation_list); |
2371 | 2374 | ||
2372 | perf_pmu_enable(cpuctx->ctx.pmu); | 2375 | perf_pmu_enable(cpuctx->ctx.pmu); |
2376 | perf_ctx_unlock(cpuctx, cpuctx->task_ctx); | ||
2373 | } | 2377 | } |
2374 | 2378 | ||
2375 | void perf_event_task_tick(void) | 2379 | void perf_event_task_tick(void) |
@@ -2424,9 +2428,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx) | |||
2424 | * in. | 2428 | * in. |
2425 | */ | 2429 | */ |
2426 | perf_cgroup_sched_out(current); | 2430 | perf_cgroup_sched_out(current); |
2427 | task_ctx_sched_out(ctx, EVENT_ALL); | ||
2428 | 2431 | ||
2429 | raw_spin_lock(&ctx->lock); | 2432 | raw_spin_lock(&ctx->lock); |
2433 | task_ctx_sched_out(ctx); | ||
2430 | 2434 | ||
2431 | list_for_each_entry(event, &ctx->pinned_groups, group_entry) { | 2435 | list_for_each_entry(event, &ctx->pinned_groups, group_entry) { |
2432 | ret = event_enable_on_exec(event, ctx); | 2436 | ret = event_enable_on_exec(event, ctx); |
@@ -2835,16 +2839,12 @@ retry: | |||
2835 | unclone_ctx(ctx); | 2839 | unclone_ctx(ctx); |
2836 | ++ctx->pin_count; | 2840 | ++ctx->pin_count; |
2837 | raw_spin_unlock_irqrestore(&ctx->lock, flags); | 2841 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
2838 | } | 2842 | } else { |
2839 | |||
2840 | if (!ctx) { | ||
2841 | ctx = alloc_perf_context(pmu, task); | 2843 | ctx = alloc_perf_context(pmu, task); |
2842 | err = -ENOMEM; | 2844 | err = -ENOMEM; |
2843 | if (!ctx) | 2845 | if (!ctx) |
2844 | goto errout; | 2846 | goto errout; |
2845 | 2847 | ||
2846 | get_ctx(ctx); | ||
2847 | |||
2848 | err = 0; | 2848 | err = 0; |
2849 | mutex_lock(&task->perf_event_mutex); | 2849 | mutex_lock(&task->perf_event_mutex); |
2850 | /* | 2850 | /* |
@@ -2856,14 +2856,14 @@ retry: | |||
2856 | else if (task->perf_event_ctxp[ctxn]) | 2856 | else if (task->perf_event_ctxp[ctxn]) |
2857 | err = -EAGAIN; | 2857 | err = -EAGAIN; |
2858 | else { | 2858 | else { |
2859 | get_ctx(ctx); | ||
2859 | ++ctx->pin_count; | 2860 | ++ctx->pin_count; |
2860 | rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); | 2861 | rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); |
2861 | } | 2862 | } |
2862 | mutex_unlock(&task->perf_event_mutex); | 2863 | mutex_unlock(&task->perf_event_mutex); |
2863 | 2864 | ||
2864 | if (unlikely(err)) { | 2865 | if (unlikely(err)) { |
2865 | put_task_struct(task); | 2866 | put_ctx(ctx); |
2866 | kfree(ctx); | ||
2867 | 2867 | ||
2868 | if (err == -EAGAIN) | 2868 | if (err == -EAGAIN) |
2869 | goto retry; | 2869 | goto retry; |
@@ -2890,7 +2890,7 @@ static void free_event_rcu(struct rcu_head *head) | |||
2890 | kfree(event); | 2890 | kfree(event); |
2891 | } | 2891 | } |
2892 | 2892 | ||
2893 | static void perf_buffer_put(struct perf_buffer *buffer); | 2893 | static void ring_buffer_put(struct ring_buffer *rb); |
2894 | 2894 | ||
2895 | static void free_event(struct perf_event *event) | 2895 | static void free_event(struct perf_event *event) |
2896 | { | 2896 | { |
@@ -2913,9 +2913,9 @@ static void free_event(struct perf_event *event) | |||
2913 | } | 2913 | } |
2914 | } | 2914 | } |
2915 | 2915 | ||
2916 | if (event->buffer) { | 2916 | if (event->rb) { |
2917 | perf_buffer_put(event->buffer); | 2917 | ring_buffer_put(event->rb); |
2918 | event->buffer = NULL; | 2918 | event->rb = NULL; |
2919 | } | 2919 | } |
2920 | 2920 | ||
2921 | if (is_cgroup_event(event)) | 2921 | if (is_cgroup_event(event)) |
@@ -2934,12 +2934,6 @@ int perf_event_release_kernel(struct perf_event *event) | |||
2934 | { | 2934 | { |
2935 | struct perf_event_context *ctx = event->ctx; | 2935 | struct perf_event_context *ctx = event->ctx; |
2936 | 2936 | ||
2937 | /* | ||
2938 | * Remove from the PMU, can't get re-enabled since we got | ||
2939 | * here because the last ref went. | ||
2940 | */ | ||
2941 | perf_event_disable(event); | ||
2942 | |||
2943 | WARN_ON_ONCE(ctx->parent_ctx); | 2937 | WARN_ON_ONCE(ctx->parent_ctx); |
2944 | /* | 2938 | /* |
2945 | * There are two ways this annotation is useful: | 2939 | * There are two ways this annotation is useful: |
@@ -2956,8 +2950,8 @@ int perf_event_release_kernel(struct perf_event *event) | |||
2956 | mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); | 2950 | mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); |
2957 | raw_spin_lock_irq(&ctx->lock); | 2951 | raw_spin_lock_irq(&ctx->lock); |
2958 | perf_group_detach(event); | 2952 | perf_group_detach(event); |
2959 | list_del_event(event, ctx); | ||
2960 | raw_spin_unlock_irq(&ctx->lock); | 2953 | raw_spin_unlock_irq(&ctx->lock); |
2954 | perf_remove_from_context(event); | ||
2961 | mutex_unlock(&ctx->mutex); | 2955 | mutex_unlock(&ctx->mutex); |
2962 | 2956 | ||
2963 | free_event(event); | 2957 | free_event(event); |
@@ -3149,13 +3143,13 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | |||
3149 | static unsigned int perf_poll(struct file *file, poll_table *wait) | 3143 | static unsigned int perf_poll(struct file *file, poll_table *wait) |
3150 | { | 3144 | { |
3151 | struct perf_event *event = file->private_data; | 3145 | struct perf_event *event = file->private_data; |
3152 | struct perf_buffer *buffer; | 3146 | struct ring_buffer *rb; |
3153 | unsigned int events = POLL_HUP; | 3147 | unsigned int events = POLL_HUP; |
3154 | 3148 | ||
3155 | rcu_read_lock(); | 3149 | rcu_read_lock(); |
3156 | buffer = rcu_dereference(event->buffer); | 3150 | rb = rcu_dereference(event->rb); |
3157 | if (buffer) | 3151 | if (rb) |
3158 | events = atomic_xchg(&buffer->poll, 0); | 3152 | events = atomic_xchg(&rb->poll, 0); |
3159 | rcu_read_unlock(); | 3153 | rcu_read_unlock(); |
3160 | 3154 | ||
3161 | poll_wait(file, &event->waitq, wait); | 3155 | poll_wait(file, &event->waitq, wait); |
@@ -3358,6 +3352,18 @@ static int perf_event_index(struct perf_event *event) | |||
3358 | return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; | 3352 | return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; |
3359 | } | 3353 | } |
3360 | 3354 | ||
3355 | static void calc_timer_values(struct perf_event *event, | ||
3356 | u64 *running, | ||
3357 | u64 *enabled) | ||
3358 | { | ||
3359 | u64 now, ctx_time; | ||
3360 | |||
3361 | now = perf_clock(); | ||
3362 | ctx_time = event->shadow_ctx_time + now; | ||
3363 | *enabled = ctx_time - event->tstamp_enabled; | ||
3364 | *running = ctx_time - event->tstamp_running; | ||
3365 | } | ||
3366 | |||
3361 | /* | 3367 | /* |
3362 | * Callers need to ensure there can be no nesting of this function, otherwise | 3368 | * Callers need to ensure there can be no nesting of this function, otherwise |
3363 | * the seqlock logic goes bad. We can not serialize this because the arch | 3369 | * the seqlock logic goes bad. We can not serialize this because the arch |
@@ -3366,14 +3372,25 @@ static int perf_event_index(struct perf_event *event) | |||
3366 | void perf_event_update_userpage(struct perf_event *event) | 3372 | void perf_event_update_userpage(struct perf_event *event) |
3367 | { | 3373 | { |
3368 | struct perf_event_mmap_page *userpg; | 3374 | struct perf_event_mmap_page *userpg; |
3369 | struct perf_buffer *buffer; | 3375 | struct ring_buffer *rb; |
3376 | u64 enabled, running; | ||
3370 | 3377 | ||
3371 | rcu_read_lock(); | 3378 | rcu_read_lock(); |
3372 | buffer = rcu_dereference(event->buffer); | 3379 | /* |
3373 | if (!buffer) | 3380 | * compute total_time_enabled, total_time_running |
3381 | * based on snapshot values taken when the event | ||
3382 | * was last scheduled in. | ||
3383 | * | ||
3384 | * we cannot simply call update_context_time() | ||
3385 | * because of locking issues, as we can be called in | ||
3386 | * NMI context | ||
3387 | */ | ||
3388 | calc_timer_values(event, &running, &enabled); | ||
3389 | rb = rcu_dereference(event->rb); | ||
3390 | if (!rb) | ||
3374 | goto unlock; | 3391 | goto unlock; |
3375 | 3392 | ||
3376 | userpg = buffer->user_page; | 3393 | userpg = rb->user_page; |
3377 | 3394 | ||
3378 | /* | 3395 | /* |
3379 | * Disable preemption so as to not let the corresponding user-space | 3396 | * Disable preemption so as to not let the corresponding user-space |
@@ -3387,10 +3404,10 @@ void perf_event_update_userpage(struct perf_event *event) | |||
3387 | if (event->state == PERF_EVENT_STATE_ACTIVE) | 3404 | if (event->state == PERF_EVENT_STATE_ACTIVE) |
3388 | userpg->offset -= local64_read(&event->hw.prev_count); | 3405 | userpg->offset -= local64_read(&event->hw.prev_count); |
3389 | 3406 | ||
3390 | userpg->time_enabled = event->total_time_enabled + | 3407 | userpg->time_enabled = enabled + |
3391 | atomic64_read(&event->child_total_time_enabled); | 3408 | atomic64_read(&event->child_total_time_enabled); |
3392 | 3409 | ||
3393 | userpg->time_running = event->total_time_running + | 3410 | userpg->time_running = running + |
3394 | atomic64_read(&event->child_total_time_running); | 3411 | atomic64_read(&event->child_total_time_running); |
3395 | 3412 | ||
3396 | barrier(); | 3413 | barrier(); |
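calc_timer_values() exists because perf_event_update_userpage() can run in NMI context, where taking ctx->lock for update_context_time() is not an option; both figures are derived purely from per-event snapshots. Note the parameter order, running before enabled, which the call site above has to respect. The arithmetic in one place:

    /* With now = perf_clock():
     *   ctx_time = event->shadow_ctx_time + now;
     *   enabled  = ctx_time - event->tstamp_enabled;
     *   running  = ctx_time - event->tstamp_running;
     * i.e. the snapshot times simply advance with the clock, and no
     * shared context state is touched. */
    u64 enabled, running;
    calc_timer_values(event, &running, &enabled);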
@@ -3400,220 +3417,10 @@ unlock: | |||
3400 | rcu_read_unlock(); | 3417 | rcu_read_unlock(); |
3401 | } | 3418 | } |
3402 | 3419 | ||
3403 | static unsigned long perf_data_size(struct perf_buffer *buffer); | ||
3404 | |||
3405 | static void | ||
3406 | perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags) | ||
3407 | { | ||
3408 | long max_size = perf_data_size(buffer); | ||
3409 | |||
3410 | if (watermark) | ||
3411 | buffer->watermark = min(max_size, watermark); | ||
3412 | |||
3413 | if (!buffer->watermark) | ||
3414 | buffer->watermark = max_size / 2; | ||
3415 | |||
3416 | if (flags & PERF_BUFFER_WRITABLE) | ||
3417 | buffer->writable = 1; | ||
3418 | |||
3419 | atomic_set(&buffer->refcount, 1); | ||
3420 | } | ||
3421 | |||
3422 | #ifndef CONFIG_PERF_USE_VMALLOC | ||
3423 | |||
3424 | /* | ||
3425 | * Back perf_mmap() with regular GFP_KERNEL-0 pages. | ||
3426 | */ | ||
3427 | |||
3428 | static struct page * | ||
3429 | perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) | ||
3430 | { | ||
3431 | if (pgoff > buffer->nr_pages) | ||
3432 | return NULL; | ||
3433 | |||
3434 | if (pgoff == 0) | ||
3435 | return virt_to_page(buffer->user_page); | ||
3436 | |||
3437 | return virt_to_page(buffer->data_pages[pgoff - 1]); | ||
3438 | } | ||
3439 | |||
3440 | static void *perf_mmap_alloc_page(int cpu) | ||
3441 | { | ||
3442 | struct page *page; | ||
3443 | int node; | ||
3444 | |||
3445 | node = (cpu == -1) ? cpu : cpu_to_node(cpu); | ||
3446 | page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); | ||
3447 | if (!page) | ||
3448 | return NULL; | ||
3449 | |||
3450 | return page_address(page); | ||
3451 | } | ||
3452 | |||
3453 | static struct perf_buffer * | ||
3454 | perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) | ||
3455 | { | ||
3456 | struct perf_buffer *buffer; | ||
3457 | unsigned long size; | ||
3458 | int i; | ||
3459 | |||
3460 | size = sizeof(struct perf_buffer); | ||
3461 | size += nr_pages * sizeof(void *); | ||
3462 | |||
3463 | buffer = kzalloc(size, GFP_KERNEL); | ||
3464 | if (!buffer) | ||
3465 | goto fail; | ||
3466 | |||
3467 | buffer->user_page = perf_mmap_alloc_page(cpu); | ||
3468 | if (!buffer->user_page) | ||
3469 | goto fail_user_page; | ||
3470 | |||
3471 | for (i = 0; i < nr_pages; i++) { | ||
3472 | buffer->data_pages[i] = perf_mmap_alloc_page(cpu); | ||
3473 | if (!buffer->data_pages[i]) | ||
3474 | goto fail_data_pages; | ||
3475 | } | ||
3476 | |||
3477 | buffer->nr_pages = nr_pages; | ||
3478 | |||
3479 | perf_buffer_init(buffer, watermark, flags); | ||
3480 | |||
3481 | return buffer; | ||
3482 | |||
3483 | fail_data_pages: | ||
3484 | for (i--; i >= 0; i--) | ||
3485 | free_page((unsigned long)buffer->data_pages[i]); | ||
3486 | |||
3487 | free_page((unsigned long)buffer->user_page); | ||
3488 | |||
3489 | fail_user_page: | ||
3490 | kfree(buffer); | ||
3491 | |||
3492 | fail: | ||
3493 | return NULL; | ||
3494 | } | ||
3495 | |||
3496 | static void perf_mmap_free_page(unsigned long addr) | ||
3497 | { | ||
3498 | struct page *page = virt_to_page((void *)addr); | ||
3499 | |||
3500 | page->mapping = NULL; | ||
3501 | __free_page(page); | ||
3502 | } | ||
3503 | |||
3504 | static void perf_buffer_free(struct perf_buffer *buffer) | ||
3505 | { | ||
3506 | int i; | ||
3507 | |||
3508 | perf_mmap_free_page((unsigned long)buffer->user_page); | ||
3509 | for (i = 0; i < buffer->nr_pages; i++) | ||
3510 | perf_mmap_free_page((unsigned long)buffer->data_pages[i]); | ||
3511 | kfree(buffer); | ||
3512 | } | ||
3513 | |||
3514 | static inline int page_order(struct perf_buffer *buffer) | ||
3515 | { | ||
3516 | return 0; | ||
3517 | } | ||
3518 | |||
3519 | #else | ||
3520 | |||
3521 | /* | ||
3522 | * Back perf_mmap() with vmalloc memory. | ||
3523 | * | ||
3524 | * Required for architectures that have d-cache aliasing issues. | ||
3525 | */ | ||
3526 | |||
3527 | static inline int page_order(struct perf_buffer *buffer) | ||
3528 | { | ||
3529 | return buffer->page_order; | ||
3530 | } | ||
3531 | |||
3532 | static struct page * | ||
3533 | perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) | ||
3534 | { | ||
3535 | if (pgoff > (1UL << page_order(buffer))) | ||
3536 | return NULL; | ||
3537 | |||
3538 | return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE); | ||
3539 | } | ||
3540 | |||
3541 | static void perf_mmap_unmark_page(void *addr) | ||
3542 | { | ||
3543 | struct page *page = vmalloc_to_page(addr); | ||
3544 | |||
3545 | page->mapping = NULL; | ||
3546 | } | ||
3547 | |||
3548 | static void perf_buffer_free_work(struct work_struct *work) | ||
3549 | { | ||
3550 | struct perf_buffer *buffer; | ||
3551 | void *base; | ||
3552 | int i, nr; | ||
3553 | |||
3554 | buffer = container_of(work, struct perf_buffer, work); | ||
3555 | nr = 1 << page_order(buffer); | ||
3556 | |||
3557 | base = buffer->user_page; | ||
3558 | for (i = 0; i < nr + 1; i++) | ||
3559 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); | ||
3560 | |||
3561 | vfree(base); | ||
3562 | kfree(buffer); | ||
3563 | } | ||
3564 | |||
3565 | static void perf_buffer_free(struct perf_buffer *buffer) | ||
3566 | { | ||
3567 | schedule_work(&buffer->work); | ||
3568 | } | ||
3569 | |||
3570 | static struct perf_buffer * | ||
3571 | perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) | ||
3572 | { | ||
3573 | struct perf_buffer *buffer; | ||
3574 | unsigned long size; | ||
3575 | void *all_buf; | ||
3576 | |||
3577 | size = sizeof(struct perf_buffer); | ||
3578 | size += sizeof(void *); | ||
3579 | |||
3580 | buffer = kzalloc(size, GFP_KERNEL); | ||
3581 | if (!buffer) | ||
3582 | goto fail; | ||
3583 | |||
3584 | INIT_WORK(&buffer->work, perf_buffer_free_work); | ||
3585 | |||
3586 | all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); | ||
3587 | if (!all_buf) | ||
3588 | goto fail_all_buf; | ||
3589 | |||
3590 | buffer->user_page = all_buf; | ||
3591 | buffer->data_pages[0] = all_buf + PAGE_SIZE; | ||
3592 | buffer->page_order = ilog2(nr_pages); | ||
3593 | buffer->nr_pages = 1; | ||
3594 | |||
3595 | perf_buffer_init(buffer, watermark, flags); | ||
3596 | |||
3597 | return buffer; | ||
3598 | |||
3599 | fail_all_buf: | ||
3600 | kfree(buffer); | ||
3601 | |||
3602 | fail: | ||
3603 | return NULL; | ||
3604 | } | ||
3605 | |||
3606 | #endif | ||
3607 | |||
3608 | static unsigned long perf_data_size(struct perf_buffer *buffer) | ||
3609 | { | ||
3610 | return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer)); | ||
3611 | } | ||
3612 | |||
3613 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 3420 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
3614 | { | 3421 | { |
3615 | struct perf_event *event = vma->vm_file->private_data; | 3422 | struct perf_event *event = vma->vm_file->private_data; |
3616 | struct perf_buffer *buffer; | 3423 | struct ring_buffer *rb; |
3617 | int ret = VM_FAULT_SIGBUS; | 3424 | int ret = VM_FAULT_SIGBUS; |
3618 | 3425 | ||
3619 | if (vmf->flags & FAULT_FLAG_MKWRITE) { | 3426 | if (vmf->flags & FAULT_FLAG_MKWRITE) { |
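This block (buffer alloc/free, the vmalloc fallback, and the output-handle machinery further down) is relocated rather than deleted: the Makefile hunk earlier adds ring_buffer.o and core.c now includes "internal.h", so the code presumably lands in kernel/events/ring_buffer.c behind an interface along these lines (inferred from the rb_alloc()/rb_free() call sites visible elsewhere in this patch):

    /* kernel/events/internal.h, sketch: */
    extern struct ring_buffer *rb_alloc(int nr_pages, long watermark,
                                        int cpu, int flags);
    extern void rb_free(struct ring_buffer *rb);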
@@ -3623,14 +3430,14 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
3623 | } | 3430 | } |
3624 | 3431 | ||
3625 | rcu_read_lock(); | 3432 | rcu_read_lock(); |
3626 | buffer = rcu_dereference(event->buffer); | 3433 | rb = rcu_dereference(event->rb); |
3627 | if (!buffer) | 3434 | if (!rb) |
3628 | goto unlock; | 3435 | goto unlock; |
3629 | 3436 | ||
3630 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) | 3437 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) |
3631 | goto unlock; | 3438 | goto unlock; |
3632 | 3439 | ||
3633 | vmf->page = perf_mmap_to_page(buffer, vmf->pgoff); | 3440 | vmf->page = perf_mmap_to_page(rb, vmf->pgoff); |
3634 | if (!vmf->page) | 3441 | if (!vmf->page) |
3635 | goto unlock; | 3442 | goto unlock; |
3636 | 3443 | ||
@@ -3645,35 +3452,35 @@ unlock: | |||
3645 | return ret; | 3452 | return ret; |
3646 | } | 3453 | } |
3647 | 3454 | ||
3648 | static void perf_buffer_free_rcu(struct rcu_head *rcu_head) | 3455 | static void rb_free_rcu(struct rcu_head *rcu_head) |
3649 | { | 3456 | { |
3650 | struct perf_buffer *buffer; | 3457 | struct ring_buffer *rb; |
3651 | 3458 | ||
3652 | buffer = container_of(rcu_head, struct perf_buffer, rcu_head); | 3459 | rb = container_of(rcu_head, struct ring_buffer, rcu_head); |
3653 | perf_buffer_free(buffer); | 3460 | rb_free(rb); |
3654 | } | 3461 | } |
3655 | 3462 | ||
3656 | static struct perf_buffer *perf_buffer_get(struct perf_event *event) | 3463 | static struct ring_buffer *ring_buffer_get(struct perf_event *event) |
3657 | { | 3464 | { |
3658 | struct perf_buffer *buffer; | 3465 | struct ring_buffer *rb; |
3659 | 3466 | ||
3660 | rcu_read_lock(); | 3467 | rcu_read_lock(); |
3661 | buffer = rcu_dereference(event->buffer); | 3468 | rb = rcu_dereference(event->rb); |
3662 | if (buffer) { | 3469 | if (rb) { |
3663 | if (!atomic_inc_not_zero(&buffer->refcount)) | 3470 | if (!atomic_inc_not_zero(&rb->refcount)) |
3664 | buffer = NULL; | 3471 | rb = NULL; |
3665 | } | 3472 | } |
3666 | rcu_read_unlock(); | 3473 | rcu_read_unlock(); |
3667 | 3474 | ||
3668 | return buffer; | 3475 | return rb; |
3669 | } | 3476 | } |
3670 | 3477 | ||
3671 | static void perf_buffer_put(struct perf_buffer *buffer) | 3478 | static void ring_buffer_put(struct ring_buffer *rb) |
3672 | { | 3479 | { |
3673 | if (!atomic_dec_and_test(&buffer->refcount)) | 3480 | if (!atomic_dec_and_test(&rb->refcount)) |
3674 | return; | 3481 | return; |
3675 | 3482 | ||
3676 | call_rcu(&buffer->rcu_head, perf_buffer_free_rcu); | 3483 | call_rcu(&rb->rcu_head, rb_free_rcu); |
3677 | } | 3484 | } |
3678 | 3485 | ||
3679 | static void perf_mmap_open(struct vm_area_struct *vma) | 3486 | static void perf_mmap_open(struct vm_area_struct *vma) |
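ring_buffer_get()/ring_buffer_put() keep the old refcounting scheme under the new names, and it is worth spelling out why the combination is safe: atomic_inc_not_zero() refuses to resurrect a buffer whose last reference is already gone, and the final put defers the free past an RCU grace period so concurrent rcu_dereference() users cannot touch freed memory:

    rcu_read_lock();
    rb = rcu_dereference(event->rb);
    if (rb && !atomic_inc_not_zero(&rb->refcount))
            rb = NULL;                    /* raced with the final put */
    rcu_read_unlock();

    /* ... later, on the put side: */
    if (atomic_dec_and_test(&rb->refcount))
            call_rcu(&rb->rcu_head, rb_free_rcu);   /* free after GP */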
@@ -3688,16 +3495,16 @@ static void perf_mmap_close(struct vm_area_struct *vma) | |||
3688 | struct perf_event *event = vma->vm_file->private_data; | 3495 | struct perf_event *event = vma->vm_file->private_data; |
3689 | 3496 | ||
3690 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { | 3497 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { |
3691 | unsigned long size = perf_data_size(event->buffer); | 3498 | unsigned long size = perf_data_size(event->rb); |
3692 | struct user_struct *user = event->mmap_user; | 3499 | struct user_struct *user = event->mmap_user; |
3693 | struct perf_buffer *buffer = event->buffer; | 3500 | struct ring_buffer *rb = event->rb; |
3694 | 3501 | ||
3695 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); | 3502 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); |
3696 | vma->vm_mm->locked_vm -= event->mmap_locked; | 3503 | vma->vm_mm->locked_vm -= event->mmap_locked; |
3697 | rcu_assign_pointer(event->buffer, NULL); | 3504 | rcu_assign_pointer(event->rb, NULL); |
3698 | mutex_unlock(&event->mmap_mutex); | 3505 | mutex_unlock(&event->mmap_mutex); |
3699 | 3506 | ||
3700 | perf_buffer_put(buffer); | 3507 | ring_buffer_put(rb); |
3701 | free_uid(user); | 3508 | free_uid(user); |
3702 | } | 3509 | } |
3703 | } | 3510 | } |
@@ -3715,7 +3522,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
3715 | unsigned long user_locked, user_lock_limit; | 3522 | unsigned long user_locked, user_lock_limit; |
3716 | struct user_struct *user = current_user(); | 3523 | struct user_struct *user = current_user(); |
3717 | unsigned long locked, lock_limit; | 3524 | unsigned long locked, lock_limit; |
3718 | struct perf_buffer *buffer; | 3525 | struct ring_buffer *rb; |
3719 | unsigned long vma_size; | 3526 | unsigned long vma_size; |
3720 | unsigned long nr_pages; | 3527 | unsigned long nr_pages; |
3721 | long user_extra, extra; | 3528 | long user_extra, extra; |
@@ -3724,7 +3531,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
3724 | /* | 3531 | /* |
3725 | * Don't allow mmap() of inherited per-task counters. This would | 3532 | * Don't allow mmap() of inherited per-task counters. This would |
3726 | * create a performance issue due to all children writing to the | 3533 | * create a performance issue due to all children writing to the |
3727 | * same buffer. | 3534 | * same rb. |
3728 | */ | 3535 | */ |
3729 | if (event->cpu == -1 && event->attr.inherit) | 3536 | if (event->cpu == -1 && event->attr.inherit) |
3730 | return -EINVAL; | 3537 | return -EINVAL; |
@@ -3736,7 +3543,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
3736 | nr_pages = (vma_size / PAGE_SIZE) - 1; | 3543 | nr_pages = (vma_size / PAGE_SIZE) - 1; |
3737 | 3544 | ||
3738 | /* | 3545 | /* |
3739 | * If we have buffer pages ensure they're a power-of-two number, so we | 3546 | * If we have rb pages ensure they're a power-of-two number, so we |
3740 | * can do bitmasks instead of modulo. | 3547 | * can do bitmasks instead of modulo. |
3741 | */ | 3548 | */ |
3742 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) | 3549 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) |
@@ -3750,9 +3557,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
3750 | 3557 | ||
3751 | WARN_ON_ONCE(event->ctx->parent_ctx); | 3558 | WARN_ON_ONCE(event->ctx->parent_ctx); |
3752 | mutex_lock(&event->mmap_mutex); | 3559 | mutex_lock(&event->mmap_mutex); |
3753 | if (event->buffer) { | 3560 | if (event->rb) { |
3754 | if (event->buffer->nr_pages == nr_pages) | 3561 | if (event->rb->nr_pages == nr_pages) |
3755 | atomic_inc(&event->buffer->refcount); | 3562 | atomic_inc(&event->rb->refcount); |
3756 | else | 3563 | else |
3757 | ret = -EINVAL; | 3564 | ret = -EINVAL; |
3758 | goto unlock; | 3565 | goto unlock; |
@@ -3782,18 +3589,20 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
3782 | goto unlock; | 3589 | goto unlock; |
3783 | } | 3590 | } |
3784 | 3591 | ||
3785 | WARN_ON(event->buffer); | 3592 | WARN_ON(event->rb); |
3786 | 3593 | ||
3787 | if (vma->vm_flags & VM_WRITE) | 3594 | if (vma->vm_flags & VM_WRITE) |
3788 | flags |= PERF_BUFFER_WRITABLE; | 3595 | flags |= RING_BUFFER_WRITABLE; |
3789 | 3596 | ||
3790 | buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark, | 3597 | rb = rb_alloc(nr_pages, |
3791 | event->cpu, flags); | 3598 | event->attr.watermark ? event->attr.wakeup_watermark : 0, |
3792 | if (!buffer) { | 3599 | event->cpu, flags); |
3600 | |||
3601 | if (!rb) { | ||
3793 | ret = -ENOMEM; | 3602 | ret = -ENOMEM; |
3794 | goto unlock; | 3603 | goto unlock; |
3795 | } | 3604 | } |
3796 | rcu_assign_pointer(event->buffer, buffer); | 3605 | rcu_assign_pointer(event->rb, rb); |
3797 | 3606 | ||
3798 | atomic_long_add(user_extra, &user->locked_vm); | 3607 | atomic_long_add(user_extra, &user->locked_vm); |
3799 | event->mmap_locked = extra; | 3608 | event->mmap_locked = extra; |
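A subtle fix hides in the rb_alloc() call: wakeup_watermark shares a union with wakeup_events in struct perf_event_attr, so the value is only meaningful when attr.watermark says the union holds a byte count, and it is now passed through only in that case. The userspace side, for reference (hypothetical setup):

    struct perf_event_attr attr = { 0 };

    attr.watermark        = 1;      /* union below is a byte count */
    attr.wakeup_watermark = 4096;   /* wake the reader every 4 KiB */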
@@ -3892,117 +3701,6 @@ int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) | |||
3892 | } | 3701 | } |
3893 | EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); | 3702 | EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); |
3894 | 3703 | ||
3895 | /* | ||
3896 | * Output | ||
3897 | */ | ||
3898 | static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail, | ||
3899 | unsigned long offset, unsigned long head) | ||
3900 | { | ||
3901 | unsigned long mask; | ||
3902 | |||
3903 | if (!buffer->writable) | ||
3904 | return true; | ||
3905 | |||
3906 | mask = perf_data_size(buffer) - 1; | ||
3907 | |||
3908 | offset = (offset - tail) & mask; | ||
3909 | head = (head - tail) & mask; | ||
3910 | |||
3911 | if ((int)(head - offset) < 0) | ||
3912 | return false; | ||
3913 | |||
3914 | return true; | ||
3915 | } | ||
3916 | |||
3917 | static void perf_output_wakeup(struct perf_output_handle *handle) | ||
3918 | { | ||
3919 | atomic_set(&handle->buffer->poll, POLL_IN); | ||
3920 | |||
3921 | if (handle->nmi) { | ||
3922 | handle->event->pending_wakeup = 1; | ||
3923 | irq_work_queue(&handle->event->pending); | ||
3924 | } else | ||
3925 | perf_event_wakeup(handle->event); | ||
3926 | } | ||
3927 | |||
3928 | /* | ||
3929 | * We need to ensure a later event_id doesn't publish a head when a former | ||
3930 | * event isn't done writing. However since we need to deal with NMIs we | ||
3931 | * cannot fully serialize things. | ||
3932 | * | ||
3933 | * We only publish the head (and generate a wakeup) when the outer-most | ||
3934 | * event completes. | ||
3935 | */ | ||
3936 | static void perf_output_get_handle(struct perf_output_handle *handle) | ||
3937 | { | ||
3938 | struct perf_buffer *buffer = handle->buffer; | ||
3939 | |||
3940 | preempt_disable(); | ||
3941 | local_inc(&buffer->nest); | ||
3942 | handle->wakeup = local_read(&buffer->wakeup); | ||
3943 | } | ||
3944 | |||
3945 | static void perf_output_put_handle(struct perf_output_handle *handle) | ||
3946 | { | ||
3947 | struct perf_buffer *buffer = handle->buffer; | ||
3948 | unsigned long head; | ||
3949 | |||
3950 | again: | ||
3951 | head = local_read(&buffer->head); | ||
3952 | |||
3953 | /* | ||
3954 | * IRQ/NMI can happen here, which means we can miss a head update. | ||
3955 | */ | ||
3956 | |||
3957 | if (!local_dec_and_test(&buffer->nest)) | ||
3958 | goto out; | ||
3959 | |||
3960 | /* | ||
3961 | * Publish the known good head. Rely on the full barrier implied | ||
3962 | * by atomic_dec_and_test() order the buffer->head read and this | ||
3963 | * write. | ||
3964 | */ | ||
3965 | buffer->user_page->data_head = head; | ||
3966 | |||
3967 | /* | ||
3968 | * Now check if we missed an update, rely on the (compiler) | ||
3969 | * barrier in atomic_dec_and_test() to re-read buffer->head. | ||
3970 | */ | ||
3971 | if (unlikely(head != local_read(&buffer->head))) { | ||
3972 | local_inc(&buffer->nest); | ||
3973 | goto again; | ||
3974 | } | ||
3975 | |||
3976 | if (handle->wakeup != local_read(&buffer->wakeup)) | ||
3977 | perf_output_wakeup(handle); | ||
3978 | |||
3979 | out: | ||
3980 | preempt_enable(); | ||
3981 | } | ||
3982 | |||
3983 | __always_inline void perf_output_copy(struct perf_output_handle *handle, | ||
3984 | const void *buf, unsigned int len) | ||
3985 | { | ||
3986 | do { | ||
3987 | unsigned long size = min_t(unsigned long, handle->size, len); | ||
3988 | |||
3989 | memcpy(handle->addr, buf, size); | ||
3990 | |||
3991 | len -= size; | ||
3992 | handle->addr += size; | ||
3993 | buf += size; | ||
3994 | handle->size -= size; | ||
3995 | if (!handle->size) { | ||
3996 | struct perf_buffer *buffer = handle->buffer; | ||
3997 | |||
3998 | handle->page++; | ||
3999 | handle->page &= buffer->nr_pages - 1; | ||
4000 | handle->addr = buffer->data_pages[handle->page]; | ||
4001 | handle->size = PAGE_SIZE << page_order(buffer); | ||
4002 | } | ||
4003 | } while (len); | ||
4004 | } | ||
4005 | |||
4006 | static void __perf_event_header__init_id(struct perf_event_header *header, | 3704 | static void __perf_event_header__init_id(struct perf_event_header *header, |
4007 | struct perf_sample_data *data, | 3705 | struct perf_sample_data *data, |
4008 | struct perf_event *event) | 3706 | struct perf_event *event) |
@@ -4033,9 +3731,9 @@ static void __perf_event_header__init_id(struct perf_event_header *header, | |||
4033 | } | 3731 | } |
4034 | } | 3732 | } |
4035 | 3733 | ||
4036 | static void perf_event_header__init_id(struct perf_event_header *header, | 3734 | void perf_event_header__init_id(struct perf_event_header *header, |
4037 | struct perf_sample_data *data, | 3735 | struct perf_sample_data *data, |
4038 | struct perf_event *event) | 3736 | struct perf_event *event) |
4039 | { | 3737 | { |
4040 | if (event->attr.sample_id_all) | 3738 | if (event->attr.sample_id_all) |
4041 | __perf_event_header__init_id(header, data, event); | 3739 | __perf_event_header__init_id(header, data, event); |
@@ -4062,121 +3760,14 @@ static void __perf_event__output_id_sample(struct perf_output_handle *handle, | |||
4062 | perf_output_put(handle, data->cpu_entry); | 3760 | perf_output_put(handle, data->cpu_entry); |
4063 | } | 3761 | } |
4064 | 3762 | ||
4065 | static void perf_event__output_id_sample(struct perf_event *event, | 3763 | void perf_event__output_id_sample(struct perf_event *event, |
4066 | struct perf_output_handle *handle, | 3764 | struct perf_output_handle *handle, |
4067 | struct perf_sample_data *sample) | 3765 | struct perf_sample_data *sample) |
4068 | { | 3766 | { |
4069 | if (event->attr.sample_id_all) | 3767 | if (event->attr.sample_id_all) |
4070 | __perf_event__output_id_sample(handle, sample); | 3768 | __perf_event__output_id_sample(handle, sample); |
4071 | } | 3769 | } |
4072 | 3770 | ||
4073 | int perf_output_begin(struct perf_output_handle *handle, | ||
4074 | struct perf_event *event, unsigned int size, | ||
4075 | int nmi, int sample) | ||
4076 | { | ||
4077 | struct perf_buffer *buffer; | ||
4078 | unsigned long tail, offset, head; | ||
4079 | int have_lost; | ||
4080 | struct perf_sample_data sample_data; | ||
4081 | struct { | ||
4082 | struct perf_event_header header; | ||
4083 | u64 id; | ||
4084 | u64 lost; | ||
4085 | } lost_event; | ||
4086 | |||
4087 | rcu_read_lock(); | ||
4088 | /* | ||
4089 | * For inherited events we send all the output towards the parent. | ||
4090 | */ | ||
4091 | if (event->parent) | ||
4092 | event = event->parent; | ||
4093 | |||
4094 | buffer = rcu_dereference(event->buffer); | ||
4095 | if (!buffer) | ||
4096 | goto out; | ||
4097 | |||
4098 | handle->buffer = buffer; | ||
4099 | handle->event = event; | ||
4100 | handle->nmi = nmi; | ||
4101 | handle->sample = sample; | ||
4102 | |||
4103 | if (!buffer->nr_pages) | ||
4104 | goto out; | ||
4105 | |||
4106 | have_lost = local_read(&buffer->lost); | ||
4107 | if (have_lost) { | ||
4108 | lost_event.header.size = sizeof(lost_event); | ||
4109 | perf_event_header__init_id(&lost_event.header, &sample_data, | ||
4110 | event); | ||
4111 | size += lost_event.header.size; | ||
4112 | } | ||
4113 | |||
4114 | perf_output_get_handle(handle); | ||
4115 | |||
4116 | do { | ||
4117 | /* | ||
4118 | * Userspace could choose to issue a mb() before updating the | ||
4119 | * tail pointer. So that all reads will be completed before the | ||
4120 | * write is issued. | ||
4121 | */ | ||
4122 | tail = ACCESS_ONCE(buffer->user_page->data_tail); | ||
4123 | smp_rmb(); | ||
4124 | offset = head = local_read(&buffer->head); | ||
4125 | head += size; | ||
4126 | if (unlikely(!perf_output_space(buffer, tail, offset, head))) | ||
4127 | goto fail; | ||
4128 | } while (local_cmpxchg(&buffer->head, offset, head) != offset); | ||
4129 | |||
4130 | if (head - local_read(&buffer->wakeup) > buffer->watermark) | ||
4131 | local_add(buffer->watermark, &buffer->wakeup); | ||
4132 | |||
4133 | handle->page = offset >> (PAGE_SHIFT + page_order(buffer)); | ||
4134 | handle->page &= buffer->nr_pages - 1; | ||
4135 | handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1); | ||
4136 | handle->addr = buffer->data_pages[handle->page]; | ||
4137 | handle->addr += handle->size; | ||
4138 | handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size; | ||
4139 | |||
4140 | if (have_lost) { | ||
4141 | lost_event.header.type = PERF_RECORD_LOST; | ||
4142 | lost_event.header.misc = 0; | ||
4143 | lost_event.id = event->id; | ||
4144 | lost_event.lost = local_xchg(&buffer->lost, 0); | ||
4145 | |||
4146 | perf_output_put(handle, lost_event); | ||
4147 | perf_event__output_id_sample(event, handle, &sample_data); | ||
4148 | } | ||
4149 | |||
4150 | return 0; | ||
4151 | |||
4152 | fail: | ||
4153 | local_inc(&buffer->lost); | ||
4154 | perf_output_put_handle(handle); | ||
4155 | out: | ||
4156 | rcu_read_unlock(); | ||
4157 | |||
4158 | return -ENOSPC; | ||
4159 | } | ||
4160 | |||
4161 | void perf_output_end(struct perf_output_handle *handle) | ||
4162 | { | ||
4163 | struct perf_event *event = handle->event; | ||
4164 | struct perf_buffer *buffer = handle->buffer; | ||
4165 | |||
4166 | int wakeup_events = event->attr.wakeup_events; | ||
4167 | |||
4168 | if (handle->sample && wakeup_events) { | ||
4169 | int events = local_inc_return(&buffer->events); | ||
4170 | if (events >= wakeup_events) { | ||
4171 | local_sub(wakeup_events, &buffer->events); | ||
4172 | local_inc(&buffer->wakeup); | ||
4173 | } | ||
4174 | } | ||
4175 | |||
4176 | perf_output_put_handle(handle); | ||
4177 | rcu_read_unlock(); | ||
4178 | } | ||
4179 | |||
4180 | static void perf_output_read_one(struct perf_output_handle *handle, | 3771 | static void perf_output_read_one(struct perf_output_handle *handle, |
4181 | struct perf_event *event, | 3772 | struct perf_event *event, |
4182 | u64 enabled, u64 running) | 3773 | u64 enabled, u64 running) |
@@ -4197,7 +3788,7 @@ static void perf_output_read_one(struct perf_output_handle *handle, | |||
4197 | if (read_format & PERF_FORMAT_ID) | 3788 | if (read_format & PERF_FORMAT_ID) |
4198 | values[n++] = primary_event_id(event); | 3789 | values[n++] = primary_event_id(event); |
4199 | 3790 | ||
4200 | perf_output_copy(handle, values, n * sizeof(u64)); | 3791 | __output_copy(handle, values, n * sizeof(u64)); |
4201 | } | 3792 | } |
4202 | 3793 | ||
4203 | /* | 3794 | /* |
@@ -4227,7 +3818,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, | |||
4227 | if (read_format & PERF_FORMAT_ID) | 3818 | if (read_format & PERF_FORMAT_ID) |
4228 | values[n++] = primary_event_id(leader); | 3819 | values[n++] = primary_event_id(leader); |
4229 | 3820 | ||
4230 | perf_output_copy(handle, values, n * sizeof(u64)); | 3821 | __output_copy(handle, values, n * sizeof(u64)); |
4231 | 3822 | ||
4232 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { | 3823 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { |
4233 | n = 0; | 3824 | n = 0; |
@@ -4239,7 +3830,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, | |||
4239 | if (read_format & PERF_FORMAT_ID) | 3830 | if (read_format & PERF_FORMAT_ID) |
4240 | values[n++] = primary_event_id(sub); | 3831 | values[n++] = primary_event_id(sub); |
4241 | 3832 | ||
4242 | perf_output_copy(handle, values, n * sizeof(u64)); | 3833 | __output_copy(handle, values, n * sizeof(u64)); |
4243 | } | 3834 | } |
4244 | } | 3835 | } |
4245 | 3836 | ||
@@ -4249,7 +3840,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, | |||
4249 | static void perf_output_read(struct perf_output_handle *handle, | 3840 | static void perf_output_read(struct perf_output_handle *handle, |
4250 | struct perf_event *event) | 3841 | struct perf_event *event) |
4251 | { | 3842 | { |
4252 | u64 enabled = 0, running = 0, now, ctx_time; | 3843 | u64 enabled = 0, running = 0; |
4253 | u64 read_format = event->attr.read_format; | 3844 | u64 read_format = event->attr.read_format; |
4254 | 3845 | ||
4255 | /* | 3846 | /* |
@@ -4261,12 +3852,8 @@ static void perf_output_read(struct perf_output_handle *handle, | |||
4261 | * because of locking issue as we are called in | 3852 | * because of locking issue as we are called in |
4262 | * NMI context | 3853 | * NMI context |
4263 | */ | 3854 | */ |
4264 | if (read_format & PERF_FORMAT_TOTAL_TIMES) { | 3855 | if (read_format & PERF_FORMAT_TOTAL_TIMES) |
4265 | now = perf_clock(); | 3856 | calc_timer_values(event, &enabled, &running); |
4266 | ctx_time = event->shadow_ctx_time + now; | ||
4267 | enabled = ctx_time - event->tstamp_enabled; | ||
4268 | running = ctx_time - event->tstamp_running; | ||
4269 | } | ||
4270 | 3857 | ||
4271 | if (event->attr.read_format & PERF_FORMAT_GROUP) | 3858 | if (event->attr.read_format & PERF_FORMAT_GROUP) |
4272 | perf_output_read_group(handle, event, enabled, running); | 3859 | perf_output_read_group(handle, event, enabled, running); |
@@ -4319,7 +3906,7 @@ void perf_output_sample(struct perf_output_handle *handle, | |||
4319 | 3906 | ||
4320 | size *= sizeof(u64); | 3907 | size *= sizeof(u64); |
4321 | 3908 | ||
4322 | perf_output_copy(handle, data->callchain, size); | 3909 | __output_copy(handle, data->callchain, size); |
4323 | } else { | 3910 | } else { |
4324 | u64 nr = 0; | 3911 | u64 nr = 0; |
4325 | perf_output_put(handle, nr); | 3912 | perf_output_put(handle, nr); |
@@ -4329,8 +3916,8 @@ void perf_output_sample(struct perf_output_handle *handle, | |||
4329 | if (sample_type & PERF_SAMPLE_RAW) { | 3916 | if (sample_type & PERF_SAMPLE_RAW) { |
4330 | if (data->raw) { | 3917 | if (data->raw) { |
4331 | perf_output_put(handle, data->raw->size); | 3918 | perf_output_put(handle, data->raw->size); |
4332 | perf_output_copy(handle, data->raw->data, | 3919 | __output_copy(handle, data->raw->data, |
4333 | data->raw->size); | 3920 | data->raw->size); |
4334 | } else { | 3921 | } else { |
4335 | struct { | 3922 | struct { |
4336 | u32 size; | 3923 | u32 size; |
@@ -4342,6 +3929,20 @@ void perf_output_sample(struct perf_output_handle *handle, | |||
4342 | perf_output_put(handle, raw); | 3929 | perf_output_put(handle, raw); |
4343 | } | 3930 | } |
4344 | } | 3931 | } |
3932 | |||
3933 | if (!event->attr.watermark) { | ||
3934 | int wakeup_events = event->attr.wakeup_events; | ||
3935 | |||
3936 | if (wakeup_events) { | ||
3937 | struct ring_buffer *rb = handle->rb; | ||
3938 | int events = local_inc_return(&rb->events); | ||
3939 | |||
3940 | if (events >= wakeup_events) { | ||
3941 | local_sub(wakeup_events, &rb->events); | ||
3942 | local_inc(&rb->wakeup); | ||
3943 | } | ||
3944 | } | ||
3945 | } | ||
4345 | } | 3946 | } |
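The hunk above moves the wakeup_events bookkeeping out of perf_output_end() and into perf_output_sample(), and makes it mutually exclusive with byte-based watermarks. A minimal userspace sketch of the two configurations the new "if (!event->attr.watermark)" test distinguishes; the numeric values are illustrative only (wakeup_events and wakeup_watermark share a union):

	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);

	/* Count-based: wake the reader every 16 sample records. */
	attr.watermark = 0;
	attr.wakeup_events = 16;

	/* Byte-based alternative: reinterpret the union as a byte threshold. */
	attr.watermark = 1;
	attr.wakeup_watermark = 4096;	/* wake after ~4 KiB of ring data */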
4346 | 3947 | ||
4347 | void perf_prepare_sample(struct perf_event_header *header, | 3948 | void perf_prepare_sample(struct perf_event_header *header, |
@@ -4386,7 +3987,7 @@ void perf_prepare_sample(struct perf_event_header *header, | |||
4386 | } | 3987 | } |
4387 | } | 3988 | } |
4388 | 3989 | ||
4389 | static void perf_event_output(struct perf_event *event, int nmi, | 3990 | static void perf_event_output(struct perf_event *event, |
4390 | struct perf_sample_data *data, | 3991 | struct perf_sample_data *data, |
4391 | struct pt_regs *regs) | 3992 | struct pt_regs *regs) |
4392 | { | 3993 | { |
@@ -4398,7 +3999,7 @@ static void perf_event_output(struct perf_event *event, int nmi, | |||
4398 | 3999 | ||
4399 | perf_prepare_sample(&header, data, event, regs); | 4000 | perf_prepare_sample(&header, data, event, regs); |
4400 | 4001 | ||
4401 | if (perf_output_begin(&handle, event, header.size, nmi, 1)) | 4002 | if (perf_output_begin(&handle, event, header.size)) |
4402 | goto exit; | 4003 | goto exit; |
4403 | 4004 | ||
4404 | perf_output_sample(&handle, &header, data, event); | 4005 | perf_output_sample(&handle, &header, data, event); |
@@ -4438,7 +4039,7 @@ perf_event_read_event(struct perf_event *event, | |||
4438 | int ret; | 4039 | int ret; |
4439 | 4040 | ||
4440 | perf_event_header__init_id(&read_event.header, &sample, event); | 4041 | perf_event_header__init_id(&read_event.header, &sample, event); |
4441 | ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); | 4042 | ret = perf_output_begin(&handle, event, read_event.header.size); |
4442 | if (ret) | 4043 | if (ret) |
4443 | return; | 4044 | return; |
4444 | 4045 | ||
@@ -4481,7 +4082,7 @@ static void perf_event_task_output(struct perf_event *event, | |||
4481 | perf_event_header__init_id(&task_event->event_id.header, &sample, event); | 4082 | perf_event_header__init_id(&task_event->event_id.header, &sample, event); |
4482 | 4083 | ||
4483 | ret = perf_output_begin(&handle, event, | 4084 | ret = perf_output_begin(&handle, event, |
4484 | task_event->event_id.header.size, 0, 0); | 4085 | task_event->event_id.header.size); |
4485 | if (ret) | 4086 | if (ret) |
4486 | goto out; | 4087 | goto out; |
4487 | 4088 | ||
@@ -4618,7 +4219,7 @@ static void perf_event_comm_output(struct perf_event *event, | |||
4618 | 4219 | ||
4619 | perf_event_header__init_id(&comm_event->event_id.header, &sample, event); | 4220 | perf_event_header__init_id(&comm_event->event_id.header, &sample, event); |
4620 | ret = perf_output_begin(&handle, event, | 4221 | ret = perf_output_begin(&handle, event, |
4621 | comm_event->event_id.header.size, 0, 0); | 4222 | comm_event->event_id.header.size); |
4622 | 4223 | ||
4623 | if (ret) | 4224 | if (ret) |
4624 | goto out; | 4225 | goto out; |
@@ -4627,7 +4228,7 @@ static void perf_event_comm_output(struct perf_event *event, | |||
4627 | comm_event->event_id.tid = perf_event_tid(event, comm_event->task); | 4228 | comm_event->event_id.tid = perf_event_tid(event, comm_event->task); |
4628 | 4229 | ||
4629 | perf_output_put(&handle, comm_event->event_id); | 4230 | perf_output_put(&handle, comm_event->event_id); |
4630 | perf_output_copy(&handle, comm_event->comm, | 4231 | __output_copy(&handle, comm_event->comm, |
4631 | comm_event->comm_size); | 4232 | comm_event->comm_size); |
4632 | 4233 | ||
4633 | perf_event__output_id_sample(event, &handle, &sample); | 4234 | perf_event__output_id_sample(event, &handle, &sample); |
@@ -4765,7 +4366,7 @@ static void perf_event_mmap_output(struct perf_event *event, | |||
4765 | 4366 | ||
4766 | perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); | 4367 | perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); |
4767 | ret = perf_output_begin(&handle, event, | 4368 | ret = perf_output_begin(&handle, event, |
4768 | mmap_event->event_id.header.size, 0, 0); | 4369 | mmap_event->event_id.header.size); |
4769 | if (ret) | 4370 | if (ret) |
4770 | goto out; | 4371 | goto out; |
4771 | 4372 | ||
@@ -4773,7 +4374,7 @@ static void perf_event_mmap_output(struct perf_event *event, | |||
4773 | mmap_event->event_id.tid = perf_event_tid(event, current); | 4374 | mmap_event->event_id.tid = perf_event_tid(event, current); |
4774 | 4375 | ||
4775 | perf_output_put(&handle, mmap_event->event_id); | 4376 | perf_output_put(&handle, mmap_event->event_id); |
4776 | perf_output_copy(&handle, mmap_event->file_name, | 4377 | __output_copy(&handle, mmap_event->file_name, |
4777 | mmap_event->file_size); | 4378 | mmap_event->file_size); |
4778 | 4379 | ||
4779 | perf_event__output_id_sample(event, &handle, &sample); | 4380 | perf_event__output_id_sample(event, &handle, &sample); |
@@ -4829,7 +4430,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | |||
4829 | 4430 | ||
4830 | if (file) { | 4431 | if (file) { |
4831 | /* | 4432 | /* |
4832 | * d_path works from the end of the buffer backwards, so we | 4433 | * d_path works from the end of the buffer backwards, so we |
4833 | * need to add enough zero bytes after the string to handle | 4434 | * need to add enough zero bytes after the string to handle |
4834 | * the 64bit alignment we do later. | 4435 | * the 64bit alignment we do later. |
4835 | */ | 4436 | */ |
@@ -4960,7 +4561,7 @@ static void perf_log_throttle(struct perf_event *event, int enable) | |||
4960 | perf_event_header__init_id(&throttle_event.header, &sample, event); | 4561 | perf_event_header__init_id(&throttle_event.header, &sample, event); |
4961 | 4562 | ||
4962 | ret = perf_output_begin(&handle, event, | 4563 | ret = perf_output_begin(&handle, event, |
4963 | throttle_event.header.size, 1, 0); | 4564 | throttle_event.header.size); |
4964 | if (ret) | 4565 | if (ret) |
4965 | return; | 4566 | return; |
4966 | 4567 | ||
@@ -4973,7 +4574,7 @@ static void perf_log_throttle(struct perf_event *event, int enable) | |||
4973 | * Generic event overflow handling, sampling. | 4574 | * Generic event overflow handling, sampling. |
4974 | */ | 4575 | */ |
4975 | 4576 | ||
4976 | static int __perf_event_overflow(struct perf_event *event, int nmi, | 4577 | static int __perf_event_overflow(struct perf_event *event, |
4977 | int throttle, struct perf_sample_data *data, | 4578 | int throttle, struct perf_sample_data *data, |
4978 | struct pt_regs *regs) | 4579 | struct pt_regs *regs) |
4979 | { | 4580 | { |
@@ -5016,34 +4617,28 @@ static int __perf_event_overflow(struct perf_event *event, int nmi, | |||
5016 | if (events && atomic_dec_and_test(&event->event_limit)) { | 4617 | if (events && atomic_dec_and_test(&event->event_limit)) { |
5017 | ret = 1; | 4618 | ret = 1; |
5018 | event->pending_kill = POLL_HUP; | 4619 | event->pending_kill = POLL_HUP; |
5019 | if (nmi) { | 4620 | event->pending_disable = 1; |
5020 | event->pending_disable = 1; | 4621 | irq_work_queue(&event->pending); |
5021 | irq_work_queue(&event->pending); | ||
5022 | } else | ||
5023 | perf_event_disable(event); | ||
5024 | } | 4622 | } |
5025 | 4623 | ||
5026 | if (event->overflow_handler) | 4624 | if (event->overflow_handler) |
5027 | event->overflow_handler(event, nmi, data, regs); | 4625 | event->overflow_handler(event, data, regs); |
5028 | else | 4626 | else |
5029 | perf_event_output(event, nmi, data, regs); | 4627 | perf_event_output(event, data, regs); |
5030 | 4628 | ||
5031 | if (event->fasync && event->pending_kill) { | 4629 | if (event->fasync && event->pending_kill) { |
5032 | if (nmi) { | 4630 | event->pending_wakeup = 1; |
5033 | event->pending_wakeup = 1; | 4631 | irq_work_queue(&event->pending); |
5034 | irq_work_queue(&event->pending); | ||
5035 | } else | ||
5036 | perf_event_wakeup(event); | ||
5037 | } | 4632 | } |
5038 | 4633 | ||
5039 | return ret; | 4634 | return ret; |
5040 | } | 4635 | } |
5041 | 4636 | ||
5042 | int perf_event_overflow(struct perf_event *event, int nmi, | 4637 | int perf_event_overflow(struct perf_event *event, |
5043 | struct perf_sample_data *data, | 4638 | struct perf_sample_data *data, |
5044 | struct pt_regs *regs) | 4639 | struct pt_regs *regs) |
5045 | { | 4640 | { |
5046 | return __perf_event_overflow(event, nmi, 1, data, regs); | 4641 | return __perf_event_overflow(event, 1, data, regs); |
5047 | } | 4642 | } |
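With the nmi argument removed throughout this series, overflow handlers shrink to three parameters and must always be safe to run from NMI context, deferring any wakeup or disable work through irq_work as the code above now does unconditionally. A sketch of a handler against the new signature; the name and body are hypothetical:

	static void my_overflow_handler(struct perf_event *event,
					struct perf_sample_data *data,
					struct pt_regs *regs)
	{
		/* May run in NMI context: no sleeping, no sleeping locks. */
		pr_debug("overflow on event id %llu\n",
			 (unsigned long long)event->id);
	}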
5048 | 4643 | ||
5049 | /* | 4644 | /* |
@@ -5092,7 +4687,7 @@ again: | |||
5092 | } | 4687 | } |
5093 | 4688 | ||
5094 | static void perf_swevent_overflow(struct perf_event *event, u64 overflow, | 4689 | static void perf_swevent_overflow(struct perf_event *event, u64 overflow, |
5095 | int nmi, struct perf_sample_data *data, | 4690 | struct perf_sample_data *data, |
5096 | struct pt_regs *regs) | 4691 | struct pt_regs *regs) |
5097 | { | 4692 | { |
5098 | struct hw_perf_event *hwc = &event->hw; | 4693 | struct hw_perf_event *hwc = &event->hw; |
@@ -5106,7 +4701,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow, | |||
5106 | return; | 4701 | return; |
5107 | 4702 | ||
5108 | for (; overflow; overflow--) { | 4703 | for (; overflow; overflow--) { |
5109 | if (__perf_event_overflow(event, nmi, throttle, | 4704 | if (__perf_event_overflow(event, throttle, |
5110 | data, regs)) { | 4705 | data, regs)) { |
5111 | /* | 4706 | /* |
5112 | * We inhibit the overflow from happening when | 4707 | * We inhibit the overflow from happening when |
@@ -5119,7 +4714,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow, | |||
5119 | } | 4714 | } |
5120 | 4715 | ||
5121 | static void perf_swevent_event(struct perf_event *event, u64 nr, | 4716 | static void perf_swevent_event(struct perf_event *event, u64 nr, |
5122 | int nmi, struct perf_sample_data *data, | 4717 | struct perf_sample_data *data, |
5123 | struct pt_regs *regs) | 4718 | struct pt_regs *regs) |
5124 | { | 4719 | { |
5125 | struct hw_perf_event *hwc = &event->hw; | 4720 | struct hw_perf_event *hwc = &event->hw; |
@@ -5133,12 +4728,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr, | |||
5133 | return; | 4728 | return; |
5134 | 4729 | ||
5135 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) | 4730 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) |
5136 | return perf_swevent_overflow(event, 1, nmi, data, regs); | 4731 | return perf_swevent_overflow(event, 1, data, regs); |
5137 | 4732 | ||
5138 | if (local64_add_negative(nr, &hwc->period_left)) | 4733 | if (local64_add_negative(nr, &hwc->period_left)) |
5139 | return; | 4734 | return; |
5140 | 4735 | ||
5141 | perf_swevent_overflow(event, 0, nmi, data, regs); | 4736 | perf_swevent_overflow(event, 0, data, regs); |
5142 | } | 4737 | } |
5143 | 4738 | ||
5144 | static int perf_exclude_event(struct perf_event *event, | 4739 | static int perf_exclude_event(struct perf_event *event, |
@@ -5226,7 +4821,7 @@ find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) | |||
5226 | } | 4821 | } |
5227 | 4822 | ||
5228 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | 4823 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, |
5229 | u64 nr, int nmi, | 4824 | u64 nr, |
5230 | struct perf_sample_data *data, | 4825 | struct perf_sample_data *data, |
5231 | struct pt_regs *regs) | 4826 | struct pt_regs *regs) |
5232 | { | 4827 | { |
@@ -5242,7 +4837,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | |||
5242 | 4837 | ||
5243 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { | 4838 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { |
5244 | if (perf_swevent_match(event, type, event_id, data, regs)) | 4839 | if (perf_swevent_match(event, type, event_id, data, regs)) |
5245 | perf_swevent_event(event, nr, nmi, data, regs); | 4840 | perf_swevent_event(event, nr, data, regs); |
5246 | } | 4841 | } |
5247 | end: | 4842 | end: |
5248 | rcu_read_unlock(); | 4843 | rcu_read_unlock(); |
@@ -5263,8 +4858,7 @@ inline void perf_swevent_put_recursion_context(int rctx) | |||
5263 | put_recursion_context(swhash->recursion, rctx); | 4858 | put_recursion_context(swhash->recursion, rctx); |
5264 | } | 4859 | } |
5265 | 4860 | ||
5266 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, | 4861 | void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) |
5267 | struct pt_regs *regs, u64 addr) | ||
5268 | { | 4862 | { |
5269 | struct perf_sample_data data; | 4863 | struct perf_sample_data data; |
5270 | int rctx; | 4864 | int rctx; |
@@ -5276,7 +4870,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi, | |||
5276 | 4870 | ||
5277 | perf_sample_data_init(&data, addr); | 4871 | perf_sample_data_init(&data, addr); |
5278 | 4872 | ||
5279 | do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); | 4873 | do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); |
5280 | 4874 | ||
5281 | perf_swevent_put_recursion_context(rctx); | 4875 | perf_swevent_put_recursion_context(rctx); |
5282 | preempt_enable_notrace(); | 4876 | preempt_enable_notrace(); |
@@ -5524,7 +5118,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, | |||
5524 | 5118 | ||
5525 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { | 5119 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { |
5526 | if (perf_tp_event_match(event, &data, regs)) | 5120 | if (perf_tp_event_match(event, &data, regs)) |
5527 | perf_swevent_event(event, count, 1, &data, regs); | 5121 | perf_swevent_event(event, count, &data, regs); |
5528 | } | 5122 | } |
5529 | 5123 | ||
5530 | perf_swevent_put_recursion_context(rctx); | 5124 | perf_swevent_put_recursion_context(rctx); |
@@ -5617,7 +5211,7 @@ void perf_bp_event(struct perf_event *bp, void *data) | |||
5617 | perf_sample_data_init(&sample, bp->attr.bp_addr); | 5211 | perf_sample_data_init(&sample, bp->attr.bp_addr); |
5618 | 5212 | ||
5619 | if (!bp->hw.state && !perf_exclude_event(bp, regs)) | 5213 | if (!bp->hw.state && !perf_exclude_event(bp, regs)) |
5620 | perf_swevent_event(bp, 1, 1, &sample, regs); | 5214 | perf_swevent_event(bp, 1, &sample, regs); |
5621 | } | 5215 | } |
5622 | #endif | 5216 | #endif |
5623 | 5217 | ||
@@ -5646,7 +5240,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | |||
5646 | 5240 | ||
5647 | if (regs && !perf_exclude_event(event, regs)) { | 5241 | if (regs && !perf_exclude_event(event, regs)) { |
5648 | if (!(event->attr.exclude_idle && current->pid == 0)) | 5242 | if (!(event->attr.exclude_idle && current->pid == 0)) |
5649 | if (perf_event_overflow(event, 0, &data, regs)) | 5243 | if (perf_event_overflow(event, &data, regs)) |
5650 | ret = HRTIMER_NORESTART; | 5244 | ret = HRTIMER_NORESTART; |
5651 | } | 5245 | } |
5652 | 5246 | ||
@@ -5986,6 +5580,7 @@ free_dev: | |||
5986 | } | 5580 | } |
5987 | 5581 | ||
5988 | static struct lock_class_key cpuctx_mutex; | 5582 | static struct lock_class_key cpuctx_mutex; |
5583 | static struct lock_class_key cpuctx_lock; | ||
5989 | 5584 | ||
5990 | int perf_pmu_register(struct pmu *pmu, char *name, int type) | 5585 | int perf_pmu_register(struct pmu *pmu, char *name, int type) |
5991 | { | 5586 | { |
@@ -6036,6 +5631,7 @@ skip_type: | |||
6036 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); | 5631 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
6037 | __perf_event_init_context(&cpuctx->ctx); | 5632 | __perf_event_init_context(&cpuctx->ctx); |
6038 | lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); | 5633 | lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); |
5634 | lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); | ||
6039 | cpuctx->ctx.type = cpu_context; | 5635 | cpuctx->ctx.type = cpu_context; |
6040 | cpuctx->ctx.pmu = pmu; | 5636 | cpuctx->ctx.pmu = pmu; |
6041 | cpuctx->jiffies_interval = 1; | 5637 | cpuctx->jiffies_interval = 1; |
@@ -6150,7 +5746,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, | |||
6150 | struct task_struct *task, | 5746 | struct task_struct *task, |
6151 | struct perf_event *group_leader, | 5747 | struct perf_event *group_leader, |
6152 | struct perf_event *parent_event, | 5748 | struct perf_event *parent_event, |
6153 | perf_overflow_handler_t overflow_handler) | 5749 | perf_overflow_handler_t overflow_handler, |
5750 | void *context) | ||
6154 | { | 5751 | { |
6155 | struct pmu *pmu; | 5752 | struct pmu *pmu; |
6156 | struct perf_event *event; | 5753 | struct perf_event *event; |
@@ -6208,10 +5805,13 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, | |||
6208 | #endif | 5805 | #endif |
6209 | } | 5806 | } |
6210 | 5807 | ||
6211 | if (!overflow_handler && parent_event) | 5808 | if (!overflow_handler && parent_event) { |
6212 | overflow_handler = parent_event->overflow_handler; | 5809 | overflow_handler = parent_event->overflow_handler; |
5810 | context = parent_event->overflow_handler_context; | ||
5811 | } | ||
6213 | 5812 | ||
6214 | event->overflow_handler = overflow_handler; | 5813 | event->overflow_handler = overflow_handler; |
5814 | event->overflow_handler_context = context; | ||
6215 | 5815 | ||
6216 | if (attr->disabled) | 5816 | if (attr->disabled) |
6217 | event->state = PERF_EVENT_STATE_OFF; | 5817 | event->state = PERF_EVENT_STATE_OFF; |
@@ -6354,7 +5954,7 @@ err_size: | |||
6354 | static int | 5954 | static int |
6355 | perf_event_set_output(struct perf_event *event, struct perf_event *output_event) | 5955 | perf_event_set_output(struct perf_event *event, struct perf_event *output_event) |
6356 | { | 5956 | { |
6357 | struct perf_buffer *buffer = NULL, *old_buffer = NULL; | 5957 | struct ring_buffer *rb = NULL, *old_rb = NULL; |
6358 | int ret = -EINVAL; | 5958 | int ret = -EINVAL; |
6359 | 5959 | ||
6360 | if (!output_event) | 5960 | if (!output_event) |
@@ -6371,7 +5971,7 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) | |||
6371 | goto out; | 5971 | goto out; |
6372 | 5972 | ||
6373 | /* | 5973 | /* |
6374 | * If it's not a per-cpu buffer, it must be the same task. | 5974 | * If it's not a per-cpu rb, it must be the same task. |
6375 | */ | 5975 | */ |
6376 | if (output_event->cpu == -1 && output_event->ctx != event->ctx) | 5976 | if (output_event->cpu == -1 && output_event->ctx != event->ctx) |
6377 | goto out; | 5977 | goto out; |
@@ -6383,20 +5983,20 @@ set: | |||
6383 | goto unlock; | 5983 | goto unlock; |
6384 | 5984 | ||
6385 | if (output_event) { | 5985 | if (output_event) { |
6386 | /* get the buffer we want to redirect to */ | 5986 | /* get the rb we want to redirect to */ |
6387 | buffer = perf_buffer_get(output_event); | 5987 | rb = ring_buffer_get(output_event); |
6388 | if (!buffer) | 5988 | if (!rb) |
6389 | goto unlock; | 5989 | goto unlock; |
6390 | } | 5990 | } |
6391 | 5991 | ||
6392 | old_buffer = event->buffer; | 5992 | old_rb = event->rb; |
6393 | rcu_assign_pointer(event->buffer, buffer); | 5993 | rcu_assign_pointer(event->rb, rb); |
6394 | ret = 0; | 5994 | ret = 0; |
6395 | unlock: | 5995 | unlock: |
6396 | mutex_unlock(&event->mmap_mutex); | 5996 | mutex_unlock(&event->mmap_mutex); |
6397 | 5997 | ||
6398 | if (old_buffer) | 5998 | if (old_rb) |
6399 | perf_buffer_put(old_buffer); | 5999 | ring_buffer_put(old_rb); |
6400 | out: | 6000 | out: |
6401 | return ret; | 6001 | return ret; |
6402 | } | 6002 | } |
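perf_event_set_output() backs the PERF_EVENT_IOC_SET_OUTPUT ioctl, which redirects one event's records into another event's ring buffer so a single mmap serves both. A hedged userspace sketch, assuming fd_a already owns an mmap'ed buffer and fd_b is a compatible event:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	/* Route fd_b's output into fd_a's ring buffer. */
	if (ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a) == -1)
		perror("PERF_EVENT_IOC_SET_OUTPUT");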
@@ -6478,7 +6078,8 @@ SYSCALL_DEFINE5(perf_event_open, | |||
6478 | } | 6078 | } |
6479 | } | 6079 | } |
6480 | 6080 | ||
6481 | event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL); | 6081 | event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, |
6082 | NULL, NULL); | ||
6482 | if (IS_ERR(event)) { | 6083 | if (IS_ERR(event)) { |
6483 | err = PTR_ERR(event); | 6084 | err = PTR_ERR(event); |
6484 | goto err_task; | 6085 | goto err_task; |
@@ -6663,7 +6264,8 @@ err_fd: | |||
6663 | struct perf_event * | 6264 | struct perf_event * |
6664 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | 6265 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, |
6665 | struct task_struct *task, | 6266 | struct task_struct *task, |
6666 | perf_overflow_handler_t overflow_handler) | 6267 | perf_overflow_handler_t overflow_handler, |
6268 | void *context) | ||
6667 | { | 6269 | { |
6668 | struct perf_event_context *ctx; | 6270 | struct perf_event_context *ctx; |
6669 | struct perf_event *event; | 6271 | struct perf_event *event; |
@@ -6673,7 +6275,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | |||
6673 | * Get the target context (task or percpu): | 6275 | * Get the target context (task or percpu): |
6674 | */ | 6276 | */ |
6675 | 6277 | ||
6676 | event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler); | 6278 | event = perf_event_alloc(attr, cpu, task, NULL, NULL, |
6279 | overflow_handler, context); | ||
6677 | if (IS_ERR(event)) { | 6280 | if (IS_ERR(event)) { |
6678 | err = PTR_ERR(event); | 6281 | err = PTR_ERR(event); |
6679 | goto err; | 6282 | goto err; |
@@ -6780,7 +6383,6 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) | |||
6780 | * our context. | 6383 | * our context. |
6781 | */ | 6384 | */ |
6782 | child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); | 6385 | child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); |
6783 | task_ctx_sched_out(child_ctx, EVENT_ALL); | ||
6784 | 6386 | ||
6785 | /* | 6387 | /* |
6786 | * Take the context lock here so that if find_get_context is | 6388 | * Take the context lock here so that if find_get_context is |
@@ -6788,6 +6390,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) | |||
6788 | * incremented the context's refcount before we do put_ctx below. | 6390 | * incremented the context's refcount before we do put_ctx below. |
6789 | */ | 6391 | */ |
6790 | raw_spin_lock(&child_ctx->lock); | 6392 | raw_spin_lock(&child_ctx->lock); |
6393 | task_ctx_sched_out(child_ctx); | ||
6791 | child->perf_event_ctxp[ctxn] = NULL; | 6394 | child->perf_event_ctxp[ctxn] = NULL; |
6792 | /* | 6395 | /* |
6793 | * If this context is a clone; unclone it so it can't get | 6396 | * If this context is a clone; unclone it so it can't get |
@@ -6957,7 +6560,7 @@ inherit_event(struct perf_event *parent_event, | |||
6957 | parent_event->cpu, | 6560 | parent_event->cpu, |
6958 | child, | 6561 | child, |
6959 | group_leader, parent_event, | 6562 | group_leader, parent_event, |
6960 | NULL); | 6563 | NULL, NULL); |
6961 | if (IS_ERR(child_event)) | 6564 | if (IS_ERR(child_event)) |
6962 | return child_event; | 6565 | return child_event; |
6963 | get_ctx(child_ctx); | 6566 | get_ctx(child_ctx); |
@@ -6984,6 +6587,8 @@ inherit_event(struct perf_event *parent_event, | |||
6984 | 6587 | ||
6985 | child_event->ctx = child_ctx; | 6588 | child_event->ctx = child_ctx; |
6986 | child_event->overflow_handler = parent_event->overflow_handler; | 6589 | child_event->overflow_handler = parent_event->overflow_handler; |
6590 | child_event->overflow_handler_context | ||
6591 | = parent_event->overflow_handler_context; | ||
6987 | 6592 | ||
6988 | /* | 6593 | /* |
6989 | * Precalculate sample_data sizes | 6594 | * Precalculate sample_data sizes |
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 086adf25a55..b7971d6f38b 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c | |||
@@ -431,9 +431,11 @@ int register_perf_hw_breakpoint(struct perf_event *bp) | |||
431 | struct perf_event * | 431 | struct perf_event * |
432 | register_user_hw_breakpoint(struct perf_event_attr *attr, | 432 | register_user_hw_breakpoint(struct perf_event_attr *attr, |
433 | perf_overflow_handler_t triggered, | 433 | perf_overflow_handler_t triggered, |
434 | void *context, | ||
434 | struct task_struct *tsk) | 435 | struct task_struct *tsk) |
435 | { | 436 | { |
436 | return perf_event_create_kernel_counter(attr, -1, tsk, triggered); | 437 | return perf_event_create_kernel_counter(attr, -1, tsk, triggered, |
438 | context); | ||
437 | } | 439 | } |
438 | EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); | 440 | EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); |
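A sketch of the new context cookie from the caller's side: whatever is passed here is stored by perf_event_alloc() and comes back through event->overflow_handler_context when the breakpoint fires. The state structure and handler are hypothetical; attr is assumed to have been prepared with hw_breakpoint_init():

	struct wp_state {
		unsigned long hits;
	};
	static struct wp_state state;

	static void wp_triggered(struct perf_event *bp,
				 struct perf_sample_data *data,
				 struct pt_regs *regs)
	{
		struct wp_state *st = bp->overflow_handler_context;

		st->hits++;	/* runs in hard interrupt context */
	}

	/* ... */
	bp = register_user_hw_breakpoint(&attr, wp_triggered, &state, tsk);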
439 | 441 | ||
@@ -502,7 +504,8 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); | |||
502 | */ | 504 | */ |
503 | struct perf_event * __percpu * | 505 | struct perf_event * __percpu * |
504 | register_wide_hw_breakpoint(struct perf_event_attr *attr, | 506 | register_wide_hw_breakpoint(struct perf_event_attr *attr, |
505 | perf_overflow_handler_t triggered) | 507 | perf_overflow_handler_t triggered, |
508 | void *context) | ||
506 | { | 509 | { |
507 | struct perf_event * __percpu *cpu_events, **pevent, *bp; | 510 | struct perf_event * __percpu *cpu_events, **pevent, *bp; |
508 | long err; | 511 | long err; |
@@ -515,7 +518,8 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, | |||
515 | get_online_cpus(); | 518 | get_online_cpus(); |
516 | for_each_online_cpu(cpu) { | 519 | for_each_online_cpu(cpu) { |
517 | pevent = per_cpu_ptr(cpu_events, cpu); | 520 | pevent = per_cpu_ptr(cpu_events, cpu); |
518 | bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered); | 521 | bp = perf_event_create_kernel_counter(attr, cpu, NULL, |
522 | triggered, context); | ||
519 | 523 | ||
520 | *pevent = bp; | 524 | *pevent = bp; |
521 | 525 | ||
diff --git a/kernel/events/internal.h b/kernel/events/internal.h new file mode 100644 index 00000000000..09097dd8116 --- /dev/null +++ b/kernel/events/internal.h | |||
@@ -0,0 +1,96 @@ | |||
1 | #ifndef _KERNEL_EVENTS_INTERNAL_H | ||
2 | #define _KERNEL_EVENTS_INTERNAL_H | ||
3 | |||
4 | #define RING_BUFFER_WRITABLE 0x01 | ||
5 | |||
6 | struct ring_buffer { | ||
7 | atomic_t refcount; | ||
8 | struct rcu_head rcu_head; | ||
9 | #ifdef CONFIG_PERF_USE_VMALLOC | ||
10 | struct work_struct work; | ||
11 | int page_order; /* allocation order */ | ||
12 | #endif | ||
13 | int nr_pages; /* nr of data pages */ | ||
14 | int writable; /* are we writable */ | ||
15 | |||
16 | atomic_t poll; /* POLL_ for wakeups */ | ||
17 | |||
18 | local_t head; /* write position */ | ||
19 | local_t nest; /* nested writers */ | ||
20 | local_t events; /* event limit */ | ||
21 | local_t wakeup; /* wakeup stamp */ | ||
22 | local_t lost; /* nr records lost */ | ||
23 | |||
24 | long watermark; /* wakeup watermark */ | ||
25 | |||
26 | struct perf_event_mmap_page *user_page; | ||
27 | void *data_pages[0]; | ||
28 | }; | ||
29 | |||
30 | extern void rb_free(struct ring_buffer *rb); | ||
31 | extern struct ring_buffer * | ||
32 | rb_alloc(int nr_pages, long watermark, int cpu, int flags); | ||
33 | extern void perf_event_wakeup(struct perf_event *event); | ||
34 | |||
35 | extern void | ||
36 | perf_event_header__init_id(struct perf_event_header *header, | ||
37 | struct perf_sample_data *data, | ||
38 | struct perf_event *event); | ||
39 | extern void | ||
40 | perf_event__output_id_sample(struct perf_event *event, | ||
41 | struct perf_output_handle *handle, | ||
42 | struct perf_sample_data *sample); | ||
43 | |||
44 | extern struct page * | ||
45 | perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff); | ||
46 | |||
47 | #ifdef CONFIG_PERF_USE_VMALLOC | ||
48 | /* | ||
49 | * Back perf_mmap() with vmalloc memory. | ||
50 | * | ||
51 | * Required for architectures that have d-cache aliasing issues. | ||
52 | */ | ||
53 | |||
54 | static inline int page_order(struct ring_buffer *rb) | ||
55 | { | ||
56 | return rb->page_order; | ||
57 | } | ||
58 | |||
59 | #else | ||
60 | |||
61 | static inline int page_order(struct ring_buffer *rb) | ||
62 | { | ||
63 | return 0; | ||
64 | } | ||
65 | #endif | ||
66 | |||
67 | static unsigned long perf_data_size(struct ring_buffer *rb) | ||
68 | { | ||
69 | return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); | ||
70 | } | ||
71 | |||
72 | static inline void | ||
73 | __output_copy(struct perf_output_handle *handle, | ||
74 | const void *buf, unsigned int len) | ||
75 | { | ||
76 | do { | ||
77 | unsigned long size = min_t(unsigned long, handle->size, len); | ||
78 | |||
79 | memcpy(handle->addr, buf, size); | ||
80 | |||
81 | len -= size; | ||
82 | handle->addr += size; | ||
83 | buf += size; | ||
84 | handle->size -= size; | ||
85 | if (!handle->size) { | ||
86 | struct ring_buffer *rb = handle->rb; | ||
87 | |||
88 | handle->page++; | ||
89 | handle->page &= rb->nr_pages - 1; | ||
90 | handle->addr = rb->data_pages[handle->page]; | ||
91 | handle->size = PAGE_SIZE << page_order(rb); | ||
92 | } | ||
93 | } while (len); | ||
94 | } | ||
95 | |||
96 | #endif /* _KERNEL_EVENTS_INTERNAL_H */ | ||
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c new file mode 100644 index 00000000000..a2a29205cc0 --- /dev/null +++ b/kernel/events/ring_buffer.c | |||
@@ -0,0 +1,380 @@ | |||
1 | /* | ||
2 | * Performance events ring-buffer code: | ||
3 | * | ||
4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | ||
5 | * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar | ||
6 | * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
7 | * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
8 | * | ||
9 | * For licensing details see kernel-base/COPYING | ||
10 | */ | ||
11 | |||
12 | #include <linux/perf_event.h> | ||
13 | #include <linux/vmalloc.h> | ||
14 | #include <linux/slab.h> | ||
15 | |||
16 | #include "internal.h" | ||
17 | |||
18 | static bool perf_output_space(struct ring_buffer *rb, unsigned long tail, | ||
19 | unsigned long offset, unsigned long head) | ||
20 | { | ||
21 | unsigned long mask; | ||
22 | |||
23 | if (!rb->writable) | ||
24 | return true; | ||
25 | |||
26 | mask = perf_data_size(rb) - 1; | ||
27 | |||
28 | offset = (offset - tail) & mask; | ||
29 | head = (head - tail) & mask; | ||
30 | |||
31 | if ((int)(head - offset) < 0) | ||
32 | return false; | ||
33 | |||
34 | return true; | ||
35 | } | ||
36 | |||
37 | static void perf_output_wakeup(struct perf_output_handle *handle) | ||
38 | { | ||
39 | atomic_set(&handle->rb->poll, POLL_IN); | ||
40 | |||
41 | handle->event->pending_wakeup = 1; | ||
42 | irq_work_queue(&handle->event->pending); | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * We need to ensure a later event doesn't publish a head when a former | ||
47 | * event isn't done writing. However, since we need to deal with NMIs, we | ||
48 | * cannot fully serialize things. | ||
49 | * | ||
50 | * We only publish the head (and generate a wakeup) when the outer-most | ||
51 | * event completes. | ||
52 | */ | ||
53 | static void perf_output_get_handle(struct perf_output_handle *handle) | ||
54 | { | ||
55 | struct ring_buffer *rb = handle->rb; | ||
56 | |||
57 | preempt_disable(); | ||
58 | local_inc(&rb->nest); | ||
59 | handle->wakeup = local_read(&rb->wakeup); | ||
60 | } | ||
61 | |||
62 | static void perf_output_put_handle(struct perf_output_handle *handle) | ||
63 | { | ||
64 | struct ring_buffer *rb = handle->rb; | ||
65 | unsigned long head; | ||
66 | |||
67 | again: | ||
68 | head = local_read(&rb->head); | ||
69 | |||
70 | /* | ||
71 | * IRQ/NMI can happen here, which means we can miss a head update. | ||
72 | */ | ||
73 | |||
74 | if (!local_dec_and_test(&rb->nest)) | ||
75 | goto out; | ||
76 | |||
77 | /* | ||
78 | * Publish the known good head. Rely on the full barrier implied | ||
79 | * by atomic_dec_and_test() to order the rb->head read and this | ||
80 | * write. | ||
81 | */ | ||
82 | rb->user_page->data_head = head; | ||
83 | |||
84 | /* | ||
85 | * Now check if we missed an update, rely on the (compiler) | ||
86 | * barrier in atomic_dec_and_test() to re-read rb->head. | ||
87 | */ | ||
88 | if (unlikely(head != local_read(&rb->head))) { | ||
89 | local_inc(&rb->nest); | ||
90 | goto again; | ||
91 | } | ||
92 | |||
93 | if (handle->wakeup != local_read(&rb->wakeup)) | ||
94 | perf_output_wakeup(handle); | ||
95 | |||
96 | out: | ||
97 | preempt_enable(); | ||
98 | } | ||
99 | |||
100 | int perf_output_begin(struct perf_output_handle *handle, | ||
101 | struct perf_event *event, unsigned int size) | ||
102 | { | ||
103 | struct ring_buffer *rb; | ||
104 | unsigned long tail, offset, head; | ||
105 | int have_lost; | ||
106 | struct perf_sample_data sample_data; | ||
107 | struct { | ||
108 | struct perf_event_header header; | ||
109 | u64 id; | ||
110 | u64 lost; | ||
111 | } lost_event; | ||
112 | |||
113 | rcu_read_lock(); | ||
114 | /* | ||
115 | * For inherited events we send all the output towards the parent. | ||
116 | */ | ||
117 | if (event->parent) | ||
118 | event = event->parent; | ||
119 | |||
120 | rb = rcu_dereference(event->rb); | ||
121 | if (!rb) | ||
122 | goto out; | ||
123 | |||
124 | handle->rb = rb; | ||
125 | handle->event = event; | ||
126 | |||
127 | if (!rb->nr_pages) | ||
128 | goto out; | ||
129 | |||
130 | have_lost = local_read(&rb->lost); | ||
131 | if (have_lost) { | ||
132 | lost_event.header.size = sizeof(lost_event); | ||
133 | perf_event_header__init_id(&lost_event.header, &sample_data, | ||
134 | event); | ||
135 | size += lost_event.header.size; | ||
136 | } | ||
137 | |||
138 | perf_output_get_handle(handle); | ||
139 | |||
140 | do { | ||
141 | /* | ||
142 | * Userspace could choose to issue a mb() before updating the | ||
143 | * tail pointer, so that all reads are completed before the | ||
144 | * write is issued. | ||
145 | */ | ||
146 | tail = ACCESS_ONCE(rb->user_page->data_tail); | ||
147 | smp_rmb(); | ||
148 | offset = head = local_read(&rb->head); | ||
149 | head += size; | ||
150 | if (unlikely(!perf_output_space(rb, tail, offset, head))) | ||
151 | goto fail; | ||
152 | } while (local_cmpxchg(&rb->head, offset, head) != offset); | ||
153 | |||
154 | if (head - local_read(&rb->wakeup) > rb->watermark) | ||
155 | local_add(rb->watermark, &rb->wakeup); | ||
156 | |||
157 | handle->page = offset >> (PAGE_SHIFT + page_order(rb)); | ||
158 | handle->page &= rb->nr_pages - 1; | ||
159 | handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1); | ||
160 | handle->addr = rb->data_pages[handle->page]; | ||
161 | handle->addr += handle->size; | ||
162 | handle->size = (PAGE_SIZE << page_order(rb)) - handle->size; | ||
163 | |||
164 | if (have_lost) { | ||
165 | lost_event.header.type = PERF_RECORD_LOST; | ||
166 | lost_event.header.misc = 0; | ||
167 | lost_event.id = event->id; | ||
168 | lost_event.lost = local_xchg(&rb->lost, 0); | ||
169 | |||
170 | perf_output_put(handle, lost_event); | ||
171 | perf_event__output_id_sample(event, handle, &sample_data); | ||
172 | } | ||
173 | |||
174 | return 0; | ||
175 | |||
176 | fail: | ||
177 | local_inc(&rb->lost); | ||
178 | perf_output_put_handle(handle); | ||
179 | out: | ||
180 | rcu_read_unlock(); | ||
181 | |||
182 | return -ENOSPC; | ||
183 | } | ||
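The do/while loop above pairs with a reader in userspace; the mb() mentioned in the comment is the reader's obligation before publishing data_tail. A hedged userspace sketch of that pairing, with consume() standing in for whatever parses the records:

	/* Hypothetical reader over the mmap'ed area; 'up' is the first page. */
	static void drain_ring(struct perf_event_mmap_page *up, char *data,
			       void (*consume)(char *data, __u64 from, __u64 to))
	{
		__u64 tail = up->data_tail;
		__u64 head = up->data_head;

		__sync_synchronize();		/* rmb: read data only after head */
		consume(data, tail, head);
		__sync_synchronize();		/* the mb() the comment asks for */
		up->data_tail = head;		/* publish the consumed position */
	}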
184 | |||
185 | void perf_output_copy(struct perf_output_handle *handle, | ||
186 | const void *buf, unsigned int len) | ||
187 | { | ||
188 | __output_copy(handle, buf, len); | ||
189 | } | ||
190 | |||
191 | void perf_output_end(struct perf_output_handle *handle) | ||
192 | { | ||
193 | perf_output_put_handle(handle); | ||
194 | rcu_read_unlock(); | ||
195 | } | ||
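Taken together, perf_output_begin(), perf_output_copy()/perf_output_put() and perf_output_end() form the canonical emit sequence under the new three-argument begin. A hedged kernel-side sketch for a hypothetical fixed-size record:

	struct {
		struct perf_event_header header;
		u64 value;
	} rec;
	struct perf_output_handle handle;

	rec.header.type = PERF_RECORD_SAMPLE;	/* record type as appropriate */
	rec.header.misc = 0;
	rec.header.size = sizeof(rec);
	rec.value = 42;

	if (!perf_output_begin(&handle, event, rec.header.size)) {
		perf_output_put(&handle, rec);	/* copies sizeof(rec) bytes */
		perf_output_end(&handle);
	}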
196 | |||
197 | static void | ||
198 | ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) | ||
199 | { | ||
200 | long max_size = perf_data_size(rb); | ||
201 | |||
202 | if (watermark) | ||
203 | rb->watermark = min(max_size, watermark); | ||
204 | |||
205 | if (!rb->watermark) | ||
206 | rb->watermark = max_size / 2; | ||
207 | |||
208 | if (flags & RING_BUFFER_WRITABLE) | ||
209 | rb->writable = 1; | ||
210 | |||
211 | atomic_set(&rb->refcount, 1); | ||
212 | } | ||
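A worked example of the defaulting above, assuming 4 KiB pages:

	/*
	 * nr_pages = 8      ->  perf_data_size(rb) = 8 * 4096 = 32768
	 * watermark = 0     ->  rb->watermark = 32768 / 2 = 16384
	 * watermark = 10^6  ->  clamped to min(32768, 1000000) = 32768
	 */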
213 | |||
214 | #ifndef CONFIG_PERF_USE_VMALLOC | ||
215 | |||
216 | /* | ||
217 | * Back perf_mmap() with regular GFP_KERNEL-0 pages. | ||
218 | */ | ||
219 | |||
220 | struct page * | ||
221 | perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) | ||
222 | { | ||
223 | if (pgoff > rb->nr_pages) | ||
224 | return NULL; | ||
225 | |||
226 | if (pgoff == 0) | ||
227 | return virt_to_page(rb->user_page); | ||
228 | |||
229 | return virt_to_page(rb->data_pages[pgoff - 1]); | ||
230 | } | ||
231 | |||
232 | static void *perf_mmap_alloc_page(int cpu) | ||
233 | { | ||
234 | struct page *page; | ||
235 | int node; | ||
236 | |||
237 | node = (cpu == -1) ? cpu : cpu_to_node(cpu); | ||
238 | page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); | ||
239 | if (!page) | ||
240 | return NULL; | ||
241 | |||
242 | return page_address(page); | ||
243 | } | ||
244 | |||
245 | struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) | ||
246 | { | ||
247 | struct ring_buffer *rb; | ||
248 | unsigned long size; | ||
249 | int i; | ||
250 | |||
251 | size = sizeof(struct ring_buffer); | ||
252 | size += nr_pages * sizeof(void *); | ||
253 | |||
254 | rb = kzalloc(size, GFP_KERNEL); | ||
255 | if (!rb) | ||
256 | goto fail; | ||
257 | |||
258 | rb->user_page = perf_mmap_alloc_page(cpu); | ||
259 | if (!rb->user_page) | ||
260 | goto fail_user_page; | ||
261 | |||
262 | for (i = 0; i < nr_pages; i++) { | ||
263 | rb->data_pages[i] = perf_mmap_alloc_page(cpu); | ||
264 | if (!rb->data_pages[i]) | ||
265 | goto fail_data_pages; | ||
266 | } | ||
267 | |||
268 | rb->nr_pages = nr_pages; | ||
269 | |||
270 | ring_buffer_init(rb, watermark, flags); | ||
271 | |||
272 | return rb; | ||
273 | |||
274 | fail_data_pages: | ||
275 | for (i--; i >= 0; i--) | ||
276 | free_page((unsigned long)rb->data_pages[i]); | ||
277 | |||
278 | free_page((unsigned long)rb->user_page); | ||
279 | |||
280 | fail_user_page: | ||
281 | kfree(rb); | ||
282 | |||
283 | fail: | ||
284 | return NULL; | ||
285 | } | ||
286 | |||
287 | static void perf_mmap_free_page(unsigned long addr) | ||
288 | { | ||
289 | struct page *page = virt_to_page((void *)addr); | ||
290 | |||
291 | page->mapping = NULL; | ||
292 | __free_page(page); | ||
293 | } | ||
294 | |||
295 | void rb_free(struct ring_buffer *rb) | ||
296 | { | ||
297 | int i; | ||
298 | |||
299 | perf_mmap_free_page((unsigned long)rb->user_page); | ||
300 | for (i = 0; i < rb->nr_pages; i++) | ||
301 | perf_mmap_free_page((unsigned long)rb->data_pages[i]); | ||
302 | kfree(rb); | ||
303 | } | ||
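A hedged usage sketch of the allocator pair above. nr_pages should be a power of two, since the output path masks page indices with nr_pages - 1; perf_mmap() is expected to reject other sizes before this path is reached:

	struct ring_buffer *rb;

	rb = rb_alloc(8 /* data pages */, 0 /* default watermark */,
		      -1 /* no CPU affinity */, RING_BUFFER_WRITABLE);
	if (!rb)
		return -ENOMEM;

	/* ... publish with rcu_assign_pointer(event->rb, rb), as perf_mmap() does ... */

	rb_free(rb);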
304 | |||
305 | #else | ||
306 | |||
307 | struct page * | ||
308 | perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) | ||
309 | { | ||
310 | if (pgoff > (1UL << page_order(rb))) | ||
311 | return NULL; | ||
312 | |||
313 | return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE); | ||
314 | } | ||
315 | |||
316 | static void perf_mmap_unmark_page(void *addr) | ||
317 | { | ||
318 | struct page *page = vmalloc_to_page(addr); | ||
319 | |||
320 | page->mapping = NULL; | ||
321 | } | ||
322 | |||
323 | static void rb_free_work(struct work_struct *work) | ||
324 | { | ||
325 | struct ring_buffer *rb; | ||
326 | void *base; | ||
327 | int i, nr; | ||
328 | |||
329 | rb = container_of(work, struct ring_buffer, work); | ||
330 | nr = 1 << page_order(rb); | ||
331 | |||
332 | base = rb->user_page; | ||
333 | for (i = 0; i < nr + 1; i++) | ||
334 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); | ||
335 | |||
336 | vfree(base); | ||
337 | kfree(rb); | ||
338 | } | ||
339 | |||
340 | void rb_free(struct ring_buffer *rb) | ||
341 | { | ||
342 | schedule_work(&rb->work); | ||
343 | } | ||
344 | |||
345 | struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) | ||
346 | { | ||
347 | struct ring_buffer *rb; | ||
348 | unsigned long size; | ||
349 | void *all_buf; | ||
350 | |||
351 | size = sizeof(struct ring_buffer); | ||
352 | size += sizeof(void *); | ||
353 | |||
354 | rb = kzalloc(size, GFP_KERNEL); | ||
355 | if (!rb) | ||
356 | goto fail; | ||
357 | |||
358 | INIT_WORK(&rb->work, rb_free_work); | ||
359 | |||
360 | all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); | ||
361 | if (!all_buf) | ||
362 | goto fail_all_buf; | ||
363 | |||
364 | rb->user_page = all_buf; | ||
365 | rb->data_pages[0] = all_buf + PAGE_SIZE; | ||
366 | rb->page_order = ilog2(nr_pages); | ||
367 | rb->nr_pages = 1; | ||
368 | |||
369 | ring_buffer_init(rb, watermark, flags); | ||
370 | |||
371 | return rb; | ||
372 | |||
373 | fail_all_buf: | ||
374 | kfree(rb); | ||
375 | |||
376 | fail: | ||
377 | return NULL; | ||
378 | } | ||
379 | |||
380 | #endif | ||
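In the vmalloc variant the whole buffer is one contiguous mapping, so the struct records a single data pointer plus an order instead of a page array. A worked example of the resulting layout and index math, assuming rb_alloc(8, ...) and 4 KiB pages:

	/*
	 * all_buf:  [ user_page ][ data pg 0 ][ data pg 1 ] ... [ data pg 7 ]
	 *
	 * rb->data_pages[0] = all_buf + PAGE_SIZE
	 * rb->page_order    = ilog2(8) = 3
	 * rb->nr_pages      = 1
	 *
	 * perf_data_size(rb)       = 1 << (PAGE_SHIFT + 3) = 32768 bytes
	 * perf_mmap_to_page(rb, n) is valid for n = 0..8: page 0 is the
	 * user page, pages 1..8 are offsets into the data area.
	 */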
diff --git a/kernel/power/user.c b/kernel/power/user.c index 7d02d33be69..42ddbc6f0de 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp) | |||
113 | if (error) | 113 | if (error) |
114 | pm_notifier_call_chain(PM_POST_RESTORE); | 114 | pm_notifier_call_chain(PM_POST_RESTORE); |
115 | } | 115 | } |
116 | if (error) | 116 | if (error) { |
117 | free_basic_memory_bitmaps(); | ||
117 | atomic_inc(&snapshot_device_available); | 118 | atomic_inc(&snapshot_device_available); |
119 | } | ||
118 | data->frozen = 0; | 120 | data->frozen = 0; |
119 | data->ready = 0; | 121 | data->ready = 0; |
120 | data->platform_support = 0; | 122 | data->platform_support = 0; |
diff --git a/kernel/sched.c b/kernel/sched.c index 3f2e502d609..d08d110b897 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -2220,7 +2220,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
2220 | 2220 | ||
2221 | if (task_cpu(p) != new_cpu) { | 2221 | if (task_cpu(p) != new_cpu) { |
2222 | p->se.nr_migrations++; | 2222 | p->se.nr_migrations++; |
2223 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); | 2223 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); |
2224 | } | 2224 | } |
2225 | 2225 | ||
2226 | __set_task_cpu(p, new_cpu); | 2226 | __set_task_cpu(p, new_cpu); |
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c index eb212f8f8bc..d20c6983aad 100644 --- a/kernel/stacktrace.c +++ b/kernel/stacktrace.c | |||
@@ -26,12 +26,18 @@ void print_stack_trace(struct stack_trace *trace, int spaces) | |||
26 | EXPORT_SYMBOL_GPL(print_stack_trace); | 26 | EXPORT_SYMBOL_GPL(print_stack_trace); |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Architectures that do not implement save_stack_trace_tsk get this | 29 | * Architectures that do not implement save_stack_trace_tsk or |
30 | * weak alias and a once-per-bootup warning (whenever this facility | 30 | * save_stack_trace_regs get this weak alias and a once-per-bootup warning |
31 | * is utilized - for example by procfs): | 31 | * (whenever this facility is utilized - for example by procfs): |
32 | */ | 32 | */ |
33 | __weak void | 33 | __weak void |
34 | save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | 34 | save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) |
35 | { | 35 | { |
36 | WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n"); | 36 | WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n"); |
37 | } | 37 | } |
38 | |||
39 | __weak void | ||
40 | save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) | ||
41 | { | ||
42 | WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n"); | ||
43 | } | ||
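A hedged sketch of a call site for the new weak hook, capturing a trace at exception time; the buffer depth is arbitrary:

	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 0,
	};

	save_stack_trace_regs(regs, &trace);
	print_stack_trace(&trace, 0);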
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 9ffea360a77..fc0f2200541 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -285,16 +285,18 @@ ret: | |||
285 | static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) | 285 | static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) |
286 | { | 286 | { |
287 | struct listener_list *listeners; | 287 | struct listener_list *listeners; |
288 | struct listener *s, *tmp; | 288 | struct listener *s, *tmp, *s2; |
289 | unsigned int cpu; | 289 | unsigned int cpu; |
290 | 290 | ||
291 | if (!cpumask_subset(mask, cpu_possible_mask)) | 291 | if (!cpumask_subset(mask, cpu_possible_mask)) |
292 | return -EINVAL; | 292 | return -EINVAL; |
293 | 293 | ||
294 | s = NULL; | ||
294 | if (isadd == REGISTER) { | 295 | if (isadd == REGISTER) { |
295 | for_each_cpu(cpu, mask) { | 296 | for_each_cpu(cpu, mask) { |
296 | s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, | 297 | if (!s) |
297 | cpu_to_node(cpu)); | 298 | s = kmalloc_node(sizeof(struct listener), |
299 | GFP_KERNEL, cpu_to_node(cpu)); | ||
298 | if (!s) | 300 | if (!s) |
299 | goto cleanup; | 301 | goto cleanup; |
300 | s->pid = pid; | 302 | s->pid = pid; |
@@ -303,9 +305,16 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) | |||
303 | 305 | ||
304 | listeners = &per_cpu(listener_array, cpu); | 306 | listeners = &per_cpu(listener_array, cpu); |
305 | down_write(&listeners->sem); | 307 | down_write(&listeners->sem); |
308 | list_for_each_entry_safe(s2, tmp, &listeners->list, list) { | ||
309 | if (s2->pid == pid) | ||
310 | goto next_cpu; | ||
311 | } | ||
306 | list_add(&s->list, &listeners->list); | 312 | list_add(&s->list, &listeners->list); |
313 | s = NULL; | ||
314 | next_cpu: | ||
307 | up_write(&listeners->sem); | 315 | up_write(&listeners->sem); |
308 | } | 316 | } |
317 | kfree(s); | ||
309 | return 0; | 318 | return 0; |
310 | } | 319 | } |
311 | 320 | ||
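The taskstats fix closes two holes at once: a pid could previously be added to a CPU's listener list twice, and an allocation was made per CPU even when it would not be used. The new flow carries one spare allocation from CPU to CPU, consumes it only when the pid is absent, and frees a leftover spare at the end. A condensed userspace sketch of that control flow (toy singly-linked lists, illustrative names):

    #include <stdlib.h>

    struct listener { int pid; struct listener *next; };

    static int register_pid(struct listener **lists, int ncpus, int pid)
    {
            struct listener *s = NULL;

            for (int cpu = 0; cpu < ncpus; cpu++) {
                    if (!s)                 /* reuse the spare if one is left */
                            s = malloc(sizeof(*s));
                    if (!s)
                            return -1;      /* -ENOMEM in the kernel version */
                    s->pid = pid;

                    int dup = 0;
                    for (struct listener *it = lists[cpu]; it; it = it->next)
                            if (it->pid == pid) {
                                    dup = 1;        /* already registered here */
                                    break;
                            }
                    if (dup)
                            continue;       /* keep s as the spare for the next cpu */

                    s->next = lists[cpu];   /* consume the spare */
                    lists[cpu] = s;
                    s = NULL;
            }
            free(s);                        /* drop an unused spare, if any */
            return 0;
    }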
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 2d966244ea6..59f369f98a0 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c | |||
@@ -42,15 +42,75 @@ static struct alarm_base { | |||
42 | clockid_t base_clockid; | 42 | clockid_t base_clockid; |
43 | } alarm_bases[ALARM_NUMTYPE]; | 43 | } alarm_bases[ALARM_NUMTYPE]; |
44 | 44 | ||
45 | /* freezer delta & lock used to handle clock_nanosleep triggered wakeups */ | ||
46 | static ktime_t freezer_delta; | ||
47 | static DEFINE_SPINLOCK(freezer_delta_lock); | ||
48 | |||
45 | #ifdef CONFIG_RTC_CLASS | 49 | #ifdef CONFIG_RTC_CLASS |
46 | /* rtc timer and device for setting alarm wakeups at suspend */ | 50 | /* rtc timer and device for setting alarm wakeups at suspend */ |
47 | static struct rtc_timer rtctimer; | 51 | static struct rtc_timer rtctimer; |
48 | static struct rtc_device *rtcdev; | 52 | static struct rtc_device *rtcdev; |
49 | #endif | 53 | static DEFINE_SPINLOCK(rtcdev_lock); |
50 | 54 | ||
51 | /* freezer delta & lock used to handle clock_nanosleep triggered wakeups */ | 55 | /** |
52 | static ktime_t freezer_delta; | 56 | * has_wakealarm - check whether the rtc device has wakealarm ability |
53 | static DEFINE_SPINLOCK(freezer_delta_lock); | 57 | * @dev: current device |
58 | * @name_ptr: name to be returned | ||
59 | * | ||
60 | * This helper function checks to see if the rtc device can wake | ||
61 | * from suspend. | ||
62 | */ | ||
63 | static int has_wakealarm(struct device *dev, void *name_ptr) | ||
64 | { | ||
65 | struct rtc_device *candidate = to_rtc_device(dev); | ||
66 | |||
67 | if (!candidate->ops->set_alarm) | ||
68 | return 0; | ||
69 | if (!device_may_wakeup(candidate->dev.parent)) | ||
70 | return 0; | ||
71 | |||
72 | *(const char **)name_ptr = dev_name(dev); | ||
73 | return 1; | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * alarmtimer_get_rtcdev - Return the selected rtc device | ||
78 | * | ||
79 | * This function returns the rtc device to use for wakealarms. | ||
80 | * If one has not already been chosen, it checks to see if a | ||
81 | * functional rtc device is available. | ||
82 | */ | ||
83 | static struct rtc_device *alarmtimer_get_rtcdev(void) | ||
84 | { | ||
85 | struct device *dev; | ||
86 | char *str; | ||
87 | unsigned long flags; | ||
88 | struct rtc_device *ret; | ||
89 | |||
90 | spin_lock_irqsave(&rtcdev_lock, flags); | ||
91 | if (!rtcdev) { | ||
92 | /* Find an rtc device and init the rtc_timer */ | ||
93 | dev = class_find_device(rtc_class, NULL, &str, has_wakealarm); | ||
94 | /* If we have a device then str is valid. See has_wakealarm() */ | ||
95 | if (dev) { | ||
96 | rtcdev = rtc_class_open(str); | ||
97 | /* | ||
98 | * Drop the reference we got in class_find_device; | ||
99 | * rtc_class_open() takes its own. | ||
100 | */ | ||
101 | put_device(dev); | ||
102 | rtc_timer_init(&rtctimer, NULL, NULL); | ||
103 | } | ||
104 | } | ||
105 | ret = rtcdev; | ||
106 | spin_unlock_irqrestore(&rtcdev_lock, flags); | ||
107 | |||
108 | return ret; | ||
109 | } | ||
110 | #else | ||
111 | #define alarmtimer_get_rtcdev() (0) | ||
112 | #define rtcdev (0) | ||
113 | #endif | ||
54 | 114 | ||
55 | 115 | ||
56 | /** | 116 | /** |
@@ -166,6 +226,7 @@ static int alarmtimer_suspend(struct device *dev) | |||
166 | struct rtc_time tm; | 226 | struct rtc_time tm; |
167 | ktime_t min, now; | 227 | ktime_t min, now; |
168 | unsigned long flags; | 228 | unsigned long flags; |
229 | struct rtc_device *rtc; | ||
169 | int i; | 230 | int i; |
170 | 231 | ||
171 | spin_lock_irqsave(&freezer_delta_lock, flags); | 232 | spin_lock_irqsave(&freezer_delta_lock, flags); |
@@ -173,8 +234,9 @@ static int alarmtimer_suspend(struct device *dev) | |||
173 | freezer_delta = ktime_set(0, 0); | 234 | freezer_delta = ktime_set(0, 0); |
174 | spin_unlock_irqrestore(&freezer_delta_lock, flags); | 235 | spin_unlock_irqrestore(&freezer_delta_lock, flags); |
175 | 236 | ||
237 | rtc = rtcdev; | ||
176 | /* If we have no rtcdev, just return */ | 238 | /* If we have no rtcdev, just return */ |
177 | if (!rtcdev) | 239 | if (!rtc) |
178 | return 0; | 240 | return 0; |
179 | 241 | ||
180 | /* Find the soonest timer to expire*/ | 242 | /* Find the soonest timer to expire*/ |
@@ -199,12 +261,12 @@ static int alarmtimer_suspend(struct device *dev) | |||
199 | WARN_ON(min.tv64 < NSEC_PER_SEC); | 261 | WARN_ON(min.tv64 < NSEC_PER_SEC); |
200 | 262 | ||
201 | /* Setup an rtc timer to fire that far in the future */ | 263 | /* Setup an rtc timer to fire that far in the future */ |
202 | rtc_timer_cancel(rtcdev, &rtctimer); | 264 | rtc_timer_cancel(rtc, &rtctimer); |
203 | rtc_read_time(rtcdev, &tm); | 265 | rtc_read_time(rtc, &tm); |
204 | now = rtc_tm_to_ktime(tm); | 266 | now = rtc_tm_to_ktime(tm); |
205 | now = ktime_add(now, min); | 267 | now = ktime_add(now, min); |
206 | 268 | ||
207 | rtc_timer_start(rtcdev, &rtctimer, now, ktime_set(0, 0)); | 269 | rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0)); |
208 | 270 | ||
209 | return 0; | 271 | return 0; |
210 | } | 272 | } |
@@ -322,6 +384,9 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp) | |||
322 | { | 384 | { |
323 | clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid; | 385 | clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid; |
324 | 386 | ||
387 | if (!alarmtimer_get_rtcdev()) | ||
388 | return -ENOTSUPP; | ||
389 | |||
325 | return hrtimer_get_res(baseid, tp); | 390 | return hrtimer_get_res(baseid, tp); |
326 | } | 391 | } |
327 | 392 | ||
@@ -336,6 +401,9 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp) | |||
336 | { | 401 | { |
337 | struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; | 402 | struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; |
338 | 403 | ||
404 | if (!alarmtimer_get_rtcdev()) | ||
405 | return -ENOTSUPP; | ||
406 | |||
339 | *tp = ktime_to_timespec(base->gettime()); | 407 | *tp = ktime_to_timespec(base->gettime()); |
340 | return 0; | 408 | return 0; |
341 | } | 409 | } |
@@ -351,6 +419,9 @@ static int alarm_timer_create(struct k_itimer *new_timer) | |||
351 | enum alarmtimer_type type; | 419 | enum alarmtimer_type type; |
352 | struct alarm_base *base; | 420 | struct alarm_base *base; |
353 | 421 | ||
422 | if (!alarmtimer_get_rtcdev()) | ||
423 | return -ENOTSUPP; | ||
424 | |||
354 | if (!capable(CAP_WAKE_ALARM)) | 425 | if (!capable(CAP_WAKE_ALARM)) |
355 | return -EPERM; | 426 | return -EPERM; |
356 | 427 | ||
@@ -385,6 +456,9 @@ static void alarm_timer_get(struct k_itimer *timr, | |||
385 | */ | 456 | */ |
386 | static int alarm_timer_del(struct k_itimer *timr) | 457 | static int alarm_timer_del(struct k_itimer *timr) |
387 | { | 458 | { |
459 | if (!rtcdev) | ||
460 | return -ENOTSUPP; | ||
461 | |||
388 | alarm_cancel(&timr->it.alarmtimer); | 462 | alarm_cancel(&timr->it.alarmtimer); |
389 | return 0; | 463 | return 0; |
390 | } | 464 | } |
@@ -402,6 +476,9 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, | |||
402 | struct itimerspec *new_setting, | 476 | struct itimerspec *new_setting, |
403 | struct itimerspec *old_setting) | 477 | struct itimerspec *old_setting) |
404 | { | 478 | { |
479 | if (!rtcdev) | ||
480 | return -ENOTSUPP; | ||
481 | |||
405 | /* Save old values */ | 482 | /* Save old values */ |
406 | old_setting->it_interval = | 483 | old_setting->it_interval = |
407 | ktime_to_timespec(timr->it.alarmtimer.period); | 484 | ktime_to_timespec(timr->it.alarmtimer.period); |
@@ -541,6 +618,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, | |||
541 | int ret = 0; | 618 | int ret = 0; |
542 | struct restart_block *restart; | 619 | struct restart_block *restart; |
543 | 620 | ||
621 | if (!alarmtimer_get_rtcdev()) | ||
622 | return -ENOTSUPP; | ||
623 | |||
544 | if (!capable(CAP_WAKE_ALARM)) | 624 | if (!capable(CAP_WAKE_ALARM)) |
545 | return -EPERM; | 625 | return -EPERM; |
546 | 626 | ||
@@ -638,65 +718,3 @@ static int __init alarmtimer_init(void) | |||
638 | } | 718 | } |
639 | device_initcall(alarmtimer_init); | 719 | device_initcall(alarmtimer_init); |
640 | 720 | ||
641 | #ifdef CONFIG_RTC_CLASS | ||
642 | /** | ||
643 | * has_wakealarm - check rtc device has wakealarm ability | ||
644 | * @dev: current device | ||
645 | * @name_ptr: name to be returned | ||
646 | * | ||
647 | * This helper function checks to see if the rtc device can wake | ||
648 | * from suspend. | ||
649 | */ | ||
650 | static int __init has_wakealarm(struct device *dev, void *name_ptr) | ||
651 | { | ||
652 | struct rtc_device *candidate = to_rtc_device(dev); | ||
653 | |||
654 | if (!candidate->ops->set_alarm) | ||
655 | return 0; | ||
656 | if (!device_may_wakeup(candidate->dev.parent)) | ||
657 | return 0; | ||
658 | |||
659 | *(const char **)name_ptr = dev_name(dev); | ||
660 | return 1; | ||
661 | } | ||
662 | |||
663 | /** | ||
664 | * alarmtimer_init_late - Late initializing of alarmtimer code | ||
665 | * | ||
666 | * This function locates a rtc device to use for wakealarms. | ||
667 | * Run as late_initcall to make sure rtc devices have been | ||
668 | * registered. | ||
669 | */ | ||
670 | static int __init alarmtimer_init_late(void) | ||
671 | { | ||
672 | struct device *dev; | ||
673 | char *str; | ||
674 | |||
675 | /* Find an rtc device and init the rtc_timer */ | ||
676 | dev = class_find_device(rtc_class, NULL, &str, has_wakealarm); | ||
677 | /* If we have a device then str is valid. See has_wakealarm() */ | ||
678 | if (dev) { | ||
679 | rtcdev = rtc_class_open(str); | ||
680 | /* | ||
681 | * Drop the reference we got in class_find_device, | ||
682 | * rtc_open takes its own. | ||
683 | */ | ||
684 | put_device(dev); | ||
685 | } | ||
686 | if (!rtcdev) { | ||
687 | printk(KERN_WARNING "No RTC device found, ALARM timers will" | ||
688 | " not wake from suspend"); | ||
689 | } | ||
690 | rtc_timer_init(&rtctimer, NULL, NULL); | ||
691 | |||
692 | return 0; | ||
693 | } | ||
694 | #else | ||
695 | static int __init alarmtimer_init_late(void) | ||
696 | { | ||
697 | printk(KERN_WARNING "Kernel not built with RTC support, ALARM timers" | ||
698 | " will not wake from suspend"); | ||
699 | return 0; | ||
700 | } | ||
701 | #endif | ||
702 | late_initcall(alarmtimer_init_late); | ||
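Net effect of the alarmtimer rework: the late_initcall that located an RTC once at boot is replaced by alarmtimer_get_rtcdev(), which performs the same class_find_device()/rtc_class_open() lookup lazily under a spinlock and retries on every call until it succeeds, while each posix-clock entry point now fails with -ENOTSUPP when no wake-capable RTC exists yet. A generic userspace sketch of the lazy, retried singleton shape (a pthread mutex stands in for the spinlock; all names illustrative):

    #include <pthread.h>
    #include <stddef.h>

    struct device { int id; };

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct device *the_dev;          /* NULL until a lookup succeeds */

    /* Stand-in for the class_find_device()/rtc_class_open() probe;
     * returning NULL would mean "no suitable device registered yet". */
    static struct device *probe_device(void)
    {
            static struct device dev = { 42 };
            return &dev;
    }

    static struct device *get_device_lazy(void)
    {
            struct device *ret;

            pthread_mutex_lock(&dev_lock);
            if (!the_dev)
                    the_dev = probe_device();  /* retried until non-NULL */
            ret = the_dev;
            pthread_mutex_unlock(&dev_lock);
            return ret;             /* NULL maps to -ENOTSUPP in the patch */
    }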
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ef9271b69b4..a0e246e2cee 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -32,7 +32,6 @@ | |||
32 | 32 | ||
33 | #include <trace/events/sched.h> | 33 | #include <trace/events/sched.h> |
34 | 34 | ||
35 | #include <asm/ftrace.h> | ||
36 | #include <asm/setup.h> | 35 | #include <asm/setup.h> |
37 | 36 | ||
38 | #include "trace_output.h" | 37 | #include "trace_output.h" |
@@ -82,8 +81,7 @@ static int ftrace_disabled __read_mostly; | |||
82 | 81 | ||
83 | static DEFINE_MUTEX(ftrace_lock); | 82 | static DEFINE_MUTEX(ftrace_lock); |
84 | 83 | ||
85 | static struct ftrace_ops ftrace_list_end __read_mostly = | 84 | static struct ftrace_ops ftrace_list_end __read_mostly = { |
86 | { | ||
87 | .func = ftrace_stub, | 85 | .func = ftrace_stub, |
88 | }; | 86 | }; |
89 | 87 | ||
@@ -785,8 +783,7 @@ static void unregister_ftrace_profiler(void) | |||
785 | unregister_ftrace_graph(); | 783 | unregister_ftrace_graph(); |
786 | } | 784 | } |
787 | #else | 785 | #else |
788 | static struct ftrace_ops ftrace_profile_ops __read_mostly = | 786 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { |
789 | { | ||
790 | .func = function_profile_call, | 787 | .func = function_profile_call, |
791 | }; | 788 | }; |
792 | 789 | ||
@@ -806,19 +803,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf, | |||
806 | size_t cnt, loff_t *ppos) | 803 | size_t cnt, loff_t *ppos) |
807 | { | 804 | { |
808 | unsigned long val; | 805 | unsigned long val; |
809 | char buf[64]; /* big enough to hold a number */ | ||
810 | int ret; | 806 | int ret; |
811 | 807 | ||
812 | if (cnt >= sizeof(buf)) | 808 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
813 | return -EINVAL; | 809 | if (ret) |
814 | |||
815 | if (copy_from_user(&buf, ubuf, cnt)) | ||
816 | return -EFAULT; | ||
817 | |||
818 | buf[cnt] = 0; | ||
819 | |||
820 | ret = strict_strtoul(buf, 10, &val); | ||
821 | if (ret < 0) | ||
822 | return ret; | 810 | return ret; |
823 | 811 | ||
824 | val = !!val; | 812 | val = !!val; |
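This is the first of several hunks in the series that replace the open-coded bounds check, copy_from_user(), NUL termination, and strict_strtoul() sequence with a single kstrtoul_from_user() call. Roughly what such a helper has to do internally, as a kernel-style sketch (not the kernel's actual implementation, which differs in detail):

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    /* Illustrative only; the real helper is kstrtoul_from_user(). */
    static int kstrtoul_from_user_sketch(const char __user *ubuf, size_t count,
                                         unsigned int base, unsigned long *res)
    {
            char buf[64];           /* big enough to hold a number */

            if (count >= sizeof(buf))
                    return -EINVAL;
            if (copy_from_user(buf, ubuf, count))
                    return -EFAULT;
            buf[count] = '\0';
            return kstrtoul(buf, base, res);    /* 0 or -errno */
    }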
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index b0c7aa40794..731201bf4ac 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -997,15 +997,21 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
997 | unsigned nr_pages) | 997 | unsigned nr_pages) |
998 | { | 998 | { |
999 | struct buffer_page *bpage, *tmp; | 999 | struct buffer_page *bpage, *tmp; |
1000 | unsigned long addr; | ||
1001 | LIST_HEAD(pages); | 1000 | LIST_HEAD(pages); |
1002 | unsigned i; | 1001 | unsigned i; |
1003 | 1002 | ||
1004 | WARN_ON(!nr_pages); | 1003 | WARN_ON(!nr_pages); |
1005 | 1004 | ||
1006 | for (i = 0; i < nr_pages; i++) { | 1005 | for (i = 0; i < nr_pages; i++) { |
1006 | struct page *page; | ||
1007 | /* | ||
1008 | * The __GFP_NORETRY flag makes sure that the allocation fails | ||
1009 | * gracefully without invoking the oom-killer, so the system is | ||
1010 | * not destabilized. | ||
1011 | */ | ||
1007 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), | 1012 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
1008 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); | 1013 | GFP_KERNEL | __GFP_NORETRY, |
1014 | cpu_to_node(cpu_buffer->cpu)); | ||
1009 | if (!bpage) | 1015 | if (!bpage) |
1010 | goto free_pages; | 1016 | goto free_pages; |
1011 | 1017 | ||
@@ -1013,10 +1019,11 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
1013 | 1019 | ||
1014 | list_add(&bpage->list, &pages); | 1020 | list_add(&bpage->list, &pages); |
1015 | 1021 | ||
1016 | addr = __get_free_page(GFP_KERNEL); | 1022 | page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), |
1017 | if (!addr) | 1023 | GFP_KERNEL | __GFP_NORETRY, 0); |
1024 | if (!page) | ||
1018 | goto free_pages; | 1025 | goto free_pages; |
1019 | bpage->page = (void *)addr; | 1026 | bpage->page = page_address(page); |
1020 | rb_init_page(bpage->page); | 1027 | rb_init_page(bpage->page); |
1021 | } | 1028 | } |
1022 | 1029 | ||
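Ring-buffer allocation can ask for a large number of pages in one go, so each page now comes from alloc_pages_node() with __GFP_NORETRY: the allocation stays node-local to the buffer's CPU and fails fast under memory pressure instead of triggering the OOM killer. A minimal kernel-style sketch of the pattern (the wrapper name is illustrative):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/topology.h>

    /* Allocate one node-local page that fails gracefully under pressure. */
    static void *alloc_local_page(int cpu)
    {
            struct page *page;

            page = alloc_pages_node(cpu_to_node(cpu),
                                    GFP_KERNEL | __GFP_NORETRY, 0);
            if (!page)
                    return NULL;            /* caller unwinds; no OOM killer */
            return page_address(page);      /* kernel virtual address */
    }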
@@ -1045,7 +1052,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
1045 | { | 1052 | { |
1046 | struct ring_buffer_per_cpu *cpu_buffer; | 1053 | struct ring_buffer_per_cpu *cpu_buffer; |
1047 | struct buffer_page *bpage; | 1054 | struct buffer_page *bpage; |
1048 | unsigned long addr; | 1055 | struct page *page; |
1049 | int ret; | 1056 | int ret; |
1050 | 1057 | ||
1051 | cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), | 1058 | cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), |
@@ -1067,10 +1074,10 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
1067 | rb_check_bpage(cpu_buffer, bpage); | 1074 | rb_check_bpage(cpu_buffer, bpage); |
1068 | 1075 | ||
1069 | cpu_buffer->reader_page = bpage; | 1076 | cpu_buffer->reader_page = bpage; |
1070 | addr = __get_free_page(GFP_KERNEL); | 1077 | page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); |
1071 | if (!addr) | 1078 | if (!page) |
1072 | goto fail_free_reader; | 1079 | goto fail_free_reader; |
1073 | bpage->page = (void *)addr; | 1080 | bpage->page = page_address(page); |
1074 | rb_init_page(bpage->page); | 1081 | rb_init_page(bpage->page); |
1075 | 1082 | ||
1076 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 1083 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
@@ -1314,7 +1321,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
1314 | unsigned nr_pages, rm_pages, new_pages; | 1321 | unsigned nr_pages, rm_pages, new_pages; |
1315 | struct buffer_page *bpage, *tmp; | 1322 | struct buffer_page *bpage, *tmp; |
1316 | unsigned long buffer_size; | 1323 | unsigned long buffer_size; |
1317 | unsigned long addr; | ||
1318 | LIST_HEAD(pages); | 1324 | LIST_HEAD(pages); |
1319 | int i, cpu; | 1325 | int i, cpu; |
1320 | 1326 | ||
@@ -1375,16 +1381,24 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
1375 | 1381 | ||
1376 | for_each_buffer_cpu(buffer, cpu) { | 1382 | for_each_buffer_cpu(buffer, cpu) { |
1377 | for (i = 0; i < new_pages; i++) { | 1383 | for (i = 0; i < new_pages; i++) { |
1384 | struct page *page; | ||
1385 | /* | ||
1386 | * The __GFP_NORETRY flag makes sure that the allocation | ||
1387 | * fails gracefully without invoking the oom-killer, so | ||
1388 | * the system is not destabilized. | ||
1389 | */ | ||
1378 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), | 1390 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), |
1379 | cache_line_size()), | 1391 | cache_line_size()), |
1380 | GFP_KERNEL, cpu_to_node(cpu)); | 1392 | GFP_KERNEL | __GFP_NORETRY, |
1393 | cpu_to_node(cpu)); | ||
1381 | if (!bpage) | 1394 | if (!bpage) |
1382 | goto free_pages; | 1395 | goto free_pages; |
1383 | list_add(&bpage->list, &pages); | 1396 | list_add(&bpage->list, &pages); |
1384 | addr = __get_free_page(GFP_KERNEL); | 1397 | page = alloc_pages_node(cpu_to_node(cpu), |
1385 | if (!addr) | 1398 | GFP_KERNEL | __GFP_NORETRY, 0); |
1399 | if (!page) | ||
1386 | goto free_pages; | 1400 | goto free_pages; |
1387 | bpage->page = (void *)addr; | 1401 | bpage->page = page_address(page); |
1388 | rb_init_page(bpage->page); | 1402 | rb_init_page(bpage->page); |
1389 | } | 1403 | } |
1390 | } | 1404 | } |
@@ -3730,16 +3744,17 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | |||
3730 | * Returns: | 3744 | * Returns: |
3731 | * The page allocated, or NULL on error. | 3745 | * The page allocated, or NULL on error. |
3732 | */ | 3746 | */ |
3733 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) | 3747 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) |
3734 | { | 3748 | { |
3735 | struct buffer_data_page *bpage; | 3749 | struct buffer_data_page *bpage; |
3736 | unsigned long addr; | 3750 | struct page *page; |
3737 | 3751 | ||
3738 | addr = __get_free_page(GFP_KERNEL); | 3752 | page = alloc_pages_node(cpu_to_node(cpu), |
3739 | if (!addr) | 3753 | GFP_KERNEL | __GFP_NORETRY, 0); |
3754 | if (!page) | ||
3740 | return NULL; | 3755 | return NULL; |
3741 | 3756 | ||
3742 | bpage = (void *)addr; | 3757 | bpage = page_address(page); |
3743 | 3758 | ||
3744 | rb_init_page(bpage); | 3759 | rb_init_page(bpage); |
3745 | 3760 | ||
@@ -3978,20 +3993,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf, | |||
3978 | size_t cnt, loff_t *ppos) | 3993 | size_t cnt, loff_t *ppos) |
3979 | { | 3994 | { |
3980 | unsigned long *p = filp->private_data; | 3995 | unsigned long *p = filp->private_data; |
3981 | char buf[64]; | ||
3982 | unsigned long val; | 3996 | unsigned long val; |
3983 | int ret; | 3997 | int ret; |
3984 | 3998 | ||
3985 | if (cnt >= sizeof(buf)) | 3999 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
3986 | return -EINVAL; | 4000 | if (ret) |
3987 | |||
3988 | if (copy_from_user(&buf, ubuf, cnt)) | ||
3989 | return -EFAULT; | ||
3990 | |||
3991 | buf[cnt] = 0; | ||
3992 | |||
3993 | ret = strict_strtoul(buf, 10, &val); | ||
3994 | if (ret < 0) | ||
3995 | return ret; | 4001 | return ret; |
3996 | 4002 | ||
3997 | if (val) | 4003 | if (val) |
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 302f8a61463..a5457d577b9 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c | |||
@@ -106,7 +106,7 @@ static enum event_status read_page(int cpu) | |||
106 | int inc; | 106 | int inc; |
107 | int i; | 107 | int i; |
108 | 108 | ||
109 | bpage = ring_buffer_alloc_read_page(buffer); | 109 | bpage = ring_buffer_alloc_read_page(buffer, cpu); |
110 | if (!bpage) | 110 | if (!bpage) |
111 | return EVENT_DROPPED; | 111 | return EVENT_DROPPED; |
112 | 112 | ||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ee9c921d7f2..d9c16123f6e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -343,26 +343,27 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | |||
343 | static int trace_stop_count; | 343 | static int trace_stop_count; |
344 | static DEFINE_SPINLOCK(tracing_start_lock); | 344 | static DEFINE_SPINLOCK(tracing_start_lock); |
345 | 345 | ||
346 | static void wakeup_work_handler(struct work_struct *work) | ||
347 | { | ||
348 | wake_up(&trace_wait); | ||
349 | } | ||
350 | |||
351 | static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler); | ||
352 | |||
346 | /** | 353 | /** |
347 | * trace_wake_up - wake up tasks waiting for trace input | 354 | * trace_wake_up - wake up tasks waiting for trace input |
348 | * | 355 | * |
349 | * Simply wakes up any task that is blocked on the trace_wait | 356 | * Schedules a delayed work to wake up any task that is blocked on the |
350 | * queue. These is used with trace_poll for tasks polling the trace. | 357 | * trace_wait queue. This is used with trace_poll for tasks polling the |
358 | * trace. | ||
351 | */ | 359 | */ |
352 | void trace_wake_up(void) | 360 | void trace_wake_up(void) |
353 | { | 361 | { |
354 | int cpu; | 362 | const unsigned long delay = msecs_to_jiffies(2); |
355 | 363 | ||
356 | if (trace_flags & TRACE_ITER_BLOCK) | 364 | if (trace_flags & TRACE_ITER_BLOCK) |
357 | return; | 365 | return; |
358 | /* | 366 | schedule_delayed_work(&wakeup_work, delay); |
359 | * The runqueue_is_locked() can fail, but this is the best we | ||
360 | * have for now: | ||
361 | */ | ||
362 | cpu = get_cpu(); | ||
363 | if (!runqueue_is_locked(cpu)) | ||
364 | wake_up(&trace_wait); | ||
365 | put_cpu(); | ||
366 | } | 367 | } |
367 | 368 | ||
368 | static int __init set_buf_size(char *str) | 369 | static int __init set_buf_size(char *str) |
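trace_wake_up() used to consult runqueue_is_locked() to guess whether an immediate wake_up() was safe; deferring the wake-up to a delayed work item removes the guess entirely, since the handler always runs in process context. A self-contained kernel-style sketch of the idiom (the demo_* names are illustrative):

    #include <linux/workqueue.h>
    #include <linux/wait.h>
    #include <linux/jiffies.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wait);

    static void demo_wakeup_fn(struct work_struct *work)
    {
            wake_up(&demo_wait);    /* always process context, safe to wake */
    }

    static DECLARE_DELAYED_WORK(demo_wakeup_work, demo_wakeup_fn);

    /* Callable from almost any context: it only arms the work item. */
    static void demo_poke_waiters(void)
    {
            schedule_delayed_work(&demo_wakeup_work, msecs_to_jiffies(2));
    }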
@@ -424,6 +425,7 @@ static const char *trace_options[] = { | |||
424 | "graph-time", | 425 | "graph-time", |
425 | "record-cmd", | 426 | "record-cmd", |
426 | "overwrite", | 427 | "overwrite", |
428 | "disable_on_free", | ||
427 | NULL | 429 | NULL |
428 | }; | 430 | }; |
429 | 431 | ||
@@ -1191,6 +1193,18 @@ void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, | |||
1191 | } | 1193 | } |
1192 | EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit); | 1194 | EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit); |
1193 | 1195 | ||
1196 | void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer, | ||
1197 | struct ring_buffer_event *event, | ||
1198 | unsigned long flags, int pc, | ||
1199 | struct pt_regs *regs) | ||
1200 | { | ||
1201 | ring_buffer_unlock_commit(buffer, event); | ||
1202 | |||
1203 | ftrace_trace_stack_regs(buffer, flags, 0, pc, regs); | ||
1204 | ftrace_trace_userstack(buffer, flags, pc); | ||
1205 | } | ||
1206 | EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs); | ||
1207 | |||
1194 | void trace_current_buffer_discard_commit(struct ring_buffer *buffer, | 1208 | void trace_current_buffer_discard_commit(struct ring_buffer *buffer, |
1195 | struct ring_buffer_event *event) | 1209 | struct ring_buffer_event *event) |
1196 | { | 1210 | { |
@@ -1236,7 +1250,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data, | |||
1236 | #ifdef CONFIG_STACKTRACE | 1250 | #ifdef CONFIG_STACKTRACE |
1237 | static void __ftrace_trace_stack(struct ring_buffer *buffer, | 1251 | static void __ftrace_trace_stack(struct ring_buffer *buffer, |
1238 | unsigned long flags, | 1252 | unsigned long flags, |
1239 | int skip, int pc) | 1253 | int skip, int pc, struct pt_regs *regs) |
1240 | { | 1254 | { |
1241 | struct ftrace_event_call *call = &event_kernel_stack; | 1255 | struct ftrace_event_call *call = &event_kernel_stack; |
1242 | struct ring_buffer_event *event; | 1256 | struct ring_buffer_event *event; |
@@ -1255,24 +1269,36 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, | |||
1255 | trace.skip = skip; | 1269 | trace.skip = skip; |
1256 | trace.entries = entry->caller; | 1270 | trace.entries = entry->caller; |
1257 | 1271 | ||
1258 | save_stack_trace(&trace); | 1272 | if (regs) |
1273 | save_stack_trace_regs(regs, &trace); | ||
1274 | else | ||
1275 | save_stack_trace(&trace); | ||
1259 | if (!filter_check_discard(call, entry, buffer, event)) | 1276 | if (!filter_check_discard(call, entry, buffer, event)) |
1260 | ring_buffer_unlock_commit(buffer, event); | 1277 | ring_buffer_unlock_commit(buffer, event); |
1261 | } | 1278 | } |
1262 | 1279 | ||
1280 | void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags, | ||
1281 | int skip, int pc, struct pt_regs *regs) | ||
1282 | { | ||
1283 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | ||
1284 | return; | ||
1285 | |||
1286 | __ftrace_trace_stack(buffer, flags, skip, pc, regs); | ||
1287 | } | ||
1288 | |||
1263 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, | 1289 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, |
1264 | int skip, int pc) | 1290 | int skip, int pc) |
1265 | { | 1291 | { |
1266 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | 1292 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) |
1267 | return; | 1293 | return; |
1268 | 1294 | ||
1269 | __ftrace_trace_stack(buffer, flags, skip, pc); | 1295 | __ftrace_trace_stack(buffer, flags, skip, pc, NULL); |
1270 | } | 1296 | } |
1271 | 1297 | ||
1272 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | 1298 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, |
1273 | int pc) | 1299 | int pc) |
1274 | { | 1300 | { |
1275 | __ftrace_trace_stack(tr->buffer, flags, skip, pc); | 1301 | __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL); |
1276 | } | 1302 | } |
1277 | 1303 | ||
1278 | /** | 1304 | /** |
@@ -1288,7 +1314,7 @@ void trace_dump_stack(void) | |||
1288 | local_save_flags(flags); | 1314 | local_save_flags(flags); |
1289 | 1315 | ||
1290 | /* skipping 3 traces, seems to get us at the caller of this function */ | 1316 | /* skipping 3 traces, seems to get us at the caller of this function */ |
1291 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); | 1317 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL); |
1292 | } | 1318 | } |
1293 | 1319 | ||
1294 | static DEFINE_PER_CPU(int, user_stack_count); | 1320 | static DEFINE_PER_CPU(int, user_stack_count); |
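Threading a pt_regs pointer down to __ftrace_trace_stack() lets callers that already hold interrupted register state (kprobes and similar handlers) unwind from that exact point rather than from the current frame, using the save_stack_trace_regs() hook added earlier in this series. The dispatch reduces to (illustrative wrapper):

    #include <linux/stacktrace.h>
    #include <linux/ptrace.h>

    /* Capture a stack trace from regs when available, else from "here". */
    static void capture_trace(struct stack_trace *trace, struct pt_regs *regs)
    {
            if (regs)
                    save_stack_trace_regs(regs, trace); /* from saved state */
            else
                    save_stack_trace(trace);            /* from current frame */
    }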
@@ -2051,6 +2077,9 @@ void trace_default_header(struct seq_file *m) | |||
2051 | { | 2077 | { |
2052 | struct trace_iterator *iter = m->private; | 2078 | struct trace_iterator *iter = m->private; |
2053 | 2079 | ||
2080 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | ||
2081 | return; | ||
2082 | |||
2054 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 2083 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { |
2055 | /* print nothing if the buffers are empty */ | 2084 | /* print nothing if the buffers are empty */ |
2056 | if (trace_empty(iter)) | 2085 | if (trace_empty(iter)) |
@@ -2701,20 +2730,11 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2701 | size_t cnt, loff_t *ppos) | 2730 | size_t cnt, loff_t *ppos) |
2702 | { | 2731 | { |
2703 | struct trace_array *tr = filp->private_data; | 2732 | struct trace_array *tr = filp->private_data; |
2704 | char buf[64]; | ||
2705 | unsigned long val; | 2733 | unsigned long val; |
2706 | int ret; | 2734 | int ret; |
2707 | 2735 | ||
2708 | if (cnt >= sizeof(buf)) | 2736 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
2709 | return -EINVAL; | 2737 | if (ret) |
2710 | |||
2711 | if (copy_from_user(&buf, ubuf, cnt)) | ||
2712 | return -EFAULT; | ||
2713 | |||
2714 | buf[cnt] = 0; | ||
2715 | |||
2716 | ret = strict_strtoul(buf, 10, &val); | ||
2717 | if (ret < 0) | ||
2718 | return ret; | 2738 | return ret; |
2719 | 2739 | ||
2720 | val = !!val; | 2740 | val = !!val; |
@@ -2767,7 +2787,7 @@ int tracer_init(struct tracer *t, struct trace_array *tr) | |||
2767 | return t->init(tr); | 2787 | return t->init(tr); |
2768 | } | 2788 | } |
2769 | 2789 | ||
2770 | static int tracing_resize_ring_buffer(unsigned long size) | 2790 | static int __tracing_resize_ring_buffer(unsigned long size) |
2771 | { | 2791 | { |
2772 | int ret; | 2792 | int ret; |
2773 | 2793 | ||
@@ -2819,6 +2839,41 @@ static int tracing_resize_ring_buffer(unsigned long size) | |||
2819 | return ret; | 2839 | return ret; |
2820 | } | 2840 | } |
2821 | 2841 | ||
2842 | static ssize_t tracing_resize_ring_buffer(unsigned long size) | ||
2843 | { | ||
2844 | int cpu, ret = size; | ||
2845 | |||
2846 | mutex_lock(&trace_types_lock); | ||
2847 | |||
2848 | tracing_stop(); | ||
2849 | |||
2850 | /* disable all cpu buffers */ | ||
2851 | for_each_tracing_cpu(cpu) { | ||
2852 | if (global_trace.data[cpu]) | ||
2853 | atomic_inc(&global_trace.data[cpu]->disabled); | ||
2854 | if (max_tr.data[cpu]) | ||
2855 | atomic_inc(&max_tr.data[cpu]->disabled); | ||
2856 | } | ||
2857 | |||
2858 | if (size != global_trace.entries) | ||
2859 | ret = __tracing_resize_ring_buffer(size); | ||
2860 | |||
2861 | if (ret < 0) | ||
2862 | ret = -ENOMEM; | ||
2863 | |||
2864 | for_each_tracing_cpu(cpu) { | ||
2865 | if (global_trace.data[cpu]) | ||
2866 | atomic_dec(&global_trace.data[cpu]->disabled); | ||
2867 | if (max_tr.data[cpu]) | ||
2868 | atomic_dec(&max_tr.data[cpu]->disabled); | ||
2869 | } | ||
2870 | |||
2871 | tracing_start(); | ||
2872 | mutex_unlock(&trace_types_lock); | ||
2873 | |||
2874 | return ret; | ||
2875 | } | ||
2876 | |||
2822 | 2877 | ||
2823 | /** | 2878 | /** |
2824 | * tracing_update_buffers - used by tracing facility to expand ring buffers | 2879 | * tracing_update_buffers - used by tracing facility to expand ring buffers |
@@ -2836,7 +2891,7 @@ int tracing_update_buffers(void) | |||
2836 | 2891 | ||
2837 | mutex_lock(&trace_types_lock); | 2892 | mutex_lock(&trace_types_lock); |
2838 | if (!ring_buffer_expanded) | 2893 | if (!ring_buffer_expanded) |
2839 | ret = tracing_resize_ring_buffer(trace_buf_size); | 2894 | ret = __tracing_resize_ring_buffer(trace_buf_size); |
2840 | mutex_unlock(&trace_types_lock); | 2895 | mutex_unlock(&trace_types_lock); |
2841 | 2896 | ||
2842 | return ret; | 2897 | return ret; |
@@ -2860,7 +2915,7 @@ static int tracing_set_tracer(const char *buf) | |||
2860 | mutex_lock(&trace_types_lock); | 2915 | mutex_lock(&trace_types_lock); |
2861 | 2916 | ||
2862 | if (!ring_buffer_expanded) { | 2917 | if (!ring_buffer_expanded) { |
2863 | ret = tracing_resize_ring_buffer(trace_buf_size); | 2918 | ret = __tracing_resize_ring_buffer(trace_buf_size); |
2864 | if (ret < 0) | 2919 | if (ret < 0) |
2865 | goto out; | 2920 | goto out; |
2866 | ret = 0; | 2921 | ret = 0; |
@@ -2966,20 +3021,11 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf, | |||
2966 | size_t cnt, loff_t *ppos) | 3021 | size_t cnt, loff_t *ppos) |
2967 | { | 3022 | { |
2968 | unsigned long *ptr = filp->private_data; | 3023 | unsigned long *ptr = filp->private_data; |
2969 | char buf[64]; | ||
2970 | unsigned long val; | 3024 | unsigned long val; |
2971 | int ret; | 3025 | int ret; |
2972 | 3026 | ||
2973 | if (cnt >= sizeof(buf)) | 3027 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
2974 | return -EINVAL; | 3028 | if (ret) |
2975 | |||
2976 | if (copy_from_user(&buf, ubuf, cnt)) | ||
2977 | return -EFAULT; | ||
2978 | |||
2979 | buf[cnt] = 0; | ||
2980 | |||
2981 | ret = strict_strtoul(buf, 10, &val); | ||
2982 | if (ret < 0) | ||
2983 | return ret; | 3029 | return ret; |
2984 | 3030 | ||
2985 | *ptr = val * 1000; | 3031 | *ptr = val * 1000; |
@@ -3434,67 +3480,54 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
3434 | size_t cnt, loff_t *ppos) | 3480 | size_t cnt, loff_t *ppos) |
3435 | { | 3481 | { |
3436 | unsigned long val; | 3482 | unsigned long val; |
3437 | char buf[64]; | 3483 | int ret; |
3438 | int ret, cpu; | ||
3439 | |||
3440 | if (cnt >= sizeof(buf)) | ||
3441 | return -EINVAL; | ||
3442 | |||
3443 | if (copy_from_user(&buf, ubuf, cnt)) | ||
3444 | return -EFAULT; | ||
3445 | |||
3446 | buf[cnt] = 0; | ||
3447 | 3484 | ||
3448 | ret = strict_strtoul(buf, 10, &val); | 3485 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
3449 | if (ret < 0) | 3486 | if (ret) |
3450 | return ret; | 3487 | return ret; |
3451 | 3488 | ||
3452 | /* must have at least 1 entry */ | 3489 | /* must have at least 1 entry */ |
3453 | if (!val) | 3490 | if (!val) |
3454 | return -EINVAL; | 3491 | return -EINVAL; |
3455 | 3492 | ||
3456 | mutex_lock(&trace_types_lock); | ||
3457 | |||
3458 | tracing_stop(); | ||
3459 | |||
3460 | /* disable all cpu buffers */ | ||
3461 | for_each_tracing_cpu(cpu) { | ||
3462 | if (global_trace.data[cpu]) | ||
3463 | atomic_inc(&global_trace.data[cpu]->disabled); | ||
3464 | if (max_tr.data[cpu]) | ||
3465 | atomic_inc(&max_tr.data[cpu]->disabled); | ||
3466 | } | ||
3467 | |||
3468 | /* value is in KB */ | 3493 | /* value is in KB */ |
3469 | val <<= 10; | 3494 | val <<= 10; |
3470 | 3495 | ||
3471 | if (val != global_trace.entries) { | 3496 | ret = tracing_resize_ring_buffer(val); |
3472 | ret = tracing_resize_ring_buffer(val); | 3497 | if (ret < 0) |
3473 | if (ret < 0) { | 3498 | return ret; |
3474 | cnt = ret; | ||
3475 | goto out; | ||
3476 | } | ||
3477 | } | ||
3478 | 3499 | ||
3479 | *ppos += cnt; | 3500 | *ppos += cnt; |
3480 | 3501 | ||
3481 | /* If check pages failed, return ENOMEM */ | 3502 | return cnt; |
3482 | if (tracing_disabled) | 3503 | } |
3483 | cnt = -ENOMEM; | ||
3484 | out: | ||
3485 | for_each_tracing_cpu(cpu) { | ||
3486 | if (global_trace.data[cpu]) | ||
3487 | atomic_dec(&global_trace.data[cpu]->disabled); | ||
3488 | if (max_tr.data[cpu]) | ||
3489 | atomic_dec(&max_tr.data[cpu]->disabled); | ||
3490 | } | ||
3491 | 3504 | ||
3492 | tracing_start(); | 3505 | static ssize_t |
3493 | mutex_unlock(&trace_types_lock); | 3506 | tracing_free_buffer_write(struct file *filp, const char __user *ubuf, |
3507 | size_t cnt, loff_t *ppos) | ||
3508 | { | ||
3509 | /* | ||
3510 | * There is no need to read what the user has written; this function | ||
3511 | * exists only so that using "echo" on the file does not report an error | ||
3512 | */ | ||
3513 | |||
3514 | *ppos += cnt; | ||
3494 | 3515 | ||
3495 | return cnt; | 3516 | return cnt; |
3496 | } | 3517 | } |
3497 | 3518 | ||
3519 | static int | ||
3520 | tracing_free_buffer_release(struct inode *inode, struct file *filp) | ||
3521 | { | ||
3522 | /* disable tracing? */ | ||
3523 | if (trace_flags & TRACE_ITER_STOP_ON_FREE) | ||
3524 | tracing_off(); | ||
3525 | /* resize the ring buffer to 0 */ | ||
3526 | tracing_resize_ring_buffer(0); | ||
3527 | |||
3528 | return 0; | ||
3529 | } | ||
3530 | |||
3498 | static int mark_printk(const char *fmt, ...) | 3531 | static int mark_printk(const char *fmt, ...) |
3499 | { | 3532 | { |
3500 | int ret; | 3533 | int ret; |
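The new free_buffer file pairs an accept-anything write handler with a release handler that shrinks the ring buffer to zero (and, with the new disable_on_free option set, stops tracing first). The intended use is to hold the file open for the lifetime of a tracing session so the buffers are reclaimed even if the monitoring process dies. A userspace sketch, assuming debugfs is mounted at the usual location:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            /* Holding this fd keeps the trace buffers alive; closing it
             * (or crashing) runs tracing_free_buffer_release(), which
             * resizes the buffers to zero. */
            int fd = open("/sys/kernel/debug/tracing/free_buffer", O_WRONLY);
            if (fd < 0)
                    return 1;

            /* ... run and monitor the traced workload ... */

            close(fd);
            return 0;
    }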
@@ -3640,6 +3673,11 @@ static const struct file_operations tracing_entries_fops = { | |||
3640 | .llseek = generic_file_llseek, | 3673 | .llseek = generic_file_llseek, |
3641 | }; | 3674 | }; |
3642 | 3675 | ||
3676 | static const struct file_operations tracing_free_buffer_fops = { | ||
3677 | .write = tracing_free_buffer_write, | ||
3678 | .release = tracing_free_buffer_release, | ||
3679 | }; | ||
3680 | |||
3643 | static const struct file_operations tracing_mark_fops = { | 3681 | static const struct file_operations tracing_mark_fops = { |
3644 | .open = tracing_open_generic, | 3682 | .open = tracing_open_generic, |
3645 | .write = tracing_mark_write, | 3683 | .write = tracing_mark_write, |
@@ -3696,7 +3734,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, | |||
3696 | return 0; | 3734 | return 0; |
3697 | 3735 | ||
3698 | if (!info->spare) | 3736 | if (!info->spare) |
3699 | info->spare = ring_buffer_alloc_read_page(info->tr->buffer); | 3737 | info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu); |
3700 | if (!info->spare) | 3738 | if (!info->spare) |
3701 | return -ENOMEM; | 3739 | return -ENOMEM; |
3702 | 3740 | ||
@@ -3853,7 +3891,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
3853 | 3891 | ||
3854 | ref->ref = 1; | 3892 | ref->ref = 1; |
3855 | ref->buffer = info->tr->buffer; | 3893 | ref->buffer = info->tr->buffer; |
3856 | ref->page = ring_buffer_alloc_read_page(ref->buffer); | 3894 | ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu); |
3857 | if (!ref->page) { | 3895 | if (!ref->page) { |
3858 | kfree(ref); | 3896 | kfree(ref); |
3859 | break; | 3897 | break; |
@@ -3862,8 +3900,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
3862 | r = ring_buffer_read_page(ref->buffer, &ref->page, | 3900 | r = ring_buffer_read_page(ref->buffer, &ref->page, |
3863 | len, info->cpu, 1); | 3901 | len, info->cpu, 1); |
3864 | if (r < 0) { | 3902 | if (r < 0) { |
3865 | ring_buffer_free_read_page(ref->buffer, | 3903 | ring_buffer_free_read_page(ref->buffer, ref->page); |
3866 | ref->page); | ||
3867 | kfree(ref); | 3904 | kfree(ref); |
3868 | break; | 3905 | break; |
3869 | } | 3906 | } |
@@ -4099,19 +4136,10 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
4099 | { | 4136 | { |
4100 | struct trace_option_dentry *topt = filp->private_data; | 4137 | struct trace_option_dentry *topt = filp->private_data; |
4101 | unsigned long val; | 4138 | unsigned long val; |
4102 | char buf[64]; | ||
4103 | int ret; | 4139 | int ret; |
4104 | 4140 | ||
4105 | if (cnt >= sizeof(buf)) | 4141 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
4106 | return -EINVAL; | 4142 | if (ret) |
4107 | |||
4108 | if (copy_from_user(&buf, ubuf, cnt)) | ||
4109 | return -EFAULT; | ||
4110 | |||
4111 | buf[cnt] = 0; | ||
4112 | |||
4113 | ret = strict_strtoul(buf, 10, &val); | ||
4114 | if (ret < 0) | ||
4115 | return ret; | 4143 | return ret; |
4116 | 4144 | ||
4117 | if (val != 0 && val != 1) | 4145 | if (val != 0 && val != 1) |
@@ -4159,20 +4187,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
4159 | loff_t *ppos) | 4187 | loff_t *ppos) |
4160 | { | 4188 | { |
4161 | long index = (long)filp->private_data; | 4189 | long index = (long)filp->private_data; |
4162 | char buf[64]; | ||
4163 | unsigned long val; | 4190 | unsigned long val; |
4164 | int ret; | 4191 | int ret; |
4165 | 4192 | ||
4166 | if (cnt >= sizeof(buf)) | 4193 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
4167 | return -EINVAL; | 4194 | if (ret) |
4168 | |||
4169 | if (copy_from_user(&buf, ubuf, cnt)) | ||
4170 | return -EFAULT; | ||
4171 | |||
4172 | buf[cnt] = 0; | ||
4173 | |||
4174 | ret = strict_strtoul(buf, 10, &val); | ||
4175 | if (ret < 0) | ||
4176 | return ret; | 4195 | return ret; |
4177 | 4196 | ||
4178 | if (val != 0 && val != 1) | 4197 | if (val != 0 && val != 1) |
@@ -4365,6 +4384,9 @@ static __init int tracer_init_debugfs(void) | |||
4365 | trace_create_file("buffer_size_kb", 0644, d_tracer, | 4384 | trace_create_file("buffer_size_kb", 0644, d_tracer, |
4366 | &global_trace, &tracing_entries_fops); | 4385 | &global_trace, &tracing_entries_fops); |
4367 | 4386 | ||
4387 | trace_create_file("free_buffer", 0644, d_tracer, | ||
4388 | &global_trace, &tracing_free_buffer_fops); | ||
4389 | |||
4368 | trace_create_file("trace_marker", 0220, d_tracer, | 4390 | trace_create_file("trace_marker", 0220, d_tracer, |
4369 | NULL, &tracing_mark_fops); | 4391 | NULL, &tracing_mark_fops); |
4370 | 4392 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f8074072d11..30a94c26dcb 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -389,6 +389,9 @@ void update_max_tr_single(struct trace_array *tr, | |||
389 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, | 389 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, |
390 | int skip, int pc); | 390 | int skip, int pc); |
391 | 391 | ||
392 | void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags, | ||
393 | int skip, int pc, struct pt_regs *regs); | ||
394 | |||
392 | void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, | 395 | void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, |
393 | int pc); | 396 | int pc); |
394 | 397 | ||
@@ -400,6 +403,12 @@ static inline void ftrace_trace_stack(struct ring_buffer *buffer, | |||
400 | { | 403 | { |
401 | } | 404 | } |
402 | 405 | ||
406 | static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer, | ||
407 | unsigned long flags, int skip, | ||
408 | int pc, struct pt_regs *regs) | ||
409 | { | ||
410 | } | ||
411 | |||
403 | static inline void ftrace_trace_userstack(struct ring_buffer *buffer, | 412 | static inline void ftrace_trace_userstack(struct ring_buffer *buffer, |
404 | unsigned long flags, int pc) | 413 | unsigned long flags, int pc) |
405 | { | 414 | { |
@@ -609,6 +618,7 @@ enum trace_iterator_flags { | |||
609 | TRACE_ITER_GRAPH_TIME = 0x80000, | 618 | TRACE_ITER_GRAPH_TIME = 0x80000, |
610 | TRACE_ITER_RECORD_CMD = 0x100000, | 619 | TRACE_ITER_RECORD_CMD = 0x100000, |
611 | TRACE_ITER_OVERWRITE = 0x200000, | 620 | TRACE_ITER_OVERWRITE = 0x200000, |
621 | TRACE_ITER_STOP_ON_FREE = 0x400000, | ||
612 | }; | 622 | }; |
613 | 623 | ||
614 | /* | 624 | /* |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 3e2a7c91c54..581876f9f38 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -515,20 +515,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
515 | loff_t *ppos) | 515 | loff_t *ppos) |
516 | { | 516 | { |
517 | struct ftrace_event_call *call = filp->private_data; | 517 | struct ftrace_event_call *call = filp->private_data; |
518 | char buf[64]; | ||
519 | unsigned long val; | 518 | unsigned long val; |
520 | int ret; | 519 | int ret; |
521 | 520 | ||
522 | if (cnt >= sizeof(buf)) | 521 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
523 | return -EINVAL; | 522 | if (ret) |
524 | |||
525 | if (copy_from_user(&buf, ubuf, cnt)) | ||
526 | return -EFAULT; | ||
527 | |||
528 | buf[cnt] = 0; | ||
529 | |||
530 | ret = strict_strtoul(buf, 10, &val); | ||
531 | if (ret < 0) | ||
532 | return ret; | 523 | return ret; |
533 | 524 | ||
534 | ret = tracing_update_buffers(); | 525 | ret = tracing_update_buffers(); |
@@ -601,19 +592,10 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
601 | struct event_subsystem *system = filp->private_data; | 592 | struct event_subsystem *system = filp->private_data; |
602 | const char *name = NULL; | 593 | const char *name = NULL; |
603 | unsigned long val; | 594 | unsigned long val; |
604 | char buf[64]; | ||
605 | ssize_t ret; | 595 | ssize_t ret; |
606 | 596 | ||
607 | if (cnt >= sizeof(buf)) | 597 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
608 | return -EINVAL; | 598 | if (ret) |
609 | |||
610 | if (copy_from_user(&buf, ubuf, cnt)) | ||
611 | return -EFAULT; | ||
612 | |||
613 | buf[cnt] = 0; | ||
614 | |||
615 | ret = strict_strtoul(buf, 10, &val); | ||
616 | if (ret < 0) | ||
617 | return ret; | 599 | return ret; |
618 | 600 | ||
619 | ret = tracing_update_buffers(); | 601 | ret = tracing_update_buffers(); |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 962cdb24ed8..e8d6bb55d71 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -74,6 +74,20 @@ static struct tracer_flags tracer_flags = { | |||
74 | 74 | ||
75 | static struct trace_array *graph_array; | 75 | static struct trace_array *graph_array; |
76 | 76 | ||
77 | /* | ||
78 | * The DURATION column is also used to display IRQ signs; the | ||
79 | * following values are used by print_graph_irq and others | ||
80 | * to fill in space in the DURATION column. | ||
81 | */ | ||
82 | enum { | ||
83 | DURATION_FILL_FULL = -1, | ||
84 | DURATION_FILL_START = -2, | ||
85 | DURATION_FILL_END = -3, | ||
86 | }; | ||
87 | |||
88 | static enum print_line_t | ||
89 | print_graph_duration(unsigned long long duration, struct trace_seq *s, | ||
90 | u32 flags); | ||
77 | 91 | ||
78 | /* Add a function return address to the trace stack on thread info.*/ | 92 | /* Add a function return address to the trace stack on thread info.*/ |
79 | int | 93 | int |
@@ -577,32 +591,6 @@ get_return_for_leaf(struct trace_iterator *iter, | |||
577 | return next; | 591 | return next; |
578 | } | 592 | } |
579 | 593 | ||
580 | /* Signal a overhead of time execution to the output */ | ||
581 | static int | ||
582 | print_graph_overhead(unsigned long long duration, struct trace_seq *s, | ||
583 | u32 flags) | ||
584 | { | ||
585 | /* If duration disappear, we don't need anything */ | ||
586 | if (!(flags & TRACE_GRAPH_PRINT_DURATION)) | ||
587 | return 1; | ||
588 | |||
589 | /* Non nested entry or return */ | ||
590 | if (duration == -1) | ||
591 | return trace_seq_printf(s, " "); | ||
592 | |||
593 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
594 | /* Duration exceeded 100 msecs */ | ||
595 | if (duration > 100000ULL) | ||
596 | return trace_seq_printf(s, "! "); | ||
597 | |||
598 | /* Duration exceeded 10 msecs */ | ||
599 | if (duration > 10000ULL) | ||
600 | return trace_seq_printf(s, "+ "); | ||
601 | } | ||
602 | |||
603 | return trace_seq_printf(s, " "); | ||
604 | } | ||
605 | |||
606 | static int print_graph_abs_time(u64 t, struct trace_seq *s) | 594 | static int print_graph_abs_time(u64 t, struct trace_seq *s) |
607 | { | 595 | { |
608 | unsigned long usecs_rem; | 596 | unsigned long usecs_rem; |
@@ -625,34 +613,36 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
625 | addr >= (unsigned long)__irqentry_text_end) | 613 | addr >= (unsigned long)__irqentry_text_end) |
626 | return TRACE_TYPE_UNHANDLED; | 614 | return TRACE_TYPE_UNHANDLED; |
627 | 615 | ||
628 | /* Absolute time */ | 616 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
629 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | 617 | /* Absolute time */ |
630 | ret = print_graph_abs_time(iter->ts, s); | 618 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { |
631 | if (!ret) | 619 | ret = print_graph_abs_time(iter->ts, s); |
632 | return TRACE_TYPE_PARTIAL_LINE; | 620 | if (!ret) |
633 | } | 621 | return TRACE_TYPE_PARTIAL_LINE; |
622 | } | ||
634 | 623 | ||
635 | /* Cpu */ | 624 | /* Cpu */ |
636 | if (flags & TRACE_GRAPH_PRINT_CPU) { | 625 | if (flags & TRACE_GRAPH_PRINT_CPU) { |
637 | ret = print_graph_cpu(s, cpu); | 626 | ret = print_graph_cpu(s, cpu); |
638 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 627 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
639 | return TRACE_TYPE_PARTIAL_LINE; | 628 | return TRACE_TYPE_PARTIAL_LINE; |
640 | } | 629 | } |
641 | 630 | ||
642 | /* Proc */ | 631 | /* Proc */ |
643 | if (flags & TRACE_GRAPH_PRINT_PROC) { | 632 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
644 | ret = print_graph_proc(s, pid); | 633 | ret = print_graph_proc(s, pid); |
645 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 634 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
646 | return TRACE_TYPE_PARTIAL_LINE; | 635 | return TRACE_TYPE_PARTIAL_LINE; |
647 | ret = trace_seq_printf(s, " | "); | 636 | ret = trace_seq_printf(s, " | "); |
648 | if (!ret) | 637 | if (!ret) |
649 | return TRACE_TYPE_PARTIAL_LINE; | 638 | return TRACE_TYPE_PARTIAL_LINE; |
639 | } | ||
650 | } | 640 | } |
651 | 641 | ||
652 | /* No overhead */ | 642 | /* No overhead */ |
653 | ret = print_graph_overhead(-1, s, flags); | 643 | ret = print_graph_duration(DURATION_FILL_START, s, flags); |
654 | if (!ret) | 644 | if (ret != TRACE_TYPE_HANDLED) |
655 | return TRACE_TYPE_PARTIAL_LINE; | 645 | return ret; |
656 | 646 | ||
657 | if (type == TRACE_GRAPH_ENT) | 647 | if (type == TRACE_GRAPH_ENT) |
658 | ret = trace_seq_printf(s, "==========>"); | 648 | ret = trace_seq_printf(s, "==========>"); |
@@ -662,9 +652,10 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
662 | if (!ret) | 652 | if (!ret) |
663 | return TRACE_TYPE_PARTIAL_LINE; | 653 | return TRACE_TYPE_PARTIAL_LINE; |
664 | 654 | ||
665 | /* Don't close the duration column if haven't one */ | 655 | ret = print_graph_duration(DURATION_FILL_END, s, flags); |
666 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 656 | if (ret != TRACE_TYPE_HANDLED) |
667 | trace_seq_printf(s, " |"); | 657 | return ret; |
658 | |||
668 | ret = trace_seq_printf(s, "\n"); | 659 | ret = trace_seq_printf(s, "\n"); |
669 | 660 | ||
670 | if (!ret) | 661 | if (!ret) |
@@ -716,9 +707,49 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
716 | } | 707 | } |
717 | 708 | ||
718 | static enum print_line_t | 709 | static enum print_line_t |
719 | print_graph_duration(unsigned long long duration, struct trace_seq *s) | 710 | print_graph_duration(unsigned long long duration, struct trace_seq *s, |
711 | u32 flags) | ||
720 | { | 712 | { |
721 | int ret; | 713 | int ret = -1; |
714 | |||
715 | if (!(flags & TRACE_GRAPH_PRINT_DURATION) || | ||
716 | !(trace_flags & TRACE_ITER_CONTEXT_INFO)) | ||
717 | return TRACE_TYPE_HANDLED; | ||
718 | |||
719 | /* No real data, just filling the column with spaces */ | ||
720 | switch (duration) { | ||
721 | case DURATION_FILL_FULL: | ||
722 | ret = trace_seq_printf(s, " | "); | ||
723 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | ||
724 | case DURATION_FILL_START: | ||
725 | ret = trace_seq_printf(s, " "); | ||
726 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | ||
727 | case DURATION_FILL_END: | ||
728 | ret = trace_seq_printf(s, " |"); | ||
729 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | ||
730 | } | ||
731 | |||
732 | /* Signal an overhead of execution time to the output */ | ||
733 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
734 | /* Duration exceeded 100 msecs */ | ||
735 | if (duration > 100000ULL) | ||
736 | ret = trace_seq_printf(s, "! "); | ||
737 | /* Duration exceeded 10 msecs */ | ||
738 | else if (duration > 10000ULL) | ||
739 | ret = trace_seq_printf(s, "+ "); | ||
740 | } | ||
741 | |||
742 | /* | ||
743 | * The -1 means we either did not exceed the duration thresholds | ||
744 | * or we don't want to print out the overhead. Either way we need | ||
745 | * to fill out the space. | ||
746 | */ | ||
747 | if (ret == -1) | ||
748 | ret = trace_seq_printf(s, " "); | ||
749 | |||
750 | /* Catch here any failure that happened above */ | ||
751 | if (!ret) | ||
752 | return TRACE_TYPE_PARTIAL_LINE; | ||
722 | 753 | ||
723 | ret = trace_print_graph_duration(duration, s); | 754 | ret = trace_print_graph_duration(duration, s); |
724 | if (ret != TRACE_TYPE_HANDLED) | 755 | if (ret != TRACE_TYPE_HANDLED) |
@@ -767,18 +798,11 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
767 | cpu_data->enter_funcs[call->depth] = 0; | 798 | cpu_data->enter_funcs[call->depth] = 0; |
768 | } | 799 | } |
769 | 800 | ||
770 | /* Overhead */ | 801 | /* Overhead and duration */ |
771 | ret = print_graph_overhead(duration, s, flags); | 802 | ret = print_graph_duration(duration, s, flags); |
772 | if (!ret) | 803 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
773 | return TRACE_TYPE_PARTIAL_LINE; | 804 | return TRACE_TYPE_PARTIAL_LINE; |
774 | 805 | ||
775 | /* Duration */ | ||
776 | if (flags & TRACE_GRAPH_PRINT_DURATION) { | ||
777 | ret = print_graph_duration(duration, s); | ||
778 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
779 | return TRACE_TYPE_PARTIAL_LINE; | ||
780 | } | ||
781 | |||
782 | /* Function */ | 806 | /* Function */ |
783 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 807 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
784 | ret = trace_seq_printf(s, " "); | 808 | ret = trace_seq_printf(s, " "); |
@@ -815,17 +839,10 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
815 | cpu_data->enter_funcs[call->depth] = call->func; | 839 | cpu_data->enter_funcs[call->depth] = call->func; |
816 | } | 840 | } |
817 | 841 | ||
818 | /* No overhead */ | ||
819 | ret = print_graph_overhead(-1, s, flags); | ||
820 | if (!ret) | ||
821 | return TRACE_TYPE_PARTIAL_LINE; | ||
822 | |||
823 | /* No time */ | 842 | /* No time */ |
824 | if (flags & TRACE_GRAPH_PRINT_DURATION) { | 843 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); |
825 | ret = trace_seq_printf(s, " | "); | 844 | if (ret != TRACE_TYPE_HANDLED) |
826 | if (!ret) | 845 | return ret; |
827 | return TRACE_TYPE_PARTIAL_LINE; | ||
828 | } | ||
829 | 846 | ||
830 | /* Function */ | 847 | /* Function */ |
831 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 848 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
@@ -865,6 +882,9 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | |||
865 | return TRACE_TYPE_PARTIAL_LINE; | 882 | return TRACE_TYPE_PARTIAL_LINE; |
866 | } | 883 | } |
867 | 884 | ||
885 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | ||
886 | return 0; | ||
887 | |||
868 | /* Absolute time */ | 888 | /* Absolute time */ |
869 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | 889 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { |
870 | ret = print_graph_abs_time(iter->ts, s); | 890 | ret = print_graph_abs_time(iter->ts, s); |
@@ -1078,18 +1098,11 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
1078 | if (print_graph_prologue(iter, s, 0, 0, flags)) | 1098 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
1079 | return TRACE_TYPE_PARTIAL_LINE; | 1099 | return TRACE_TYPE_PARTIAL_LINE; |
1080 | 1100 | ||
1081 | /* Overhead */ | 1101 | /* Overhead and duration */ |
1082 | ret = print_graph_overhead(duration, s, flags); | 1102 | ret = print_graph_duration(duration, s, flags); |
1083 | if (!ret) | 1103 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
1084 | return TRACE_TYPE_PARTIAL_LINE; | 1104 | return TRACE_TYPE_PARTIAL_LINE; |
1085 | 1105 | ||
1086 | /* Duration */ | ||
1087 | if (flags & TRACE_GRAPH_PRINT_DURATION) { | ||
1088 | ret = print_graph_duration(duration, s); | ||
1089 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
1090 | return TRACE_TYPE_PARTIAL_LINE; | ||
1091 | } | ||
1092 | |||
1093 | /* Closing brace */ | 1106 | /* Closing brace */ |
1094 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 1107 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
1095 | ret = trace_seq_printf(s, " "); | 1108 | ret = trace_seq_printf(s, " "); |
@@ -1146,17 +1159,10 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1146 | if (print_graph_prologue(iter, s, 0, 0, flags)) | 1159 | if (print_graph_prologue(iter, s, 0, 0, flags)) |
1147 | return TRACE_TYPE_PARTIAL_LINE; | 1160 | return TRACE_TYPE_PARTIAL_LINE; |
1148 | 1161 | ||
1149 | /* No overhead */ | ||
1150 | ret = print_graph_overhead(-1, s, flags); | ||
1151 | if (!ret) | ||
1152 | return TRACE_TYPE_PARTIAL_LINE; | ||
1153 | |||
1154 | /* No time */ | 1162 | /* No time */ |
1155 | if (flags & TRACE_GRAPH_PRINT_DURATION) { | 1163 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); |
1156 | ret = trace_seq_printf(s, " | "); | 1164 | if (ret != TRACE_TYPE_HANDLED) |
1157 | if (!ret) | 1165 | return ret; |
1158 | return TRACE_TYPE_PARTIAL_LINE; | ||
1159 | } | ||
1160 | 1166 | ||
1161 | /* Indentation */ | 1167 | /* Indentation */ |
1162 | if (depth > 0) | 1168 | if (depth > 0) |
@@ -1207,7 +1213,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1207 | 1213 | ||
1208 | 1214 | ||
1209 | enum print_line_t | 1215 | enum print_line_t |
1210 | __print_graph_function_flags(struct trace_iterator *iter, u32 flags) | 1216 | print_graph_function_flags(struct trace_iterator *iter, u32 flags) |
1211 | { | 1217 | { |
1212 | struct ftrace_graph_ent_entry *field; | 1218 | struct ftrace_graph_ent_entry *field; |
1213 | struct fgraph_data *data = iter->private; | 1219 | struct fgraph_data *data = iter->private; |
@@ -1270,18 +1276,7 @@ __print_graph_function_flags(struct trace_iterator *iter, u32 flags) | |||
1270 | static enum print_line_t | 1276 | static enum print_line_t |
1271 | print_graph_function(struct trace_iterator *iter) | 1277 | print_graph_function(struct trace_iterator *iter) |
1272 | { | 1278 | { |
1273 | return __print_graph_function_flags(iter, tracer_flags.val); | 1279 | return print_graph_function_flags(iter, tracer_flags.val); |
1274 | } | ||
1275 | |||
1276 | enum print_line_t print_graph_function_flags(struct trace_iterator *iter, | ||
1277 | u32 flags) | ||
1278 | { | ||
1279 | if (trace_flags & TRACE_ITER_LATENCY_FMT) | ||
1280 | flags |= TRACE_GRAPH_PRINT_DURATION; | ||
1281 | else | ||
1282 | flags |= TRACE_GRAPH_PRINT_ABS_TIME; | ||
1283 | |||
1284 | return __print_graph_function_flags(iter, flags); | ||
1285 | } | 1280 | } |
1286 | 1281 | ||
1287 | static enum print_line_t | 1282 | static enum print_line_t |
@@ -1309,8 +1304,7 @@ static void print_lat_header(struct seq_file *s, u32 flags) | |||
1309 | seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces); | 1304 | seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces); |
1310 | seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces); | 1305 | seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces); |
1311 | seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); | 1306 | seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); |
1312 | seq_printf(s, "#%.*s||| / _-=> lock-depth \n", size, spaces); | 1307 | seq_printf(s, "#%.*s||| / \n", size, spaces); |
1313 | seq_printf(s, "#%.*s|||| / \n", size, spaces); | ||
1314 | } | 1308 | } |
1315 | 1309 | ||
1316 | static void __print_graph_headers_flags(struct seq_file *s, u32 flags) | 1310 | static void __print_graph_headers_flags(struct seq_file *s, u32 flags) |
@@ -1329,7 +1323,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags) | |||
1329 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1323 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1330 | seq_printf(s, " TASK/PID "); | 1324 | seq_printf(s, " TASK/PID "); |
1331 | if (lat) | 1325 | if (lat) |
1332 | seq_printf(s, "|||||"); | 1326 | seq_printf(s, "||||"); |
1333 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 1327 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
1334 | seq_printf(s, " DURATION "); | 1328 | seq_printf(s, " DURATION "); |
1335 | seq_printf(s, " FUNCTION CALLS\n"); | 1329 | seq_printf(s, " FUNCTION CALLS\n"); |
@@ -1343,7 +1337,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags) | |||
1343 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1337 | if (flags & TRACE_GRAPH_PRINT_PROC) |
1344 | seq_printf(s, " | | "); | 1338 | seq_printf(s, " | | "); |
1345 | if (lat) | 1339 | if (lat) |
1346 | seq_printf(s, "|||||"); | 1340 | seq_printf(s, "||||"); |
1347 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 1341 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
1348 | seq_printf(s, " | | "); | 1342 | seq_printf(s, " | | "); |
1349 | seq_printf(s, " | | | |\n"); | 1343 | seq_printf(s, " | | | |\n"); |
@@ -1358,15 +1352,16 @@ void print_graph_headers_flags(struct seq_file *s, u32 flags) | |||
1358 | { | 1352 | { |
1359 | struct trace_iterator *iter = s->private; | 1353 | struct trace_iterator *iter = s->private; |
1360 | 1354 | ||
1355 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | ||
1356 | return; | ||
1357 | |||
1361 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | 1358 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { |
1362 | /* print nothing if the buffers are empty */ | 1359 | /* print nothing if the buffers are empty */ |
1363 | if (trace_empty(iter)) | 1360 | if (trace_empty(iter)) |
1364 | return; | 1361 | return; |
1365 | 1362 | ||
1366 | print_trace_header(s, iter); | 1363 | print_trace_header(s, iter); |
1367 | flags |= TRACE_GRAPH_PRINT_DURATION; | 1364 | } |
1368 | } else | ||
1369 | flags |= TRACE_GRAPH_PRINT_ABS_TIME; | ||
1370 | 1365 | ||
1371 | __print_graph_headers_flags(s, flags); | 1366 | __print_graph_headers_flags(s, flags); |
1372 | } | 1367 | } |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index c77424be284..667aa8cc0cf 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -226,7 +226,9 @@ static void irqsoff_trace_close(struct trace_iterator *iter) | |||
226 | } | 226 | } |
227 | 227 | ||
228 | #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \ | 228 | #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \ |
229 | TRACE_GRAPH_PRINT_PROC) | 229 | TRACE_GRAPH_PRINT_PROC | \ |
230 | TRACE_GRAPH_PRINT_ABS_TIME | \ | ||
231 | TRACE_GRAPH_PRINT_DURATION) | ||
230 | 232 | ||
231 | static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) | 233 | static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) |
232 | { | 234 | { |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 27d13b36b8b..7db7b68c6c3 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -1397,7 +1397,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
1397 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1397 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1398 | 1398 | ||
1399 | if (!filter_current_check_discard(buffer, call, entry, event)) | 1399 | if (!filter_current_check_discard(buffer, call, entry, event)) |
1400 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | 1400 | trace_nowake_buffer_unlock_commit_regs(buffer, event, |
1401 | irq_flags, pc, regs); | ||
1401 | } | 1402 | } |
1402 | 1403 | ||
1403 | /* Kretprobe handler */ | 1404 | /* Kretprobe handler */ |
@@ -1429,7 +1430,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
1429 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1430 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1430 | 1431 | ||
1431 | if (!filter_current_check_discard(buffer, call, entry, event)) | 1432 | if (!filter_current_check_discard(buffer, call, entry, event)) |
1432 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | 1433 | trace_nowake_buffer_unlock_commit_regs(buffer, event, |
1434 | irq_flags, pc, regs); | ||
1433 | } | 1435 | } |
1434 | 1436 | ||
1435 | /* Event entry printers */ | 1437 | /* Event entry printers */ |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index f029dd4fd2c..e4a70c0c71b 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -227,7 +227,9 @@ static void wakeup_trace_close(struct trace_iterator *iter) | |||
227 | graph_trace_close(iter); | 227 | graph_trace_close(iter); |
228 | } | 228 | } |
229 | 229 | ||
230 | #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC) | 230 | #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \ |
231 | TRACE_GRAPH_PRINT_ABS_TIME | \ | ||
232 | TRACE_GRAPH_PRINT_DURATION) | ||
231 | 233 | ||
232 | static enum print_line_t wakeup_print_line(struct trace_iterator *iter) | 234 | static enum print_line_t wakeup_print_line(struct trace_iterator *iter) |
233 | { | 235 | { |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index b0b53b8e4c2..77575b386d9 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -156,20 +156,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, | |||
156 | { | 156 | { |
157 | long *ptr = filp->private_data; | 157 | long *ptr = filp->private_data; |
158 | unsigned long val, flags; | 158 | unsigned long val, flags; |
159 | char buf[64]; | ||
160 | int ret; | 159 | int ret; |
161 | int cpu; | 160 | int cpu; |
162 | 161 | ||
163 | if (count >= sizeof(buf)) | 162 | ret = kstrtoul_from_user(ubuf, count, 10, &val); |
164 | return -EINVAL; | 163 | if (ret) |
165 | |||
166 | if (copy_from_user(&buf, ubuf, count)) | ||
167 | return -EFAULT; | ||
168 | |||
169 | buf[count] = 0; | ||
170 | |||
171 | ret = strict_strtoul(buf, 10, &val); | ||
172 | if (ret < 0) | ||
173 | return ret; | 164 | return ret; |
174 | 165 | ||
175 | local_irq_save(flags); | 166 | local_irq_save(flags); |
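The trace_stack.c hunk above swaps an open-coded copy_from_user()/strict_strtoul() sequence for kstrtoul_from_user(), which bounds-checks, copies and parses the user buffer in one call. A minimal sketch of the resulting pattern, with hypothetical names (my_write, my_val):

    /* Sketch only: parse an unsigned long straight from a user buffer. */
    static ssize_t my_write(struct file *filp, const char __user *ubuf,
                            size_t count, loff_t *ppos)
    {
            unsigned long my_val;
            int ret;

            ret = kstrtoul_from_user(ubuf, count, 10, &my_val); /* base 10 */
            if (ret)
                    return ret;     /* -EINVAL or -EFAULT from the helper */
            /* ... act on my_val ... */
            return count;
    }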
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 3d0c56ad479..a933e3a0398 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -200,6 +200,8 @@ static int is_softlockup(unsigned long touch_ts) | |||
200 | } | 200 | } |
201 | 201 | ||
202 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 202 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
203 | void __weak hw_nmi_watchdog_set_attr(struct perf_event_attr *wd_attr) { } | ||
204 | |||
203 | static struct perf_event_attr wd_hw_attr = { | 205 | static struct perf_event_attr wd_hw_attr = { |
204 | .type = PERF_TYPE_HARDWARE, | 206 | .type = PERF_TYPE_HARDWARE, |
205 | .config = PERF_COUNT_HW_CPU_CYCLES, | 207 | .config = PERF_COUNT_HW_CPU_CYCLES, |
@@ -209,7 +211,7 @@ static struct perf_event_attr wd_hw_attr = { | |||
209 | }; | 211 | }; |
210 | 212 | ||
211 | /* Callback function for perf event subsystem */ | 213 | /* Callback function for perf event subsystem */ |
212 | static void watchdog_overflow_callback(struct perf_event *event, int nmi, | 214 | static void watchdog_overflow_callback(struct perf_event *event, |
213 | struct perf_sample_data *data, | 215 | struct perf_sample_data *data, |
214 | struct pt_regs *regs) | 216 | struct pt_regs *regs) |
215 | { | 217 | { |
@@ -368,10 +370,12 @@ static int watchdog_nmi_enable(int cpu) | |||
368 | if (event != NULL) | 370 | if (event != NULL) |
369 | goto out_enable; | 371 | goto out_enable; |
370 | 372 | ||
371 | /* Try to register using hardware perf events */ | ||
372 | wd_attr = &wd_hw_attr; | 373 | wd_attr = &wd_hw_attr; |
373 | wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); | 374 | wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); |
374 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback); | 375 | hw_nmi_watchdog_set_attr(wd_attr); |
376 | |||
377 | /* Try to register using hardware perf events */ | ||
378 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); | ||
375 | if (!IS_ERR(event)) { | 379 | if (!IS_ERR(event)) { |
376 | printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); | 380 | printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); |
377 | goto out_save; | 381 | goto out_save; |
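The watchdog.c hunk is part of a tree-wide perf change: overflow handlers lose their "int nmi" argument and perf_event_create_kernel_counter() gains a trailing context pointer, passed as NULL here. A sketch of the new shapes, using a hypothetical my_overflow_handler:

    /* Sketch: post-change handler signature (the "int nmi" flag is gone). */
    static void my_overflow_handler(struct perf_event *event,
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
    {
            /* react to the counter overflow */
    }

    /* Creation now takes an extra context argument at the end: */
    event = perf_event_create_kernel_counter(attr, cpu, NULL /* task */,
                                             my_overflow_handler,
                                             NULL /* context */);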
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index cf7d027a884..ddffc74cdeb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/limits.h> | 35 | #include <linux/limits.h> |
36 | #include <linux/mutex.h> | 36 | #include <linux/mutex.h> |
37 | #include <linux/rbtree.h> | 37 | #include <linux/rbtree.h> |
38 | #include <linux/shmem_fs.h> | ||
38 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
39 | #include <linux/swap.h> | 40 | #include <linux/swap.h> |
40 | #include <linux/swapops.h> | 41 | #include <linux/swapops.h> |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index eac0ba56149..740c4f52059 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -391,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, | |||
391 | struct task_struct *tsk; | 391 | struct task_struct *tsk; |
392 | struct anon_vma *av; | 392 | struct anon_vma *av; |
393 | 393 | ||
394 | read_lock(&tasklist_lock); | ||
395 | av = page_lock_anon_vma(page); | 394 | av = page_lock_anon_vma(page); |
396 | if (av == NULL) /* Not actually mapped anymore */ | 395 | if (av == NULL) /* Not actually mapped anymore */ |
397 | goto out; | 396 | return; |
397 | |||
398 | read_lock(&tasklist_lock); | ||
398 | for_each_process (tsk) { | 399 | for_each_process (tsk) { |
399 | struct anon_vma_chain *vmac; | 400 | struct anon_vma_chain *vmac; |
400 | 401 | ||
@@ -408,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, | |||
408 | add_to_kill(tsk, page, vma, to_kill, tkc); | 409 | add_to_kill(tsk, page, vma, to_kill, tkc); |
409 | } | 410 | } |
410 | } | 411 | } |
411 | page_unlock_anon_vma(av); | ||
412 | out: | ||
413 | read_unlock(&tasklist_lock); | 412 | read_unlock(&tasklist_lock); |
413 | page_unlock_anon_vma(av); | ||
414 | } | 414 | } |
415 | 415 | ||
416 | /* | 416 | /* |
@@ -424,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, | |||
424 | struct prio_tree_iter iter; | 424 | struct prio_tree_iter iter; |
425 | struct address_space *mapping = page->mapping; | 425 | struct address_space *mapping = page->mapping; |
426 | 426 | ||
427 | /* | ||
428 | * A note on the locking order between the two locks. | ||
429 | * We don't rely on this particular order. | ||
430 | * If you have some other code that needs a different order | ||
431 | * feel free to switch them around. Or add a reverse link | ||
432 | * from mm_struct to task_struct, then this could be all | ||
433 | * done without taking tasklist_lock and looping over all tasks. | ||
434 | */ | ||
435 | |||
436 | read_lock(&tasklist_lock); | ||
437 | mutex_lock(&mapping->i_mmap_mutex); | 427 | mutex_lock(&mapping->i_mmap_mutex); |
428 | read_lock(&tasklist_lock); | ||
438 | for_each_process(tsk) { | 429 | for_each_process(tsk) { |
439 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | 430 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); |
440 | 431 | ||
@@ -454,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, | |||
454 | add_to_kill(tsk, page, vma, to_kill, tkc); | 445 | add_to_kill(tsk, page, vma, to_kill, tkc); |
455 | } | 446 | } |
456 | } | 447 | } |
457 | mutex_unlock(&mapping->i_mmap_mutex); | ||
458 | read_unlock(&tasklist_lock); | 448 | read_unlock(&tasklist_lock); |
449 | mutex_unlock(&mapping->i_mmap_mutex); | ||
459 | } | 450 | } |
460 | 451 | ||
461 | /* | 452 | /* |
diff --git a/mm/memory.c b/mm/memory.c index 87d935333f0..40b7531ee8b 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2798,30 +2798,6 @@ void unmap_mapping_range(struct address_space *mapping, | |||
2798 | } | 2798 | } |
2799 | EXPORT_SYMBOL(unmap_mapping_range); | 2799 | EXPORT_SYMBOL(unmap_mapping_range); |
2800 | 2800 | ||
2801 | int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) | ||
2802 | { | ||
2803 | struct address_space *mapping = inode->i_mapping; | ||
2804 | |||
2805 | /* | ||
2806 | * If the underlying filesystem is not going to provide | ||
2807 | * a way to truncate a range of blocks (punch a hole) - | ||
2808 | * we should return failure right now. | ||
2809 | */ | ||
2810 | if (!inode->i_op->truncate_range) | ||
2811 | return -ENOSYS; | ||
2812 | |||
2813 | mutex_lock(&inode->i_mutex); | ||
2814 | down_write(&inode->i_alloc_sem); | ||
2815 | unmap_mapping_range(mapping, offset, (end - offset), 1); | ||
2816 | truncate_inode_pages_range(mapping, offset, end); | ||
2817 | unmap_mapping_range(mapping, offset, (end - offset), 1); | ||
2818 | inode->i_op->truncate_range(inode, offset, end); | ||
2819 | up_write(&inode->i_alloc_sem); | ||
2820 | mutex_unlock(&inode->i_mutex); | ||
2821 | |||
2822 | return 0; | ||
2823 | } | ||
2824 | |||
2825 | /* | 2801 | /* |
2826 | * We enter with non-exclusive mmap_sem (to exclude vma changes, | 2802 | * We enter with non-exclusive mmap_sem (to exclude vma changes, |
2827 | * but allow concurrent faults), and pte mapped but not yet locked. | 2803 | * but allow concurrent faults), and pte mapped but not yet locked. |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 02159c75513..c46887b5a11 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -498,7 +498,9 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) | |||
498 | * The node we allocated has no zone fallback lists. For avoiding | 498 | * The node we allocated has no zone fallback lists. For avoiding |
499 | * to access not-initialized zonelist, build here. | 499 | * to access not-initialized zonelist, build here. |
500 | */ | 500 | */ |
501 | mutex_lock(&zonelists_mutex); | ||
501 | build_all_zonelists(NULL); | 502 | build_all_zonelists(NULL); |
503 | mutex_unlock(&zonelists_mutex); | ||
502 | 504 | ||
503 | return pgdat; | 505 | return pgdat; |
504 | } | 506 | } |
@@ -521,7 +523,7 @@ int mem_online_node(int nid) | |||
521 | 523 | ||
522 | lock_memory_hotplug(); | 524 | lock_memory_hotplug(); |
523 | pgdat = hotadd_new_pgdat(nid, 0); | 525 | pgdat = hotadd_new_pgdat(nid, 0); |
524 | if (pgdat) { | 526 | if (!pgdat) { |
525 | ret = -ENOMEM; | 527 | ret = -ENOMEM; |
526 | goto out; | 528 | goto out; |
527 | } | 529 | } |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -38,9 +38,8 @@ | |||
38 | * in arch-dependent flush_dcache_mmap_lock, | 38 | * in arch-dependent flush_dcache_mmap_lock, |
39 | * within inode_wb_list_lock in __sync_single_inode) | 39 | * within inode_wb_list_lock in __sync_single_inode) |
40 | * | 40 | * |
41 | * (code doesn't rely on that order so it could be switched around) | 41 | * anon_vma->mutex,mapping->i_mutex (memory_failure, collect_procs_anon) |
42 | * ->tasklist_lock | 42 | * ->tasklist_lock |
43 | * anon_vma->mutex (memory_failure, collect_procs_anon) | ||
44 | * pte map lock | 43 | * pte map lock |
45 | */ | 44 | */ |
46 | 45 | ||
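The rmap.c comment now records the order that the memory-failure.c hunks above establish: the anon_vma mutex or i_mmap mutex is taken first, with tasklist_lock nested inside it. Schematically, following the collect_procs_file() shape:

    /* Sketch: nesting after the fix - mapping lock outside tasklist_lock. */
    mutex_lock(&mapping->i_mmap_mutex);
    read_lock(&tasklist_lock);
    /* ... walk tasks and their vmas, add_to_kill() as needed ... */
    read_unlock(&tasklist_lock);
    mutex_unlock(&mapping->i_mmap_mutex);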
diff --git a/mm/shmem.c b/mm/shmem.c index d221a1cfd7b..fcedf5464eb 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -539,7 +539,7 @@ static void shmem_free_pages(struct list_head *next) | |||
539 | } while (next); | 539 | } while (next); |
540 | } | 540 | } |
541 | 541 | ||
542 | static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) | 542 | void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) |
543 | { | 543 | { |
544 | struct shmem_inode_info *info = SHMEM_I(inode); | 544 | struct shmem_inode_info *info = SHMEM_I(inode); |
545 | unsigned long idx; | 545 | unsigned long idx; |
@@ -562,6 +562,8 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) | |||
562 | spinlock_t *punch_lock; | 562 | spinlock_t *punch_lock; |
563 | unsigned long upper_limit; | 563 | unsigned long upper_limit; |
564 | 564 | ||
565 | truncate_inode_pages_range(inode->i_mapping, start, end); | ||
566 | |||
565 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; | 567 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; |
566 | idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | 568 | idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
567 | if (idx >= info->next_index) | 569 | if (idx >= info->next_index) |
@@ -738,16 +740,8 @@ done2: | |||
738 | * lowered next_index. Also, though shmem_getpage checks | 740 | * lowered next_index. Also, though shmem_getpage checks |
739 | * i_size before adding to cache, no recheck after: so fix the | 741 | * i_size before adding to cache, no recheck after: so fix the |
740 | * narrow window there too. | 742 | * narrow window there too. |
741 | * | ||
742 | * Recalling truncate_inode_pages_range and unmap_mapping_range | ||
743 | * every time for punch_hole (which never got a chance to clear | ||
744 | * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive, | ||
745 | * yet hardly ever necessary: try to optimize them out later. | ||
746 | */ | 743 | */ |
747 | truncate_inode_pages_range(inode->i_mapping, start, end); | 744 | truncate_inode_pages_range(inode->i_mapping, start, end); |
748 | if (punch_hole) | ||
749 | unmap_mapping_range(inode->i_mapping, start, | ||
750 | end - start, 1); | ||
751 | } | 745 | } |
752 | 746 | ||
753 | spin_lock(&info->lock); | 747 | spin_lock(&info->lock); |
@@ -766,22 +760,23 @@ done2: | |||
766 | shmem_free_pages(pages_to_free.next); | 760 | shmem_free_pages(pages_to_free.next); |
767 | } | 761 | } |
768 | } | 762 | } |
763 | EXPORT_SYMBOL_GPL(shmem_truncate_range); | ||
769 | 764 | ||
770 | static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) | 765 | static int shmem_setattr(struct dentry *dentry, struct iattr *attr) |
771 | { | 766 | { |
772 | struct inode *inode = dentry->d_inode; | 767 | struct inode *inode = dentry->d_inode; |
773 | loff_t newsize = attr->ia_size; | ||
774 | int error; | 768 | int error; |
775 | 769 | ||
776 | error = inode_change_ok(inode, attr); | 770 | error = inode_change_ok(inode, attr); |
777 | if (error) | 771 | if (error) |
778 | return error; | 772 | return error; |
779 | 773 | ||
780 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE) | 774 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { |
781 | && newsize != inode->i_size) { | 775 | loff_t oldsize = inode->i_size; |
776 | loff_t newsize = attr->ia_size; | ||
782 | struct page *page = NULL; | 777 | struct page *page = NULL; |
783 | 778 | ||
784 | if (newsize < inode->i_size) { | 779 | if (newsize < oldsize) { |
785 | /* | 780 | /* |
786 | * If truncating down to a partial page, then | 781 | * If truncating down to a partial page, then |
787 | * if that page is already allocated, hold it | 782 | * if that page is already allocated, hold it |
@@ -810,12 +805,19 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) | |||
810 | spin_unlock(&info->lock); | 805 | spin_unlock(&info->lock); |
811 | } | 806 | } |
812 | } | 807 | } |
813 | 808 | if (newsize != oldsize) { | |
814 | /* XXX(truncate): truncate_setsize should be called last */ | 809 | i_size_write(inode, newsize); |
815 | truncate_setsize(inode, newsize); | 810 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; |
811 | } | ||
812 | if (newsize < oldsize) { | ||
813 | loff_t holebegin = round_up(newsize, PAGE_SIZE); | ||
814 | unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); | ||
815 | shmem_truncate_range(inode, newsize, (loff_t)-1); | ||
816 | /* unmap again to remove racily COWed private pages */ | ||
817 | unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); | ||
818 | } | ||
816 | if (page) | 819 | if (page) |
817 | page_cache_release(page); | 820 | page_cache_release(page); |
818 | shmem_truncate_range(inode, newsize, (loff_t)-1); | ||
819 | } | 821 | } |
820 | 822 | ||
821 | setattr_copy(inode, attr); | 823 | setattr_copy(inode, attr); |
@@ -832,7 +834,6 @@ static void shmem_evict_inode(struct inode *inode) | |||
832 | struct shmem_xattr *xattr, *nxattr; | 834 | struct shmem_xattr *xattr, *nxattr; |
833 | 835 | ||
834 | if (inode->i_mapping->a_ops == &shmem_aops) { | 836 | if (inode->i_mapping->a_ops == &shmem_aops) { |
835 | truncate_inode_pages(inode->i_mapping, 0); | ||
836 | shmem_unacct_size(info->flags, inode->i_size); | 837 | shmem_unacct_size(info->flags, inode->i_size); |
837 | inode->i_size = 0; | 838 | inode->i_size = 0; |
838 | shmem_truncate_range(inode, 0, (loff_t)-1); | 839 | shmem_truncate_range(inode, 0, (loff_t)-1); |
@@ -2706,7 +2707,7 @@ static const struct file_operations shmem_file_operations = { | |||
2706 | }; | 2707 | }; |
2707 | 2708 | ||
2708 | static const struct inode_operations shmem_inode_operations = { | 2709 | static const struct inode_operations shmem_inode_operations = { |
2709 | .setattr = shmem_notify_change, | 2710 | .setattr = shmem_setattr, |
2710 | .truncate_range = shmem_truncate_range, | 2711 | .truncate_range = shmem_truncate_range, |
2711 | #ifdef CONFIG_TMPFS_XATTR | 2712 | #ifdef CONFIG_TMPFS_XATTR |
2712 | .setxattr = shmem_setxattr, | 2713 | .setxattr = shmem_setxattr, |
@@ -2739,7 +2740,7 @@ static const struct inode_operations shmem_dir_inode_operations = { | |||
2739 | .removexattr = shmem_removexattr, | 2740 | .removexattr = shmem_removexattr, |
2740 | #endif | 2741 | #endif |
2741 | #ifdef CONFIG_TMPFS_POSIX_ACL | 2742 | #ifdef CONFIG_TMPFS_POSIX_ACL |
2742 | .setattr = shmem_notify_change, | 2743 | .setattr = shmem_setattr, |
2743 | .check_acl = generic_check_acl, | 2744 | .check_acl = generic_check_acl, |
2744 | #endif | 2745 | #endif |
2745 | }; | 2746 | }; |
@@ -2752,7 +2753,7 @@ static const struct inode_operations shmem_special_inode_operations = { | |||
2752 | .removexattr = shmem_removexattr, | 2753 | .removexattr = shmem_removexattr, |
2753 | #endif | 2754 | #endif |
2754 | #ifdef CONFIG_TMPFS_POSIX_ACL | 2755 | #ifdef CONFIG_TMPFS_POSIX_ACL |
2755 | .setattr = shmem_notify_change, | 2756 | .setattr = shmem_setattr, |
2756 | .check_acl = generic_check_acl, | 2757 | .check_acl = generic_check_acl, |
2757 | #endif | 2758 | #endif |
2758 | }; | 2759 | }; |
@@ -2908,6 +2909,12 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) | |||
2908 | return 0; | 2909 | return 0; |
2909 | } | 2910 | } |
2910 | 2911 | ||
2912 | void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) | ||
2913 | { | ||
2914 | truncate_inode_pages_range(inode->i_mapping, start, end); | ||
2915 | } | ||
2916 | EXPORT_SYMBOL_GPL(shmem_truncate_range); | ||
2917 | |||
2911 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 2918 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
2912 | /** | 2919 | /** |
2913 | * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file | 2920 | * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file |
@@ -3028,3 +3035,26 @@ int shmem_zero_setup(struct vm_area_struct *vma) | |||
3028 | vma->vm_flags |= VM_CAN_NONLINEAR; | 3035 | vma->vm_flags |= VM_CAN_NONLINEAR; |
3029 | return 0; | 3036 | return 0; |
3030 | } | 3037 | } |
3038 | |||
3039 | /** | ||
3040 | * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. | ||
3041 | * @mapping: the page's address_space | ||
3042 | * @index: the page index | ||
3043 | * @gfp: the page allocator flags to use if allocating | ||
3044 | * | ||
3045 | * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", | ||
3046 | * with any new page allocations done using the specified allocation flags. | ||
3047 | * But read_cache_page_gfp() uses the ->readpage() method: which does not | ||
3048 | * suit tmpfs, since it may have pages in swapcache, and needs to find those | ||
3049 | * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. | ||
3050 | * | ||
3051 | * Provide a stub for those callers to start using now, then later | ||
3052 | * flesh it out to call shmem_getpage() with additional gfp mask, when | ||
3053 | * shmem_file_splice_read() is added and shmem_readpage() is removed. | ||
3054 | */ | ||
3055 | struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, | ||
3056 | pgoff_t index, gfp_t gfp) | ||
3057 | { | ||
3058 | return read_cache_page_gfp(mapping, index, gfp); | ||
3059 | } | ||
3060 | EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); | ||
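A hedged sketch of how a caller such as a GPU driver might use the new helper; mapping and index stand in for the driver's shmem mapping and page offset, and the gfp mask is illustrative:

    /* Sketch only: read a tmpfs-backed page with explicit allocation flags. */
    struct page *page;

    page = shmem_read_mapping_page_gfp(mapping, index, GFP_HIGHUSER);
    if (IS_ERR(page))
            return PTR_ERR(page);
    /* ... access the page contents ... */
    page_cache_release(page);      /* drop the reference when done */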
diff --git a/mm/swapfile.c b/mm/swapfile.c index d537d29e9b7..ff8dc1a18cb 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
15 | #include <linux/pagemap.h> | 15 | #include <linux/pagemap.h> |
16 | #include <linux/namei.h> | 16 | #include <linux/namei.h> |
17 | #include <linux/shm.h> | 17 | #include <linux/shmem_fs.h> |
18 | #include <linux/blkdev.h> | 18 | #include <linux/blkdev.h> |
19 | #include <linux/random.h> | 19 | #include <linux/random.h> |
20 | #include <linux/writeback.h> | 20 | #include <linux/writeback.h> |
diff --git a/mm/truncate.c b/mm/truncate.c index 3a29a618021..e13f22efaad 100644 --- a/mm/truncate.c +++ b/mm/truncate.c | |||
@@ -304,6 +304,11 @@ EXPORT_SYMBOL(truncate_inode_pages_range); | |||
304 | * @lstart: offset from which to truncate | 304 | * @lstart: offset from which to truncate |
305 | * | 305 | * |
306 | * Called under (and serialised by) inode->i_mutex. | 306 | * Called under (and serialised by) inode->i_mutex. |
307 | * | ||
308 | * Note: When this function returns, there can be a page in the process of | ||
309 | * deletion (inside __delete_from_page_cache()) in the specified range. Thus | ||
310 | * mapping->nrpages can be non-zero when this function returns even after | ||
311 | * truncation of the whole mapping. | ||
307 | */ | 312 | */ |
308 | void truncate_inode_pages(struct address_space *mapping, loff_t lstart) | 313 | void truncate_inode_pages(struct address_space *mapping, loff_t lstart) |
309 | { | 314 | { |
@@ -603,3 +608,27 @@ int vmtruncate(struct inode *inode, loff_t offset) | |||
603 | return 0; | 608 | return 0; |
604 | } | 609 | } |
605 | EXPORT_SYMBOL(vmtruncate); | 610 | EXPORT_SYMBOL(vmtruncate); |
611 | |||
612 | int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) | ||
613 | { | ||
614 | struct address_space *mapping = inode->i_mapping; | ||
615 | |||
616 | /* | ||
617 | * If the underlying filesystem is not going to provide | ||
618 | * a way to truncate a range of blocks (punch a hole) - | ||
619 | * we should return failure right now. | ||
620 | */ | ||
621 | if (!inode->i_op->truncate_range) | ||
622 | return -ENOSYS; | ||
623 | |||
624 | mutex_lock(&inode->i_mutex); | ||
625 | down_write(&inode->i_alloc_sem); | ||
626 | unmap_mapping_range(mapping, offset, (end - offset), 1); | ||
627 | inode->i_op->truncate_range(inode, offset, end); | ||
628 | /* unmap again to remove racily COWed private pages */ | ||
629 | unmap_mapping_range(mapping, offset, (end - offset), 1); | ||
630 | up_write(&inode->i_alloc_sem); | ||
631 | mutex_unlock(&inode->i_mutex); | ||
632 | |||
633 | return 0; | ||
634 | } | ||
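vmtruncate_range() is the backend reached from madvise(MADV_REMOVE); the second unmap_mapping_range() removes private pages COWed while the truncation ran. A user-space sketch of triggering a hole punch on a tmpfs mapping (illustrative; the call fails on filesystems without ->truncate_range):

    /* Sketch: punch a hole in a tmpfs-backed mapping from user space. */
    #include <sys/mman.h>

    static int punch_hole(void *addr, size_t len)
    {
            /* Returns -1 with errno set if the fs lacks ->truncate_range. */
            return madvise(addr, len, MADV_REMOVE);
    }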
diff --git a/mm/vmscan.c b/mm/vmscan.c index 8ff834e19c2..4f49535d4cd 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1995,14 +1995,13 @@ restart: | |||
1995 | * If a zone is deemed to be full of pinned pages then just give it a light | 1995 | * If a zone is deemed to be full of pinned pages then just give it a light |
1996 | * scan then give up on it. | 1996 | * scan then give up on it. |
1997 | */ | 1997 | */ |
1998 | static unsigned long shrink_zones(int priority, struct zonelist *zonelist, | 1998 | static void shrink_zones(int priority, struct zonelist *zonelist, |
1999 | struct scan_control *sc) | 1999 | struct scan_control *sc) |
2000 | { | 2000 | { |
2001 | struct zoneref *z; | 2001 | struct zoneref *z; |
2002 | struct zone *zone; | 2002 | struct zone *zone; |
2003 | unsigned long nr_soft_reclaimed; | 2003 | unsigned long nr_soft_reclaimed; |
2004 | unsigned long nr_soft_scanned; | 2004 | unsigned long nr_soft_scanned; |
2005 | unsigned long total_scanned = 0; | ||
2006 | 2005 | ||
2007 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | 2006 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
2008 | gfp_zone(sc->gfp_mask), sc->nodemask) { | 2007 | gfp_zone(sc->gfp_mask), sc->nodemask) { |
@@ -2017,19 +2016,23 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist, | |||
2017 | continue; | 2016 | continue; |
2018 | if (zone->all_unreclaimable && priority != DEF_PRIORITY) | 2017 | if (zone->all_unreclaimable && priority != DEF_PRIORITY) |
2019 | continue; /* Let kswapd poll it */ | 2018 | continue; /* Let kswapd poll it */ |
2019 | /* | ||
2020 | * This steals pages from memory cgroups over softlimit | ||
2021 | * and returns the number of reclaimed pages and | ||
2022 | * scanned pages. This works for global memory pressure | ||
2023 | * and balancing, not for a memcg's limit. | ||
2024 | */ | ||
2025 | nr_soft_scanned = 0; | ||
2026 | nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, | ||
2027 | sc->order, sc->gfp_mask, | ||
2028 | &nr_soft_scanned); | ||
2029 | sc->nr_reclaimed += nr_soft_reclaimed; | ||
2030 | sc->nr_scanned += nr_soft_scanned; | ||
2031 | /* need some check to avoid more shrink_zone() */ | ||
2020 | } | 2032 | } |
2021 | 2033 | ||
2022 | nr_soft_scanned = 0; | ||
2023 | nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, | ||
2024 | sc->order, sc->gfp_mask, | ||
2025 | &nr_soft_scanned); | ||
2026 | sc->nr_reclaimed += nr_soft_reclaimed; | ||
2027 | total_scanned += nr_soft_scanned; | ||
2028 | |||
2029 | shrink_zone(priority, zone, sc); | 2034 | shrink_zone(priority, zone, sc); |
2030 | } | 2035 | } |
2031 | |||
2032 | return total_scanned; | ||
2033 | } | 2036 | } |
2034 | 2037 | ||
2035 | static bool zone_reclaimable(struct zone *zone) | 2038 | static bool zone_reclaimable(struct zone *zone) |
@@ -2094,7 +2097,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
2094 | sc->nr_scanned = 0; | 2097 | sc->nr_scanned = 0; |
2095 | if (!priority) | 2098 | if (!priority) |
2096 | disable_swap_token(sc->mem_cgroup); | 2099 | disable_swap_token(sc->mem_cgroup); |
2097 | total_scanned += shrink_zones(priority, zonelist, sc); | 2100 | shrink_zones(priority, zonelist, sc); |
2098 | /* | 2101 | /* |
2099 | * Don't shrink slabs when reclaiming memory from | 2102 | * Don't shrink slabs when reclaiming memory from |
2100 | * over limit cgroups | 2103 | * over limit cgroups |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 339ba64cce1..5daf6cc4fae 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -577,13 +577,13 @@ retry: | |||
577 | } | 577 | } |
578 | inode = &gss_msg->inode->vfs_inode; | 578 | inode = &gss_msg->inode->vfs_inode; |
579 | for (;;) { | 579 | for (;;) { |
580 | prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); | 580 | prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE); |
581 | spin_lock(&inode->i_lock); | 581 | spin_lock(&inode->i_lock); |
582 | if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { | 582 | if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { |
583 | break; | 583 | break; |
584 | } | 584 | } |
585 | spin_unlock(&inode->i_lock); | 585 | spin_unlock(&inode->i_lock); |
586 | if (signalled()) { | 586 | if (fatal_signal_pending(current)) { |
587 | err = -ERESTARTSYS; | 587 | err = -ERESTARTSYS; |
588 | goto out_intr; | 588 | goto out_intr; |
589 | } | 589 | } |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index b84d7395535..8c9141583d6 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -1061,7 +1061,7 @@ call_allocate(struct rpc_task *task) | |||
1061 | 1061 | ||
1062 | dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); | 1062 | dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); |
1063 | 1063 | ||
1064 | if (RPC_IS_ASYNC(task) || !signalled()) { | 1064 | if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { |
1065 | task->tk_action = call_allocate; | 1065 | task->tk_action = call_allocate; |
1066 | rpc_delay(task, HZ>>4); | 1066 | rpc_delay(task, HZ>>4); |
1067 | return; | 1067 | return; |
@@ -1175,6 +1175,9 @@ call_bind_status(struct rpc_task *task) | |||
1175 | status = -EOPNOTSUPP; | 1175 | status = -EOPNOTSUPP; |
1176 | break; | 1176 | break; |
1177 | } | 1177 | } |
1178 | if (task->tk_rebind_retry == 0) | ||
1179 | break; | ||
1180 | task->tk_rebind_retry--; | ||
1178 | rpc_delay(task, 3*HZ); | 1181 | rpc_delay(task, 3*HZ); |
1179 | goto retry_timeout; | 1182 | goto retry_timeout; |
1180 | case -ETIMEDOUT: | 1183 | case -ETIMEDOUT: |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 6b43ee7221d..a27406b1654 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -792,6 +792,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta | |||
792 | /* Initialize retry counters */ | 792 | /* Initialize retry counters */ |
793 | task->tk_garb_retry = 2; | 793 | task->tk_garb_retry = 2; |
794 | task->tk_cred_retry = 2; | 794 | task->tk_cred_retry = 2; |
795 | task->tk_rebind_retry = 2; | ||
795 | 796 | ||
796 | task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; | 797 | task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; |
797 | task->tk_owner = current->tgid; | 798 | task->tk_owner = current->tgid; |
diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c index 063653955f9..ef7f3229185 100644 --- a/samples/hw_breakpoint/data_breakpoint.c +++ b/samples/hw_breakpoint/data_breakpoint.c | |||
@@ -41,7 +41,7 @@ module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO); | |||
41 | MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any" | 41 | MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any" |
42 | " write operations on the kernel symbol"); | 42 | " write operations on the kernel symbol"); |
43 | 43 | ||
44 | static void sample_hbp_handler(struct perf_event *bp, int nmi, | 44 | static void sample_hbp_handler(struct perf_event *bp, |
45 | struct perf_sample_data *data, | 45 | struct perf_sample_data *data, |
46 | struct pt_regs *regs) | 46 | struct pt_regs *regs) |
47 | { | 47 | { |
@@ -60,7 +60,7 @@ static int __init hw_break_module_init(void) | |||
60 | attr.bp_len = HW_BREAKPOINT_LEN_4; | 60 | attr.bp_len = HW_BREAKPOINT_LEN_4; |
61 | attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; | 61 | attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; |
62 | 62 | ||
63 | sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler); | 63 | sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler, NULL); |
64 | if (IS_ERR((void __force *)sample_hbp)) { | 64 | if (IS_ERR((void __force *)sample_hbp)) { |
65 | ret = PTR_ERR((void __force *)sample_hbp); | 65 | ret = PTR_ERR((void __force *)sample_hbp); |
66 | goto fail; | 66 | goto fail; |
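register_wide_hw_breakpoint() picks up the same trailing context argument as the other perf registration paths, NULL when unused. A sketch under that assumption (my_handler and my_symbol are hypothetical):

    /* Sketch: per-CPU data breakpoint with the new trailing context arg. */
    struct perf_event_attr attr;
    struct perf_event * __percpu *bp;

    hw_breakpoint_init(&attr);
    attr.bp_addr = kallsyms_lookup_name("my_symbol"); /* hypothetical */
    attr.bp_len  = HW_BREAKPOINT_LEN_4;
    attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
    bp = register_wide_hw_breakpoint(&attr, my_handler, NULL /* context */);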
diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 8e319a416ee..82465328c39 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c | |||
@@ -469,7 +469,7 @@ static struct key *construct_key_and_link(struct key_type *type, | |||
469 | } else if (ret == -EINPROGRESS) { | 469 | } else if (ret == -EINPROGRESS) { |
470 | ret = 0; | 470 | ret = 0; |
471 | } else { | 471 | } else { |
472 | key = ERR_PTR(ret); | 472 | goto couldnt_alloc_key; |
473 | } | 473 | } |
474 | 474 | ||
475 | key_put(dest_keyring); | 475 | key_put(dest_keyring); |
@@ -479,6 +479,7 @@ static struct key *construct_key_and_link(struct key_type *type, | |||
479 | construction_failed: | 479 | construction_failed: |
480 | key_negate_and_link(key, key_negative_timeout, NULL, NULL); | 480 | key_negate_and_link(key, key_negative_timeout, NULL, NULL); |
481 | key_put(key); | 481 | key_put(key); |
482 | couldnt_alloc_key: | ||
482 | key_put(dest_keyring); | 483 | key_put(dest_keyring); |
483 | kleave(" = %d", ret); | 484 | kleave(" = %d", ret); |
484 | return ERR_PTR(ret); | 485 | return ERR_PTR(ret); |
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c index 2ca6f4f85b4..e3569bdd3b6 100644 --- a/sound/pci/asihpi/asihpi.c +++ b/sound/pci/asihpi/asihpi.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include "hpioctl.h" | 27 | #include "hpioctl.h" |
28 | 28 | ||
29 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
30 | #include <linux/version.h> | ||
31 | #include <linux/init.h> | 30 | #include <linux/init.h> |
32 | #include <linux/jiffies.h> | 31 | #include <linux/jiffies.h> |
33 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 61a774b3d3c..d21191dcfe8 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -4883,7 +4883,6 @@ static const struct snd_pci_quirk alc880_cfg_tbl[] = { | |||
4883 | SND_PCI_QUIRK(0x1025, 0xe309, "ULI", ALC880_3ST_DIG), | 4883 | SND_PCI_QUIRK(0x1025, 0xe309, "ULI", ALC880_3ST_DIG), |
4884 | SND_PCI_QUIRK(0x1025, 0xe310, "ULI", ALC880_3ST), | 4884 | SND_PCI_QUIRK(0x1025, 0xe310, "ULI", ALC880_3ST), |
4885 | SND_PCI_QUIRK(0x1039, 0x1234, NULL, ALC880_6ST_DIG), | 4885 | SND_PCI_QUIRK(0x1039, 0x1234, NULL, ALC880_6ST_DIG), |
4886 | SND_PCI_QUIRK(0x103c, 0x2a09, "HP", ALC880_5ST), | ||
4887 | SND_PCI_QUIRK(0x1043, 0x10b3, "ASUS W1V", ALC880_ASUS_W1V), | 4886 | SND_PCI_QUIRK(0x1043, 0x10b3, "ASUS W1V", ALC880_ASUS_W1V), |
4888 | SND_PCI_QUIRK(0x1043, 0x10c2, "ASUS W6A", ALC880_ASUS_DIG), | 4887 | SND_PCI_QUIRK(0x1043, 0x10c2, "ASUS W6A", ALC880_ASUS_DIG), |
4889 | SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS Wxx", ALC880_ASUS_DIG), | 4888 | SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS Wxx", ALC880_ASUS_DIG), |
@@ -12600,6 +12599,7 @@ static const struct hda_verb alc262_toshiba_rx1_unsol_verbs[] = { | |||
12600 | */ | 12599 | */ |
12601 | enum { | 12600 | enum { |
12602 | PINFIX_FSC_H270, | 12601 | PINFIX_FSC_H270, |
12602 | PINFIX_HP_Z200, | ||
12603 | }; | 12603 | }; |
12604 | 12604 | ||
12605 | static const struct alc_fixup alc262_fixups[] = { | 12605 | static const struct alc_fixup alc262_fixups[] = { |
@@ -12612,9 +12612,17 @@ static const struct alc_fixup alc262_fixups[] = { | |||
12612 | { } | 12612 | { } |
12613 | } | 12613 | } |
12614 | }, | 12614 | }, |
12615 | [PINFIX_HP_Z200] = { | ||
12616 | .type = ALC_FIXUP_PINS, | ||
12617 | .v.pins = (const struct alc_pincfg[]) { | ||
12618 | { 0x16, 0x99130120 }, /* internal speaker */ | ||
12619 | { } | ||
12620 | } | ||
12621 | }, | ||
12615 | }; | 12622 | }; |
12616 | 12623 | ||
12617 | static const struct snd_pci_quirk alc262_fixup_tbl[] = { | 12624 | static const struct snd_pci_quirk alc262_fixup_tbl[] = { |
12625 | SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200", PINFIX_HP_Z200), | ||
12618 | SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", PINFIX_FSC_H270), | 12626 | SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", PINFIX_FSC_H270), |
12619 | {} | 12627 | {} |
12620 | }; | 12628 | }; |
@@ -12731,6 +12739,8 @@ static const struct snd_pci_quirk alc262_cfg_tbl[] = { | |||
12731 | ALC262_HP_BPC), | 12739 | ALC262_HP_BPC), |
12732 | SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1500, "HP z series", | 12740 | SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1500, "HP z series", |
12733 | ALC262_HP_BPC), | 12741 | ALC262_HP_BPC), |
12742 | SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200", | ||
12743 | ALC262_AUTO), | ||
12734 | SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1700, "HP xw series", | 12744 | SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1700, "HP xw series", |
12735 | ALC262_HP_BPC), | 12745 | ALC262_HP_BPC), |
12736 | SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL), | 12746 | SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL), |
@@ -13872,7 +13882,6 @@ static const struct snd_pci_quirk alc268_cfg_tbl[] = { | |||
13872 | SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST), | 13882 | SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST), |
13873 | SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO), | 13883 | SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO), |
13874 | SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA), | 13884 | SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA), |
13875 | SND_PCI_QUIRK(0x152d, 0x0763, "Diverse (CPR2000)", ALC268_ACER), | ||
13876 | SND_PCI_QUIRK(0x152d, 0x0771, "Quanta IL1", ALC267_QUANTA_IL1), | 13885 | SND_PCI_QUIRK(0x152d, 0x0771, "Quanta IL1", ALC267_QUANTA_IL1), |
13877 | {} | 13886 | {} |
13878 | }; | 13887 | }; |
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index c952582fb21..f43bb0eaed8 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c | |||
@@ -745,12 +745,23 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol, | |||
745 | struct via_spec *spec = codec->spec; | 745 | struct via_spec *spec = codec->spec; |
746 | hda_nid_t nid = kcontrol->private_value; | 746 | hda_nid_t nid = kcontrol->private_value; |
747 | unsigned int pinsel = ucontrol->value.enumerated.item[0]; | 747 | unsigned int pinsel = ucontrol->value.enumerated.item[0]; |
748 | unsigned int parm0, parm1; | ||
748 | /* Get Independent Mode index of headphone pin widget */ | 749 | /* Get Independent Mode index of headphone pin widget */ |
749 | spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel | 750 | spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel |
750 | ? 1 : 0; | 751 | ? 1 : 0; |
751 | if (spec->codec_type == VT1718S) | 752 | if (spec->codec_type == VT1718S) { |
752 | snd_hda_codec_write(codec, nid, 0, | 753 | snd_hda_codec_write(codec, nid, 0, |
753 | AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0); | 754 | AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0); |
755 | /* Set correct mute switch for MW3 */ | ||
756 | parm0 = spec->hp_independent_mode ? | ||
757 | AMP_IN_UNMUTE(0) : AMP_IN_MUTE(0); | ||
758 | parm1 = spec->hp_independent_mode ? | ||
759 | AMP_IN_MUTE(1) : AMP_IN_UNMUTE(1); | ||
760 | snd_hda_codec_write(codec, 0x1b, 0, | ||
761 | AC_VERB_SET_AMP_GAIN_MUTE, parm0); | ||
762 | snd_hda_codec_write(codec, 0x1b, 0, | ||
763 | AC_VERB_SET_AMP_GAIN_MUTE, parm1); | ||
764 | } | ||
754 | else | 765 | else |
755 | snd_hda_codec_write(codec, nid, 0, | 766 | snd_hda_codec_write(codec, nid, 0, |
756 | AC_VERB_SET_CONNECT_SEL, pinsel); | 767 | AC_VERB_SET_CONNECT_SEL, pinsel); |
@@ -4283,9 +4294,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = { | |||
4283 | {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, | 4294 | {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, |
4284 | {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, | 4295 | {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, |
4285 | {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5)}, | 4296 | {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5)}, |
4286 | |||
4287 | /* Setup default input of Front HP to MW9 */ | ||
4288 | {0x28, AC_VERB_SET_CONNECT_SEL, 0x1}, | ||
4289 | /* PW9 PW10 Output enable */ | 4297 | /* PW9 PW10 Output enable */ |
4290 | {0x2d, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN}, | 4298 | {0x2d, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN}, |
4291 | {0x2e, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN}, | 4299 | {0x2e, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN}, |
@@ -4294,10 +4302,10 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = { | |||
4294 | /* Enable Boost Volume backdoor */ | 4302 | /* Enable Boost Volume backdoor */ |
4295 | {0x1, 0xf88, 0x8}, | 4303 | {0x1, 0xf88, 0x8}, |
4296 | /* MW0/1/2/3/4: un-mute index 0 (AOWx), mute index 1 (MW9) */ | 4304 | /* MW0/1/2/3/4: un-mute index 0 (AOWx), mute index 1 (MW9) */ |
4297 | {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | 4305 | {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, |
4298 | {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | 4306 | {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, |
4299 | {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | 4307 | {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, |
4300 | {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | 4308 | {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, |
4301 | {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | 4309 | {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, |
4302 | {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, | 4310 | {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, |
4303 | {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, | 4311 | {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, |
@@ -4307,8 +4315,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = { | |||
4307 | /* set MUX1 = 2 (AOW4), MUX2 = 1 (AOW3) */ | 4315 | /* set MUX1 = 2 (AOW4), MUX2 = 1 (AOW3) */ |
4308 | {0x34, AC_VERB_SET_CONNECT_SEL, 0x2}, | 4316 | {0x34, AC_VERB_SET_CONNECT_SEL, 0x2}, |
4309 | {0x35, AC_VERB_SET_CONNECT_SEL, 0x1}, | 4317 | {0x35, AC_VERB_SET_CONNECT_SEL, 0x1}, |
4310 | /* Unmute MW4's index 0 */ | ||
4311 | {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | ||
4312 | { } | 4318 | { } |
4313 | }; | 4319 | }; |
4314 | 4320 | ||
@@ -4456,6 +4462,19 @@ static int vt1718S_auto_create_multi_out_ctls(struct via_spec *spec, | |||
4456 | if (err < 0) | 4462 | if (err < 0) |
4457 | return err; | 4463 | return err; |
4458 | } else if (i == AUTO_SEQ_FRONT) { | 4464 | } else if (i == AUTO_SEQ_FRONT) { |
4465 | /* add control to mixer index 0 */ | ||
4466 | err = via_add_control(spec, VIA_CTL_WIDGET_VOL, | ||
4467 | "Master Front Playback Volume", | ||
4468 | HDA_COMPOSE_AMP_VAL(0x21, 3, 5, | ||
4469 | HDA_INPUT)); | ||
4470 | if (err < 0) | ||
4471 | return err; | ||
4472 | err = via_add_control(spec, VIA_CTL_WIDGET_MUTE, | ||
4473 | "Master Front Playback Switch", | ||
4474 | HDA_COMPOSE_AMP_VAL(0x21, 3, 5, | ||
4475 | HDA_INPUT)); | ||
4476 | if (err < 0) | ||
4477 | return err; | ||
4459 | /* Front */ | 4478 | /* Front */ |
4460 | sprintf(name, "%s Playback Volume", chname[i]); | 4479 | sprintf(name, "%s Playback Volume", chname[i]); |
4461 | err = via_add_control( | 4480 | err = via_add_control( |
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c index 3c2ee1bb73c..6af23d06870 100644 --- a/sound/soc/codecs/wm8991.c +++ b/sound/soc/codecs/wm8991.c | |||
@@ -13,7 +13,6 @@ | |||
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
16 | #include <linux/version.h> | ||
17 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 17 | #include <linux/init.h> |
19 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig index d8f130d39dd..bb699bb55a5 100644 --- a/sound/soc/imx/Kconfig +++ b/sound/soc/imx/Kconfig | |||
@@ -11,9 +11,6 @@ menuconfig SND_IMX_SOC | |||
11 | 11 | ||
12 | if SND_IMX_SOC | 12 | if SND_IMX_SOC |
13 | 13 | ||
14 | config SND_MXC_SOC_SSI | ||
15 | tristate | ||
16 | |||
17 | config SND_MXC_SOC_FIQ | 14 | config SND_MXC_SOC_FIQ |
18 | tristate | 15 | tristate |
19 | 16 | ||
@@ -24,7 +21,6 @@ config SND_MXC_SOC_WM1133_EV1 | |||
24 | tristate "Audio on the i.MX31ADS with WM1133-EV1 fitted" | 21 | tristate "Audio on the i.MX31ADS with WM1133-EV1 fitted" |
25 | depends on MACH_MX31ADS_WM1133_EV1 && EXPERIMENTAL | 22 | depends on MACH_MX31ADS_WM1133_EV1 && EXPERIMENTAL |
26 | select SND_SOC_WM8350 | 23 | select SND_SOC_WM8350 |
27 | select SND_MXC_SOC_SSI | ||
28 | select SND_MXC_SOC_FIQ | 24 | select SND_MXC_SOC_FIQ |
29 | help | 25 | help |
30 | Enable support for audio on the i.MX31ADS with the WM1133-EV1 | 26 | Enable support for audio on the i.MX31ADS with the WM1133-EV1 |
@@ -34,7 +30,6 @@ config SND_SOC_MX27VIS_AIC32X4 | |||
34 | tristate "SoC audio support for Visstrim M10 boards" | 30 | tristate "SoC audio support for Visstrim M10 boards" |
35 | depends on MACH_IMX27_VISSTRIM_M10 | 31 | depends on MACH_IMX27_VISSTRIM_M10 |
36 | select SND_SOC_TVL320AIC32X4 | 32 | select SND_SOC_TVL320AIC32X4 |
37 | select SND_MXC_SOC_SSI | ||
38 | select SND_MXC_SOC_MX2 | 33 | select SND_MXC_SOC_MX2 |
39 | help | 34 | help |
40 | Say Y if you want to add support for SoC audio on Visstrim M10 | 35 | Say Y if you want to add support for SoC audio on Visstrim M10 |
@@ -44,7 +39,6 @@ config SND_SOC_PHYCORE_AC97 | |||
44 | tristate "SoC Audio support for Phytec phyCORE (and phyCARD) boards" | 39 | tristate "SoC Audio support for Phytec phyCORE (and phyCARD) boards" |
45 | depends on MACH_PCM043 || MACH_PCA100 | 40 | depends on MACH_PCM043 || MACH_PCA100 |
46 | select SND_SOC_WM9712 | 41 | select SND_SOC_WM9712 |
47 | select SND_MXC_SOC_SSI | ||
48 | select SND_MXC_SOC_FIQ | 42 | select SND_MXC_SOC_FIQ |
49 | help | 43 | help |
50 | Say Y if you want to add support for SoC audio on Phytec phyCORE | 44 | Say Y if you want to add support for SoC audio on Phytec phyCORE |
@@ -57,7 +51,6 @@ config SND_SOC_EUKREA_TLV320 | |||
57 | || MACH_EUKREA_MBIMXSD35_BASEBOARD \ | 51 | || MACH_EUKREA_MBIMXSD35_BASEBOARD \ |
58 | || MACH_EUKREA_MBIMXSD51_BASEBOARD | 52 | || MACH_EUKREA_MBIMXSD51_BASEBOARD |
59 | select SND_SOC_TLV320AIC23 | 53 | select SND_SOC_TLV320AIC23 |
60 | select SND_MXC_SOC_SSI | ||
61 | select SND_MXC_SOC_FIQ | 54 | select SND_MXC_SOC_FIQ |
62 | help | 55 | help |
63 | Enable I2S based access to the TLV320AIC23B codec attached | 56 | Enable I2S based access to the TLV320AIC23B codec attached |
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c index aab7765f401..4173b3d87f9 100644 --- a/sound/soc/imx/imx-pcm-dma-mx2.c +++ b/sound/soc/imx/imx-pcm-dma-mx2.c | |||
@@ -337,3 +337,5 @@ static void __exit snd_imx_pcm_exit(void) | |||
337 | platform_driver_unregister(&imx_pcm_driver); | 337 | platform_driver_unregister(&imx_pcm_driver); |
338 | } | 338 | } |
339 | module_exit(snd_imx_pcm_exit); | 339 | module_exit(snd_imx_pcm_exit); |
340 | MODULE_LICENSE("GPL"); | ||
341 | MODULE_ALIAS("platform:imx-pcm-audio"); | ||
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c index 5b13feca753..61fceb09cdb 100644 --- a/sound/soc/imx/imx-ssi.c +++ b/sound/soc/imx/imx-ssi.c | |||
@@ -774,4 +774,4 @@ module_exit(imx_ssi_exit); | |||
774 | MODULE_AUTHOR("Sascha Hauer, <s.hauer@pengutronix.de>"); | 774 | MODULE_AUTHOR("Sascha Hauer, <s.hauer@pengutronix.de>"); |
775 | MODULE_DESCRIPTION("i.MX I2S/ac97 SoC Interface"); | 775 | MODULE_DESCRIPTION("i.MX I2S/ac97 SoC Interface"); |
776 | MODULE_LICENSE("GPL"); | 776 | MODULE_LICENSE("GPL"); |
777 | 777 | MODULE_ALIAS("platform:imx-ssi"); | |
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c index 2ce0b2d891d..fab20a54e86 100644 --- a/sound/soc/pxa/pxa2xx-pcm.c +++ b/sound/soc/pxa/pxa2xx-pcm.c | |||
@@ -95,14 +95,14 @@ static int pxa2xx_soc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai, | |||
95 | if (!card->dev->coherent_dma_mask) | 95 | if (!card->dev->coherent_dma_mask) |
96 | card->dev->coherent_dma_mask = DMA_BIT_MASK(32); | 96 | card->dev->coherent_dma_mask = DMA_BIT_MASK(32); |
97 | 97 | ||
98 | if (dai->driver->playback.channels_min) { | 98 | if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { |
99 | ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, | 99 | ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, |
100 | SNDRV_PCM_STREAM_PLAYBACK); | 100 | SNDRV_PCM_STREAM_PLAYBACK); |
101 | if (ret) | 101 | if (ret) |
102 | goto out; | 102 | goto out; |
103 | } | 103 | } |
104 | 104 | ||
105 | if (dai->driver->capture.channels_min) { | 105 | if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { |
106 | ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, | 106 | ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, |
107 | SNDRV_PCM_STREAM_CAPTURE); | 107 | SNDRV_PCM_STREAM_CAPTURE); |
108 | if (ret) | 108 | if (ret) |
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c index c005ceb70c9..039b9532b27 100644 --- a/sound/soc/soc-cache.c +++ b/sound/soc/soc-cache.c | |||
@@ -409,9 +409,6 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, | |||
409 | codec->bulk_write_raw = snd_soc_hw_bulk_write_raw; | 409 | codec->bulk_write_raw = snd_soc_hw_bulk_write_raw; |
410 | 410 | ||
411 | switch (control) { | 411 | switch (control) { |
412 | case SND_SOC_CUSTOM: | ||
413 | break; | ||
414 | |||
415 | case SND_SOC_I2C: | 412 | case SND_SOC_I2C: |
416 | #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE)) | 413 | #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE)) |
417 | codec->hw_write = (hw_write_t)i2c_master_send; | 414 | codec->hw_write = (hw_write_t)i2c_master_send; |
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt index 6f5a498608b..85c5f026930 100644 --- a/tools/perf/Documentation/perf-annotate.txt +++ b/tools/perf/Documentation/perf-annotate.txt | |||
@@ -66,6 +66,12 @@ OPTIONS | |||
66 | used. This interface starts by centering on the line with more | 66 | used. This interface starts by centering on the line with more |
67 | samples, TAB/UNTAB cycles through the lines with more samples. | 67 | samples, TAB/UNTAB cycles through the lines with more samples. |
68 | 68 | ||
69 | -c:: | ||
70 | --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can | ||
71 | be provided as a comma-separated list with no space: 0,1. Ranges of | ||
72 | CPUs are specified with -: 0-2. Default is to report samples on all | ||
73 | CPUs. | ||
74 | |||
69 | SEE ALSO | 75 | SEE ALSO |
70 | -------- | 76 | -------- |
71 | linkperf:perf-record[1], linkperf:perf-report[1] | 77 | linkperf:perf-record[1], linkperf:perf-report[1] |
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 8ba03d6e539..04253c07d19 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt | |||
@@ -80,15 +80,24 @@ OPTIONS | |||
80 | --dump-raw-trace:: | 80 | --dump-raw-trace:: |
81 | Dump raw trace in ASCII. | 81 | Dump raw trace in ASCII. |
82 | 82 | ||
83 | -g [type,min]:: | 83 | -g [type,min,order]:: |
84 | --call-graph:: | 84 | --call-graph:: |
85 | Display call chains using type and min percent threshold. | 85 | Display call chains using type, min percent threshold and order. |
86 | type can be either: | 86 | type can be either: |
87 | - flat: single column, linear exposure of call chains. | 87 | - flat: single column, linear exposure of call chains. |
88 | - graph: use a graph tree, displaying absolute overhead rates. | 88 | - graph: use a graph tree, displaying absolute overhead rates. |
89 | - fractal: like graph, but displays relative rates. Each branch of | 89 | - fractal: like graph, but displays relative rates. Each branch of |
90 | the tree is considered as a new profiled object. + | 90 | the tree is considered as a new profiled object. + |
91 | Default: fractal,0.5. | 91 | |
92 | order can be either: | ||
93 | - callee: callee based call graph. | ||
94 | - caller: inverted caller based call graph. | ||
95 | |||
96 | Default: fractal,0.5,callee. | ||
97 | |||
98 | -G:: | ||
99 | --inverted:: | ||
100 | alias for inverted caller based call graph. | ||
92 | 101 | ||
93 | --pretty=<key>:: | 102 | --pretty=<key>:: |
94 | Pretty printing style. key: normal, raw | 103 | Pretty printing style. key: normal, raw |
@@ -119,6 +128,12 @@ OPTIONS | |||
119 | --symfs=<directory>:: | 128 | --symfs=<directory>:: |
120 | Look for files with symbols relative to this directory. | 129 | Look for files with symbols relative to this directory. |
121 | 130 | ||
131 | -c:: | ||
132 | --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can | ||
133 | be provided as a comma-separated list with no space: 0,1. Ranges of | ||
134 | CPUs are specified with -: 0-2. Default is to report samples on all | ||
135 | CPUs. | ||
136 | |||
122 | SEE ALSO | 137 | SEE ALSO |
123 | -------- | 138 | -------- |
124 | linkperf:perf-stat[1] | 139 | linkperf:perf-stat[1] |
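The new order parameter does not change what is recorded, only the direction in which the stored chain is walked: perf records the leaf (callee) first, so caller order simply flips the index, as the session.c hunk later in this series does with chain->ips[chain->nr - i - 1]. A hedged standalone sketch of that indexing (the enum names match the new callchain.h values; the chain layout is simplified to a bare array):

#include <stdio.h>

enum chain_order { ORDER_CALLER, ORDER_CALLEE };

/* Print a recorded callchain either callee-first (as sampled) or
 * caller-first (inverted), by flipping the index. */
static void print_chain(const unsigned long *ips, int nr,
			enum chain_order order)
{
	for (int i = 0; i < nr; i++) {
		unsigned long ip = (order == ORDER_CALLEE) ?
			ips[i] : ips[nr - i - 1];
		printf("  %#lx\n", ip);
	}
}

int main(void)
{
	/* leaf first, as perf records it */
	unsigned long ips[] = { 0x4005d0, 0x4006a0, 0x400770 };

	puts("callee order:");
	print_chain(ips, 3, ORDER_CALLEE);
	puts("caller order:");
	print_chain(ips, 3, ORDER_CALLER);
	return 0;
}

With -G/--inverted, cmd_report merely presets callchain_param.order to ORDER_CALLER before processing, which is why the alias takes no argument.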
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 86c87e214b1..db017867d9e 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt | |||
@@ -115,10 +115,10 @@ OPTIONS | |||
115 | -f:: | 115 | -f:: |
116 | --fields:: | 116 | --fields:: |
117 | Comma separated list of fields to print. Options are: | 117 | Comma separated list of fields to print. Options are: |
118 | comm, tid, pid, time, cpu, event, trace, sym. Field | 118 | comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr. |
119 | list can be prepended with the type, trace, sw or hw, | 119 | Field list can be prepended with the type, trace, sw or hw, |
120 | to indicate to which event type the field list applies. | 120 | to indicate to which event type the field list applies. |
121 | e.g., -f sw:comm,tid,time,sym and -f trace:time,cpu,trace | 121 | e.g., -f sw:comm,tid,time,ip,sym and -f trace:time,cpu,trace |
122 | 122 | ||
123 | perf script -f <fields> | 123 | perf script -f <fields> |
124 | 124 | ||
@@ -132,17 +132,17 @@ OPTIONS | |||
132 | The arguments are processed in the order received. A later usage can | 132 | The arguments are processed in the order received. A later usage can |
133 | reset a prior request. e.g.: | 133 | reset a prior request. e.g.: |
134 | 134 | ||
135 | -f trace: -f comm,tid,time,sym | 135 | -f trace: -f comm,tid,time,ip,sym |
136 | 136 | ||
137 | The first -f suppresses trace events (field list is ""), but then the | 137 | The first -f suppresses trace events (field list is ""), but then the |
138 | second invocation sets the fields to comm,tid,time,sym. In this case a | 138 | second invocation sets the fields to comm,tid,time,ip,sym. In this case a |
139 | warning is given to the user: | 139 | warning is given to the user: |
140 | 140 | ||
141 | "Overriding previous field request for all events." | 141 | "Overriding previous field request for all events." |
142 | 142 | ||
143 | Alternatively, consider the order: | 143 | Alternatively, consider the order: |
144 | 144 | ||
145 | -f comm,tid,time,sym -f trace: | 145 | -f comm,tid,time,ip,sym -f trace: |
146 | 146 | ||
147 | The first -f sets the fields for all events and the second -f | 147 | The first -f sets the fields for all events and the second -f |
148 | suppresses trace events. The user is given a warning message about | 148 | suppresses trace events. The user is given a warning message about |
@@ -182,6 +182,12 @@ OPTIONS | |||
182 | --hide-call-graph:: | 182 | --hide-call-graph:: |
183 | When printing symbols do not display call chain. | 183 | When printing symbols do not display call chain. |
184 | 184 | ||
185 | -c:: | ||
186 | --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can | ||
187 | be provided as a comma-separated list with no space: 0,1. Ranges of | ||
188 | CPUs are specified with -: 0-2. Default is to report samples on all | ||
189 | CPUs. | ||
190 | |||
185 | SEE ALSO | 191 | SEE ALSO |
186 | -------- | 192 | -------- |
187 | linkperf:perf-record[1], linkperf:perf-script-perl[1], | 193 | linkperf:perf-record[1], linkperf:perf-script-perl[1], |
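Internally, perf script keeps the selected fields as a per-event-type bitmask, and the ip/dso/addr additions are new bits plus validation that sym or dso only make sense when an address-bearing field is also selected. A standalone sketch of the scheme, with flag names echoing the builtin-script.c hunk below and the parser reduced to a lookup loop:

#include <stdio.h>
#include <string.h>

enum output_field {
	OUTPUT_COMM = 1U << 0,
	OUTPUT_IP   = 1U << 7,
	OUTPUT_SYM  = 1U << 8,
	OUTPUT_DSO  = 1U << 9,
	OUTPUT_ADDR = 1U << 10,
};

static const struct { const char *str; unsigned field; } options[] = {
	{ "comm", OUTPUT_COMM },
	{ "ip",   OUTPUT_IP },
	{ "sym",  OUTPUT_SYM },
	{ "dso",  OUTPUT_DSO },
	{ "addr", OUTPUT_ADDR },
};

/* Turn "comm,ip,sym" into a bitmask; unknown names are silently
 * ignored here, where the real parser reports an error. */
static unsigned parse_fields(char *list)
{
	unsigned mask = 0;

	for (char *tok = strtok(list, ","); tok; tok = strtok(NULL, ","))
		for (size_t i = 0; i < sizeof(options) / sizeof(options[0]); i++)
			if (!strcmp(tok, options[i].str))
				mask |= options[i].field;
	return mask;
}

int main(void)
{
	char list[] = "comm,ip,sym";
	unsigned mask = parse_fields(list);

	/* sym without ip or addr is rejected by the patch; mirror that */
	if ((mask & OUTPUT_SYM) && !(mask & (OUTPUT_IP | OUTPUT_ADDR)))
		fprintf(stderr, "sym needs ip or addr\n");
	else
		printf("field mask: %#x\n", mask);
	return 0;
}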
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 7b139e1e7e8..555aefd7fe0 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include "util/hist.h" | 28 | #include "util/hist.h" |
29 | #include "util/session.h" | 29 | #include "util/session.h" |
30 | 30 | ||
31 | #include <linux/bitmap.h> | ||
32 | |||
31 | static char const *input_name = "perf.data"; | 33 | static char const *input_name = "perf.data"; |
32 | 34 | ||
33 | static bool force, use_tui, use_stdio; | 35 | static bool force, use_tui, use_stdio; |
@@ -38,6 +40,9 @@ static bool print_line; | |||
38 | 40 | ||
39 | static const char *sym_hist_filter; | 41 | static const char *sym_hist_filter; |
40 | 42 | ||
43 | static const char *cpu_list; | ||
44 | static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); | ||
45 | |||
41 | static int perf_evlist__add_sample(struct perf_evlist *evlist, | 46 | static int perf_evlist__add_sample(struct perf_evlist *evlist, |
42 | struct perf_sample *sample, | 47 | struct perf_sample *sample, |
43 | struct perf_evsel *evsel, | 48 | struct perf_evsel *evsel, |
@@ -90,6 +95,9 @@ static int process_sample_event(union perf_event *event, | |||
90 | return -1; | 95 | return -1; |
91 | } | 96 | } |
92 | 97 | ||
98 | if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) | ||
99 | return 0; | ||
100 | |||
93 | if (!al.filtered && | 101 | if (!al.filtered && |
94 | perf_evlist__add_sample(session->evlist, sample, evsel, &al)) { | 102 | perf_evlist__add_sample(session->evlist, sample, evsel, &al)) { |
95 | pr_warning("problem incrementing symbol count, " | 103 | pr_warning("problem incrementing symbol count, " |
@@ -177,6 +185,12 @@ static int __cmd_annotate(void) | |||
177 | if (session == NULL) | 185 | if (session == NULL) |
178 | return -ENOMEM; | 186 | return -ENOMEM; |
179 | 187 | ||
188 | if (cpu_list) { | ||
189 | ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap); | ||
190 | if (ret) | ||
191 | goto out_delete; | ||
192 | } | ||
193 | |||
180 | ret = perf_session__process_events(session, &event_ops); | 194 | ret = perf_session__process_events(session, &event_ops); |
181 | if (ret) | 195 | if (ret) |
182 | goto out_delete; | 196 | goto out_delete; |
@@ -252,6 +266,7 @@ static const struct option options[] = { | |||
252 | "print matching source lines (may be slow)"), | 266 | "print matching source lines (may be slow)"), |
253 | OPT_BOOLEAN('P', "full-paths", &full_paths, | 267 | OPT_BOOLEAN('P', "full-paths", &full_paths, |
254 | "Don't shorten the displayed pathnames"), | 268 | "Don't shorten the displayed pathnames"), |
269 | OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), | ||
255 | OPT_END() | 270 | OPT_END() |
256 | }; | 271 | }; |
257 | 272 | ||
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 287a173523a..f854efda768 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include "util/sort.h" | 33 | #include "util/sort.h" |
34 | #include "util/hist.h" | 34 | #include "util/hist.h" |
35 | 35 | ||
36 | #include <linux/bitmap.h> | ||
37 | |||
36 | static char const *input_name = "perf.data"; | 38 | static char const *input_name = "perf.data"; |
37 | 39 | ||
38 | static bool force, use_tui, use_stdio; | 40 | static bool force, use_tui, use_stdio; |
@@ -45,9 +47,13 @@ static struct perf_read_values show_threads_values; | |||
45 | static const char default_pretty_printing_style[] = "normal"; | 47 | static const char default_pretty_printing_style[] = "normal"; |
46 | static const char *pretty_printing_style = default_pretty_printing_style; | 48 | static const char *pretty_printing_style = default_pretty_printing_style; |
47 | 49 | ||
48 | static char callchain_default_opt[] = "fractal,0.5"; | 50 | static char callchain_default_opt[] = "fractal,0.5,callee"; |
51 | static bool inverted_callchain; | ||
49 | static symbol_filter_t annotate_init; | 52 | static symbol_filter_t annotate_init; |
50 | 53 | ||
54 | static const char *cpu_list; | ||
55 | static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); | ||
56 | |||
51 | static int perf_session__add_hist_entry(struct perf_session *session, | 57 | static int perf_session__add_hist_entry(struct perf_session *session, |
52 | struct addr_location *al, | 58 | struct addr_location *al, |
53 | struct perf_sample *sample, | 59 | struct perf_sample *sample, |
@@ -116,6 +122,9 @@ static int process_sample_event(union perf_event *event, | |||
116 | if (al.filtered || (hide_unresolved && al.sym == NULL)) | 122 | if (al.filtered || (hide_unresolved && al.sym == NULL)) |
117 | return 0; | 123 | return 0; |
118 | 124 | ||
125 | if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) | ||
126 | return 0; | ||
127 | |||
119 | if (al.map != NULL) | 128 | if (al.map != NULL) |
120 | al.map->dso->hit = 1; | 129 | al.map->dso->hit = 1; |
121 | 130 | ||
@@ -262,6 +271,12 @@ static int __cmd_report(void) | |||
262 | if (session == NULL) | 271 | if (session == NULL) |
263 | return -ENOMEM; | 272 | return -ENOMEM; |
264 | 273 | ||
274 | if (cpu_list) { | ||
275 | ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap); | ||
276 | if (ret) | ||
277 | goto out_delete; | ||
278 | } | ||
279 | |||
265 | if (show_threads) | 280 | if (show_threads) |
266 | perf_read_values_init(&show_threads_values); | 281 | perf_read_values_init(&show_threads_values); |
267 | 282 | ||
@@ -386,13 +401,29 @@ parse_callchain_opt(const struct option *opt __used, const char *arg, | |||
386 | if (!tok) | 401 | if (!tok) |
387 | goto setup; | 402 | goto setup; |
388 | 403 | ||
389 | tok2 = strtok(NULL, ","); | ||
390 | callchain_param.min_percent = strtod(tok, &endptr); | 404 | callchain_param.min_percent = strtod(tok, &endptr); |
391 | if (tok == endptr) | 405 | if (tok == endptr) |
392 | return -1; | 406 | return -1; |
393 | 407 | ||
394 | if (tok2) | 408 | /* get the print limit */ |
409 | tok2 = strtok(NULL, ","); | ||
410 | if (!tok2) | ||
411 | goto setup; | ||
412 | |||
413 | if (tok2[0] != 'c') { | ||
395 | callchain_param.print_limit = strtod(tok2, &endptr); | 414 | callchain_param.print_limit = strtod(tok2, &endptr); |
415 | tok2 = strtok(NULL, ","); | ||
416 | if (!tok2) | ||
417 | goto setup; | ||
418 | } | ||
419 | |||
420 | /* get the call chain order */ | ||
421 | if (!strcmp(tok2, "caller")) | ||
422 | callchain_param.order = ORDER_CALLER; | ||
423 | else if (!strcmp(tok2, "callee")) | ||
424 | callchain_param.order = ORDER_CALLEE; | ||
425 | else | ||
426 | return -1; | ||
396 | setup: | 427 | setup: |
397 | if (callchain_register_param(&callchain_param) < 0) { | 428 | if (callchain_register_param(&callchain_param) < 0) { |
398 | fprintf(stderr, "Can't register callchain params\n"); | 429 | fprintf(stderr, "Can't register callchain params\n"); |
@@ -436,9 +467,10 @@ static const struct option options[] = { | |||
436 | "regex filter to identify parent, see: '--sort parent'"), | 467 | "regex filter to identify parent, see: '--sort parent'"), |
437 | OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, | 468 | OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, |
438 | "Only display entries with parent-match"), | 469 | "Only display entries with parent-match"), |
439 | OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent", | 470 | OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent,call_order", |
440 | "Display callchains using output_type (graph, flat, fractal, or none) and min percent threshold. " | 471 | "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. " |
441 | "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt), | 472 | "Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt), |
473 | OPT_BOOLEAN('G', "inverted", &inverted_callchain, "alias for inverted call graph"), | ||
442 | OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", | 474 | OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", |
443 | "only consider symbols in these dsos"), | 475 | "only consider symbols in these dsos"), |
444 | OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", | 476 | OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", |
@@ -455,6 +487,7 @@ static const struct option options[] = { | |||
455 | "Only display entries resolved to a symbol"), | 487 | "Only display entries resolved to a symbol"), |
456 | OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", | 488 | OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", |
457 | "Look for files with symbols relative to this directory"), | 489 | "Look for files with symbols relative to this directory"), |
490 | OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), | ||
458 | OPT_END() | 491 | OPT_END() |
459 | }; | 492 | }; |
460 | 493 | ||
@@ -467,6 +500,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) | |||
467 | else if (use_tui) | 500 | else if (use_tui) |
468 | use_browser = 1; | 501 | use_browser = 1; |
469 | 502 | ||
503 | if (inverted_callchain) | ||
504 | callchain_param.order = ORDER_CALLER; | ||
505 | |||
470 | if (strcmp(input_name, "-") != 0) | 506 | if (strcmp(input_name, "-") != 0) |
471 | setup_browser(true); | 507 | setup_browser(true); |
472 | else | 508 | else |
@@ -504,7 +540,14 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) | |||
504 | if (parent_pattern != default_parent_pattern) { | 540 | if (parent_pattern != default_parent_pattern) { |
505 | if (sort_dimension__add("parent") < 0) | 541 | if (sort_dimension__add("parent") < 0) |
506 | return -1; | 542 | return -1; |
507 | sort_parent.elide = 1; | 543 | |
544 | /* | ||
545 | * Only show the parent fields if we explicitly | ||
546 | * sort that way. If we only use parent machinery | ||
547 | * for filtering, we don't want it. | ||
548 | */ | ||
549 | if (!strstr(sort_order, "parent")) | ||
550 | sort_parent.elide = 1; | ||
508 | } else | 551 | } else |
509 | symbol_conf.exclude_other = false; | 552 | symbol_conf.exclude_other = false; |
510 | 553 | ||
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 22747de7234..09024ec2ab2 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include "util/util.h" | 13 | #include "util/util.h" |
14 | #include "util/evlist.h" | 14 | #include "util/evlist.h" |
15 | #include "util/evsel.h" | 15 | #include "util/evsel.h" |
16 | #include <linux/bitmap.h> | ||
16 | 17 | ||
17 | static char const *script_name; | 18 | static char const *script_name; |
18 | static char const *generate_script_lang; | 19 | static char const *generate_script_lang; |
@@ -21,6 +22,8 @@ static u64 last_timestamp; | |||
21 | static u64 nr_unordered; | 22 | static u64 nr_unordered; |
22 | extern const struct option record_options[]; | 23 | extern const struct option record_options[]; |
23 | static bool no_callchain; | 24 | static bool no_callchain; |
25 | static const char *cpu_list; | ||
26 | static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); | ||
24 | 27 | ||
25 | enum perf_output_field { | 28 | enum perf_output_field { |
26 | PERF_OUTPUT_COMM = 1U << 0, | 29 | PERF_OUTPUT_COMM = 1U << 0, |
@@ -30,7 +33,10 @@ enum perf_output_field { | |||
30 | PERF_OUTPUT_CPU = 1U << 4, | 33 | PERF_OUTPUT_CPU = 1U << 4, |
31 | PERF_OUTPUT_EVNAME = 1U << 5, | 34 | PERF_OUTPUT_EVNAME = 1U << 5, |
32 | PERF_OUTPUT_TRACE = 1U << 6, | 35 | PERF_OUTPUT_TRACE = 1U << 6, |
33 | PERF_OUTPUT_SYM = 1U << 7, | 36 | PERF_OUTPUT_IP = 1U << 7, |
37 | PERF_OUTPUT_SYM = 1U << 8, | ||
38 | PERF_OUTPUT_DSO = 1U << 9, | ||
39 | PERF_OUTPUT_ADDR = 1U << 10, | ||
34 | }; | 40 | }; |
35 | 41 | ||
36 | struct output_option { | 42 | struct output_option { |
@@ -44,7 +50,10 @@ struct output_option { | |||
44 | {.str = "cpu", .field = PERF_OUTPUT_CPU}, | 50 | {.str = "cpu", .field = PERF_OUTPUT_CPU}, |
45 | {.str = "event", .field = PERF_OUTPUT_EVNAME}, | 51 | {.str = "event", .field = PERF_OUTPUT_EVNAME}, |
46 | {.str = "trace", .field = PERF_OUTPUT_TRACE}, | 52 | {.str = "trace", .field = PERF_OUTPUT_TRACE}, |
53 | {.str = "ip", .field = PERF_OUTPUT_IP}, | ||
47 | {.str = "sym", .field = PERF_OUTPUT_SYM}, | 54 | {.str = "sym", .field = PERF_OUTPUT_SYM}, |
55 | {.str = "dso", .field = PERF_OUTPUT_DSO}, | ||
56 | {.str = "addr", .field = PERF_OUTPUT_ADDR}, | ||
48 | }; | 57 | }; |
49 | 58 | ||
50 | /* default set to maintain compatibility with current format */ | 59 | /* default set to maintain compatibility with current format */ |
@@ -60,7 +69,8 @@ static struct { | |||
60 | 69 | ||
61 | .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | | 70 | .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | |
62 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | | 71 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | |
63 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, | 72 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | |
73 | PERF_OUTPUT_SYM | PERF_OUTPUT_DSO, | ||
64 | 74 | ||
65 | .invalid_fields = PERF_OUTPUT_TRACE, | 75 | .invalid_fields = PERF_OUTPUT_TRACE, |
66 | }, | 76 | }, |
@@ -70,7 +80,8 @@ static struct { | |||
70 | 80 | ||
71 | .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | | 81 | .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | |
72 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | | 82 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | |
73 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, | 83 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | |
84 | PERF_OUTPUT_SYM | PERF_OUTPUT_DSO, | ||
74 | 85 | ||
75 | .invalid_fields = PERF_OUTPUT_TRACE, | 86 | .invalid_fields = PERF_OUTPUT_TRACE, |
76 | }, | 87 | }, |
@@ -88,7 +99,8 @@ static struct { | |||
88 | 99 | ||
89 | .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | | 100 | .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | |
90 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | | 101 | PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | |
91 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, | 102 | PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | |
103 | PERF_OUTPUT_SYM | PERF_OUTPUT_DSO, | ||
92 | 104 | ||
93 | .invalid_fields = PERF_OUTPUT_TRACE, | 105 | .invalid_fields = PERF_OUTPUT_TRACE, |
94 | }, | 106 | }, |
@@ -157,9 +169,9 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel, | |||
157 | !perf_session__has_traces(session, "record -R")) | 169 | !perf_session__has_traces(session, "record -R")) |
158 | return -EINVAL; | 170 | return -EINVAL; |
159 | 171 | ||
160 | if (PRINT_FIELD(SYM)) { | 172 | if (PRINT_FIELD(IP)) { |
161 | if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP", | 173 | if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP", |
162 | PERF_OUTPUT_SYM)) | 174 | PERF_OUTPUT_IP)) |
163 | return -EINVAL; | 175 | return -EINVAL; |
164 | 176 | ||
165 | if (!no_callchain && | 177 | if (!no_callchain && |
@@ -167,6 +179,24 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel, | |||
167 | symbol_conf.use_callchain = false; | 179 | symbol_conf.use_callchain = false; |
168 | } | 180 | } |
169 | 181 | ||
182 | if (PRINT_FIELD(ADDR) && | ||
183 | perf_event_attr__check_stype(attr, PERF_SAMPLE_ADDR, "ADDR", | ||
184 | PERF_OUTPUT_ADDR)) | ||
185 | return -EINVAL; | ||
186 | |||
187 | if (PRINT_FIELD(SYM) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) { | ||
188 | pr_err("Display of symbols requested but neither sample IP nor " | ||
189 | "sample address\nis selected. Hence, no addresses to convert " | ||
190 | "to symbols.\n"); | ||
191 | return -EINVAL; | ||
192 | } | ||
193 | if (PRINT_FIELD(DSO) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) { | ||
194 | pr_err("Display of DSO requested but neither sample IP nor " | ||
195 | "sample address\nis selected. Hence, no addresses to convert " | ||
196 | "to DSO.\n"); | ||
197 | return -EINVAL; | ||
198 | } | ||
199 | |||
170 | if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) && | 200 | if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) && |
171 | perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID", | 201 | perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID", |
172 | PERF_OUTPUT_TID|PERF_OUTPUT_PID)) | 202 | PERF_OUTPUT_TID|PERF_OUTPUT_PID)) |
@@ -230,7 +260,7 @@ static void print_sample_start(struct perf_sample *sample, | |||
230 | if (PRINT_FIELD(COMM)) { | 260 | if (PRINT_FIELD(COMM)) { |
231 | if (latency_format) | 261 | if (latency_format) |
232 | printf("%8.8s ", thread->comm); | 262 | printf("%8.8s ", thread->comm); |
233 | else if (PRINT_FIELD(SYM) && symbol_conf.use_callchain) | 263 | else if (PRINT_FIELD(IP) && symbol_conf.use_callchain) |
234 | printf("%s ", thread->comm); | 264 | printf("%s ", thread->comm); |
235 | else | 265 | else |
236 | printf("%16s ", thread->comm); | 266 | printf("%16s ", thread->comm); |
@@ -271,6 +301,63 @@ static void print_sample_start(struct perf_sample *sample, | |||
271 | } | 301 | } |
272 | } | 302 | } |
273 | 303 | ||
304 | static bool sample_addr_correlates_sym(struct perf_event_attr *attr) | ||
305 | { | ||
306 | if ((attr->type == PERF_TYPE_SOFTWARE) && | ||
307 | ((attr->config == PERF_COUNT_SW_PAGE_FAULTS) || | ||
308 | (attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN) || | ||
309 | (attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))) | ||
310 | return true; | ||
311 | |||
312 | return false; | ||
313 | } | ||
314 | |||
315 | static void print_sample_addr(union perf_event *event, | ||
316 | struct perf_sample *sample, | ||
317 | struct perf_session *session, | ||
318 | struct thread *thread, | ||
319 | struct perf_event_attr *attr) | ||
320 | { | ||
321 | struct addr_location al; | ||
322 | u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | ||
323 | const char *symname, *dsoname; | ||
324 | |||
325 | printf("%16" PRIx64, sample->addr); | ||
326 | |||
327 | if (!sample_addr_correlates_sym(attr)) | ||
328 | return; | ||
329 | |||
330 | thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, | ||
331 | event->ip.pid, sample->addr, &al); | ||
332 | if (!al.map) | ||
333 | thread__find_addr_map(thread, session, cpumode, MAP__VARIABLE, | ||
334 | event->ip.pid, sample->addr, &al); | ||
335 | |||
336 | al.cpu = sample->cpu; | ||
337 | al.sym = NULL; | ||
338 | |||
339 | if (al.map) | ||
340 | al.sym = map__find_symbol(al.map, al.addr, NULL); | ||
341 | |||
342 | if (PRINT_FIELD(SYM)) { | ||
343 | if (al.sym && al.sym->name) | ||
344 | symname = al.sym->name; | ||
345 | else | ||
346 | symname = ""; | ||
347 | |||
348 | printf(" %16s", symname); | ||
349 | } | ||
350 | |||
351 | if (PRINT_FIELD(DSO)) { | ||
352 | if (al.map && al.map->dso && al.map->dso->name) | ||
353 | dsoname = al.map->dso->name; | ||
354 | else | ||
355 | dsoname = ""; | ||
356 | |||
357 | printf(" (%s)", dsoname); | ||
358 | } | ||
359 | } | ||
360 | |||
274 | static void process_event(union perf_event *event __unused, | 361 | static void process_event(union perf_event *event __unused, |
275 | struct perf_sample *sample, | 362 | struct perf_sample *sample, |
276 | struct perf_evsel *evsel, | 363 | struct perf_evsel *evsel, |
@@ -288,12 +375,16 @@ static void process_event(union perf_event *event __unused, | |||
288 | print_trace_event(sample->cpu, sample->raw_data, | 375 | print_trace_event(sample->cpu, sample->raw_data, |
289 | sample->raw_size); | 376 | sample->raw_size); |
290 | 377 | ||
291 | if (PRINT_FIELD(SYM)) { | 378 | if (PRINT_FIELD(ADDR)) |
379 | print_sample_addr(event, sample, session, thread, attr); | ||
380 | |||
381 | if (PRINT_FIELD(IP)) { | ||
292 | if (!symbol_conf.use_callchain) | 382 | if (!symbol_conf.use_callchain) |
293 | printf(" "); | 383 | printf(" "); |
294 | else | 384 | else |
295 | printf("\n"); | 385 | printf("\n"); |
296 | perf_session__print_symbols(event, sample, session); | 386 | perf_session__print_ip(event, sample, session, |
387 | PRINT_FIELD(SYM), PRINT_FIELD(DSO)); | ||
297 | } | 388 | } |
298 | 389 | ||
299 | printf("\n"); | 390 | printf("\n"); |
@@ -365,6 +456,10 @@ static int process_sample_event(union perf_event *event, | |||
365 | last_timestamp = sample->time; | 456 | last_timestamp = sample->time; |
366 | return 0; | 457 | return 0; |
367 | } | 458 | } |
459 | |||
460 | if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) | ||
461 | return 0; | ||
462 | |||
368 | scripting_ops->process_event(event, sample, evsel, session, thread); | 463 | scripting_ops->process_event(event, sample, evsel, session, thread); |
369 | 464 | ||
370 | session->hists.stats.total_period += sample->period; | 465 | session->hists.stats.total_period += sample->period; |
@@ -985,8 +1080,9 @@ static const struct option options[] = { | |||
985 | OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", | 1080 | OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", |
986 | "Look for files with symbols relative to this directory"), | 1081 | "Look for files with symbols relative to this directory"), |
987 | OPT_CALLBACK('f', "fields", NULL, "str", | 1082 | OPT_CALLBACK('f', "fields", NULL, "str", |
988 | "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,sym", | 1083 | "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr", |
989 | parse_output_fields), | 1084 | parse_output_fields), |
1085 | OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), | ||
990 | 1086 | ||
991 | OPT_END() | 1087 | OPT_END() |
992 | }; | 1088 | }; |
@@ -1167,6 +1263,11 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) | |||
1167 | if (session == NULL) | 1263 | if (session == NULL) |
1168 | return -ENOMEM; | 1264 | return -ENOMEM; |
1169 | 1265 | ||
1266 | if (cpu_list) { | ||
1267 | if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap)) | ||
1268 | return -1; | ||
1269 | } | ||
1270 | |||
1170 | if (!no_callchain) | 1271 | if (!no_callchain) |
1171 | symbol_conf.use_callchain = true; | 1272 | symbol_conf.use_callchain = true; |
1172 | else | 1273 | else |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index a9f06715e44..1d08c8084cc 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -61,6 +61,8 @@ | |||
61 | #include <locale.h> | 61 | #include <locale.h> |
62 | 62 | ||
63 | #define DEFAULT_SEPARATOR " " | 63 | #define DEFAULT_SEPARATOR " " |
64 | #define CNTR_NOT_SUPPORTED "<not supported>" | ||
65 | #define CNTR_NOT_COUNTED "<not counted>" | ||
64 | 66 | ||
65 | static struct perf_event_attr default_attrs[] = { | 67 | static struct perf_event_attr default_attrs[] = { |
66 | 68 | ||
@@ -448,6 +450,7 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
448 | if (verbose) | 450 | if (verbose) |
449 | ui__warning("%s event is not supported by the kernel.\n", | 451 | ui__warning("%s event is not supported by the kernel.\n", |
450 | event_name(counter)); | 452 | event_name(counter)); |
453 | counter->supported = false; | ||
451 | continue; | 454 | continue; |
452 | } | 455 | } |
453 | 456 | ||
@@ -466,6 +469,7 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
466 | die("Not all events could be opened.\n"); | 469 | die("Not all events could be opened.\n"); |
467 | return -1; | 470 | return -1; |
468 | } | 471 | } |
472 | counter->supported = true; | ||
469 | } | 473 | } |
470 | 474 | ||
471 | if (perf_evlist__set_filters(evsel_list)) { | 475 | if (perf_evlist__set_filters(evsel_list)) { |
@@ -513,7 +517,10 @@ static void print_noise_pct(double total, double avg) | |||
513 | if (avg) | 517 | if (avg) |
514 | pct = 100.0*total/avg; | 518 | pct = 100.0*total/avg; |
515 | 519 | ||
516 | fprintf(stderr, " ( +-%6.2f%% )", pct); | 520 | if (csv_output) |
521 | fprintf(stderr, "%s%.2f%%", csv_sep, pct); | ||
522 | else | ||
523 | fprintf(stderr, " ( +-%6.2f%% )", pct); | ||
517 | } | 524 | } |
518 | 525 | ||
519 | static void print_noise(struct perf_evsel *evsel, double avg) | 526 | static void print_noise(struct perf_evsel *evsel, double avg) |
@@ -861,7 +868,7 @@ static void print_counter_aggr(struct perf_evsel *counter) | |||
861 | if (scaled == -1) { | 868 | if (scaled == -1) { |
862 | fprintf(stderr, "%*s%s%*s", | 869 | fprintf(stderr, "%*s%s%*s", |
863 | csv_output ? 0 : 18, | 870 | csv_output ? 0 : 18, |
864 | "<not counted>", | 871 | counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, |
865 | csv_sep, | 872 | csv_sep, |
866 | csv_output ? 0 : -24, | 873 | csv_output ? 0 : -24, |
867 | event_name(counter)); | 874 | event_name(counter)); |
@@ -878,13 +885,13 @@ static void print_counter_aggr(struct perf_evsel *counter) | |||
878 | else | 885 | else |
879 | abs_printout(-1, counter, avg); | 886 | abs_printout(-1, counter, avg); |
880 | 887 | ||
888 | print_noise(counter, avg); | ||
889 | |||
881 | if (csv_output) { | 890 | if (csv_output) { |
882 | fputc('\n', stderr); | 891 | fputc('\n', stderr); |
883 | return; | 892 | return; |
884 | } | 893 | } |
885 | 894 | ||
886 | print_noise(counter, avg); | ||
887 | |||
888 | if (scaled) { | 895 | if (scaled) { |
889 | double avg_enabled, avg_running; | 896 | double avg_enabled, avg_running; |
890 | 897 | ||
@@ -914,7 +921,8 @@ static void print_counter(struct perf_evsel *counter) | |||
914 | csv_output ? 0 : -4, | 921 | csv_output ? 0 : -4, |
915 | evsel_list->cpus->map[cpu], csv_sep, | 922 | evsel_list->cpus->map[cpu], csv_sep, |
916 | csv_output ? 0 : 18, | 923 | csv_output ? 0 : 18, |
917 | "<not counted>", csv_sep, | 924 | counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, |
925 | csv_sep, | ||
918 | csv_output ? 0 : -24, | 926 | csv_output ? 0 : -24, |
919 | event_name(counter)); | 927 | event_name(counter)); |
920 | 928 | ||
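The stat changes hinge on recording, at open time, whether each event was accepted by the kernel; printing then distinguishes an event the kernel refused (<not supported>) from one that simply never ran (<not counted>). A minimal sketch of that decision, with the counter structure reduced to the fields the hunk touches (the ran-zero-time flag stands in for perf's scaled == -1 condition):

#include <stdbool.h>
#include <stdio.h>

#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED   "<not counted>"

struct counter {
	const char *name;
	bool supported;      /* set when perf_event_open() succeeded */
	long long count;
	bool ran_zero_time;  /* e.g. multiplexed out for the whole run */
};

static void print_counter(const struct counter *c)
{
	if (c->ran_zero_time || !c->supported)
		printf("%18s  %s\n",
		       c->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
		       c->name);
	else
		printf("%18lld  %s\n", c->count, c->name);
}

int main(void)
{
	struct counter a = { "cycles", true, 123456, false };
	struct counter b = { "stalled-cycles", false, 0, true };

	print_counter(&a);
	print_counter(&b);
	return 0;
}

Moving print_noise() ahead of the csv_output early return, as the second stat hunk does, is what lets the new CSV branch of print_noise_pct() actually emit the noise percentage in CSV mode.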
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 1a79df9f739..9b4ff16cac9 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h | |||
@@ -14,6 +14,11 @@ enum chain_mode { | |||
14 | CHAIN_GRAPH_REL | 14 | CHAIN_GRAPH_REL |
15 | }; | 15 | }; |
16 | 16 | ||
17 | enum chain_order { | ||
18 | ORDER_CALLER, | ||
19 | ORDER_CALLEE | ||
20 | }; | ||
21 | |||
17 | struct callchain_node { | 22 | struct callchain_node { |
18 | struct callchain_node *parent; | 23 | struct callchain_node *parent; |
19 | struct list_head siblings; | 24 | struct list_head siblings; |
@@ -41,6 +46,7 @@ struct callchain_param { | |||
41 | u32 print_limit; | 46 | u32 print_limit; |
42 | double min_percent; | 47 | double min_percent; |
43 | sort_chain_func_t sort; | 48 | sort_chain_func_t sort; |
49 | enum chain_order order; | ||
44 | }; | 50 | }; |
45 | 51 | ||
46 | struct callchain_list { | 52 | struct callchain_list { |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 0239eb87b23..a03a36b7908 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -377,6 +377,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, | |||
377 | array++; | 377 | array++; |
378 | } | 378 | } |
379 | 379 | ||
380 | data->addr = 0; | ||
380 | if (type & PERF_SAMPLE_ADDR) { | 381 | if (type & PERF_SAMPLE_ADDR) { |
381 | data->addr = *array; | 382 | data->addr = *array; |
382 | array++; | 383 | array++; |
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 7e9366e4490..e9a31554e26 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h | |||
@@ -61,6 +61,7 @@ struct perf_evsel { | |||
61 | off_t id_offset; | 61 | off_t id_offset; |
62 | }; | 62 | }; |
63 | struct cgroup_sel *cgrp; | 63 | struct cgroup_sel *cgrp; |
64 | bool supported; | ||
64 | }; | 65 | }; |
65 | 66 | ||
66 | struct cpu_map; | 67 | struct cpu_map; |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 627a02e03c5..677e1da6bb3 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -14,7 +14,8 @@ enum hist_filter { | |||
14 | 14 | ||
15 | struct callchain_param callchain_param = { | 15 | struct callchain_param callchain_param = { |
16 | .mode = CHAIN_GRAPH_REL, | 16 | .mode = CHAIN_GRAPH_REL, |
17 | .min_percent = 0.5 | 17 | .min_percent = 0.5, |
18 | .order = ORDER_CALLEE | ||
18 | }; | 19 | }; |
19 | 20 | ||
20 | u16 hists__col_len(struct hists *self, enum hist_column col) | 21 | u16 hists__col_len(struct hists *self, enum hist_column col) |
@@ -846,6 +847,9 @@ print_entries: | |||
846 | for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { | 847 | for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { |
847 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); | 848 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); |
848 | 849 | ||
850 | if (h->filtered) | ||
851 | continue; | ||
852 | |||
849 | if (show_displacement) { | 853 | if (show_displacement) { |
850 | if (h->pair != NULL) | 854 | if (h->pair != NULL) |
851 | displacement = ((long)h->pair->position - | 855 | displacement = ((long)h->pair->position - |
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index a9ac0504aab..8e0b5a39d8a 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c | |||
@@ -247,7 +247,7 @@ struct pyrf_cpu_map { | |||
247 | static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus, | 247 | static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus, |
248 | PyObject *args, PyObject *kwargs) | 248 | PyObject *args, PyObject *kwargs) |
249 | { | 249 | { |
250 | static char *kwlist[] = { "cpustr", NULL, NULL, }; | 250 | static char *kwlist[] = { "cpustr", NULL }; |
251 | char *cpustr = NULL; | 251 | char *cpustr = NULL; |
252 | 252 | ||
253 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s", | 253 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s", |
@@ -316,7 +316,7 @@ struct pyrf_thread_map { | |||
316 | static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, | 316 | static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, |
317 | PyObject *args, PyObject *kwargs) | 317 | PyObject *args, PyObject *kwargs) |
318 | { | 318 | { |
319 | static char *kwlist[] = { "pid", "tid", NULL, NULL, }; | 319 | static char *kwlist[] = { "pid", "tid", NULL }; |
320 | int pid = -1, tid = -1; | 320 | int pid = -1, tid = -1; |
321 | 321 | ||
322 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", | 322 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", |
@@ -418,7 +418,9 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel, | |||
418 | "wakeup_events", | 418 | "wakeup_events", |
419 | "bp_type", | 419 | "bp_type", |
420 | "bp_addr", | 420 | "bp_addr", |
421 | "bp_len", NULL, NULL, }; | 421 | "bp_len", |
422 | NULL | ||
423 | }; | ||
422 | u64 sample_period = 0; | 424 | u64 sample_period = 0; |
423 | u32 disabled = 0, | 425 | u32 disabled = 0, |
424 | inherit = 0, | 426 | inherit = 0, |
@@ -499,7 +501,7 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel, | |||
499 | struct thread_map *threads = NULL; | 501 | struct thread_map *threads = NULL; |
500 | PyObject *pcpus = NULL, *pthreads = NULL; | 502 | PyObject *pcpus = NULL, *pthreads = NULL; |
501 | int group = 0, inherit = 0; | 503 | int group = 0, inherit = 0; |
502 | static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL, NULL}; | 504 | static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL }; |
503 | 505 | ||
504 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, | 506 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, |
505 | &pcpus, &pthreads, &group, &inherit)) | 507 | &pcpus, &pthreads, &group, &inherit)) |
@@ -582,8 +584,7 @@ static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist, | |||
582 | PyObject *args, PyObject *kwargs) | 584 | PyObject *args, PyObject *kwargs) |
583 | { | 585 | { |
584 | struct perf_evlist *evlist = &pevlist->evlist; | 586 | struct perf_evlist *evlist = &pevlist->evlist; |
585 | static char *kwlist[] = {"pages", "overwrite", | 587 | static char *kwlist[] = { "pages", "overwrite", NULL }; |
586 | NULL, NULL}; | ||
587 | int pages = 128, overwrite = false; | 588 | int pages = 128, overwrite = false; |
588 | 589 | ||
589 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist, | 590 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist, |
@@ -603,7 +604,7 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist, | |||
603 | PyObject *args, PyObject *kwargs) | 604 | PyObject *args, PyObject *kwargs) |
604 | { | 605 | { |
605 | struct perf_evlist *evlist = &pevlist->evlist; | 606 | struct perf_evlist *evlist = &pevlist->evlist; |
606 | static char *kwlist[] = {"timeout", NULL, NULL}; | 607 | static char *kwlist[] = { "timeout", NULL }; |
607 | int timeout = -1, n; | 608 | int timeout = -1, n; |
608 | 609 | ||
609 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout)) | 610 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout)) |
@@ -674,7 +675,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, | |||
674 | struct perf_evlist *evlist = &pevlist->evlist; | 675 | struct perf_evlist *evlist = &pevlist->evlist; |
675 | union perf_event *event; | 676 | union perf_event *event; |
676 | int sample_id_all = 1, cpu; | 677 | int sample_id_all = 1, cpu; |
677 | static char *kwlist[] = {"cpu", "sample_id_all", NULL, NULL}; | 678 | static char *kwlist[] = { "cpu", "sample_id_all", NULL }; |
678 | int err; | 679 | int err; |
679 | 680 | ||
680 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist, | 681 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist, |
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index f5a8fbdd3f7..080e5336d89 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include "session.h" | 12 | #include "session.h" |
13 | #include "sort.h" | 13 | #include "sort.h" |
14 | #include "util.h" | 14 | #include "util.h" |
15 | #include "cpumap.h" | ||
15 | 16 | ||
16 | static int perf_session__open(struct perf_session *self, bool force) | 17 | static int perf_session__open(struct perf_session *self, bool force) |
17 | { | 18 | { |
@@ -247,9 +248,14 @@ int perf_session__resolve_callchain(struct perf_session *self, | |||
247 | callchain_cursor_reset(&self->callchain_cursor); | 248 | callchain_cursor_reset(&self->callchain_cursor); |
248 | 249 | ||
249 | for (i = 0; i < chain->nr; i++) { | 250 | for (i = 0; i < chain->nr; i++) { |
250 | u64 ip = chain->ips[i]; | 251 | u64 ip; |
251 | struct addr_location al; | 252 | struct addr_location al; |
252 | 253 | ||
254 | if (callchain_param.order == ORDER_CALLEE) | ||
255 | ip = chain->ips[i]; | ||
256 | else | ||
257 | ip = chain->ips[chain->nr - i - 1]; | ||
258 | |||
253 | if (ip >= PERF_CONTEXT_MAX) { | 259 | if (ip >= PERF_CONTEXT_MAX) { |
254 | switch (ip) { | 260 | switch (ip) { |
255 | case PERF_CONTEXT_HV: | 261 | case PERF_CONTEXT_HV: |
@@ -708,9 +714,9 @@ static void dump_sample(struct perf_session *session, union perf_event *event, | |||
708 | if (!dump_trace) | 714 | if (!dump_trace) |
709 | return; | 715 | return; |
710 | 716 | ||
711 | printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n", | 717 | printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", |
712 | event->header.misc, sample->pid, sample->tid, sample->ip, | 718 | event->header.misc, sample->pid, sample->tid, sample->ip, |
713 | sample->period); | 719 | sample->period, sample->addr); |
714 | 720 | ||
715 | if (session->sample_type & PERF_SAMPLE_CALLCHAIN) | 721 | if (session->sample_type & PERF_SAMPLE_CALLCHAIN) |
716 | callchain__printf(sample); | 722 | callchain__printf(sample); |
@@ -1202,9 +1208,10 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | |||
1202 | return NULL; | 1208 | return NULL; |
1203 | } | 1209 | } |
1204 | 1210 | ||
1205 | void perf_session__print_symbols(union perf_event *event, | 1211 | void perf_session__print_ip(union perf_event *event, |
1206 | struct perf_sample *sample, | 1212 | struct perf_sample *sample, |
1207 | struct perf_session *session) | 1213 | struct perf_session *session, |
1214 | int print_sym, int print_dso) | ||
1208 | { | 1215 | { |
1209 | struct addr_location al; | 1216 | struct addr_location al; |
1210 | const char *symname, *dsoname; | 1217 | const char *symname, *dsoname; |
@@ -1233,32 +1240,83 @@ void perf_session__print_symbols(union perf_event *event, | |||
1233 | if (!node) | 1240 | if (!node) |
1234 | break; | 1241 | break; |
1235 | 1242 | ||
1236 | if (node->sym && node->sym->name) | 1243 | printf("\t%16" PRIx64, node->ip); |
1237 | symname = node->sym->name; | 1244 | if (print_sym) { |
1245 | if (node->sym && node->sym->name) | ||
1246 | symname = node->sym->name; | ||
1247 | else | ||
1248 | symname = ""; | ||
1249 | |||
1250 | printf(" %s", symname); | ||
1251 | } | ||
1252 | if (print_dso) { | ||
1253 | if (node->map && node->map->dso && node->map->dso->name) | ||
1254 | dsoname = node->map->dso->name; | ||
1255 | else | ||
1256 | dsoname = ""; | ||
1257 | |||
1258 | printf(" (%s)", dsoname); | ||
1259 | } | ||
1260 | printf("\n"); | ||
1261 | |||
1262 | callchain_cursor_advance(cursor); | ||
1263 | } | ||
1264 | |||
1265 | } else { | ||
1266 | printf("%16" PRIx64, al.addr); | ||
1267 | if (print_sym) { | ||
1268 | if (al.sym && al.sym->name) | ||
1269 | symname = al.sym->name; | ||
1238 | else | 1270 | else |
1239 | symname = ""; | 1271 | symname = ""; |
1240 | 1272 | ||
1241 | if (node->map && node->map->dso && node->map->dso->name) | 1273 | printf(" %s", symname); |
1242 | dsoname = node->map->dso->name; | 1274 | } |
1275 | |||
1276 | if (print_dso) { | ||
1277 | if (al.map && al.map->dso && al.map->dso->name) | ||
1278 | dsoname = al.map->dso->name; | ||
1243 | else | 1279 | else |
1244 | dsoname = ""; | 1280 | dsoname = ""; |
1245 | 1281 | ||
1246 | printf("\t%16" PRIx64 " %s (%s)\n", node->ip, symname, dsoname); | 1282 | printf(" (%s)", dsoname); |
1283 | } | ||
1284 | } | ||
1285 | } | ||
1286 | |||
1287 | int perf_session__cpu_bitmap(struct perf_session *session, | ||
1288 | const char *cpu_list, unsigned long *cpu_bitmap) | ||
1289 | { | ||
1290 | int i; | ||
1291 | struct cpu_map *map; | ||
1292 | |||
1293 | for (i = 0; i < PERF_TYPE_MAX; ++i) { | ||
1294 | struct perf_evsel *evsel; | ||
1247 | 1295 | ||
1248 | callchain_cursor_advance(cursor); | 1296 | evsel = perf_session__find_first_evtype(session, i); |
1297 | if (!evsel) | ||
1298 | continue; | ||
1299 | |||
1300 | if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) { | ||
1301 | pr_err("File does not contain CPU events. " | ||
1302 | "Remove -c option to proceed.\n"); | ||
1303 | return -1; | ||
1249 | } | 1304 | } |
1305 | } | ||
1250 | 1306 | ||
1251 | } else { | 1307 | map = cpu_map__new(cpu_list); |
1252 | if (al.sym && al.sym->name) | ||
1253 | symname = al.sym->name; | ||
1254 | else | ||
1255 | symname = ""; | ||
1256 | 1308 | ||
1257 | if (al.map && al.map->dso && al.map->dso->name) | 1309 | for (i = 0; i < map->nr; i++) { |
1258 | dsoname = al.map->dso->name; | 1310 | int cpu = map->map[i]; |
1259 | else | 1311 | |
1260 | dsoname = ""; | 1312 | if (cpu >= MAX_NR_CPUS) { |
1313 | pr_err("Requested CPU %d too large. " | ||
1314 | "Consider raising MAX_NR_CPUS\n", cpu); | ||
1315 | return -1; | ||
1316 | } | ||
1261 | 1317 | ||
1262 | printf("%16" PRIx64 " %s (%s)", al.addr, symname, dsoname); | 1318 | set_bit(cpu, cpu_bitmap); |
1263 | } | 1319 | } |
1320 | |||
1321 | return 0; | ||
1264 | } | 1322 | } |
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 66d4e149087..5de754f4b7f 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -167,8 +167,12 @@ static inline int perf_session__parse_sample(struct perf_session *session, | |||
167 | struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | 167 | struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, |
168 | unsigned int type); | 168 | unsigned int type); |
169 | 169 | ||
170 | void perf_session__print_symbols(union perf_event *event, | 170 | void perf_session__print_ip(union perf_event *event, |
171 | struct perf_sample *sample, | 171 | struct perf_sample *sample, |
172 | struct perf_session *session); | 172 | struct perf_session *session, |
173 | int print_sym, int print_dso); | ||
174 | |||
175 | int perf_session__cpu_bitmap(struct perf_session *session, | ||
176 | const char *cpu_list, unsigned long *cpu_bitmap); | ||
173 | 177 | ||
174 | #endif /* __PERF_SESSION_H */ | 178 | #endif /* __PERF_SESSION_H */ |
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index f44fa541d56..401e220566f 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c | |||
@@ -15,95 +15,6 @@ char * field_sep; | |||
15 | 15 | ||
16 | LIST_HEAD(hist_entry__sort_list); | 16 | LIST_HEAD(hist_entry__sort_list); |
17 | 17 | ||
18 | static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, | ||
19 | size_t size, unsigned int width); | ||
20 | static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf, | ||
21 | size_t size, unsigned int width); | ||
22 | static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, | ||
23 | size_t size, unsigned int width); | ||
24 | static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, | ||
25 | size_t size, unsigned int width); | ||
26 | static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf, | ||
27 | size_t size, unsigned int width); | ||
28 | static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf, | ||
29 | size_t size, unsigned int width); | ||
30 | |||
31 | struct sort_entry sort_thread = { | ||
32 | .se_header = "Command: Pid", | ||
33 | .se_cmp = sort__thread_cmp, | ||
34 | .se_snprintf = hist_entry__thread_snprintf, | ||
35 | .se_width_idx = HISTC_THREAD, | ||
36 | }; | ||
37 | |||
38 | struct sort_entry sort_comm = { | ||
39 | .se_header = "Command", | ||
40 | .se_cmp = sort__comm_cmp, | ||
41 | .se_collapse = sort__comm_collapse, | ||
42 | .se_snprintf = hist_entry__comm_snprintf, | ||
43 | .se_width_idx = HISTC_COMM, | ||
44 | }; | ||
45 | |||
46 | struct sort_entry sort_dso = { | ||
47 | .se_header = "Shared Object", | ||
48 | .se_cmp = sort__dso_cmp, | ||
49 | .se_snprintf = hist_entry__dso_snprintf, | ||
50 | .se_width_idx = HISTC_DSO, | ||
51 | }; | ||
52 | |||
53 | struct sort_entry sort_sym = { | ||
54 | .se_header = "Symbol", | ||
55 | .se_cmp = sort__sym_cmp, | ||
56 | .se_snprintf = hist_entry__sym_snprintf, | ||
57 | .se_width_idx = HISTC_SYMBOL, | ||
58 | }; | ||
59 | |||
60 | struct sort_entry sort_parent = { | ||
61 | .se_header = "Parent symbol", | ||
62 | .se_cmp = sort__parent_cmp, | ||
63 | .se_snprintf = hist_entry__parent_snprintf, | ||
64 | .se_width_idx = HISTC_PARENT, | ||
65 | }; | ||
66 | |||
67 | struct sort_entry sort_cpu = { | ||
68 | .se_header = "CPU", | ||
69 | .se_cmp = sort__cpu_cmp, | ||
70 | .se_snprintf = hist_entry__cpu_snprintf, | ||
71 | .se_width_idx = HISTC_CPU, | ||
72 | }; | ||
73 | |||
74 | struct sort_dimension { | ||
75 | const char *name; | ||
76 | struct sort_entry *entry; | ||
77 | int taken; | ||
78 | }; | ||
79 | |||
80 | static struct sort_dimension sort_dimensions[] = { | ||
81 | { .name = "pid", .entry = &sort_thread, }, | ||
82 | { .name = "comm", .entry = &sort_comm, }, | ||
83 | { .name = "dso", .entry = &sort_dso, }, | ||
84 | { .name = "symbol", .entry = &sort_sym, }, | ||
85 | { .name = "parent", .entry = &sort_parent, }, | ||
86 | { .name = "cpu", .entry = &sort_cpu, }, | ||
87 | }; | ||
88 | |||
89 | int64_t cmp_null(void *l, void *r) | ||
90 | { | ||
91 | if (!l && !r) | ||
92 | return 0; | ||
93 | else if (!l) | ||
94 | return -1; | ||
95 | else | ||
96 | return 1; | ||
97 | } | ||
98 | |||
99 | /* --sort pid */ | ||
100 | |||
101 | int64_t | ||
102 | sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) | ||
103 | { | ||
104 | return right->thread->pid - left->thread->pid; | ||
105 | } | ||
106 | |||
107 | static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) | 18 | static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) |
108 | { | 19 | { |
109 | int n; | 20 | int n; |
@@ -125,6 +36,24 @@ static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) | |||
125 | return n; | 36 | return n; |
126 | } | 37 | } |
127 | 38 | ||
39 | static int64_t cmp_null(void *l, void *r) | ||
40 | { | ||
41 | if (!l && !r) | ||
42 | return 0; | ||
43 | else if (!l) | ||
44 | return -1; | ||
45 | else | ||
46 | return 1; | ||
47 | } | ||
48 | |||
49 | /* --sort pid */ | ||
50 | |||
51 | static int64_t | ||
52 | sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) | ||
53 | { | ||
54 | return right->thread->pid - left->thread->pid; | ||
55 | } | ||
56 | |||
128 | static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, | 57 | static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, |
129 | size_t size, unsigned int width) | 58 | size_t size, unsigned int width) |
130 | { | 59 | { |
@@ -132,15 +61,50 @@ static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, | |||
132 | self->thread->comm ?: "", self->thread->pid); | 61 | self->thread->comm ?: "", self->thread->pid); |
133 | } | 62 | } |
134 | 63 | ||
64 | struct sort_entry sort_thread = { | ||
65 | .se_header = "Command: Pid", | ||
66 | .se_cmp = sort__thread_cmp, | ||
67 | .se_snprintf = hist_entry__thread_snprintf, | ||
68 | .se_width_idx = HISTC_THREAD, | ||
69 | }; | ||
70 | |||
71 | /* --sort comm */ | ||
72 | |||
73 | static int64_t | ||
74 | sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) | ||
75 | { | ||
76 | return right->thread->pid - left->thread->pid; | ||
77 | } | ||
78 | |||
79 | static int64_t | ||
80 | sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) | ||
81 | { | ||
82 | char *comm_l = left->thread->comm; | ||
83 | char *comm_r = right->thread->comm; | ||
84 | |||
85 | if (!comm_l || !comm_r) | ||
86 | return cmp_null(comm_l, comm_r); | ||
87 | |||
88 | return strcmp(comm_l, comm_r); | ||
89 | } | ||
90 | |||
135 | static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf, | 91 | static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf, |
136 | size_t size, unsigned int width) | 92 | size_t size, unsigned int width) |
137 | { | 93 | { |
138 | return repsep_snprintf(bf, size, "%*s", width, self->thread->comm); | 94 | return repsep_snprintf(bf, size, "%*s", width, self->thread->comm); |
139 | } | 95 | } |
140 | 96 | ||
97 | struct sort_entry sort_comm = { | ||
98 | .se_header = "Command", | ||
99 | .se_cmp = sort__comm_cmp, | ||
100 | .se_collapse = sort__comm_collapse, | ||
101 | .se_snprintf = hist_entry__comm_snprintf, | ||
102 | .se_width_idx = HISTC_COMM, | ||
103 | }; | ||
104 | |||
141 | /* --sort dso */ | 105 | /* --sort dso */ |
142 | 106 | ||
143 | int64_t | 107 | static int64_t |
144 | sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) | 108 | sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) |
145 | { | 109 | { |
146 | struct dso *dso_l = left->ms.map ? left->ms.map->dso : NULL; | 110 | struct dso *dso_l = left->ms.map ? left->ms.map->dso : NULL; |
@@ -173,9 +137,16 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, | |||
173 | return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); | 137 | return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); |
174 | } | 138 | } |
175 | 139 | ||
140 | struct sort_entry sort_dso = { | ||
141 | .se_header = "Shared Object", | ||
142 | .se_cmp = sort__dso_cmp, | ||
143 | .se_snprintf = hist_entry__dso_snprintf, | ||
144 | .se_width_idx = HISTC_DSO, | ||
145 | }; | ||
146 | |||
176 | /* --sort symbol */ | 147 | /* --sort symbol */ |
177 | 148 | ||
178 | int64_t | 149 | static int64_t |
179 | sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) | 150 | sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) |
180 | { | 151 | { |
181 | u64 ip_l, ip_r; | 152 | u64 ip_l, ip_r; |
@@ -211,29 +182,16 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, | |||
211 | return ret; | 182 | return ret; |
212 | } | 183 | } |
213 | 184 | ||
214 | /* --sort comm */ | 185 | struct sort_entry sort_sym = { |
215 | 186 | .se_header = "Symbol", | |
216 | int64_t | 187 | .se_cmp = sort__sym_cmp, |
217 | sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) | 188 | .se_snprintf = hist_entry__sym_snprintf, |
218 | { | 189 | .se_width_idx = HISTC_SYMBOL, |
219 | return right->thread->pid - left->thread->pid; | 190 | }; |
220 | } | ||
221 | |||
222 | int64_t | ||
223 | sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) | ||
224 | { | ||
225 | char *comm_l = left->thread->comm; | ||
226 | char *comm_r = right->thread->comm; | ||
227 | |||
228 | if (!comm_l || !comm_r) | ||
229 | return cmp_null(comm_l, comm_r); | ||
230 | |||
231 | return strcmp(comm_l, comm_r); | ||
232 | } | ||
233 | 191 | ||
234 | /* --sort parent */ | 192 | /* --sort parent */ |
235 | 193 | ||
236 | int64_t | 194 | static int64_t |
237 | sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) | 195 | sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) |
238 | { | 196 | { |
239 | struct symbol *sym_l = left->parent; | 197 | struct symbol *sym_l = left->parent; |
@@ -252,9 +210,16 @@ static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf, | |||
252 | self->parent ? self->parent->name : "[other]"); | 210 | self->parent ? self->parent->name : "[other]"); |
253 | } | 211 | } |
254 | 212 | ||
213 | struct sort_entry sort_parent = { | ||
214 | .se_header = "Parent symbol", | ||
215 | .se_cmp = sort__parent_cmp, | ||
216 | .se_snprintf = hist_entry__parent_snprintf, | ||
217 | .se_width_idx = HISTC_PARENT, | ||
218 | }; | ||
219 | |||
255 | /* --sort cpu */ | 220 | /* --sort cpu */ |
256 | 221 | ||
257 | int64_t | 222 | static int64_t |
258 | sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) | 223 | sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) |
259 | { | 224 | { |
260 | return right->cpu - left->cpu; | 225 | return right->cpu - left->cpu; |
@@ -266,6 +231,28 @@ static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf, | |||
266 | return repsep_snprintf(bf, size, "%-*d", width, self->cpu); | 231 | return repsep_snprintf(bf, size, "%-*d", width, self->cpu); |
267 | } | 232 | } |
268 | 233 | ||
234 | struct sort_entry sort_cpu = { | ||
235 | .se_header = "CPU", | ||
236 | .se_cmp = sort__cpu_cmp, | ||
237 | .se_snprintf = hist_entry__cpu_snprintf, | ||
238 | .se_width_idx = HISTC_CPU, | ||
239 | }; | ||
240 | |||
241 | struct sort_dimension { | ||
242 | const char *name; | ||
243 | struct sort_entry *entry; | ||
244 | int taken; | ||
245 | }; | ||
246 | |||
247 | static struct sort_dimension sort_dimensions[] = { | ||
248 | { .name = "pid", .entry = &sort_thread, }, | ||
249 | { .name = "comm", .entry = &sort_comm, }, | ||
250 | { .name = "dso", .entry = &sort_dso, }, | ||
251 | { .name = "symbol", .entry = &sort_sym, }, | ||
252 | { .name = "parent", .entry = &sort_parent, }, | ||
253 | { .name = "cpu", .entry = &sort_cpu, }, | ||
254 | }; | ||
255 | |||
269 | int sort_dimension__add(const char *tok) | 256 | int sort_dimension__add(const char *tok) |
270 | { | 257 | { |
271 | unsigned int i; | 258 | unsigned int i; |
@@ -273,15 +260,9 @@ int sort_dimension__add(const char *tok) | |||
273 | for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { | 260 | for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { |
274 | struct sort_dimension *sd = &sort_dimensions[i]; | 261 | struct sort_dimension *sd = &sort_dimensions[i]; |
275 | 262 | ||
276 | if (sd->taken) | ||
277 | continue; | ||
278 | |||
279 | if (strncasecmp(tok, sd->name, strlen(tok))) | 263 | if (strncasecmp(tok, sd->name, strlen(tok))) |
280 | continue; | 264 | continue; |
281 | 265 | ||
282 | if (sd->entry->se_collapse) | ||
283 | sort__need_collapse = 1; | ||
284 | |||
285 | if (sd->entry == &sort_parent) { | 266 | if (sd->entry == &sort_parent) { |
286 | int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); | 267 | int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); |
287 | if (ret) { | 268 | if (ret) { |
@@ -294,6 +275,12 @@ int sort_dimension__add(const char *tok) | |||
294 | sort__has_parent = 1; | 275 | sort__has_parent = 1; |
295 | } | 276 | } |
296 | 277 | ||
278 | if (sd->taken) | ||
279 | return 0; | ||
280 | |||
281 | if (sd->entry->se_collapse) | ||
282 | sort__need_collapse = 1; | ||
283 | |||
297 | if (list_empty(&hist_entry__sort_list)) { | 284 | if (list_empty(&hist_entry__sort_list)) { |
298 | if (!strcmp(sd->name, "pid")) | 285 | if (!strcmp(sd->name, "pid")) |
299 | sort__first_dimension = SORT_PID; | 286 | sort__first_dimension = SORT_PID; |
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 0b91053a7d1..77d0388ad41 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h | |||
@@ -103,20 +103,6 @@ extern struct sort_entry sort_thread; | |||
103 | extern struct list_head hist_entry__sort_list; | 103 | extern struct list_head hist_entry__sort_list; |
104 | 104 | ||
105 | void setup_sorting(const char * const usagestr[], const struct option *opts); | 105 | void setup_sorting(const char * const usagestr[], const struct option *opts); |
106 | |||
107 | extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int); | ||
108 | extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int); | ||
109 | extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int); | ||
110 | extern size_t sort__sym_print(FILE *, struct hist_entry *, unsigned int __used); | ||
111 | extern int64_t cmp_null(void *, void *); | ||
112 | extern int64_t sort__thread_cmp(struct hist_entry *, struct hist_entry *); | ||
113 | extern int64_t sort__comm_cmp(struct hist_entry *, struct hist_entry *); | ||
114 | extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *); | ||
115 | extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *); | ||
116 | extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *); | ||
117 | extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *); | ||
118 | int64_t sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right); | ||
119 | extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int); | ||
120 | extern int sort_dimension__add(const char *); | 106 | extern int sort_dimension__add(const char *); |
121 | void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, | 107 | void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, |
122 | const char *list_name, FILE *fp); | 108 | const char *list_name, FILE *fp); |