 arch/powerpc/kernel/misc.S                               |   2
 arch/x86/kernel/cpu/perf_event.c                         |  54
 arch/x86/kernel/cpu/perf_event_amd.c                     |  80
 arch/x86/kernel/dumpstack.h                              |   5
 arch/x86/kernel/kgdb.c                                   |   2
 arch/x86/kernel/smpboot.c                                |   4
 fs/proc/task_mmu.c                                       |  87
 include/linux/perf_event.h                               |  21
 kernel/perf_event.c                                      |  22
 kernel/sched.c                                           |   2
 kernel/sched_debug.c                                     |   4
 kernel/trace/ring_buffer.c                               |   8
 kernel/trace/trace_clock.c                               |   4
 kernel/trace/trace_event_perf.c                          |  11
 tools/perf/Makefile                                      |  10
 tools/perf/util/scripting-engines/trace-event-python.c   |  17
 16 files changed, 199 insertions(+), 134 deletions(-)
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index b485a87c94e1..22e507c8a556 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -128,7 +128,6 @@ _GLOBAL(__restore_cpu_power7)
 	/* place holder */
 	blr
 
-#ifdef CONFIG_EVENT_TRACING
 /*
  * Get a minimal set of registers for our caller's nth caller.
  * r3 = regs pointer, r5 = n.
@@ -154,4 +153,3 @@ _GLOBAL(perf_arch_fetch_caller_regs)
 	PPC_STL	r4,_NIP-STACK_FRAME_OVERHEAD(r3)
 	PPC_STL	r7,_LINK-STACK_FRAME_OVERHEAD(r3)
 	blr
-#endif /* CONFIG_EVENT_TRACING */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 60398a0d947c..53ea4cf1a878 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -28,6 +28,7 @@
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
+#include <asm/compat.h>
 
 static u64 perf_event_mask __read_mostly;
 
@@ -158,7 +159,7 @@ struct x86_pmu {
 					 struct perf_event *event);
 	struct event_constraint *event_constraints;
 
-	void		(*cpu_prepare)(int cpu);
+	int		(*cpu_prepare)(int cpu);
 	void		(*cpu_starting)(int cpu);
 	void		(*cpu_dying)(int cpu);
 	void		(*cpu_dead)(int cpu);
@@ -1333,11 +1334,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+	int ret = NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		if (x86_pmu.cpu_prepare)
-			x86_pmu.cpu_prepare(cpu);
+			ret = x86_pmu.cpu_prepare(cpu);
 		break;
 
 	case CPU_STARTING:
@@ -1350,6 +1352,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 			x86_pmu.cpu_dying(cpu);
 		break;
 
+	case CPU_UP_CANCELED:
 	case CPU_DEAD:
 		if (x86_pmu.cpu_dead)
 			x86_pmu.cpu_dead(cpu);
@@ -1359,7 +1362,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		break;
 	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 static void __init pmu_check_apic(void)
@@ -1628,14 +1631,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return len;
 }
 
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+#ifdef CONFIG_COMPAT
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
-	unsigned long bytes;
+	/* 32-bit process in 64-bit kernel. */
+	struct stack_frame_ia32 frame;
+	const void __user *fp;
 
-	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
+	if (!test_thread_flag(TIF_IA32))
+		return 0;
+
+	fp = compat_ptr(regs->bp);
+	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
+		frame.next_frame     = 0;
+		frame.return_address = 0;
+
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
+			break;
+
+		if (fp < compat_ptr(regs->sp))
+			break;
 
-	return bytes == sizeof(*frame);
+		callchain_store(entry, frame.return_address);
+		fp = compat_ptr(frame.next_frame);
+	}
+	return 1;
+}
+#else
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+	return 0;
 }
+#endif
 
 static void
 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
@@ -1651,11 +1682,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, regs->ip);
 
+	if (perf_callchain_user32(regs, entry))
+		return;
+
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
 		frame.next_frame     = NULL;
 		frame.return_address = 0;
 
-		if (!copy_stack_frame(fp, &frame))
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
 			break;
 
 		if ((unsigned long)fp < regs->sp)
@@ -1702,7 +1738,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return entry;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 	regs->ip = ip;
@@ -1714,4 +1749,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 	regs->cs = __KERNEL_CS;
 	local_save_flags(regs->flags);
 }
-#endif
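The perf_event.c change above adds a separate callchain walker for 32-bit tasks running on a 64-bit kernel, using the two-word stack_frame_ia32 layout introduced in dumpstack.h further down. A small standalone sketch of walking such a frame chain, with made-up addresses and an array standing in for the user stack:

#include <stdint.h>
#include <stdio.h>

/* Same two-word layout the patch adds to dumpstack.h (illustration only). */
struct stack_frame_ia32 {
	uint32_t next_frame;
	uint32_t return_address;
};

int main(void)
{
	/* Fake "user stack": three linked frames, addressed by array index
	 * rather than by real user pointers. */
	struct stack_frame_ia32 stack[3] = {
		{ .next_frame = 1, .return_address = 0x08048111 },
		{ .next_frame = 2, .return_address = 0x08048222 },
		{ .next_frame = 0, .return_address = 0 },	/* chain ends */
	};

	for (uint32_t fp = 0; stack[fp].return_address != 0;
	     fp = stack[fp].next_frame)
		printf("callchain entry: 0x%08x\n", stack[fp].return_address);

	return 0;
}

The kernel version does the same follow-the-frame-pointer loop, but reads each frame with copy_from_user_nmi() and bounds the walk against regs->sp and PERF_MAX_STACK_DEPTH.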
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index b87e0b6970cb..db6f7d4056e1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 	return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_has_nb(struct cpu_hw_events *cpuc)
+{
+	struct amd_nb *nb = cpuc->amd_nb;
+
+	return nb && nb->nb_id != -1;
+}
+
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 				      struct perf_event *event)
 {
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 	/*
 	 * only care about NB events
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return;
 
 	/*
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	/*
 	 * if not NB event or no NB, then no constraints
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return &unconstrained;
 
 	/*
@@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	return nb;
 }
 
-static void amd_pmu_cpu_online(int cpu)
+static int amd_pmu_cpu_prepare(int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+	WARN_ON_ONCE(cpuc->amd_nb);
+
+	if (boot_cpu_data.x86_max_cores < 2)
+		return NOTIFY_OK;
+
+	cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+	if (!cpuc->amd_nb)
+		return NOTIFY_BAD;
+
+	return NOTIFY_OK;
+}
+
+static void amd_pmu_cpu_starting(int cpu)
 {
-	struct cpu_hw_events *cpu1, *cpu2;
-	struct amd_nb *nb = NULL;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	struct amd_nb *nb;
 	int i, nb_id;
 
 	if (boot_cpu_data.x86_max_cores < 2)
 		return;
 
-	/*
-	 * function may be called too early in the
-	 * boot process, in which case nb_id is bogus
-	 */
 	nb_id = amd_get_nb_id(cpu);
-	if (nb_id == BAD_APICID)
-		return;
-
-	cpu1 = &per_cpu(cpu_hw_events, cpu);
-	cpu1->amd_nb = NULL;
+	WARN_ON_ONCE(nb_id == BAD_APICID);
 
 	raw_spin_lock(&amd_nb_lock);
 
 	for_each_online_cpu(i) {
-		cpu2 = &per_cpu(cpu_hw_events, i);
-		nb = cpu2->amd_nb;
-		if (!nb)
+		nb = per_cpu(cpu_hw_events, i).amd_nb;
+		if (WARN_ON_ONCE(!nb))
 			continue;
-		if (nb->nb_id == nb_id)
-			goto found;
-	}
 
-	nb = amd_alloc_nb(cpu, nb_id);
-	if (!nb) {
-		pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
-		raw_spin_unlock(&amd_nb_lock);
-		return;
+		if (nb->nb_id == nb_id) {
+			kfree(cpuc->amd_nb);
+			cpuc->amd_nb = nb;
+			break;
+		}
 	}
-found:
-	nb->refcnt++;
-	cpu1->amd_nb = nb;
+
+	cpuc->amd_nb->nb_id = nb_id;
+	cpuc->amd_nb->refcnt++;
 
 	raw_spin_unlock(&amd_nb_lock);
 }
 
-static void amd_pmu_cpu_offline(int cpu)
+static void amd_pmu_cpu_dead(int cpu)
 {
 	struct cpu_hw_events *cpuhw;
 
@@ -349,8 +360,10 @@ static void amd_pmu_cpu_offline(int cpu)
 	raw_spin_lock(&amd_nb_lock);
 
 	if (cpuhw->amd_nb) {
-		if (--cpuhw->amd_nb->refcnt == 0)
-			kfree(cpuhw->amd_nb);
+		struct amd_nb *nb = cpuhw->amd_nb;
+
+		if (nb->nb_id == -1 || --nb->refcnt == 0)
+			kfree(nb);
 
 		cpuhw->amd_nb = NULL;
 	}
@@ -379,8 +392,9 @@ static __initconst struct x86_pmu amd_pmu = {
 	.get_event_constraints	= amd_get_event_constraints,
 	.put_event_constraints	= amd_put_event_constraints,
 
-	.cpu_prepare		= amd_pmu_cpu_online,
-	.cpu_dead		= amd_pmu_cpu_offline,
+	.cpu_prepare		= amd_pmu_cpu_prepare,
+	.cpu_starting		= amd_pmu_cpu_starting,
+	.cpu_dead		= amd_pmu_cpu_dead,
 };
 
 static __init int amd_pmu_init(void)
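The AMD hunks split the old cpu_online callback into a prepare step (allocate a private amd_nb with nb_id == -1, failing the hotplug with NOTIFY_BAD if the allocation fails) and a starting step that either adopts an already-registered northbridge structure for the same node or keeps and stamps the private one. A toy userspace model of that share-or-keep logic, with invented types and no locking:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in; the kernel uses per-CPU data and amd_nb_lock. */
struct nb { int nb_id; int refcnt; };

#define NR_CPUS 4
static struct nb *cpu_nb[NR_CPUS];

/* CPU_UP_PREPARE analogue: allocate a private struct with nb_id = -1. */
static int cpu_prepare(int cpu)
{
	cpu_nb[cpu] = calloc(1, sizeof(*cpu_nb[cpu]));
	if (!cpu_nb[cpu])
		return -1;		/* the kernel returns NOTIFY_BAD here */
	cpu_nb[cpu]->nb_id = -1;
	return 0;
}

/* CPU_STARTING analogue: adopt an existing northbridge struct for this
 * node if one is already registered, otherwise keep the private one. */
static void cpu_starting(int cpu, int nb_id)
{
	for (int i = 0; i < NR_CPUS; i++) {
		struct nb *other = cpu_nb[i];

		if (!other || other == cpu_nb[cpu] || other->nb_id != nb_id)
			continue;
		free(cpu_nb[cpu]);		/* drop the private copy */
		cpu_nb[cpu] = other;
		break;
	}
	cpu_nb[cpu]->nb_id = nb_id;
	cpu_nb[cpu]->refcnt++;
}

int main(void)
{
	for (int cpu = 0; cpu < 2; cpu++) {
		cpu_prepare(cpu);
		cpu_starting(cpu, 0);	/* both CPUs sit on node 0 */
	}
	printf("node 0 struct shared by %d cpus (%s pointer)\n",
	       cpu_nb[0]->refcnt,
	       cpu_nb[0] == cpu_nb[1] ? "same" : "different");
	return 0;
}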
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index 29e5f7c845b2..e39e77168a37 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -30,6 +30,11 @@ struct stack_frame {
 	unsigned long return_address;
 };
 
+struct stack_frame_ia32 {
+	u32 next_frame;
+	u32 return_address;
+};
+
 static inline unsigned long rewind_frame_pointer(int n)
 {
 	struct stack_frame *frame;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index bfba6019d762..b2258ca91003 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
 	 * portion of kgdb because this operation requires mutexs to
 	 * complete.
 	 */
+	hw_breakpoint_init(&attr);
 	attr.bp_addr = (unsigned long)kgdb_arch_init;
-	attr.type = PERF_TYPE_BREAKPOINT;
 	attr.bp_len = HW_BREAKPOINT_LEN_1;
 	attr.bp_type = HW_BREAKPOINT_W;
 	attr.disabled = 1;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 06d98ae5a802..6808b934d6c0 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -242,8 +242,6 @@ static void __cpuinit smp_callin(void)
 	end_local_APIC_setup();
 	map_cpu_to_logical_apicid();
 
-	notify_cpu_starting(cpuid);
-
 	/*
 	 * Need to setup vector mappings before we enable interrupts.
 	 */
@@ -264,6 +262,8 @@ static void __cpuinit smp_callin(void)
 	 */
 	smp_store_cpu_info(cpuid);
 
+	notify_cpu_starting(cpuid);
+
 	/*
 	 * Allow the master to continue.
 	 */
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 183f8ff5f400..096273984c3b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -406,6 +406,7 @@ static int show_smap(struct seq_file *m, void *v)
 
 	memset(&mss, 0, sizeof mss);
 	mss.vma = vma;
+	/* mmap_sem is held in m_start */
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
 		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
 
@@ -552,7 +553,8 @@ const struct file_operations proc_clear_refs_operations = {
 };
 
 struct pagemapread {
-	u64 __user *out, *end;
+	int pos, len;
+	u64 *buffer;
 };
 
 #define PM_ENTRY_BYTES      sizeof(u64)
@@ -575,10 +577,8 @@ struct pagemapread {
 static int add_to_pagemap(unsigned long addr, u64 pfn,
 			  struct pagemapread *pm)
 {
-	if (put_user(pfn, pm->out))
-		return -EFAULT;
-	pm->out++;
-	if (pm->out >= pm->end)
+	pm->buffer[pm->pos++] = pfn;
+	if (pm->pos >= pm->len)
 		return PM_END_OF_BUFFER;
 	return 0;
 }
@@ -720,21 +720,20 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
  * determine which areas of memory are actually mapped and llseek to
  * skip over unmapped regions.
  */
+#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
 static ssize_t pagemap_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
 	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
-	struct page **pages, *page;
-	unsigned long uaddr, uend;
 	struct mm_struct *mm;
 	struct pagemapread pm;
-	int pagecount;
 	int ret = -ESRCH;
 	struct mm_walk pagemap_walk = {};
 	unsigned long src;
 	unsigned long svpfn;
 	unsigned long start_vaddr;
 	unsigned long end_vaddr;
+	int copied = 0;
 
 	if (!task)
 		goto out;
@@ -757,35 +756,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!mm)
 		goto out_task;
 
-
-	uaddr = (unsigned long)buf & PAGE_MASK;
-	uend = (unsigned long)(buf + count);
-	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
-	ret = 0;
-	if (pagecount == 0)
-		goto out_mm;
-	pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
 	ret = -ENOMEM;
-	if (!pages)
+	if (!pm.buffer)
 		goto out_mm;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, uaddr, pagecount,
-			     1, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
-
-	if (ret < 0)
-		goto out_free;
-
-	if (ret != pagecount) {
-		pagecount = ret;
-		ret = -EFAULT;
-		goto out_pages;
-	}
-
-	pm.out = (u64 __user *)buf;
-	pm.end = (u64 __user *)(buf + count);
-
 	pagemap_walk.pmd_entry = pagemap_pte_range;
 	pagemap_walk.pte_hole = pagemap_pte_hole;
 	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
@@ -807,23 +783,36 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	 * user buffer is tracked in "pm", and the walk
 	 * will stop when we hit the end of the buffer.
 	 */
-	ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
-	if (ret == PM_END_OF_BUFFER)
-		ret = 0;
-	/* don't need mmap_sem for these, but this looks cleaner */
-	*ppos += (char __user *)pm.out - buf;
-	if (!ret)
-		ret = (char __user *)pm.out - buf;
-
-out_pages:
-	for (; pagecount; pagecount--) {
-		page = pages[pagecount-1];
-		if (!PageReserved(page))
-			SetPageDirty(page);
-		page_cache_release(page);
+	ret = 0;
+	while (count && (start_vaddr < end_vaddr)) {
+		int len;
+		unsigned long end;
+
+		pm.pos = 0;
+		end = start_vaddr + PAGEMAP_WALK_SIZE;
+		/* overflow ? */
+		if (end < start_vaddr || end > end_vaddr)
+			end = end_vaddr;
+		down_read(&mm->mmap_sem);
+		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+		up_read(&mm->mmap_sem);
+		start_vaddr = end;
+
+		len = min(count, PM_ENTRY_BYTES * pm.pos);
+		if (copy_to_user(buf, pm.buffer, len) < 0) {
+			ret = -EFAULT;
+			goto out_free;
+		}
+		copied += len;
+		buf += len;
+		count -= len;
 	}
+	*ppos += copied;
+	if (!ret || ret == PM_END_OF_BUFFER)
+		ret = copied;
+
 out_free:
-	kfree(pages);
+	kfree(pm.buffer);
 out_mm:
 	mmput(mm);
 out_task:
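The rewritten pagemap_read() above no longer pins the user buffer with get_user_pages(); it walks the address space in PAGEMAP_WALK_SIZE chunks, fills a kmalloc'd scratch buffer, and copies each chunk out with copy_to_user(), holding mmap_sem only around the walk itself. For a rough sense of the per-chunk buffer size, here is a standalone calculation assuming common x86-64 defaults (these constants are assumptions, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* Assumed defaults: 4 KiB pages, PMD_SIZE = 2 MiB, 8-byte entries. */
	unsigned long page_shift  = 12;
	unsigned long walk_size   = 2UL * 1024 * 1024;	/* PAGEMAP_WALK_SIZE */
	unsigned long entry_bytes = 8;			/* PM_ENTRY_BYTES */

	unsigned long entries = walk_size >> page_shift;
	unsigned long len     = entry_bytes * entries;	/* the kmalloc size */

	printf("%lu pagemap entries per chunk, %lu-byte kernel buffer\n",
	       entries, len);
	return 0;
}

With those defaults the scratch buffer is one page (512 entries, 4096 bytes) per iteration of the while loop.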
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 95477038a72a..c8e375440403 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -842,13 +842,6 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
-static inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
-{
-	if (atomic_read(&perf_swevent_enabled[event_id]))
-		__perf_sw_event(event_id, nr, nmi, regs, addr);
-}
-
 extern void
 perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
 
@@ -887,6 +880,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
 	return perf_arch_fetch_caller_regs(regs, ip, skip);
 }
 
+static inline void
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+	if (atomic_read(&perf_swevent_enabled[event_id])) {
+		struct pt_regs hot_regs;
+
+		if (!regs) {
+			perf_fetch_caller_regs(&hot_regs, 1);
+			regs = &hot_regs;
+		}
+		__perf_sw_event(event_id, nr, nmi, regs, addr);
+	}
+}
+
 extern void __perf_event_mmap(struct vm_area_struct *vma);
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)
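The header change moves the perf_sw_event() inline below perf_fetch_caller_regs() so that, when a caller passes a NULL regs pointer, the inline can capture a register snapshot itself; the kernel/perf_event.c hunk that follows relies on exactly that by passing NULL for the context-switch event. A toy userspace sketch of the same pattern (all names here are invented, not kernel API):

#include <stdio.h>

struct fake_regs { unsigned long ip; };

static void fetch_caller_regs(struct fake_regs *regs)
{
	regs->ip = 0x1000;	/* a real implementation captures the caller's IP */
}

static void sw_event(unsigned int event_id, struct fake_regs *regs)
{
	struct fake_regs hot_regs;

	if (!regs) {			/* same shape as the new inline */
		fetch_caller_regs(&hot_regs);
		regs = &hot_regs;
	}
	printf("event %u counted at ip 0x%lx\n", event_id, regs->ip);
}

int main(void)
{
	sw_event(3, NULL);	/* callers no longer have to build a regs struct */
	return 0;
}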
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 574ee58a3046..681af806d76b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1164,11 +1164,9 @@ void perf_event_task_sched_out(struct task_struct *task,
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
-	struct pt_regs *regs;
 	int do_switch = 1;
 
-	regs = task_pt_regs(task);
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
 	if (likely(!ctx || !cpuctx->task_ctx))
 		return;
@@ -2786,12 +2784,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return NULL;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 __weak
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 }
-#endif
+
 
 /*
  * Output
@@ -3378,15 +3375,23 @@ static void perf_event_task_output(struct perf_event *event,
 				     struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
-	int size;
 	struct task_struct *task = task_event->task;
-	int ret;
+	unsigned long flags;
+	int size, ret;
+
+	/*
+	 * If this CPU attempts to acquire an rq lock held by a CPU spinning
+	 * in perf_output_lock() from interrupt context, it's game over.
+	 */
+	local_irq_save(flags);
 
 	size  = task_event->event_id.header.size;
 	ret = perf_output_begin(&handle, event, size, 0, 0);
 
-	if (ret)
+	if (ret) {
+		local_irq_restore(flags);
 		return;
+	}
 
 	task_event->event_id.pid = perf_event_pid(event, task);
 	task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3397,6 +3402,7 @@ static void perf_event_task_output(struct perf_event *event,
 	perf_output_put(&handle, task_event->event_id);
 
 	perf_output_end(&handle);
+	local_irq_restore(flags);
 }
 
 static int perf_event_task_match(struct perf_event *event)
diff --git a/kernel/sched.c b/kernel/sched.c
index 49d2fa7b687a..528a10592c16 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5387,7 +5387,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 
 		get_task_struct(mt);
 		task_rq_unlock(rq, &flags);
-		wake_up_process(rq->migration_thread);
+		wake_up_process(mt);
 		put_task_struct(mt);
 		wait_for_completion(&req.done);
 		tlb_migrate_finish(p->mm);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 67f95aada4b9..9b49db144037 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -518,8 +518,4 @@ void proc_sched_set_task(struct task_struct *p)
 	p->se.nr_wakeups_idle		= 0;
 	p->sched_info.bkl_count		= 0;
 #endif
-	p->se.sum_exec_runtime		= 0;
-	p->se.prev_sum_exec_runtime	= 0;
-	p->nvcsw			= 0;
-	p->nivcsw			= 0;
 }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d1187ef20caf..9a0f9bf6a37b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1209,18 +1209,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
 
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
@@ -1237,7 +1238,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
@@ -1246,6 +1247,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
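Both ring-buffer hunks turn an early return under reader_lock into a goto to a shared unlock label, so the RB_WARN_ON paths no longer exit with the spinlock held. A userspace sketch of that single-exit idiom, using a pthread mutex purely for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void remove_pages(int nr_pages, int list_is_empty)
{
	pthread_mutex_lock(&lock);

	if (list_is_empty)
		goto out;	/* a bare "return" here would leak the lock */

	printf("removing %d pages\n", nr_pages);
out:
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	remove_pages(4, 0);
	remove_pages(4, 1);	/* the early-exit path still unlocks */
	return 0;
}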
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 6fbfb8f417b9..9d589d8dcd1a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void)
 	int this_cpu;
 	u64 now;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 
 	this_cpu = raw_smp_processor_id();
 	now = cpu_clock(this_cpu);
@@ -110,7 +110,7 @@ u64 notrace trace_clock_global(void)
 	arch_spin_unlock(&trace_clock_struct.lock);
 
  out:
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	return now;
 }
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 81f691eb3a30..0565bb42566f 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned accesses
+ * suprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+	perf_trace_t;
 
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	char *trace_buf, *raw_data;
 	int pc, cpu;
 
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
 	pc = preempt_count();
 
 	/* Protect the per cpu buffer, begin the rcu read side */
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
 	entry = (struct trace_entry *)raw_data;
 	tracing_generic_entry_update(entry, *irq_flags, pc);
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 8a8f52db7e38..bc0f670a8338 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -200,7 +200,7 @@ endif
 
 CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
-ALL_CFLAGS = $(CFLAGS)
+ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 ALL_LDFLAGS = $(LDFLAGS)
 STRIP ?= strip
 
@@ -492,19 +492,19 @@ ifeq ($(uname_S),Darwin)
 	PTHREAD_LIBS =
 endif
 
-ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
-ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
 	msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
 endif
 
-ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
 	BASIC_CFLAGS += -DLIBELF_NO_MMAP
 endif
 else
 	msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
 endif
 
-ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
 	msg := $(warning No libdw.h found or old libdw.h found, disables dwarf support. Please install elfutils-devel/elfutils-dev);
 	BASIC_CFLAGS += -DNO_DWARF_SUPPORT
 else
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 33a414bbba3e..6a72f14c5986 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -208,7 +208,7 @@ static void python_process_event(int cpu, void *data,
 			       int size __unused,
 			       unsigned long long nsecs, char *comm)
 {
-	PyObject *handler, *retval, *context, *t;
+	PyObject *handler, *retval, *context, *t, *obj;
 	static char handler_name[256];
 	struct format_field *field;
 	unsigned long long val;
@@ -256,16 +256,23 @@ static void python_process_event(int cpu, void *data,
 				offset &= 0xffff;
 			} else
 				offset = field->offset;
-			PyTuple_SetItem(t, n++,
-				PyString_FromString((char *)data + offset));
+			obj = PyString_FromString((char *)data + offset);
 		} else { /* FIELD_IS_NUMERIC */
 			val = read_size(data + field->offset, field->size);
 			if (field->flags & FIELD_IS_SIGNED) {
-				PyTuple_SetItem(t, n++, PyInt_FromLong(val));
+				if ((long long)val >= LONG_MIN &&
+				    (long long)val <= LONG_MAX)
+					obj = PyInt_FromLong(val);
+				else
+					obj = PyLong_FromLongLong(val);
 			} else {
-				PyTuple_SetItem(t, n++, PyInt_FromLong(val));
+				if (val <= LONG_MAX)
+					obj = PyInt_FromLong(val);
+				else
+					obj = PyLong_FromUnsignedLongLong(val);
 			}
 		}
+		PyTuple_SetItem(t, n++, obj);
 	}
 
 	if (_PyTuple_Resize(&t, n) == -1)
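The last hunk only builds a PyInt when the field value fits in a C long and falls back to PyLong otherwise, so large 64-bit counters are no longer truncated. A standalone sketch of the same range check (the sample value is made up):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative 64-bit field value, not taken from a real trace. */
	unsigned long long val = 0x123456789abULL;

	if (val <= LONG_MAX)
		printf("fits in a C long: PyInt_FromLong is safe\n");
	else
		printf("needs PyLong_FromUnsignedLongLong\n");
	return 0;
}

On a 32-bit build LONG_MAX is 2^31 - 1, so values like the one above take the PyLong path; on 64-bit builds most counters still fit in a PyInt.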