Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	36
1 file changed, 32 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bf79d7c97df..1b443db2db50 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -482,6 +482,9 @@ int x86_pmu_hw_config(struct perf_event *event)
 			/* Support for IP fixup */
 			if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
 				precise++;
+
+			if (x86_pmu.pebs_prec_dist)
+				precise++;
 		}
 
 		if (event->attr.precise_ip > precise)
@@ -1531,6 +1534,7 @@ static void __init filter_events(struct attribute **attrs)
 {
 	struct device_attribute *d;
 	struct perf_pmu_events_attr *pmu_attr;
+	int offset = 0;
 	int i, j;
 
 	for (i = 0; attrs[i]; i++) {
@@ -1539,7 +1543,7 @@ static void __init filter_events(struct attribute **attrs)
 		/* str trumps id */
 		if (pmu_attr->event_str)
 			continue;
-		if (x86_pmu.event_map(i))
+		if (x86_pmu.event_map(i + offset))
 			continue;
 
 		for (j = i; attrs[j]; j++)
@@ -1547,6 +1551,14 @@ static void __init filter_events(struct attribute **attrs)
 
 		/* Check the shifted attr. */
 		i--;
+
+		/*
+		 * event_map() is index based, the attrs array is organized
+		 * by increasing event index. If we shift the events, then
+		 * we need to compensate for the event_map(), otherwise
+		 * we are looking up the wrong event in the map
+		 */
+		offset++;
 	}
 }
 
@@ -2250,12 +2262,19 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	ss_base = get_segment_base(regs->ss);
 
 	fp = compat_ptr(ss_base + regs->bp);
+	pagefault_disable();
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
 		unsigned long bytes;
 		frame.next_frame     = 0;
 		frame.return_address = 0;
 
-		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (!access_ok(VERIFY_READ, fp, 8))
+			break;
+
+		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
+		if (bytes != 0)
+			break;
+		bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
 		if (bytes != 0)
 			break;
 
@@ -2265,6 +2284,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 		perf_callchain_store(entry, cs_base + frame.return_address);
 		fp = compat_ptr(ss_base + frame.next_frame);
 	}
+	pagefault_enable();
 	return 1;
 }
 #else
@@ -2302,12 +2322,19 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 	if (perf_callchain_user32(regs, entry))
 		return;
 
+	pagefault_disable();
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
 		unsigned long bytes;
 		frame.next_frame     = NULL;
 		frame.return_address = 0;
 
-		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (!access_ok(VERIFY_READ, fp, 16))
+			break;
+
+		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+		if (bytes != 0)
+			break;
+		bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
 		if (bytes != 0)
 			break;
 
@@ -2315,8 +2342,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
-		fp = frame.next_frame;
+		fp = (void __user *)frame.next_frame;
 	}
+	pagefault_enable();
 }
 
 /*
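
For reference, the reasoning in the filter_events() comment above can be shown in isolation. The following is a minimal user-space sketch, not kernel code: demo_event_map(), demo_filter_events() and the event-name strings are hypothetical stand-ins, and only the i/offset bookkeeping mirrors the patch.

/*
 * Standalone illustration of the offset compensation in filter_events():
 * when entries are removed from a NULL-terminated array, the array index
 * no longer matches the original event index, so lookups into an
 * index-based map must add the number of entries removed so far.
 */
#include <stdio.h>

/* Hypothetical stand-in for x86_pmu.event_map(): nonzero means supported. */
static int demo_event_map(int idx)
{
	static const int supported[] = { 1, 0, 1, 1, 0, 1 };
	return (idx >= 0 && idx < 6) ? supported[idx] : 0;
}

/* Drop unsupported entries in place, compensating the map index. */
static void demo_filter_events(const char **attrs)
{
	int offset = 0;	/* how many entries were removed so far */
	int i, j;

	for (i = 0; attrs[i]; i++) {
		if (demo_event_map(i + offset))
			continue;

		/* shift the remaining entries down, including the NULL */
		for (j = i; attrs[j]; j++)
			attrs[j] = attrs[j + 1];

		/* re-check the attr that was shifted into slot i */
		i--;
		/*
		 * The map is indexed by the original event number, while i
		 * now walks the compacted array, so remember the removals.
		 */
		offset++;
	}
}

int main(void)
{
	const char *attrs[] = { "cycles", "instructions", "cache-refs",
				"cache-misses", "branches", "branch-misses",
				NULL };
	int i;

	demo_filter_events(attrs);
	for (i = 0; attrs[i]; i++)
		printf("%s\n", attrs[i]);
	return 0;
}

Compiled and run, the sketch prints cycles, cache-refs, cache-misses and branch-misses: entries whose map value is zero are removed, and offset keeps later demo_event_map() lookups aligned with the original event indices even though the array has been compacted.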