Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_counter.c	312
-rw-r--r--	kernel/sched.c	3
2 files changed, 180 insertions(+), 135 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 29b685f551aa..1a933a221ea4 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -124,7 +124,7 @@ void perf_enable(void)
 
 static void get_ctx(struct perf_counter_context *ctx)
 {
-	atomic_inc(&ctx->refcount);
+	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
 }
 
 static void free_ctx(struct rcu_head *head)
@@ -175,6 +175,11 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 			spin_unlock_irqrestore(&ctx->lock, *flags);
 			goto retry;
 		}
+
+		if (!atomic_inc_not_zero(&ctx->refcount)) {
+			spin_unlock_irqrestore(&ctx->lock, *flags);
+			ctx = NULL;
+		}
 	}
 	rcu_read_unlock();
 	return ctx;
@@ -193,7 +198,6 @@ static struct perf_counter_context *perf_pin_task_context(struct task_struct *ta
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		++ctx->pin_count;
-		get_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 	return ctx;
@@ -1283,7 +1287,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
 		if (!interrupts) {
 			perf_disable();
 			counter->pmu->disable(counter);
-			atomic_set(&hwc->period_left, 0);
+			atomic64_set(&hwc->period_left, 0);
 			counter->pmu->enable(counter);
 			perf_enable();
 		}
@@ -1459,11 +1463,6 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 			put_ctx(parent_ctx);
 			ctx->parent_ctx = NULL;		/* no longer a clone */
 		}
-		/*
-		 * Get an extra reference before dropping the lock so that
-		 * this context won't get freed if the task exits.
-		 */
-		get_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
@@ -1553,7 +1552,7 @@ static int perf_release(struct inode *inode, struct file *file)
 static ssize_t
 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 {
-	u64 values[3];
+	u64 values[4];
 	int n;
 
 	/*
@@ -1620,22 +1619,6 @@ static void perf_counter_reset(struct perf_counter *counter)
 	perf_counter_update_userpage(counter);
 }
 
-static void perf_counter_for_each_sibling(struct perf_counter *counter,
-					  void (*func)(struct perf_counter *))
-{
-	struct perf_counter_context *ctx = counter->ctx;
-	struct perf_counter *sibling;
-
-	WARN_ON_ONCE(ctx->parent_ctx);
-	mutex_lock(&ctx->mutex);
-	counter = counter->group_leader;
-
-	func(counter);
-	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
-		func(sibling);
-	mutex_unlock(&ctx->mutex);
-}
-
 /*
  * Holding the top-level counter's child_mutex means that any
  * descendant process that has inherited this counter will block
@@ -1658,14 +1641,18 @@ static void perf_counter_for_each_child(struct perf_counter *counter,
 static void perf_counter_for_each(struct perf_counter *counter,
 				  void (*func)(struct perf_counter *))
 {
-	struct perf_counter *child;
+	struct perf_counter_context *ctx = counter->ctx;
+	struct perf_counter *sibling;
 
-	WARN_ON_ONCE(counter->ctx->parent_ctx);
-	mutex_lock(&counter->child_mutex);
-	perf_counter_for_each_sibling(counter, func);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_for_each_sibling(child, func);
-	mutex_unlock(&counter->child_mutex);
+	WARN_ON_ONCE(ctx->parent_ctx);
+	mutex_lock(&ctx->mutex);
+	counter = counter->group_leader;
+
+	perf_counter_for_each_child(counter, func);
+	func(counter);
+	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+		perf_counter_for_each_child(counter, func);
+	mutex_unlock(&ctx->mutex);
 }
 
 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
@@ -1806,6 +1793,12 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct perf_mmap_data *data;
 	int ret = VM_FAULT_SIGBUS;
 
+	if (vmf->flags & FAULT_FLAG_MKWRITE) {
+		if (vmf->pgoff == 0)
+			ret = 0;
+		return ret;
+	}
+
 	rcu_read_lock();
 	data = rcu_dereference(counter->data);
 	if (!data)
@@ -1819,9 +1812,16 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		if ((unsigned)nr > data->nr_pages)
 			goto unlock;
 
+		if (vmf->flags & FAULT_FLAG_WRITE)
+			goto unlock;
+
 		vmf->page = virt_to_page(data->data_pages[nr]);
 	}
+
 	get_page(vmf->page);
+	vmf->page->mapping = vma->vm_file->f_mapping;
+	vmf->page->index = vmf->pgoff;
+
 	ret = 0;
 unlock:
 	rcu_read_unlock();
@@ -1874,6 +1874,14 @@ fail:
 	return -ENOMEM;
 }
 
+static void perf_mmap_free_page(unsigned long addr)
+{
+	struct page *page = virt_to_page(addr);
+
+	page->mapping = NULL;
+	__free_page(page);
+}
+
 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
 {
 	struct perf_mmap_data *data;
@@ -1881,9 +1889,10 @@ static void __perf_mmap_data_free(struct rcu_head *rcu_head)
 
 	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
 
-	free_page((unsigned long)data->user_page);
+	perf_mmap_free_page((unsigned long)data->user_page);
 	for (i = 0; i < data->nr_pages; i++)
-		free_page((unsigned long)data->data_pages[i]);
+		perf_mmap_free_page((unsigned long)data->data_pages[i]);
+
 	kfree(data);
 }
 
@@ -1920,9 +1929,10 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 }
 
 static struct vm_operations_struct perf_mmap_vmops = {
 	.open = perf_mmap_open,
 	.close = perf_mmap_close,
 	.fault = perf_mmap_fault,
+	.page_mkwrite = perf_mmap_fault,
 };
 
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1936,7 +1946,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	long user_extra, extra;
 	int ret = 0;
 
-	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
+	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
 	vma_size = vma->vm_end - vma->vm_start;
@@ -1995,10 +2005,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	atomic_long_add(user_extra, &user->locked_vm);
 	vma->vm_mm->locked_vm += extra;
 	counter->data->nr_locked = extra;
+	if (vma->vm_flags & VM_WRITE)
+		counter->data->writable = 1;
+
 unlock:
 	mutex_unlock(&counter->mmap_mutex);
 
-	vma->vm_flags &= ~VM_MAYWRITE;
 	vma->vm_flags |= VM_RESERVED;
 	vma->vm_ops = &perf_mmap_vmops;
 
@@ -2175,11 +2187,38 @@ struct perf_output_handle {
 	unsigned long		head;
 	unsigned long		offset;
 	int			nmi;
-	int			overflow;
+	int			sample;
 	int			locked;
 	unsigned long		flags;
 };
 
+static bool perf_output_space(struct perf_mmap_data *data,
+			      unsigned int offset, unsigned int head)
+{
+	unsigned long tail;
+	unsigned long mask;
+
+	if (!data->writable)
+		return true;
+
+	mask = (data->nr_pages << PAGE_SHIFT) - 1;
+	/*
+	 * Userspace could choose to issue a mb() before updating the tail
+	 * pointer. So that all reads will be completed before the write is
+	 * issued.
+	 */
+	tail = ACCESS_ONCE(data->user_page->data_tail);
+	smp_rmb();
+
+	offset = (offset - tail) & mask;
+	head = (head - tail) & mask;
+
+	if ((int)(head - offset) < 0)
+		return false;
+
+	return true;
+}
+
 static void perf_output_wakeup(struct perf_output_handle *handle)
 {
 	atomic_set(&handle->data->poll, POLL_IN);
@@ -2270,12 +2309,57 @@ out:
 	local_irq_restore(handle->flags);
 }
 
+static void perf_output_copy(struct perf_output_handle *handle,
+			     const void *buf, unsigned int len)
+{
+	unsigned int pages_mask;
+	unsigned int offset;
+	unsigned int size;
+	void **pages;
+
+	offset = handle->offset;
+	pages_mask = handle->data->nr_pages - 1;
+	pages = handle->data->data_pages;
+
+	do {
+		unsigned int page_offset;
+		int nr;
+
+		nr = (offset >> PAGE_SHIFT) & pages_mask;
+		page_offset = offset & (PAGE_SIZE - 1);
+		size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
+
+		memcpy(pages[nr] + page_offset, buf, size);
+
+		len -= size;
+		buf += size;
+		offset += size;
+	} while (len);
+
+	handle->offset = offset;
+
+	/*
+	 * Check we didn't copy past our reservation window, taking the
+	 * possible unsigned int wrap into account.
+	 */
+	WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
+}
+
+#define perf_output_put(handle, x) \
+	perf_output_copy((handle), &(x), sizeof(x))
+
 static int perf_output_begin(struct perf_output_handle *handle,
 			     struct perf_counter *counter, unsigned int size,
-			     int nmi, int overflow)
+			     int nmi, int sample)
 {
 	struct perf_mmap_data *data;
 	unsigned int offset, head;
+	int have_lost;
+	struct {
+		struct perf_event_header header;
+		u64 id;
+		u64 lost;
+	} lost_event;
 
 	/*
 	 * For inherited counters we send all the output towards the parent.
@@ -2288,19 +2372,25 @@ static int perf_output_begin(struct perf_output_handle *handle,
 	if (!data)
 		goto out;
 
 	handle->data = data;
 	handle->counter = counter;
 	handle->nmi = nmi;
-	handle->overflow = overflow;
+	handle->sample = sample;
 
 	if (!data->nr_pages)
 		goto fail;
 
+	have_lost = atomic_read(&data->lost);
+	if (have_lost)
+		size += sizeof(lost_event);
+
 	perf_output_lock(handle);
 
 	do {
 		offset = head = atomic_long_read(&data->head);
 		head += size;
+		if (unlikely(!perf_output_space(data, offset, head)))
+			goto fail;
 	} while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
 
 	handle->offset = offset;
@@ -2309,55 +2399,27 @@ static int perf_output_begin(struct perf_output_handle *handle,
 	if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
 		atomic_set(&data->wakeup, 1);
 
+	if (have_lost) {
+		lost_event.header.type = PERF_EVENT_LOST;
+		lost_event.header.misc = 0;
+		lost_event.header.size = sizeof(lost_event);
+		lost_event.id = counter->id;
+		lost_event.lost = atomic_xchg(&data->lost, 0);
+
+		perf_output_put(handle, lost_event);
+	}
+
 	return 0;
 
 fail:
-	perf_output_wakeup(handle);
+	atomic_inc(&data->lost);
+	perf_output_unlock(handle);
 out:
 	rcu_read_unlock();
 
 	return -ENOSPC;
 }
 
-static void perf_output_copy(struct perf_output_handle *handle,
-			     const void *buf, unsigned int len)
-{
-	unsigned int pages_mask;
-	unsigned int offset;
-	unsigned int size;
-	void **pages;
-
-	offset = handle->offset;
-	pages_mask = handle->data->nr_pages - 1;
-	pages = handle->data->data_pages;
-
-	do {
-		unsigned int page_offset;
-		int nr;
-
-		nr = (offset >> PAGE_SHIFT) & pages_mask;
-		page_offset = offset & (PAGE_SIZE - 1);
-		size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
-
-		memcpy(pages[nr] + page_offset, buf, size);
-
-		len -= size;
-		buf += size;
-		offset += size;
-	} while (len);
-
-	handle->offset = offset;
-
-	/*
-	 * Check we didn't copy past our reservation window, taking the
-	 * possible unsigned int wrap into account.
-	 */
-	WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
-}
-
-#define perf_output_put(handle, x) \
-	perf_output_copy((handle), &(x), sizeof(x))
-
 static void perf_output_end(struct perf_output_handle *handle)
 {
 	struct perf_counter *counter = handle->counter;
@@ -2365,7 +2427,7 @@ static void perf_output_end(struct perf_output_handle *handle)
 
 	int wakeup_events = counter->attr.wakeup_events;
 
-	if (handle->overflow && wakeup_events) {
+	if (handle->sample && wakeup_events) {
 		int events = atomic_inc_return(&data->events);
 		if (events >= wakeup_events) {
 			atomic_sub(wakeup_events, &data->events);
@@ -2970,7 +3032,7 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 }
 
 /*
- * Generic counter overflow handling.
+ * Generic counter overflow handling, sampling.
  */
 
 int perf_counter_overflow(struct perf_counter *counter, int nmi,
@@ -3109,20 +3171,15 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 }
 
 static void perf_swcounter_overflow(struct perf_counter *counter,
-				    int nmi, struct pt_regs *regs, u64 addr)
+				    int nmi, struct perf_sample_data *data)
 {
-	struct perf_sample_data data = {
-		.regs = regs,
-		.addr = addr,
-		.period = counter->hw.last_period,
-	};
+	data->period = counter->hw.last_period;
 
 	perf_swcounter_update(counter);
 	perf_swcounter_set_period(counter);
-	if (perf_counter_overflow(counter, nmi, &data))
+	if (perf_counter_overflow(counter, nmi, data))
 		/* soft-disable the counter */
 		;
-
 }
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
@@ -3187,18 +3244,18 @@ static int perf_swcounter_match(struct perf_counter *counter,
 }
 
 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
-			       int nmi, struct pt_regs *regs, u64 addr)
+			       int nmi, struct perf_sample_data *data)
 {
 	int neg = atomic64_add_negative(nr, &counter->hw.count);
 
-	if (counter->hw.sample_period && !neg && regs)
-		perf_swcounter_overflow(counter, nmi, regs, addr);
+	if (counter->hw.sample_period && !neg && data->regs)
+		perf_swcounter_overflow(counter, nmi, data);
 }
 
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
-				     enum perf_type_id type, u32 event,
-				     u64 nr, int nmi, struct pt_regs *regs,
-				     u64 addr)
+				     enum perf_type_id type,
+				     u32 event, u64 nr, int nmi,
+				     struct perf_sample_data *data)
 {
 	struct perf_counter *counter;
 
@@ -3207,8 +3264,8 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_swcounter_match(counter, type, event, regs))
-			perf_swcounter_add(counter, nr, nmi, regs, addr);
+		if (perf_swcounter_match(counter, type, event, data->regs))
+			perf_swcounter_add(counter, nr, nmi, data);
 	}
 	rcu_read_unlock();
 }
@@ -3227,9 +3284,9 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
 	return &cpuctx->recursion[0];
 }
 
-static void __perf_swcounter_event(enum perf_type_id type, u32 event,
-				   u64 nr, int nmi, struct pt_regs *regs,
-				   u64 addr)
+static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
+				    u64 nr, int nmi,
+				    struct perf_sample_data *data)
 {
 	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
 	int *recursion = perf_swcounter_recursion_context(cpuctx);
@@ -3242,7 +3299,7 @@ static void __perf_swcounter_event(enum perf_type_id type, u32 event,
 	barrier();
 
 	perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
-				 nr, nmi, regs, addr);
+				 nr, nmi, data);
 	rcu_read_lock();
 	/*
 	 * doesn't really matter which of the child contexts the
@@ -3250,7 +3307,7 @@ static void __perf_swcounter_event(enum perf_type_id type, u32 event,
 	 */
 	ctx = rcu_dereference(current->perf_counter_ctxp);
 	if (ctx)
-		perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
+		perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
 	rcu_read_unlock();
 
 	barrier();
@@ -3263,7 +3320,12 @@ out:
 void
 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	__perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
+	struct perf_sample_data data = {
+		.regs = regs,
+		.addr = addr,
+	};
+
+	do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
 }
 
 static void perf_swcounter_read(struct perf_counter *counter)
@@ -3404,36 +3466,18 @@ static const struct pmu perf_ops_task_clock = {
 	.read = task_clock_perf_counter_read,
 };
 
-/*
- * Software counter: cpu migrations
- */
-void perf_counter_task_migration(struct task_struct *task, int cpu)
-{
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-	struct perf_counter_context *ctx;
-
-	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
-				 PERF_COUNT_SW_CPU_MIGRATIONS,
-				 1, 1, NULL, 0);
-
-	ctx = perf_pin_task_context(task);
-	if (ctx) {
-		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
-					 PERF_COUNT_SW_CPU_MIGRATIONS,
-					 1, 1, NULL, 0);
-		perf_unpin_context(ctx);
-	}
-}
-
 #ifdef CONFIG_EVENT_PROFILE
 void perf_tpcounter_event(int event_id)
 {
-	struct pt_regs *regs = get_irq_regs();
+	struct perf_sample_data data = {
+		.regs = get_irq_regs();
+		.addr = 0,
+	};
 
-	if (!regs)
-		regs = task_pt_regs(current);
+	if (!data.regs)
+		data.regs = task_pt_regs(current);
 
-	__perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
+	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
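
[Illustrative note, not part of the commit] The perf_output_space() check added above only helps if the consumer publishes its read position through the user page's data_tail after it has finished reading. A minimal sketch of such a userspace reader follows; apart from data_tail/data_head, struct perf_event_header and the PERF_EVENT_LOST record emitted by perf_output_begin(), every name below is hypothetical.

/*
 * Sketch of a userspace ring-buffer reader, assuming the mmap layout of
 * this period: a control page carrying data_head/data_tail, followed by
 * 2^n data pages.  Function and variable names are made up.
 */
#include <stdint.h>
#include <stddef.h>

struct perf_event_header {		/* mirrors the kernel layout */
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

#define PERF_EVENT_LOST 2	/* value from the ABI headers, not shown in this diff */

static void drain_ring(volatile uint64_t *data_head,
		       volatile uint64_t *data_tail,
		       const unsigned char *base, size_t size)
{
	uint64_t head = *data_head;
	__sync_synchronize();		/* order the head load before the data loads */

	uint64_t tail = *data_tail;
	while (tail < head) {
		const struct perf_event_header *hdr =
			(const void *)(base + (tail & (size - 1)));

		if (hdr->type == PERF_EVENT_LOST) {
			/* the kernel dropped records while the buffer was
			 * full; the payload carries the counter id and count */
		}

		/* a real reader must also handle records that wrap around
		 * the end of the buffer; skipped here for brevity */
		tail += hdr->size;
	}

	__sync_synchronize();		/* complete all reads before publishing */
	*data_tail = tail;		/* lets perf_output_space() reuse the space */
}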
diff --git a/kernel/sched.c b/kernel/sched.c
index 92e51287b980..7c9098d186e6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1978,7 +1978,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-		perf_counter_task_migration(p, new_cpu);
+		perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+				     1, 1, NULL, 0);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 			 new_cfsrq->min_vruntime;
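
[Illustrative note, not part of the commit] The sched.c hunk drops the dedicated perf_counter_task_migration() helper in favour of the generic software-counter entry point whose signature appears earlier in this diff. Any other call site would count an event the same way; the wrapper below is hypothetical and merely restates that calling pattern.

/*
 * Hypothetical call site, mirroring the new set_task_cpu() hunk above.
 * Arguments follow perf_swcounter_event(u32 event, u64 nr, int nmi,
 * struct pt_regs *regs, u64 addr) as declared in this patch.
 */
static inline void count_one_migration(void)
{
	/* event id, count = 1, nmi context = 1, no regs, no address */
	perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
}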