Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile | 3
-rw-r--r--  kernel/audit.c | 18
-rw-r--r--  kernel/audit_watch.c | 2
-rw-r--r--  kernel/auditsc.c | 6
-rw-r--r--  kernel/cgroup.c | 1128
-rw-r--r--  kernel/cgroup_debug.c | 105
-rw-r--r--  kernel/cgroup_freezer.c | 15
-rw-r--r--  kernel/cpuset.c | 66
-rw-r--r--  kernel/cred.c | 19
-rw-r--r--  kernel/exit.c | 164
-rw-r--r--  kernel/fork.c | 75
-rw-r--r--  kernel/futex.c | 6
-rw-r--r--  kernel/gcov/Kconfig | 2
-rw-r--r--  kernel/hrtimer.c | 95
-rw-r--r--  kernel/hung_task.c | 4
-rw-r--r--  kernel/itimer.c | 169
-rw-r--r--  kernel/kallsyms.c | 3
-rw-r--r--  kernel/kprobes.c | 6
-rw-r--r--  kernel/lockdep.c | 3
-rw-r--r--  kernel/lockdep_proc.c | 2
-rw-r--r--  kernel/module.c | 166
-rw-r--r--  kernel/ns_cgroup.c | 16
-rw-r--r--  kernel/panic.c | 2
-rw-r--r--  kernel/params.c | 7
-rw-r--r--  kernel/perf_event.c (renamed from kernel/perf_counter.c) | 2685
-rw-r--r--  kernel/pid.c | 15
-rw-r--r--  kernel/pid_namespace.c | 2
-rw-r--r--  kernel/posix-cpu-timers.c | 155
-rw-r--r--  kernel/power/process.c | 1
-rw-r--r--  kernel/power/snapshot.c | 2
-rw-r--r--  kernel/power/swap.c | 1
-rw-r--r--  kernel/printk.c | 27
-rw-r--r--  kernel/ptrace.c | 11
-rw-r--r--  kernel/relay.c | 2
-rw-r--r--  kernel/res_counter.c | 3
-rw-r--r--  kernel/resource.c | 23
-rw-r--r--  kernel/sched.c | 97
-rw-r--r--  kernel/sched_clock.c | 4
-rw-r--r--  kernel/sched_fair.c | 4
-rw-r--r--  kernel/signal.c | 168
-rw-r--r--  kernel/slow-work.c | 12
-rw-r--r--  kernel/smp.c | 36
-rw-r--r--  kernel/softlockup.c | 4
-rw-r--r--  kernel/sys.c | 46
-rw-r--r--  kernel/sys_ni.c | 3
-rw-r--r--  kernel/sysctl.c | 149
-rw-r--r--  kernel/time/Makefile | 2
-rw-r--r--  kernel/time/clocksource.c | 4
-rw-r--r--  kernel/time/tick-sched.c | 9
-rw-r--r--  kernel/time/timeconv.c | 127
-rw-r--r--  kernel/time/timer_list.c | 2
-rw-r--r--  kernel/time/timer_stats.c | 2
-rw-r--r--  kernel/timer.c | 36
-rw-r--r--  kernel/trace/Kconfig | 2
-rw-r--r--  kernel/trace/blktrace.c | 39
-rw-r--r--  kernel/trace/ftrace.c | 58
-rw-r--r--  kernel/trace/kmemtrace.c | 2
-rw-r--r--  kernel/trace/trace.c | 13
-rw-r--r--  kernel/trace/trace_branch.c | 8
-rw-r--r--  kernel/trace/trace_event_profile.c | 15
-rw-r--r--  kernel/trace/trace_events.c | 7
-rw-r--r--  kernel/trace/trace_hw_branches.c | 10
-rw-r--r--  kernel/trace/trace_output.c | 18
-rw-r--r--  kernel/trace/trace_stack.c | 4
-rw-r--r--  kernel/trace/trace_syscalls.c | 10
-rw-r--r--  kernel/tracepoint.c | 2
-rw-r--r--  kernel/uid16.c | 1
-rw-r--r--  kernel/utsname_sysctl.c | 4
68 files changed, 3538 insertions, 2369 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 7c9b0a585502..b8d4cd8ac0b9 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -58,7 +58,6 @@ obj-$(CONFIG_KEXEC) += kexec.o
 obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
 obj-$(CONFIG_COMPAT) += compat.o
 obj-$(CONFIG_CGROUPS) += cgroup.o
-obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
 obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o
 obj-$(CONFIG_CPUSETS) += cpuset.o
 obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
@@ -95,7 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/audit.c b/kernel/audit.c
index defc2e6f1e3b..5feed232be9d 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -855,18 +855,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		break;
 	}
 	case AUDIT_SIGNAL_INFO:
-		err = security_secid_to_secctx(audit_sig_sid, &ctx, &len);
-		if (err)
-			return err;
+		len = 0;
+		if (audit_sig_sid) {
+			err = security_secid_to_secctx(audit_sig_sid, &ctx, &len);
+			if (err)
+				return err;
+		}
 		sig_data = kmalloc(sizeof(*sig_data) + len, GFP_KERNEL);
 		if (!sig_data) {
-			security_release_secctx(ctx, len);
+			if (audit_sig_sid)
+				security_release_secctx(ctx, len);
 			return -ENOMEM;
 		}
 		sig_data->uid = audit_sig_uid;
 		sig_data->pid = audit_sig_pid;
-		memcpy(sig_data->ctx, ctx, len);
-		security_release_secctx(ctx, len);
+		if (audit_sig_sid) {
+			memcpy(sig_data->ctx, ctx, len);
+			security_release_secctx(ctx, len);
+		}
 		audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
 				 0, 0, sig_data, sizeof(*sig_data) + len);
 		kfree(sig_data);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 0e96dbc60ea9..cc7e87936cbc 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -45,8 +45,8 @@
 
 struct audit_watch {
 	atomic_t count;	/* reference count */
-	char *path;	/* insertion path */
 	dev_t dev;	/* associated superblock device */
+	char *path;	/* insertion path */
 	unsigned long ino;	/* associated inode number */
 	struct audit_parent *parent;	/* associated parent */
 	struct list_head wlist;	/* entry in parent->watches list */
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 68d3c6a0ecd6..267e484f0198 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -168,12 +168,12 @@ struct audit_context {
 	int in_syscall;	/* 1 if task is in a syscall */
 	enum audit_state state, current_state;
 	unsigned int serial;	/* serial number for record */
-	struct timespec ctime;	/* time of syscall entry */
 	int major;	/* syscall number */
+	struct timespec ctime;	/* time of syscall entry */
 	unsigned long argv[4];	/* syscall arguments */
-	int return_valid;	/* return code is valid */
 	long return_code;/* syscall return code */
 	u64 prio;
+	int return_valid;	/* return code is valid */
 	int name_count;
 	struct audit_names names[AUDIT_NAMES];
 	char * filterkey;	/* key for rule that triggered record */
@@ -198,8 +198,8 @@ struct audit_context {
 	char target_comm[TASK_COMM_LEN];
 
 	struct audit_tree_refs *trees, *first_trees;
-	int tree_count;
 	struct list_head killed_trees;
+	int tree_count;
 
 	int type;
 	union {
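
The audit_watch.c and auditsc.c hunks above only reorder structure members. The changelog is not shown in this view, so take the motivation as an assumption: such reorders are usually about packing, i.e. keeping same-size members adjacent so the compiler inserts less padding. A minimal userspace sketch of the effect on a typical LP64 target, with plain int standing in for atomic_t/dev_t and purely illustrative struct names:

#include <stdio.h>

/* "before": a 4-byte count, then an 8-byte pointer forces 4 bytes of
 * padding, and the trailing 4-byte dev adds 4 more for alignment. */
struct watch_before {
	int   count;
	char *path;
	int   dev;
};	/* typically 24 bytes on LP64 */

/* "after": the two 4-byte members share one 8-byte slot. */
struct watch_after {
	int   count;
	int   dev;
	char *path;
};	/* typically 16 bytes on LP64 */

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct watch_before), sizeof(struct watch_after));
	return 0;
}
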
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c7ece8f027f2..ca83b73fba19 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/cgroup.h> 25#include <linux/cgroup.h>
26#include <linux/ctype.h>
26#include <linux/errno.h> 27#include <linux/errno.h>
27#include <linux/fs.h> 28#include <linux/fs.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
@@ -48,6 +49,8 @@
48#include <linux/namei.h> 49#include <linux/namei.h>
49#include <linux/smp_lock.h> 50#include <linux/smp_lock.h>
50#include <linux/pid_namespace.h> 51#include <linux/pid_namespace.h>
52#include <linux/idr.h>
53#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
51 54
52#include <asm/atomic.h> 55#include <asm/atomic.h>
53 56
@@ -60,6 +63,8 @@ static struct cgroup_subsys *subsys[] = {
60#include <linux/cgroup_subsys.h> 63#include <linux/cgroup_subsys.h>
61}; 64};
62 65
66#define MAX_CGROUP_ROOT_NAMELEN 64
67
63/* 68/*
64 * A cgroupfs_root represents the root of a cgroup hierarchy, 69 * A cgroupfs_root represents the root of a cgroup hierarchy,
65 * and may be associated with a superblock to form an active 70 * and may be associated with a superblock to form an active
@@ -74,6 +79,9 @@ struct cgroupfs_root {
74 */ 79 */
75 unsigned long subsys_bits; 80 unsigned long subsys_bits;
76 81
82 /* Unique id for this hierarchy. */
83 int hierarchy_id;
84
77 /* The bitmask of subsystems currently attached to this hierarchy */ 85 /* The bitmask of subsystems currently attached to this hierarchy */
78 unsigned long actual_subsys_bits; 86 unsigned long actual_subsys_bits;
79 87
@@ -94,6 +102,9 @@ struct cgroupfs_root {
94 102
95 /* The path to use for release notifications. */ 103 /* The path to use for release notifications. */
96 char release_agent_path[PATH_MAX]; 104 char release_agent_path[PATH_MAX];
105
106 /* The name for this hierarchy - may be empty */
107 char name[MAX_CGROUP_ROOT_NAMELEN];
97}; 108};
98 109
99/* 110/*
@@ -141,6 +152,10 @@ struct css_id {
141static LIST_HEAD(roots); 152static LIST_HEAD(roots);
142static int root_count; 153static int root_count;
143 154
155static DEFINE_IDA(hierarchy_ida);
156static int next_hierarchy_id;
157static DEFINE_SPINLOCK(hierarchy_id_lock);
158
144/* dummytop is a shorthand for the dummy hierarchy's top cgroup */ 159/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
145#define dummytop (&rootnode.top_cgroup) 160#define dummytop (&rootnode.top_cgroup)
146 161
@@ -201,6 +216,7 @@ struct cg_cgroup_link {
201 * cgroup, anchored on cgroup->css_sets 216 * cgroup, anchored on cgroup->css_sets
202 */ 217 */
203 struct list_head cgrp_link_list; 218 struct list_head cgrp_link_list;
219 struct cgroup *cgrp;
204 /* 220 /*
205 * List running through cg_cgroup_links pointing at a 221 * List running through cg_cgroup_links pointing at a
206 * single css_set object, anchored on css_set->cg_links 222 * single css_set object, anchored on css_set->cg_links
@@ -227,8 +243,11 @@ static int cgroup_subsys_init_idr(struct cgroup_subsys *ss);
227static DEFINE_RWLOCK(css_set_lock); 243static DEFINE_RWLOCK(css_set_lock);
228static int css_set_count; 244static int css_set_count;
229 245
230/* hash table for cgroup groups. This improves the performance to 246/*
231 * find an existing css_set */ 247 * hash table for cgroup groups. This improves the performance to find
248 * an existing css_set. This hash doesn't (currently) take into
249 * account cgroups in empty hierarchies.
250 */
232#define CSS_SET_HASH_BITS 7 251#define CSS_SET_HASH_BITS 7
233#define CSS_SET_TABLE_SIZE (1 << CSS_SET_HASH_BITS) 252#define CSS_SET_TABLE_SIZE (1 << CSS_SET_HASH_BITS)
234static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE]; 253static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];
@@ -248,48 +267,22 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
248 return &css_set_table[index]; 267 return &css_set_table[index];
249} 268}
250 269
270static void free_css_set_rcu(struct rcu_head *obj)
271{
272 struct css_set *cg = container_of(obj, struct css_set, rcu_head);
273 kfree(cg);
274}
275
251/* We don't maintain the lists running through each css_set to its 276/* We don't maintain the lists running through each css_set to its
252 * task until after the first call to cgroup_iter_start(). This 277 * task until after the first call to cgroup_iter_start(). This
253 * reduces the fork()/exit() overhead for people who have cgroups 278 * reduces the fork()/exit() overhead for people who have cgroups
254 * compiled into their kernel but not actually in use */ 279 * compiled into their kernel but not actually in use */
255static int use_task_css_set_links __read_mostly; 280static int use_task_css_set_links __read_mostly;
256 281
257/* When we create or destroy a css_set, the operation simply 282static void __put_css_set(struct css_set *cg, int taskexit)
258 * takes/releases a reference count on all the cgroups referenced
259 * by subsystems in this css_set. This can end up multiple-counting
260 * some cgroups, but that's OK - the ref-count is just a
261 * busy/not-busy indicator; ensuring that we only count each cgroup
262 * once would require taking a global lock to ensure that no
263 * subsystems moved between hierarchies while we were doing so.
264 *
265 * Possible TODO: decide at boot time based on the number of
266 * registered subsystems and the number of CPUs or NUMA nodes whether
267 * it's better for performance to ref-count every subsystem, or to
268 * take a global lock and only add one ref count to each hierarchy.
269 */
270
271/*
272 * unlink a css_set from the list and free it
273 */
274static void unlink_css_set(struct css_set *cg)
275{ 283{
276 struct cg_cgroup_link *link; 284 struct cg_cgroup_link *link;
277 struct cg_cgroup_link *saved_link; 285 struct cg_cgroup_link *saved_link;
278
279 hlist_del(&cg->hlist);
280 css_set_count--;
281
282 list_for_each_entry_safe(link, saved_link, &cg->cg_links,
283 cg_link_list) {
284 list_del(&link->cg_link_list);
285 list_del(&link->cgrp_link_list);
286 kfree(link);
287 }
288}
289
290static void __put_css_set(struct css_set *cg, int taskexit)
291{
292 int i;
293 /* 286 /*
294 * Ensure that the refcount doesn't hit zero while any readers 287 * Ensure that the refcount doesn't hit zero while any readers
295 * can see it. Similar to atomic_dec_and_lock(), but for an 288 * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -302,21 +295,28 @@ static void __put_css_set(struct css_set *cg, int taskexit)
302 write_unlock(&css_set_lock); 295 write_unlock(&css_set_lock);
303 return; 296 return;
304 } 297 }
305 unlink_css_set(cg);
306 write_unlock(&css_set_lock);
307 298
308 rcu_read_lock(); 299 /* This css_set is dead. unlink it and release cgroup refcounts */
309 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { 300 hlist_del(&cg->hlist);
310 struct cgroup *cgrp = rcu_dereference(cg->subsys[i]->cgroup); 301 css_set_count--;
302
303 list_for_each_entry_safe(link, saved_link, &cg->cg_links,
304 cg_link_list) {
305 struct cgroup *cgrp = link->cgrp;
306 list_del(&link->cg_link_list);
307 list_del(&link->cgrp_link_list);
311 if (atomic_dec_and_test(&cgrp->count) && 308 if (atomic_dec_and_test(&cgrp->count) &&
312 notify_on_release(cgrp)) { 309 notify_on_release(cgrp)) {
313 if (taskexit) 310 if (taskexit)
314 set_bit(CGRP_RELEASABLE, &cgrp->flags); 311 set_bit(CGRP_RELEASABLE, &cgrp->flags);
315 check_for_release(cgrp); 312 check_for_release(cgrp);
316 } 313 }
314
315 kfree(link);
317 } 316 }
318 rcu_read_unlock(); 317
319 kfree(cg); 318 write_unlock(&css_set_lock);
319 call_rcu(&cg->rcu_head, free_css_set_rcu);
320} 320}
321 321
322/* 322/*
@@ -338,6 +338,78 @@ static inline void put_css_set_taskexit(struct css_set *cg)
338} 338}
339 339
340/* 340/*
341 * compare_css_sets - helper function for find_existing_css_set().
342 * @cg: candidate css_set being tested
343 * @old_cg: existing css_set for a task
344 * @new_cgrp: cgroup that's being entered by the task
345 * @template: desired set of css pointers in css_set (pre-calculated)
346 *
347 * Returns true if "cg" matches "old_cg" except for the hierarchy
348 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
349 */
350static bool compare_css_sets(struct css_set *cg,
351 struct css_set *old_cg,
352 struct cgroup *new_cgrp,
353 struct cgroup_subsys_state *template[])
354{
355 struct list_head *l1, *l2;
356
357 if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
358 /* Not all subsystems matched */
359 return false;
360 }
361
362 /*
363 * Compare cgroup pointers in order to distinguish between
364 * different cgroups in heirarchies with no subsystems. We
365 * could get by with just this check alone (and skip the
366 * memcmp above) but on most setups the memcmp check will
367 * avoid the need for this more expensive check on almost all
368 * candidates.
369 */
370
371 l1 = &cg->cg_links;
372 l2 = &old_cg->cg_links;
373 while (1) {
374 struct cg_cgroup_link *cgl1, *cgl2;
375 struct cgroup *cg1, *cg2;
376
377 l1 = l1->next;
378 l2 = l2->next;
379 /* See if we reached the end - both lists are equal length. */
380 if (l1 == &cg->cg_links) {
381 BUG_ON(l2 != &old_cg->cg_links);
382 break;
383 } else {
384 BUG_ON(l2 == &old_cg->cg_links);
385 }
386 /* Locate the cgroups associated with these links. */
387 cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
388 cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
389 cg1 = cgl1->cgrp;
390 cg2 = cgl2->cgrp;
391 /* Hierarchies should be linked in the same order. */
392 BUG_ON(cg1->root != cg2->root);
393
394 /*
395 * If this hierarchy is the hierarchy of the cgroup
396 * that's changing, then we need to check that this
397 * css_set points to the new cgroup; if it's any other
398 * hierarchy, then this css_set should point to the
399 * same cgroup as the old css_set.
400 */
401 if (cg1->root == new_cgrp->root) {
402 if (cg1 != new_cgrp)
403 return false;
404 } else {
405 if (cg1 != cg2)
406 return false;
407 }
408 }
409 return true;
410}
411
412/*
341 * find_existing_css_set() is a helper for 413 * find_existing_css_set() is a helper for
342 * find_css_set(), and checks to see whether an existing 414 * find_css_set(), and checks to see whether an existing
343 * css_set is suitable. 415 * css_set is suitable.
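
compare_css_sets(), added in the hunk above, can walk the cg_links lists of two css_sets in lockstep because every css_set holds exactly one link per active hierarchy and the links are kept in hierarchy-creation order (see the list_add_tail() change in link_css_set() further down). A toy userspace sketch of that lockstep-walk idea, using a hypothetical node type instead of the kernel's cg_cgroup_link/list_head:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	int value;		/* stands in for the cgroup a link points at */
	struct node *next;
};

/* Walk both lists in step; equal length is an invariant here, just as
 * compare_css_sets() BUGs if one cg_links list ends before the other. */
static bool lists_match(const struct node *a, const struct node *b)
{
	while (a || b) {
		assert(a && b);
		if (a->value != b->value)
			return false;
		a = a->next;
		b = b->next;
	}
	return true;
}

int main(void)
{
	struct node a2 = { 2, NULL }, a1 = { 1, &a2 };
	struct node b2 = { 2, NULL }, b1 = { 1, &b2 };

	printf("match: %d\n", lists_match(&a1, &b1));	/* 1 */
	return 0;
}
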
@@ -378,10 +450,11 @@ static struct css_set *find_existing_css_set(
378 450
379 hhead = css_set_hash(template); 451 hhead = css_set_hash(template);
380 hlist_for_each_entry(cg, node, hhead, hlist) { 452 hlist_for_each_entry(cg, node, hhead, hlist) {
381 if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) { 453 if (!compare_css_sets(cg, oldcg, cgrp, template))
382 /* All subsystems matched */ 454 continue;
383 return cg; 455
384 } 456 /* This css_set matches what we need */
457 return cg;
385 } 458 }
386 459
387 /* No existing cgroup group matched */ 460 /* No existing cgroup group matched */
@@ -435,8 +508,14 @@ static void link_css_set(struct list_head *tmp_cg_links,
435 link = list_first_entry(tmp_cg_links, struct cg_cgroup_link, 508 link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
436 cgrp_link_list); 509 cgrp_link_list);
437 link->cg = cg; 510 link->cg = cg;
511 link->cgrp = cgrp;
512 atomic_inc(&cgrp->count);
438 list_move(&link->cgrp_link_list, &cgrp->css_sets); 513 list_move(&link->cgrp_link_list, &cgrp->css_sets);
439 list_add(&link->cg_link_list, &cg->cg_links); 514 /*
515 * Always add links to the tail of the list so that the list
516 * is sorted by order of hierarchy creation
517 */
518 list_add_tail(&link->cg_link_list, &cg->cg_links);
440} 519}
441 520
442/* 521/*
@@ -451,11 +530,11 @@ static struct css_set *find_css_set(
451{ 530{
452 struct css_set *res; 531 struct css_set *res;
453 struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; 532 struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
454 int i;
455 533
456 struct list_head tmp_cg_links; 534 struct list_head tmp_cg_links;
457 535
458 struct hlist_head *hhead; 536 struct hlist_head *hhead;
537 struct cg_cgroup_link *link;
459 538
460 /* First see if we already have a cgroup group that matches 539 /* First see if we already have a cgroup group that matches
461 * the desired set */ 540 * the desired set */
@@ -489,20 +568,12 @@ static struct css_set *find_css_set(
489 568
490 write_lock(&css_set_lock); 569 write_lock(&css_set_lock);
491 /* Add reference counts and links from the new css_set. */ 570 /* Add reference counts and links from the new css_set. */
492 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { 571 list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
493 struct cgroup *cgrp = res->subsys[i]->cgroup; 572 struct cgroup *c = link->cgrp;
494 struct cgroup_subsys *ss = subsys[i]; 573 if (c->root == cgrp->root)
495 atomic_inc(&cgrp->count); 574 c = cgrp;
496 /* 575 link_css_set(&tmp_cg_links, res, c);
497 * We want to add a link once per cgroup, so we
498 * only do it for the first subsystem in each
499 * hierarchy
500 */
501 if (ss->root->subsys_list.next == &ss->sibling)
502 link_css_set(&tmp_cg_links, res, cgrp);
503 } 576 }
504 if (list_empty(&rootnode.subsys_list))
505 link_css_set(&tmp_cg_links, res, dummytop);
506 577
507 BUG_ON(!list_empty(&tmp_cg_links)); 578 BUG_ON(!list_empty(&tmp_cg_links));
508 579
@@ -518,6 +589,41 @@ static struct css_set *find_css_set(
518} 589}
519 590
520/* 591/*
592 * Return the cgroup for "task" from the given hierarchy. Must be
593 * called with cgroup_mutex held.
594 */
595static struct cgroup *task_cgroup_from_root(struct task_struct *task,
596 struct cgroupfs_root *root)
597{
598 struct css_set *css;
599 struct cgroup *res = NULL;
600
601 BUG_ON(!mutex_is_locked(&cgroup_mutex));
602 read_lock(&css_set_lock);
603 /*
604 * No need to lock the task - since we hold cgroup_mutex the
605 * task can't change groups, so the only thing that can happen
606 * is that it exits and its css is set back to init_css_set.
607 */
608 css = task->cgroups;
609 if (css == &init_css_set) {
610 res = &root->top_cgroup;
611 } else {
612 struct cg_cgroup_link *link;
613 list_for_each_entry(link, &css->cg_links, cg_link_list) {
614 struct cgroup *c = link->cgrp;
615 if (c->root == root) {
616 res = c;
617 break;
618 }
619 }
620 }
621 read_unlock(&css_set_lock);
622 BUG_ON(!res);
623 return res;
624}
625
626/*
521 * There is one global cgroup mutex. We also require taking 627 * There is one global cgroup mutex. We also require taking
522 * task_lock() when dereferencing a task's cgroup subsys pointers. 628 * task_lock() when dereferencing a task's cgroup subsys pointers.
523 * See "The task_lock() exception", at the end of this comment. 629 * See "The task_lock() exception", at the end of this comment.
@@ -596,8 +702,8 @@ void cgroup_unlock(void)
596static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode); 702static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
597static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); 703static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
598static int cgroup_populate_dir(struct cgroup *cgrp); 704static int cgroup_populate_dir(struct cgroup *cgrp);
599static struct inode_operations cgroup_dir_inode_operations; 705static const struct inode_operations cgroup_dir_inode_operations;
600static struct file_operations proc_cgroupstats_operations; 706static const struct file_operations proc_cgroupstats_operations;
601 707
602static struct backing_dev_info cgroup_backing_dev_info = { 708static struct backing_dev_info cgroup_backing_dev_info = {
603 .name = "cgroup", 709 .name = "cgroup",
@@ -677,6 +783,12 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
677 */ 783 */
678 deactivate_super(cgrp->root->sb); 784 deactivate_super(cgrp->root->sb);
679 785
786 /*
787 * if we're getting rid of the cgroup, refcount should ensure
788 * that there are no pidlists left.
789 */
790 BUG_ON(!list_empty(&cgrp->pidlists));
791
680 call_rcu(&cgrp->rcu_head, free_cgroup_rcu); 792 call_rcu(&cgrp->rcu_head, free_cgroup_rcu);
681 } 793 }
682 iput(inode); 794 iput(inode);
@@ -841,6 +953,8 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
841 seq_puts(seq, ",noprefix"); 953 seq_puts(seq, ",noprefix");
842 if (strlen(root->release_agent_path)) 954 if (strlen(root->release_agent_path))
843 seq_printf(seq, ",release_agent=%s", root->release_agent_path); 955 seq_printf(seq, ",release_agent=%s", root->release_agent_path);
956 if (strlen(root->name))
957 seq_printf(seq, ",name=%s", root->name);
844 mutex_unlock(&cgroup_mutex); 958 mutex_unlock(&cgroup_mutex);
845 return 0; 959 return 0;
846} 960}
@@ -849,6 +963,12 @@ struct cgroup_sb_opts {
849 unsigned long subsys_bits; 963 unsigned long subsys_bits;
850 unsigned long flags; 964 unsigned long flags;
851 char *release_agent; 965 char *release_agent;
966 char *name;
967 /* User explicitly requested empty subsystem */
968 bool none;
969
970 struct cgroupfs_root *new_root;
971
852}; 972};
853 973
854/* Convert a hierarchy specifier into a bitmask of subsystems and 974/* Convert a hierarchy specifier into a bitmask of subsystems and
@@ -863,9 +983,7 @@ static int parse_cgroupfs_options(char *data,
863 mask = ~(1UL << cpuset_subsys_id); 983 mask = ~(1UL << cpuset_subsys_id);
864#endif 984#endif
865 985
866 opts->subsys_bits = 0; 986 memset(opts, 0, sizeof(*opts));
867 opts->flags = 0;
868 opts->release_agent = NULL;
869 987
870 while ((token = strsep(&o, ",")) != NULL) { 988 while ((token = strsep(&o, ",")) != NULL) {
871 if (!*token) 989 if (!*token)
@@ -879,17 +997,42 @@ static int parse_cgroupfs_options(char *data,
879 if (!ss->disabled) 997 if (!ss->disabled)
880 opts->subsys_bits |= 1ul << i; 998 opts->subsys_bits |= 1ul << i;
881 } 999 }
1000 } else if (!strcmp(token, "none")) {
1001 /* Explicitly have no subsystems */
1002 opts->none = true;
882 } else if (!strcmp(token, "noprefix")) { 1003 } else if (!strcmp(token, "noprefix")) {
883 set_bit(ROOT_NOPREFIX, &opts->flags); 1004 set_bit(ROOT_NOPREFIX, &opts->flags);
884 } else if (!strncmp(token, "release_agent=", 14)) { 1005 } else if (!strncmp(token, "release_agent=", 14)) {
885 /* Specifying two release agents is forbidden */ 1006 /* Specifying two release agents is forbidden */
886 if (opts->release_agent) 1007 if (opts->release_agent)
887 return -EINVAL; 1008 return -EINVAL;
888 opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL); 1009 opts->release_agent =
1010 kstrndup(token + 14, PATH_MAX, GFP_KERNEL);
889 if (!opts->release_agent) 1011 if (!opts->release_agent)
890 return -ENOMEM; 1012 return -ENOMEM;
891 strncpy(opts->release_agent, token + 14, PATH_MAX - 1); 1013 } else if (!strncmp(token, "name=", 5)) {
892 opts->release_agent[PATH_MAX - 1] = 0; 1014 int i;
1015 const char *name = token + 5;
1016 /* Can't specify an empty name */
1017 if (!strlen(name))
1018 return -EINVAL;
1019 /* Must match [\w.-]+ */
1020 for (i = 0; i < strlen(name); i++) {
1021 char c = name[i];
1022 if (isalnum(c))
1023 continue;
1024 if ((c == '.') || (c == '-') || (c == '_'))
1025 continue;
1026 return -EINVAL;
1027 }
1028 /* Specifying two names is forbidden */
1029 if (opts->name)
1030 return -EINVAL;
1031 opts->name = kstrndup(name,
1032 MAX_CGROUP_ROOT_NAMELEN,
1033 GFP_KERNEL);
1034 if (!opts->name)
1035 return -ENOMEM;
893 } else { 1036 } else {
894 struct cgroup_subsys *ss; 1037 struct cgroup_subsys *ss;
895 int i; 1038 int i;
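
The new name= option above is validated character by character: alphanumerics plus '.', '-' and '_', i.e. [\w.-]+, which is why this patch adds the linux/ctype.h include at the top of the file. A standalone sketch of the same check against userspace <ctype.h>; valid_hierarchy_name is a hypothetical helper name, not the kernel function:

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

static bool valid_hierarchy_name(const char *name)
{
	if (!*name)			/* empty names are rejected */
		return false;
	for (; *name; name++) {
		unsigned char c = *name;

		if (isalnum(c) || c == '.' || c == '-' || c == '_')
			continue;
		return false;		/* anything else fails the parse */
	}
	return true;
}

int main(void)
{
	printf("%d %d\n", valid_hierarchy_name("my_hier.v1-test"),
	       valid_hierarchy_name("bad name"));	/* 1 0 */
	return 0;
}
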
@@ -906,6 +1049,8 @@ static int parse_cgroupfs_options(char *data,
906 } 1049 }
907 } 1050 }
908 1051
1052 /* Consistency checks */
1053
909 /* 1054 /*
910 * Option noprefix was introduced just for backward compatibility 1055 * Option noprefix was introduced just for backward compatibility
911 * with the old cpuset, so we allow noprefix only if mounting just 1056 * with the old cpuset, so we allow noprefix only if mounting just
@@ -915,8 +1060,16 @@ static int parse_cgroupfs_options(char *data,
915 (opts->subsys_bits & mask)) 1060 (opts->subsys_bits & mask))
916 return -EINVAL; 1061 return -EINVAL;
917 1062
918 /* We can't have an empty hierarchy */ 1063
919 if (!opts->subsys_bits) 1064 /* Can't specify "none" and some subsystems */
1065 if (opts->subsys_bits && opts->none)
1066 return -EINVAL;
1067
1068 /*
1069 * We either have to specify by name or by subsystems. (So all
1070 * empty hierarchies must have a name).
1071 */
1072 if (!opts->subsys_bits && !opts->name)
920 return -EINVAL; 1073 return -EINVAL;
921 1074
922 return 0; 1075 return 0;
@@ -944,6 +1097,12 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
944 goto out_unlock; 1097 goto out_unlock;
945 } 1098 }
946 1099
1100 /* Don't allow name to change at remount */
1101 if (opts.name && strcmp(opts.name, root->name)) {
1102 ret = -EINVAL;
1103 goto out_unlock;
1104 }
1105
947 ret = rebind_subsystems(root, opts.subsys_bits); 1106 ret = rebind_subsystems(root, opts.subsys_bits);
948 if (ret) 1107 if (ret)
949 goto out_unlock; 1108 goto out_unlock;
@@ -955,13 +1114,14 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
955 strcpy(root->release_agent_path, opts.release_agent); 1114 strcpy(root->release_agent_path, opts.release_agent);
956 out_unlock: 1115 out_unlock:
957 kfree(opts.release_agent); 1116 kfree(opts.release_agent);
1117 kfree(opts.name);
958 mutex_unlock(&cgroup_mutex); 1118 mutex_unlock(&cgroup_mutex);
959 mutex_unlock(&cgrp->dentry->d_inode->i_mutex); 1119 mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
960 unlock_kernel(); 1120 unlock_kernel();
961 return ret; 1121 return ret;
962} 1122}
963 1123
964static struct super_operations cgroup_ops = { 1124static const struct super_operations cgroup_ops = {
965 .statfs = simple_statfs, 1125 .statfs = simple_statfs,
966 .drop_inode = generic_delete_inode, 1126 .drop_inode = generic_delete_inode,
967 .show_options = cgroup_show_options, 1127 .show_options = cgroup_show_options,
@@ -974,9 +1134,10 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
974 INIT_LIST_HEAD(&cgrp->children); 1134 INIT_LIST_HEAD(&cgrp->children);
975 INIT_LIST_HEAD(&cgrp->css_sets); 1135 INIT_LIST_HEAD(&cgrp->css_sets);
976 INIT_LIST_HEAD(&cgrp->release_list); 1136 INIT_LIST_HEAD(&cgrp->release_list);
977 INIT_LIST_HEAD(&cgrp->pids_list); 1137 INIT_LIST_HEAD(&cgrp->pidlists);
978 init_rwsem(&cgrp->pids_mutex); 1138 mutex_init(&cgrp->pidlist_mutex);
979} 1139}
1140
980static void init_cgroup_root(struct cgroupfs_root *root) 1141static void init_cgroup_root(struct cgroupfs_root *root)
981{ 1142{
982 struct cgroup *cgrp = &root->top_cgroup; 1143 struct cgroup *cgrp = &root->top_cgroup;
@@ -988,33 +1149,106 @@ static void init_cgroup_root(struct cgroupfs_root *root)
988 init_cgroup_housekeeping(cgrp); 1149 init_cgroup_housekeeping(cgrp);
989} 1150}
990 1151
1152static bool init_root_id(struct cgroupfs_root *root)
1153{
1154 int ret = 0;
1155
1156 do {
1157 if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
1158 return false;
1159 spin_lock(&hierarchy_id_lock);
1160 /* Try to allocate the next unused ID */
1161 ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
1162 &root->hierarchy_id);
1163 if (ret == -ENOSPC)
1164 /* Try again starting from 0 */
1165 ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
1166 if (!ret) {
1167 next_hierarchy_id = root->hierarchy_id + 1;
1168 } else if (ret != -EAGAIN) {
1169 /* Can only get here if the 31-bit IDR is full ... */
1170 BUG_ON(ret);
1171 }
1172 spin_unlock(&hierarchy_id_lock);
1173 } while (ret);
1174 return true;
1175}
1176
991static int cgroup_test_super(struct super_block *sb, void *data) 1177static int cgroup_test_super(struct super_block *sb, void *data)
992{ 1178{
993 struct cgroupfs_root *new = data; 1179 struct cgroup_sb_opts *opts = data;
994 struct cgroupfs_root *root = sb->s_fs_info; 1180 struct cgroupfs_root *root = sb->s_fs_info;
995 1181
996 /* First check subsystems */ 1182 /* If we asked for a name then it must match */
997 if (new->subsys_bits != root->subsys_bits) 1183 if (opts->name && strcmp(opts->name, root->name))
998 return 0; 1184 return 0;
999 1185
1000 /* Next check flags */ 1186 /*
1001 if (new->flags != root->flags) 1187 * If we asked for subsystems (or explicitly for no
1188 * subsystems) then they must match
1189 */
1190 if ((opts->subsys_bits || opts->none)
1191 && (opts->subsys_bits != root->subsys_bits))
1002 return 0; 1192 return 0;
1003 1193
1004 return 1; 1194 return 1;
1005} 1195}
1006 1196
1197static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
1198{
1199 struct cgroupfs_root *root;
1200
1201 if (!opts->subsys_bits && !opts->none)
1202 return NULL;
1203
1204 root = kzalloc(sizeof(*root), GFP_KERNEL);
1205 if (!root)
1206 return ERR_PTR(-ENOMEM);
1207
1208 if (!init_root_id(root)) {
1209 kfree(root);
1210 return ERR_PTR(-ENOMEM);
1211 }
1212 init_cgroup_root(root);
1213
1214 root->subsys_bits = opts->subsys_bits;
1215 root->flags = opts->flags;
1216 if (opts->release_agent)
1217 strcpy(root->release_agent_path, opts->release_agent);
1218 if (opts->name)
1219 strcpy(root->name, opts->name);
1220 return root;
1221}
1222
1223static void cgroup_drop_root(struct cgroupfs_root *root)
1224{
1225 if (!root)
1226 return;
1227
1228 BUG_ON(!root->hierarchy_id);
1229 spin_lock(&hierarchy_id_lock);
1230 ida_remove(&hierarchy_ida, root->hierarchy_id);
1231 spin_unlock(&hierarchy_id_lock);
1232 kfree(root);
1233}
1234
1007static int cgroup_set_super(struct super_block *sb, void *data) 1235static int cgroup_set_super(struct super_block *sb, void *data)
1008{ 1236{
1009 int ret; 1237 int ret;
1010 struct cgroupfs_root *root = data; 1238 struct cgroup_sb_opts *opts = data;
1239
1240 /* If we don't have a new root, we can't set up a new sb */
1241 if (!opts->new_root)
1242 return -EINVAL;
1243
1244 BUG_ON(!opts->subsys_bits && !opts->none);
1011 1245
1012 ret = set_anon_super(sb, NULL); 1246 ret = set_anon_super(sb, NULL);
1013 if (ret) 1247 if (ret)
1014 return ret; 1248 return ret;
1015 1249
1016 sb->s_fs_info = root; 1250 sb->s_fs_info = opts->new_root;
1017 root->sb = sb; 1251 opts->new_root->sb = sb;
1018 1252
1019 sb->s_blocksize = PAGE_CACHE_SIZE; 1253 sb->s_blocksize = PAGE_CACHE_SIZE;
1020 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1254 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
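
init_root_id() in the hunk above hands each hierarchy a small integer ID from an IDA, preferring IDs at or above the rolling next_hierarchy_id cursor and falling back to a search from 0 only when the space above the cursor is exhausted. A toy userspace sketch of that allocation policy; the fixed-size bitmap allocator is purely illustrative and not the kernel's IDA:

#include <stdbool.h>
#include <stdio.h>

#define MAX_IDS 8		/* tiny bound, just for the illustration */

static bool used[MAX_IDS];
static int next_id;		/* plays the role of next_hierarchy_id */

/* Lowest free ID >= next_id; wrap to 0 only if that range is full. */
static int alloc_id(void)
{
	for (int pass = 0; pass < 2; pass++) {
		int start = pass ? 0 : next_id;

		for (int id = start; id < MAX_IDS; id++) {
			if (!used[id]) {
				used[id] = true;
				next_id = id + 1;
				return id;
			}
		}
	}
	return -1;		/* ID space exhausted */
}

int main(void)
{
	printf("%d %d\n", alloc_id(), alloc_id());	/* 0 1 */
	used[0] = false;				/* drop hierarchy 0 */
	printf("%d\n", alloc_id());	/* 2: freed IDs are not reused yet */
	return 0;
}
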
@@ -1051,48 +1285,43 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
1051 void *data, struct vfsmount *mnt) 1285 void *data, struct vfsmount *mnt)
1052{ 1286{
1053 struct cgroup_sb_opts opts; 1287 struct cgroup_sb_opts opts;
1288 struct cgroupfs_root *root;
1054 int ret = 0; 1289 int ret = 0;
1055 struct super_block *sb; 1290 struct super_block *sb;
1056 struct cgroupfs_root *root; 1291 struct cgroupfs_root *new_root;
1057 struct list_head tmp_cg_links;
1058 1292
1059 /* First find the desired set of subsystems */ 1293 /* First find the desired set of subsystems */
1060 ret = parse_cgroupfs_options(data, &opts); 1294 ret = parse_cgroupfs_options(data, &opts);
1061 if (ret) { 1295 if (ret)
1062 kfree(opts.release_agent); 1296 goto out_err;
1063 return ret;
1064 }
1065
1066 root = kzalloc(sizeof(*root), GFP_KERNEL);
1067 if (!root) {
1068 kfree(opts.release_agent);
1069 return -ENOMEM;
1070 }
1071 1297
1072 init_cgroup_root(root); 1298 /*
1073 root->subsys_bits = opts.subsys_bits; 1299 * Allocate a new cgroup root. We may not need it if we're
1074 root->flags = opts.flags; 1300 * reusing an existing hierarchy.
1075 if (opts.release_agent) { 1301 */
1076 strcpy(root->release_agent_path, opts.release_agent); 1302 new_root = cgroup_root_from_opts(&opts);
1077 kfree(opts.release_agent); 1303 if (IS_ERR(new_root)) {
1304 ret = PTR_ERR(new_root);
1305 goto out_err;
1078 } 1306 }
1307 opts.new_root = new_root;
1079 1308
1080 sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root); 1309 /* Locate an existing or new sb for this hierarchy */
1081 1310 sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts);
1082 if (IS_ERR(sb)) { 1311 if (IS_ERR(sb)) {
1083 kfree(root); 1312 ret = PTR_ERR(sb);
1084 return PTR_ERR(sb); 1313 cgroup_drop_root(opts.new_root);
1314 goto out_err;
1085 } 1315 }
1086 1316
1087 if (sb->s_fs_info != root) { 1317 root = sb->s_fs_info;
1088 /* Reusing an existing superblock */ 1318 BUG_ON(!root);
1089 BUG_ON(sb->s_root == NULL); 1319 if (root == opts.new_root) {
1090 kfree(root); 1320 /* We used the new root structure, so this is a new hierarchy */
1091 root = NULL; 1321 struct list_head tmp_cg_links;
1092 } else {
1093 /* New superblock */
1094 struct cgroup *root_cgrp = &root->top_cgroup; 1322 struct cgroup *root_cgrp = &root->top_cgroup;
1095 struct inode *inode; 1323 struct inode *inode;
1324 struct cgroupfs_root *existing_root;
1096 int i; 1325 int i;
1097 1326
1098 BUG_ON(sb->s_root != NULL); 1327 BUG_ON(sb->s_root != NULL);
@@ -1105,6 +1334,18 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
1105 mutex_lock(&inode->i_mutex); 1334 mutex_lock(&inode->i_mutex);
1106 mutex_lock(&cgroup_mutex); 1335 mutex_lock(&cgroup_mutex);
1107 1336
1337 if (strlen(root->name)) {
1338 /* Check for name clashes with existing mounts */
1339 for_each_active_root(existing_root) {
1340 if (!strcmp(existing_root->name, root->name)) {
1341 ret = -EBUSY;
1342 mutex_unlock(&cgroup_mutex);
1343 mutex_unlock(&inode->i_mutex);
1344 goto drop_new_super;
1345 }
1346 }
1347 }
1348
1108 /* 1349 /*
1109 * We're accessing css_set_count without locking 1350 * We're accessing css_set_count without locking
1110 * css_set_lock here, but that's OK - it can only be 1351 * css_set_lock here, but that's OK - it can only be
@@ -1123,7 +1364,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
1123 if (ret == -EBUSY) { 1364 if (ret == -EBUSY) {
1124 mutex_unlock(&cgroup_mutex); 1365 mutex_unlock(&cgroup_mutex);
1125 mutex_unlock(&inode->i_mutex); 1366 mutex_unlock(&inode->i_mutex);
1126 goto free_cg_links; 1367 free_cg_links(&tmp_cg_links);
1368 goto drop_new_super;
1127 } 1369 }
1128 1370
1129 /* EBUSY should be the only error here */ 1371 /* EBUSY should be the only error here */
@@ -1155,17 +1397,27 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
1155 BUG_ON(root->number_of_cgroups != 1); 1397 BUG_ON(root->number_of_cgroups != 1);
1156 1398
1157 cgroup_populate_dir(root_cgrp); 1399 cgroup_populate_dir(root_cgrp);
1158 mutex_unlock(&inode->i_mutex);
1159 mutex_unlock(&cgroup_mutex); 1400 mutex_unlock(&cgroup_mutex);
1401 mutex_unlock(&inode->i_mutex);
1402 } else {
1403 /*
1404 * We re-used an existing hierarchy - the new root (if
1405 * any) is not needed
1406 */
1407 cgroup_drop_root(opts.new_root);
1160 } 1408 }
1161 1409
1162 simple_set_mnt(mnt, sb); 1410 simple_set_mnt(mnt, sb);
1411 kfree(opts.release_agent);
1412 kfree(opts.name);
1163 return 0; 1413 return 0;
1164 1414
1165 free_cg_links:
1166 free_cg_links(&tmp_cg_links);
1167 drop_new_super: 1415 drop_new_super:
1168 deactivate_locked_super(sb); 1416 deactivate_locked_super(sb);
1417 out_err:
1418 kfree(opts.release_agent);
1419 kfree(opts.name);
1420
1169 return ret; 1421 return ret;
1170} 1422}
1171 1423
@@ -1211,7 +1463,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
1211 mutex_unlock(&cgroup_mutex); 1463 mutex_unlock(&cgroup_mutex);
1212 1464
1213 kill_litter_super(sb); 1465 kill_litter_super(sb);
1214 kfree(root); 1466 cgroup_drop_root(root);
1215} 1467}
1216 1468
1217static struct file_system_type cgroup_fs_type = { 1469static struct file_system_type cgroup_fs_type = {
@@ -1276,27 +1528,6 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
1276 return 0; 1528 return 0;
1277} 1529}
1278 1530
1279/*
1280 * Return the first subsystem attached to a cgroup's hierarchy, and
1281 * its subsystem id.
1282 */
1283
1284static void get_first_subsys(const struct cgroup *cgrp,
1285 struct cgroup_subsys_state **css, int *subsys_id)
1286{
1287 const struct cgroupfs_root *root = cgrp->root;
1288 const struct cgroup_subsys *test_ss;
1289 BUG_ON(list_empty(&root->subsys_list));
1290 test_ss = list_entry(root->subsys_list.next,
1291 struct cgroup_subsys, sibling);
1292 if (css) {
1293 *css = cgrp->subsys[test_ss->subsys_id];
1294 BUG_ON(!*css);
1295 }
1296 if (subsys_id)
1297 *subsys_id = test_ss->subsys_id;
1298}
1299
1300/** 1531/**
1301 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp' 1532 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
1302 * @cgrp: the cgroup the task is attaching to 1533 * @cgrp: the cgroup the task is attaching to
@@ -1313,18 +1544,15 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1313 struct css_set *cg; 1544 struct css_set *cg;
1314 struct css_set *newcg; 1545 struct css_set *newcg;
1315 struct cgroupfs_root *root = cgrp->root; 1546 struct cgroupfs_root *root = cgrp->root;
1316 int subsys_id;
1317
1318 get_first_subsys(cgrp, NULL, &subsys_id);
1319 1547
1320 /* Nothing to do if the task is already in that cgroup */ 1548 /* Nothing to do if the task is already in that cgroup */
1321 oldcgrp = task_cgroup(tsk, subsys_id); 1549 oldcgrp = task_cgroup_from_root(tsk, root);
1322 if (cgrp == oldcgrp) 1550 if (cgrp == oldcgrp)
1323 return 0; 1551 return 0;
1324 1552
1325 for_each_subsys(root, ss) { 1553 for_each_subsys(root, ss) {
1326 if (ss->can_attach) { 1554 if (ss->can_attach) {
1327 retval = ss->can_attach(ss, cgrp, tsk); 1555 retval = ss->can_attach(ss, cgrp, tsk, false);
1328 if (retval) 1556 if (retval)
1329 return retval; 1557 return retval;
1330 } 1558 }
@@ -1362,7 +1590,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1362 1590
1363 for_each_subsys(root, ss) { 1591 for_each_subsys(root, ss) {
1364 if (ss->attach) 1592 if (ss->attach)
1365 ss->attach(ss, cgrp, oldcgrp, tsk); 1593 ss->attach(ss, cgrp, oldcgrp, tsk, false);
1366 } 1594 }
1367 set_bit(CGRP_RELEASABLE, &oldcgrp->flags); 1595 set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
1368 synchronize_rcu(); 1596 synchronize_rcu();
@@ -1423,15 +1651,6 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
1423 return ret; 1651 return ret;
1424} 1652}
1425 1653
1426/* The various types of files and directories in a cgroup file system */
1427enum cgroup_filetype {
1428 FILE_ROOT,
1429 FILE_DIR,
1430 FILE_TASKLIST,
1431 FILE_NOTIFY_ON_RELEASE,
1432 FILE_RELEASE_AGENT,
1433};
1434
1435/** 1654/**
1436 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive. 1655 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
1437 * @cgrp: the cgroup to be checked for liveness 1656 * @cgrp: the cgroup to be checked for liveness
@@ -1644,7 +1863,7 @@ static int cgroup_seqfile_release(struct inode *inode, struct file *file)
1644 return single_release(inode, file); 1863 return single_release(inode, file);
1645} 1864}
1646 1865
1647static struct file_operations cgroup_seqfile_operations = { 1866static const struct file_operations cgroup_seqfile_operations = {
1648 .read = seq_read, 1867 .read = seq_read,
1649 .write = cgroup_file_write, 1868 .write = cgroup_file_write,
1650 .llseek = seq_lseek, 1869 .llseek = seq_lseek,
@@ -1703,7 +1922,7 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
1703 return simple_rename(old_dir, old_dentry, new_dir, new_dentry); 1922 return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
1704} 1923}
1705 1924
1706static struct file_operations cgroup_file_operations = { 1925static const struct file_operations cgroup_file_operations = {
1707 .read = cgroup_file_read, 1926 .read = cgroup_file_read,
1708 .write = cgroup_file_write, 1927 .write = cgroup_file_write,
1709 .llseek = generic_file_llseek, 1928 .llseek = generic_file_llseek,
@@ -1711,7 +1930,7 @@ static struct file_operations cgroup_file_operations = {
1711 .release = cgroup_file_release, 1930 .release = cgroup_file_release,
1712}; 1931};
1713 1932
1714static struct inode_operations cgroup_dir_inode_operations = { 1933static const struct inode_operations cgroup_dir_inode_operations = {
1715 .lookup = simple_lookup, 1934 .lookup = simple_lookup,
1716 .mkdir = cgroup_mkdir, 1935 .mkdir = cgroup_mkdir,
1717 .rmdir = cgroup_rmdir, 1936 .rmdir = cgroup_rmdir,
@@ -1876,7 +2095,7 @@ int cgroup_task_count(const struct cgroup *cgrp)
1876 * the start of a css_set 2095 * the start of a css_set
1877 */ 2096 */
1878static void cgroup_advance_iter(struct cgroup *cgrp, 2097static void cgroup_advance_iter(struct cgroup *cgrp,
1879 struct cgroup_iter *it) 2098 struct cgroup_iter *it)
1880{ 2099{
1881 struct list_head *l = it->cg_link; 2100 struct list_head *l = it->cg_link;
1882 struct cg_cgroup_link *link; 2101 struct cg_cgroup_link *link;
@@ -2129,7 +2348,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
2129} 2348}
2130 2349
2131/* 2350/*
2132 * Stuff for reading the 'tasks' file. 2351 * Stuff for reading the 'tasks'/'procs' files.
2133 * 2352 *
2134 * Reading this file can return large amounts of data if a cgroup has 2353 * Reading this file can return large amounts of data if a cgroup has
2135 * *lots* of attached tasks. So it may need several calls to read(), 2354 * *lots* of attached tasks. So it may need several calls to read(),
@@ -2139,27 +2358,196 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
2139 */ 2358 */
2140 2359
2141/* 2360/*
2142 * Load into 'pidarray' up to 'npids' of the tasks using cgroup 2361 * The following two functions "fix" the issue where there are more pids
2143 * 'cgrp'. Return actual number of pids loaded. No need to 2362 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
2144 * task_lock(p) when reading out p->cgroup, since we're in an RCU 2363 * TODO: replace with a kernel-wide solution to this problem
2145 * read section, so the css_set can't go away, and is 2364 */
2146 * immutable after creation. 2365#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
2366static void *pidlist_allocate(int count)
2367{
2368 if (PIDLIST_TOO_LARGE(count))
2369 return vmalloc(count * sizeof(pid_t));
2370 else
2371 return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
2372}
2373static void pidlist_free(void *p)
2374{
2375 if (is_vmalloc_addr(p))
2376 vfree(p);
2377 else
2378 kfree(p);
2379}
2380static void *pidlist_resize(void *p, int newcount)
2381{
2382 void *newlist;
2383 /* note: if new alloc fails, old p will still be valid either way */
2384 if (is_vmalloc_addr(p)) {
2385 newlist = vmalloc(newcount * sizeof(pid_t));
2386 if (!newlist)
2387 return NULL;
2388 memcpy(newlist, p, newcount * sizeof(pid_t));
2389 vfree(p);
2390 } else {
2391 newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
2392 }
2393 return newlist;
2394}
2395
2396/*
2397 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
2398 * If the new stripped list is sufficiently smaller and there's enough memory
2399 * to allocate a new buffer, will let go of the unneeded memory. Returns the
2400 * number of unique elements.
2401 */
2402/* is the size difference enough that we should re-allocate the array? */
2403#define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new))
2404static int pidlist_uniq(pid_t **p, int length)
2405{
2406 int src, dest = 1;
2407 pid_t *list = *p;
2408 pid_t *newlist;
2409
2410 /*
2411 * we presume the 0th element is unique, so i starts at 1. trivial
2412 * edge cases first; no work needs to be done for either
2413 */
2414 if (length == 0 || length == 1)
2415 return length;
2416 /* src and dest walk down the list; dest counts unique elements */
2417 for (src = 1; src < length; src++) {
2418 /* find next unique element */
2419 while (list[src] == list[src-1]) {
2420 src++;
2421 if (src == length)
2422 goto after;
2423 }
2424 /* dest always points to where the next unique element goes */
2425 list[dest] = list[src];
2426 dest++;
2427 }
2428after:
2429 /*
2430 * if the length difference is large enough, we want to allocate a
2431 * smaller buffer to save memory. if this fails due to out of memory,
2432 * we'll just stay with what we've got.
2433 */
2434 if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
2435 newlist = pidlist_resize(list, dest);
2436 if (newlist)
2437 *p = newlist;
2438 }
2439 return dest;
2440}
2441
2442static int cmppid(const void *a, const void *b)
2443{
2444 return *(pid_t *)a - *(pid_t *)b;
2445}
2446
2447/*
2448 * find the appropriate pidlist for our purpose (given procs vs tasks)
2449 * returns with the lock on that pidlist already held, and takes care
2450 * of the use count, or returns NULL with no locks held if we're out of
2451 * memory.
2147 */ 2452 */
2148static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp) 2453static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
2454 enum cgroup_filetype type)
2149{ 2455{
2150 int n = 0, pid; 2456 struct cgroup_pidlist *l;
2457 /* don't need task_nsproxy() if we're looking at ourself */
2458 struct pid_namespace *ns = get_pid_ns(current->nsproxy->pid_ns);
2459 /*
2460 * We can't drop the pidlist_mutex before taking the l->mutex in case
2461 * the last ref-holder is trying to remove l from the list at the same
2462 * time. Holding the pidlist_mutex precludes somebody taking whichever
2463 * list we find out from under us - compare release_pid_array().
2464 */
2465 mutex_lock(&cgrp->pidlist_mutex);
2466 list_for_each_entry(l, &cgrp->pidlists, links) {
2467 if (l->key.type == type && l->key.ns == ns) {
2468 /* found a matching list - drop the extra refcount */
2469 put_pid_ns(ns);
2470 /* make sure l doesn't vanish out from under us */
2471 down_write(&l->mutex);
2472 mutex_unlock(&cgrp->pidlist_mutex);
2473 l->use_count++;
2474 return l;
2475 }
2476 }
2477 /* entry not found; create a new one */
2478 l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
2479 if (!l) {
2480 mutex_unlock(&cgrp->pidlist_mutex);
2481 put_pid_ns(ns);
2482 return l;
2483 }
2484 init_rwsem(&l->mutex);
2485 down_write(&l->mutex);
2486 l->key.type = type;
2487 l->key.ns = ns;
2488 l->use_count = 0; /* don't increment here */
2489 l->list = NULL;
2490 l->owner = cgrp;
2491 list_add(&l->links, &cgrp->pidlists);
2492 mutex_unlock(&cgrp->pidlist_mutex);
2493 return l;
2494}
2495
2496/*
2497 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
2498 */
2499static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
2500 struct cgroup_pidlist **lp)
2501{
2502 pid_t *array;
2503 int length;
2504 int pid, n = 0; /* used for populating the array */
2151 struct cgroup_iter it; 2505 struct cgroup_iter it;
2152 struct task_struct *tsk; 2506 struct task_struct *tsk;
2507 struct cgroup_pidlist *l;
2508
2509 /*
2510 * If cgroup gets more users after we read count, we won't have
2511 * enough space - tough. This race is indistinguishable to the
2512 * caller from the case that the additional cgroup users didn't
2513 * show up until sometime later on.
2514 */
2515 length = cgroup_task_count(cgrp);
2516 array = pidlist_allocate(length);
2517 if (!array)
2518 return -ENOMEM;
2519 /* now, populate the array */
2153 cgroup_iter_start(cgrp, &it); 2520 cgroup_iter_start(cgrp, &it);
2154 while ((tsk = cgroup_iter_next(cgrp, &it))) { 2521 while ((tsk = cgroup_iter_next(cgrp, &it))) {
2155 if (unlikely(n == npids)) 2522 if (unlikely(n == length))
2156 break; 2523 break;
2157 pid = task_pid_vnr(tsk); 2524 /* get tgid or pid for procs or tasks file respectively */
2158 if (pid > 0) 2525 if (type == CGROUP_FILE_PROCS)
2159 pidarray[n++] = pid; 2526 pid = task_tgid_vnr(tsk);
2527 else
2528 pid = task_pid_vnr(tsk);
2529 if (pid > 0) /* make sure to only use valid results */
2530 array[n++] = pid;
2160 } 2531 }
2161 cgroup_iter_end(cgrp, &it); 2532 cgroup_iter_end(cgrp, &it);
2162 return n; 2533 length = n;
2534 /* now sort & (if procs) strip out duplicates */
2535 sort(array, length, sizeof(pid_t), cmppid, NULL);
2536 if (type == CGROUP_FILE_PROCS)
2537 length = pidlist_uniq(&array, length);
2538 l = cgroup_pidlist_find(cgrp, type);
2539 if (!l) {
2540 pidlist_free(array);
2541 return -ENOMEM;
2542 }
2543 /* store array, freeing old if necessary - lock already held */
2544 pidlist_free(l->list);
2545 l->list = array;
2546 l->length = length;
2547 l->use_count++;
2548 up_write(&l->mutex);
2549 *lp = l;
2550 return 0;
2163} 2551}
2164 2552
2165/** 2553/**
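
pidlist_array_load() above collects the pids, sorts them with cmppid(), and for the cgroup.procs file strips duplicate tgids with pidlist_uniq(). A self-contained sketch of that sort-then-dedup step on a plain int array (qsort stands in for the kernel's sort(), int for pid_t):

#include <stdio.h>
#include <stdlib.h>

static int cmppid(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

/* In-place dedup of a sorted array; returns the new length, which is
 * what pidlist_uniq() reports back for the procs file. */
static int uniq(int *list, int length)
{
	int src, dest = 1;

	if (length < 2)
		return length;
	for (src = 1; src < length; src++) {
		if (list[src] == list[src - 1])
			continue;	/* skip duplicate entries */
		list[dest++] = list[src];
	}
	return dest;
}

int main(void)
{
	int pids[] = { 42, 7, 42, 7, 7, 100 };
	int n = sizeof(pids) / sizeof(pids[0]);

	qsort(pids, n, sizeof(int), cmppid);
	n = uniq(pids, n);
	for (int i = 0; i < n; i++)
		printf("%d\n", pids[i]);	/* 7, 42, 100 */
	return 0;
}
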
@@ -2216,37 +2604,14 @@ err:
2216 return ret; 2604 return ret;
2217} 2605}
2218 2606
2219/*
2220 * Cache pids for all threads in the same pid namespace that are
2221 * opening the same "tasks" file.
2222 */
2223struct cgroup_pids {
2224 /* The node in cgrp->pids_list */
2225 struct list_head list;
2226 /* The cgroup those pids belong to */
2227 struct cgroup *cgrp;
2228 /* The namepsace those pids belong to */
2229 struct pid_namespace *ns;
2230 /* Array of process ids in the cgroup */
2231 pid_t *tasks_pids;
2232 /* How many files are using the this tasks_pids array */
2233 int use_count;
2234 /* Length of the current tasks_pids array */
2235 int length;
2236};
2237
2238static int cmppid(const void *a, const void *b)
2239{
2240 return *(pid_t *)a - *(pid_t *)b;
2241}
2242 2607
2243/* 2608/*
2244 * seq_file methods for the "tasks" file. The seq_file position is the 2609 * seq_file methods for the tasks/procs files. The seq_file position is the
2245 * next pid to display; the seq_file iterator is a pointer to the pid 2610 * next pid to display; the seq_file iterator is a pointer to the pid
2246 * in the cgroup->tasks_pids array. 2611 * in the cgroup->l->list array.
2247 */ 2612 */
2248 2613
2249static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos) 2614static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
2250{ 2615{
2251 /* 2616 /*
2252 * Initially we receive a position value that corresponds to 2617 * Initially we receive a position value that corresponds to
@@ -2254,48 +2619,45 @@ static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
2254 * after a seek to the start). Use a binary-search to find the 2619 * after a seek to the start). Use a binary-search to find the
2255 * next pid to display, if any 2620 * next pid to display, if any
2256 */ 2621 */
2257 struct cgroup_pids *cp = s->private; 2622 struct cgroup_pidlist *l = s->private;
2258 struct cgroup *cgrp = cp->cgrp;
2259 int index = 0, pid = *pos; 2623 int index = 0, pid = *pos;
2260 int *iter; 2624 int *iter;
2261 2625
2262 down_read(&cgrp->pids_mutex); 2626 down_read(&l->mutex);
2263 if (pid) { 2627 if (pid) {
2264 int end = cp->length; 2628 int end = l->length;
2265 2629
2266 while (index < end) { 2630 while (index < end) {
2267 int mid = (index + end) / 2; 2631 int mid = (index + end) / 2;
2268 if (cp->tasks_pids[mid] == pid) { 2632 if (l->list[mid] == pid) {
2269 index = mid; 2633 index = mid;
2270 break; 2634 break;
2271 } else if (cp->tasks_pids[mid] <= pid) 2635 } else if (l->list[mid] <= pid)
2272 index = mid + 1; 2636 index = mid + 1;
2273 else 2637 else
2274 end = mid; 2638 end = mid;
2275 } 2639 }
2276 } 2640 }
2277 /* If we're off the end of the array, we're done */ 2641 /* If we're off the end of the array, we're done */
2278 if (index >= cp->length) 2642 if (index >= l->length)
2279 return NULL; 2643 return NULL;
2280 /* Update the abstract position to be the actual pid that we found */ 2644 /* Update the abstract position to be the actual pid that we found */
2281 iter = cp->tasks_pids + index; 2645 iter = l->list + index;
2282 *pos = *iter; 2646 *pos = *iter;
2283 return iter; 2647 return iter;
2284} 2648}
2285 2649
2286static void cgroup_tasks_stop(struct seq_file *s, void *v) 2650static void cgroup_pidlist_stop(struct seq_file *s, void *v)
2287{ 2651{
2288 struct cgroup_pids *cp = s->private; 2652 struct cgroup_pidlist *l = s->private;
2289 struct cgroup *cgrp = cp->cgrp; 2653 up_read(&l->mutex);
2290 up_read(&cgrp->pids_mutex);
2291} 2654}
2292 2655
2293static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos) 2656static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
2294{ 2657{
2295 struct cgroup_pids *cp = s->private; 2658 struct cgroup_pidlist *l = s->private;
2296 int *p = v; 2659 pid_t *p = v;
2297 int *end = cp->tasks_pids + cp->length; 2660 pid_t *end = l->list + l->length;
2298
2299 /* 2661 /*
2300 * Advance to the next pid in the array. If this goes off the 2662 * Advance to the next pid in the array. If this goes off the
2301 * end, we're done 2663 * end, we're done
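
cgroup_pidlist_start() above resumes a seq_file read by binary-searching the sorted pid array for the saved position: the exact pid if it is still present, otherwise the first larger one. The same search sketched over a plain sorted array; resume_index is a hypothetical name used only for this illustration:

#include <stdio.h>

/* Index to resume iteration from: the entry equal to pid if present,
 * otherwise the first entry greater than pid (== length when done). */
static int resume_index(const int *list, int length, int pid)
{
	int index = 0, end = length;

	while (index < end) {
		int mid = (index + end) / 2;

		if (list[mid] == pid) {
			index = mid;
			break;
		} else if (list[mid] <= pid) {
			index = mid + 1;
		} else {
			end = mid;
		}
	}
	return index;
}

int main(void)
{
	int pids[] = { 3, 8, 15, 22 };

	printf("%d %d %d\n",
	       resume_index(pids, 4, 8),	/* 1: exact match */
	       resume_index(pids, 4, 9),	/* 2: next larger pid */
	       resume_index(pids, 4, 99));	/* 4: off the end */
	return 0;
}
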
@@ -2309,124 +2671,107 @@ static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos)
2309 } 2671 }
2310} 2672}
2311 2673
2312static int cgroup_tasks_show(struct seq_file *s, void *v) 2674static int cgroup_pidlist_show(struct seq_file *s, void *v)
2313{ 2675{
2314 return seq_printf(s, "%d\n", *(int *)v); 2676 return seq_printf(s, "%d\n", *(int *)v);
2315} 2677}
2316 2678
2317static struct seq_operations cgroup_tasks_seq_operations = { 2679/*
2318 .start = cgroup_tasks_start, 2680 * seq_operations functions for iterating on pidlists through seq_file -
2319 .stop = cgroup_tasks_stop, 2681 * independent of whether it's tasks or procs
2320 .next = cgroup_tasks_next, 2682 */
2321 .show = cgroup_tasks_show, 2683static const struct seq_operations cgroup_pidlist_seq_operations = {
2684 .start = cgroup_pidlist_start,
2685 .stop = cgroup_pidlist_stop,
2686 .next = cgroup_pidlist_next,
2687 .show = cgroup_pidlist_show,
2322}; 2688};
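
For reference, the seq_file contract behind these four callbacks: ->start() positions the iterator at *pos (returning NULL when past the end), ->next() advances it, ->show() prints one record, and ->stop() drops whatever lock ->start() took. A self-contained module sketch of the same pattern over a static array (hypothetical names, not part of this patch; much newer kernels register the proc file through struct proc_ops, but this matches the file_operations-based API used here):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static const int demo_pids[] = { 3, 7, 12, 40, 41 };

static void *demo_start(struct seq_file *s, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(demo_pids))
		return NULL;
	return (void *)&demo_pids[*pos];
}

static void *demo_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return demo_start(s, pos);
}

static void demo_stop(struct seq_file *s, void *v)
{
	/* nothing to unlock in this sketch */
}

static int demo_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(const int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init demo_init(void)
{
	proc_create("pidlist_demo", 0444, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("pidlist_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
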
2323 2689
2324static void release_cgroup_pid_array(struct cgroup_pids *cp) 2690static void cgroup_release_pid_array(struct cgroup_pidlist *l)
2325{ 2691{
2326 struct cgroup *cgrp = cp->cgrp; 2692 /*
2327 2693 * the case where we're the last user of this particular pidlist will
2328 down_write(&cgrp->pids_mutex); 2694 * have us remove it from the cgroup's list, which entails taking the
2329 BUG_ON(!cp->use_count); 2695 * mutex. since in pidlist_find the pidlist->lock depends on cgroup->
2330 if (!--cp->use_count) { 2696 * pidlist_mutex, we have to take pidlist_mutex first.
2331 list_del(&cp->list); 2697 */
2332 put_pid_ns(cp->ns); 2698 mutex_lock(&l->owner->pidlist_mutex);
2333 kfree(cp->tasks_pids); 2699 down_write(&l->mutex);
2334 kfree(cp); 2700 BUG_ON(!l->use_count);
2701 if (!--l->use_count) {
2702 /* we're the last user if refcount is 0; remove and free */
2703 list_del(&l->links);
2704 mutex_unlock(&l->owner->pidlist_mutex);
2705 pidlist_free(l->list);
2706 put_pid_ns(l->key.ns);
2707 up_write(&l->mutex);
2708 kfree(l);
2709 return;
2335 } 2710 }
2336 up_write(&cgrp->pids_mutex); 2711 mutex_unlock(&l->owner->pidlist_mutex);
2712 up_write(&l->mutex);
2337} 2713}
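
The locking comment above encodes the usual deadlock-avoidance rule: whenever both locks are needed, the outer cgroup pidlist_mutex is taken before the per-list l->mutex, matching the nesting used when the list was looked up. A small pthread sketch of the same discipline (hypothetical names, userspace analogy only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t owner_mutex = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t list_mutex  = PTHREAD_MUTEX_INITIALIZER;	/* inner */
static int use_count = 1;

/* Every path that needs both locks acquires them in the same order:
 * owner_mutex first, then list_mutex -- so two threads can never end up
 * holding one lock each while waiting for the other. */
static void release_list(void)
{
	pthread_mutex_lock(&owner_mutex);
	pthread_mutex_lock(&list_mutex);
	if (--use_count == 0)
		printf("last user: unlink and free the list\n");
	pthread_mutex_unlock(&list_mutex);
	pthread_mutex_unlock(&owner_mutex);
}

int main(void)
{
	release_list();
	return 0;
}
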
2338 2714
2339static int cgroup_tasks_release(struct inode *inode, struct file *file) 2715static int cgroup_pidlist_release(struct inode *inode, struct file *file)
2340{ 2716{
2341 struct seq_file *seq; 2717 struct cgroup_pidlist *l;
2342 struct cgroup_pids *cp;
2343
2344 if (!(file->f_mode & FMODE_READ)) 2718 if (!(file->f_mode & FMODE_READ))
2345 return 0; 2719 return 0;
2346 2720 /*
2347 seq = file->private_data; 2721 * the seq_file will only be initialized if the file was opened for
2348 cp = seq->private; 2722 * reading; hence we check if it's not null only in that case.
2349 2723 */
2350 release_cgroup_pid_array(cp); 2724 l = ((struct seq_file *)file->private_data)->private;
2725 cgroup_release_pid_array(l);
2351 return seq_release(inode, file); 2726 return seq_release(inode, file);
2352} 2727}
2353 2728
2354static struct file_operations cgroup_tasks_operations = { 2729static const struct file_operations cgroup_pidlist_operations = {
2355 .read = seq_read, 2730 .read = seq_read,
2356 .llseek = seq_lseek, 2731 .llseek = seq_lseek,
2357 .write = cgroup_file_write, 2732 .write = cgroup_file_write,
2358 .release = cgroup_tasks_release, 2733 .release = cgroup_pidlist_release,
2359}; 2734};
2360 2735
2361/* 2736/*
2362 * Handle an open on 'tasks' file. Prepare an array containing the 2737 * The following functions handle opens on a file that displays a pidlist
2363 * process id's of tasks currently attached to the cgroup being opened. 2738 * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
2739 * in the cgroup.
2364 */ 2740 */
2365 2741/* helper function for the two below it */
2366static int cgroup_tasks_open(struct inode *unused, struct file *file) 2742static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
2367{ 2743{
2368 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); 2744 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
2369 struct pid_namespace *ns = current->nsproxy->pid_ns; 2745 struct cgroup_pidlist *l;
2370 struct cgroup_pids *cp;
2371 pid_t *pidarray;
2372 int npids;
2373 int retval; 2746 int retval;
2374 2747
2375 /* Nothing to do for write-only files */ 2748 /* Nothing to do for write-only files */
2376 if (!(file->f_mode & FMODE_READ)) 2749 if (!(file->f_mode & FMODE_READ))
2377 return 0; 2750 return 0;
2378 2751
2379 /* 2752 /* have the array populated */
2380 * If cgroup gets more users after we read count, we won't have 2753 retval = pidlist_array_load(cgrp, type, &l);
2381 * enough space - tough. This race is indistinguishable to the 2754 if (retval)
2382 * caller from the case that the additional cgroup users didn't 2755 return retval;
2383 * show up until sometime later on. 2756 /* configure file information */
2384 */ 2757 file->f_op = &cgroup_pidlist_operations;
2385 npids = cgroup_task_count(cgrp);
2386 pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
2387 if (!pidarray)
2388 return -ENOMEM;
2389 npids = pid_array_load(pidarray, npids, cgrp);
2390 sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
2391
2392 /*
2393 * Store the array in the cgroup, freeing the old
2394 * array if necessary
2395 */
2396 down_write(&cgrp->pids_mutex);
2397
2398 list_for_each_entry(cp, &cgrp->pids_list, list) {
2399 if (ns == cp->ns)
2400 goto found;
2401 }
2402
2403 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2404 if (!cp) {
2405 up_write(&cgrp->pids_mutex);
2406 kfree(pidarray);
2407 return -ENOMEM;
2408 }
2409 cp->cgrp = cgrp;
2410 cp->ns = ns;
2411 get_pid_ns(ns);
2412 list_add(&cp->list, &cgrp->pids_list);
2413found:
2414 kfree(cp->tasks_pids);
2415 cp->tasks_pids = pidarray;
2416 cp->length = npids;
2417 cp->use_count++;
2418 up_write(&cgrp->pids_mutex);
2419
2420 file->f_op = &cgroup_tasks_operations;
2421 2758
2422 retval = seq_open(file, &cgroup_tasks_seq_operations); 2759 retval = seq_open(file, &cgroup_pidlist_seq_operations);
2423 if (retval) { 2760 if (retval) {
2424 release_cgroup_pid_array(cp); 2761 cgroup_release_pid_array(l);
2425 return retval; 2762 return retval;
2426 } 2763 }
2427 ((struct seq_file *)file->private_data)->private = cp; 2764 ((struct seq_file *)file->private_data)->private = l;
2428 return 0; 2765 return 0;
2429} 2766}
2767static int cgroup_tasks_open(struct inode *unused, struct file *file)
2768{
2769 return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
2770}
2771static int cgroup_procs_open(struct inode *unused, struct file *file)
2772{
2773 return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
2774}
2430 2775
2431static u64 cgroup_read_notify_on_release(struct cgroup *cgrp, 2776static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
2432 struct cftype *cft) 2777 struct cftype *cft)
@@ -2449,21 +2794,27 @@ static int cgroup_write_notify_on_release(struct cgroup *cgrp,
2449/* 2794/*
2450 * for the common functions, 'private' gives the type of file 2795 * for the common functions, 'private' gives the type of file
2451 */ 2796 */
2797/* for hysterical raisins, we can't put this on the older files */
2798#define CGROUP_FILE_GENERIC_PREFIX "cgroup."
2452static struct cftype files[] = { 2799static struct cftype files[] = {
2453 { 2800 {
2454 .name = "tasks", 2801 .name = "tasks",
2455 .open = cgroup_tasks_open, 2802 .open = cgroup_tasks_open,
2456 .write_u64 = cgroup_tasks_write, 2803 .write_u64 = cgroup_tasks_write,
2457 .release = cgroup_tasks_release, 2804 .release = cgroup_pidlist_release,
2458 .private = FILE_TASKLIST,
2459 .mode = S_IRUGO | S_IWUSR, 2805 .mode = S_IRUGO | S_IWUSR,
2460 }, 2806 },
2461 2807 {
2808 .name = CGROUP_FILE_GENERIC_PREFIX "procs",
2809 .open = cgroup_procs_open,
2810 /* .write_u64 = cgroup_procs_write, TODO */
2811 .release = cgroup_pidlist_release,
2812 .mode = S_IRUGO,
2813 },
2462 { 2814 {
2463 .name = "notify_on_release", 2815 .name = "notify_on_release",
2464 .read_u64 = cgroup_read_notify_on_release, 2816 .read_u64 = cgroup_read_notify_on_release,
2465 .write_u64 = cgroup_write_notify_on_release, 2817 .write_u64 = cgroup_write_notify_on_release,
2466 .private = FILE_NOTIFY_ON_RELEASE,
2467 }, 2818 },
2468}; 2819};
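
Both files read back as one decimal ID per line: "tasks" lists every thread attached to the cgroup, while the new "cgroup.procs" lists thread-group leaders (and is read-only until the TODO write handler lands). A small reader, assuming a hand-mounted hierarchy path such as /cgroup/<group>:

#include <stdio.h>

int main(int argc, char **argv)
{
	/* e.g. /cgroup/mygroup/cgroup.procs or /cgroup/mygroup/tasks */
	const char *path = argc > 1 ? argv[1] : "/cgroup/cgroup.procs";
	FILE *f = fopen(path, "r");
	int pid;

	if (!f) {
		perror(path);
		return 1;
	}
	while (fscanf(f, "%d", &pid) == 1)
		printf("member %d\n", pid);
	fclose(f);
	return 0;
}
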
2469 2820
@@ -2472,7 +2823,6 @@ static struct cftype cft_release_agent = {
2472 .read_seq_string = cgroup_release_agent_show, 2823 .read_seq_string = cgroup_release_agent_show,
2473 .write_string = cgroup_release_agent_write, 2824 .write_string = cgroup_release_agent_write,
2474 .max_write_len = PATH_MAX, 2825 .max_write_len = PATH_MAX,
2475 .private = FILE_RELEASE_AGENT,
2476}; 2826};
2477 2827
2478static int cgroup_populate_dir(struct cgroup *cgrp) 2828static int cgroup_populate_dir(struct cgroup *cgrp)
@@ -2879,6 +3229,7 @@ int __init cgroup_init_early(void)
2879 init_task.cgroups = &init_css_set; 3229 init_task.cgroups = &init_css_set;
2880 3230
2881 init_css_set_link.cg = &init_css_set; 3231 init_css_set_link.cg = &init_css_set;
3232 init_css_set_link.cgrp = dummytop;
2882 list_add(&init_css_set_link.cgrp_link_list, 3233 list_add(&init_css_set_link.cgrp_link_list,
2883 &rootnode.top_cgroup.css_sets); 3234 &rootnode.top_cgroup.css_sets);
2884 list_add(&init_css_set_link.cg_link_list, 3235 list_add(&init_css_set_link.cg_link_list,
@@ -2933,7 +3284,7 @@ int __init cgroup_init(void)
2933 /* Add init_css_set to the hash table */ 3284 /* Add init_css_set to the hash table */
2934 hhead = css_set_hash(init_css_set.subsys); 3285 hhead = css_set_hash(init_css_set.subsys);
2935 hlist_add_head(&init_css_set.hlist, hhead); 3286 hlist_add_head(&init_css_set.hlist, hhead);
2936 3287 BUG_ON(!init_root_id(&rootnode));
2937 err = register_filesystem(&cgroup_fs_type); 3288 err = register_filesystem(&cgroup_fs_type);
2938 if (err < 0) 3289 if (err < 0)
2939 goto out; 3290 goto out;
@@ -2986,15 +3337,16 @@ static int proc_cgroup_show(struct seq_file *m, void *v)
2986 for_each_active_root(root) { 3337 for_each_active_root(root) {
2987 struct cgroup_subsys *ss; 3338 struct cgroup_subsys *ss;
2988 struct cgroup *cgrp; 3339 struct cgroup *cgrp;
2989 int subsys_id;
2990 int count = 0; 3340 int count = 0;
2991 3341
2992 seq_printf(m, "%lu:", root->subsys_bits); 3342 seq_printf(m, "%d:", root->hierarchy_id);
2993 for_each_subsys(root, ss) 3343 for_each_subsys(root, ss)
2994 seq_printf(m, "%s%s", count++ ? "," : "", ss->name); 3344 seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
3345 if (strlen(root->name))
3346 seq_printf(m, "%sname=%s", count ? "," : "",
3347 root->name);
2995 seq_putc(m, ':'); 3348 seq_putc(m, ':');
2996 get_first_subsys(&root->top_cgroup, NULL, &subsys_id); 3349 cgrp = task_cgroup_from_root(tsk, root);
2997 cgrp = task_cgroup(tsk, subsys_id);
2998 retval = cgroup_path(cgrp, buf, PAGE_SIZE); 3350 retval = cgroup_path(cgrp, buf, PAGE_SIZE);
2999 if (retval < 0) 3351 if (retval < 0)
3000 goto out_unlock; 3352 goto out_unlock;
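
After this change each line of /proc/<pid>/cgroup reads "hierarchy-id:controllers:path", with an optional "name=<...>" token for named hierarchies (an illustrative line, not taken from this patch, would be "2:cpuset:/myset"). A sketch that splits the three fields:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/cgroup", "r");
	char line[512];

	if (!f) {
		perror("/proc/self/cgroup");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* note: strtok() would collapse an empty controller field,
		 * which only shows up on much later (cgroup v2) kernels */
		char *id = strtok(line, ":");
		char *controllers = strtok(NULL, ":");
		char *path = strtok(NULL, "\n");

		if (id && controllers && path)
			printf("hierarchy %s, controllers '%s', path %s\n",
			       id, controllers, path);
	}
	fclose(f);
	return 0;
}
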
@@ -3017,7 +3369,7 @@ static int cgroup_open(struct inode *inode, struct file *file)
3017 return single_open(file, proc_cgroup_show, pid); 3369 return single_open(file, proc_cgroup_show, pid);
3018} 3370}
3019 3371
3020struct file_operations proc_cgroup_operations = { 3372const struct file_operations proc_cgroup_operations = {
3021 .open = cgroup_open, 3373 .open = cgroup_open,
3022 .read = seq_read, 3374 .read = seq_read,
3023 .llseek = seq_lseek, 3375 .llseek = seq_lseek,
@@ -3033,8 +3385,8 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v)
3033 mutex_lock(&cgroup_mutex); 3385 mutex_lock(&cgroup_mutex);
3034 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { 3386 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
3035 struct cgroup_subsys *ss = subsys[i]; 3387 struct cgroup_subsys *ss = subsys[i];
3036 seq_printf(m, "%s\t%lu\t%d\t%d\n", 3388 seq_printf(m, "%s\t%d\t%d\t%d\n",
3037 ss->name, ss->root->subsys_bits, 3389 ss->name, ss->root->hierarchy_id,
3038 ss->root->number_of_cgroups, !ss->disabled); 3390 ss->root->number_of_cgroups, !ss->disabled);
3039 } 3391 }
3040 mutex_unlock(&cgroup_mutex); 3392 mutex_unlock(&cgroup_mutex);
@@ -3046,7 +3398,7 @@ static int cgroupstats_open(struct inode *inode, struct file *file)
3046 return single_open(file, proc_cgroupstats_show, NULL); 3398 return single_open(file, proc_cgroupstats_show, NULL);
3047} 3399}
3048 3400
3049static struct file_operations proc_cgroupstats_operations = { 3401static const struct file_operations proc_cgroupstats_operations = {
3050 .open = cgroupstats_open, 3402 .open = cgroupstats_open,
3051 .read = seq_read, 3403 .read = seq_read,
3052 .llseek = seq_lseek, 3404 .llseek = seq_lseek,
@@ -3320,13 +3672,11 @@ int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
3320{ 3672{
3321 int ret; 3673 int ret;
3322 struct cgroup *target; 3674 struct cgroup *target;
3323 int subsys_id;
3324 3675
3325 if (cgrp == dummytop) 3676 if (cgrp == dummytop)
3326 return 1; 3677 return 1;
3327 3678
3328 get_first_subsys(cgrp, NULL, &subsys_id); 3679 target = task_cgroup_from_root(task, cgrp->root);
3329 target = task_cgroup(task, subsys_id);
3330 while (cgrp != target && cgrp!= cgrp->top_cgroup) 3680 while (cgrp != target && cgrp!= cgrp->top_cgroup)
3331 cgrp = cgrp->parent; 3681 cgrp = cgrp->parent;
3332 ret = (cgrp == target); 3682 ret = (cgrp == target);
@@ -3358,8 +3708,10 @@ static void check_for_release(struct cgroup *cgrp)
3358void __css_put(struct cgroup_subsys_state *css) 3708void __css_put(struct cgroup_subsys_state *css)
3359{ 3709{
3360 struct cgroup *cgrp = css->cgroup; 3710 struct cgroup *cgrp = css->cgroup;
3711 int val;
3361 rcu_read_lock(); 3712 rcu_read_lock();
3362 if (atomic_dec_return(&css->refcnt) == 1) { 3713 val = atomic_dec_return(&css->refcnt);
3714 if (val == 1) {
3363 if (notify_on_release(cgrp)) { 3715 if (notify_on_release(cgrp)) {
3364 set_bit(CGRP_RELEASABLE, &cgrp->flags); 3716 set_bit(CGRP_RELEASABLE, &cgrp->flags);
3365 check_for_release(cgrp); 3717 check_for_release(cgrp);
@@ -3367,6 +3719,7 @@ void __css_put(struct cgroup_subsys_state *css)
3367 cgroup_wakeup_rmdir_waiter(cgrp); 3719 cgroup_wakeup_rmdir_waiter(cgrp);
3368 } 3720 }
3369 rcu_read_unlock(); 3721 rcu_read_unlock();
3722 WARN_ON_ONCE(val < 1);
3370} 3723}
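
The refcount here is biased: the cgroup itself always holds one base reference, so the count never legitimately drops below 1, hitting exactly 1 means the last external user is gone, and anything lower is a bug (hence the new WARN_ON_ONCE). A userspace sketch of the same check using GCC atomic builtins (hypothetical names):

#include <assert.h>
#include <stdio.h>

static int refcnt = 1 + 2;	/* one base reference plus two users */

static void css_put_sketch(void)
{
	int val = __sync_sub_and_fetch(&refcnt, 1);

	if (val == 1)
		printf("last external reference dropped; releasable\n");
	assert(val >= 1);	/* mirrors WARN_ON_ONCE(val < 1) */
}

int main(void)
{
	css_put_sketch();	/* 3 -> 2 */
	css_put_sketch();	/* 2 -> 1: releasable */
	return 0;
}
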
3371 3724
3372/* 3725/*
@@ -3693,3 +4046,154 @@ css_get_next(struct cgroup_subsys *ss, int id,
3693 return ret; 4046 return ret;
3694} 4047}
3695 4048
4049#ifdef CONFIG_CGROUP_DEBUG
4050static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
4051 struct cgroup *cont)
4052{
4053 struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
4054
4055 if (!css)
4056 return ERR_PTR(-ENOMEM);
4057
4058 return css;
4059}
4060
4061static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
4062{
4063 kfree(cont->subsys[debug_subsys_id]);
4064}
4065
4066static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
4067{
4068 return atomic_read(&cont->count);
4069}
4070
4071static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft)
4072{
4073 return cgroup_task_count(cont);
4074}
4075
4076static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
4077{
4078 return (u64)(unsigned long)current->cgroups;
4079}
4080
4081static u64 current_css_set_refcount_read(struct cgroup *cont,
4082 struct cftype *cft)
4083{
4084 u64 count;
4085
4086 rcu_read_lock();
4087 count = atomic_read(&current->cgroups->refcount);
4088 rcu_read_unlock();
4089 return count;
4090}
4091
4092static int current_css_set_cg_links_read(struct cgroup *cont,
4093 struct cftype *cft,
4094 struct seq_file *seq)
4095{
4096 struct cg_cgroup_link *link;
4097 struct css_set *cg;
4098
4099 read_lock(&css_set_lock);
4100 rcu_read_lock();
4101 cg = rcu_dereference(current->cgroups);
4102 list_for_each_entry(link, &cg->cg_links, cg_link_list) {
4103 struct cgroup *c = link->cgrp;
4104 const char *name;
4105
4106 if (c->dentry)
4107 name = c->dentry->d_name.name;
4108 else
4109 name = "?";
4110 seq_printf(seq, "Root %d group %s\n",
4111 c->root->hierarchy_id, name);
4112 }
4113 rcu_read_unlock();
4114 read_unlock(&css_set_lock);
4115 return 0;
4116}
4117
4118#define MAX_TASKS_SHOWN_PER_CSS 25
4119static int cgroup_css_links_read(struct cgroup *cont,
4120 struct cftype *cft,
4121 struct seq_file *seq)
4122{
4123 struct cg_cgroup_link *link;
4124
4125 read_lock(&css_set_lock);
4126 list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
4127 struct css_set *cg = link->cg;
4128 struct task_struct *task;
4129 int count = 0;
4130 seq_printf(seq, "css_set %p\n", cg);
4131 list_for_each_entry(task, &cg->tasks, cg_list) {
4132 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
4133 seq_puts(seq, " ...\n");
4134 break;
4135 } else {
4136 seq_printf(seq, " task %d\n",
4137 task_pid_vnr(task));
4138 }
4139 }
4140 }
4141 read_unlock(&css_set_lock);
4142 return 0;
4143}
4144
4145static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
4146{
4147 return test_bit(CGRP_RELEASABLE, &cgrp->flags);
4148}
4149
4150static struct cftype debug_files[] = {
4151 {
4152 .name = "cgroup_refcount",
4153 .read_u64 = cgroup_refcount_read,
4154 },
4155 {
4156 .name = "taskcount",
4157 .read_u64 = debug_taskcount_read,
4158 },
4159
4160 {
4161 .name = "current_css_set",
4162 .read_u64 = current_css_set_read,
4163 },
4164
4165 {
4166 .name = "current_css_set_refcount",
4167 .read_u64 = current_css_set_refcount_read,
4168 },
4169
4170 {
4171 .name = "current_css_set_cg_links",
4172 .read_seq_string = current_css_set_cg_links_read,
4173 },
4174
4175 {
4176 .name = "cgroup_css_links",
4177 .read_seq_string = cgroup_css_links_read,
4178 },
4179
4180 {
4181 .name = "releasable",
4182 .read_u64 = releasable_read,
4183 },
4184};
4185
4186static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
4187{
4188 return cgroup_add_files(cont, ss, debug_files,
4189 ARRAY_SIZE(debug_files));
4190}
4191
4192struct cgroup_subsys debug_subsys = {
4193 .name = "debug",
4194 .create = debug_create,
4195 .destroy = debug_destroy,
4196 .populate = debug_populate,
4197 .subsys_id = debug_subsys_id,
4198};
4199#endif /* CONFIG_CGROUP_DEBUG */
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c
deleted file mode 100644
index 0c92d797baa6..000000000000
--- a/kernel/cgroup_debug.c
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * kernel/cgroup_debug.c - Example cgroup subsystem that
3 * exposes debug info
4 *
5 * Copyright (C) Google Inc, 2007
6 *
7 * Developed by Paul Menage (menage@google.com)
8 *
9 */
10
11#include <linux/cgroup.h>
12#include <linux/fs.h>
13#include <linux/slab.h>
14#include <linux/rcupdate.h>
15
16#include <asm/atomic.h>
17
18static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
19 struct cgroup *cont)
20{
21 struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
22
23 if (!css)
24 return ERR_PTR(-ENOMEM);
25
26 return css;
27}
28
29static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
30{
31 kfree(cont->subsys[debug_subsys_id]);
32}
33
34static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
35{
36 return atomic_read(&cont->count);
37}
38
39static u64 taskcount_read(struct cgroup *cont, struct cftype *cft)
40{
41 u64 count;
42
43 count = cgroup_task_count(cont);
44 return count;
45}
46
47static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
48{
49 return (u64)(long)current->cgroups;
50}
51
52static u64 current_css_set_refcount_read(struct cgroup *cont,
53 struct cftype *cft)
54{
55 u64 count;
56
57 rcu_read_lock();
58 count = atomic_read(&current->cgroups->refcount);
59 rcu_read_unlock();
60 return count;
61}
62
63static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
64{
65 return test_bit(CGRP_RELEASABLE, &cgrp->flags);
66}
67
68static struct cftype files[] = {
69 {
70 .name = "cgroup_refcount",
71 .read_u64 = cgroup_refcount_read,
72 },
73 {
74 .name = "taskcount",
75 .read_u64 = taskcount_read,
76 },
77
78 {
79 .name = "current_css_set",
80 .read_u64 = current_css_set_read,
81 },
82
83 {
84 .name = "current_css_set_refcount",
85 .read_u64 = current_css_set_refcount_read,
86 },
87
88 {
89 .name = "releasable",
90 .read_u64 = releasable_read,
91 },
92};
93
94static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
95{
96 return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
97}
98
99struct cgroup_subsys debug_subsys = {
100 .name = "debug",
101 .create = debug_create,
102 .destroy = debug_destroy,
103 .populate = debug_populate,
104 .subsys_id = debug_subsys_id,
105};
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index fb249e2bcada..59e9ef6aab40 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -159,7 +159,7 @@ static bool is_task_frozen_enough(struct task_struct *task)
159 */ 159 */
160static int freezer_can_attach(struct cgroup_subsys *ss, 160static int freezer_can_attach(struct cgroup_subsys *ss,
161 struct cgroup *new_cgroup, 161 struct cgroup *new_cgroup,
162 struct task_struct *task) 162 struct task_struct *task, bool threadgroup)
163{ 163{
164 struct freezer *freezer; 164 struct freezer *freezer;
165 165
@@ -177,6 +177,19 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
177 if (freezer->state == CGROUP_FROZEN) 177 if (freezer->state == CGROUP_FROZEN)
178 return -EBUSY; 178 return -EBUSY;
179 179
180 if (threadgroup) {
181 struct task_struct *c;
182
183 rcu_read_lock();
184 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
185 if (is_task_frozen_enough(c)) {
186 rcu_read_unlock();
187 return -EBUSY;
188 }
189 }
190 rcu_read_unlock();
191 }
192
180 return 0; 193 return 0;
181} 194}
182 195
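
For context, the freezer is driven through its cgroup files: writing FROZEN or THAWED to freezer.state freezes or thaws every task in the group, and the can_attach() check above now also refuses to move a whole threadgroup into a group that is already frozen. A minimal userspace driver; the /cgroup mount point and the group name are assumptions:

#include <stdio.h>

static int set_freezer_state(const char *group, const char *state)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/cgroup/%s/freezer.state", group);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", state);
	return fclose(f);
}

int main(void)
{
	set_freezer_state("frozen_group", "FROZEN");
	/* ... do something while the group is frozen ... */
	set_freezer_state("frozen_group", "THAWED");
	return 0;
}
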
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 7e75a41bd508..b5cb469d2545 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1324,9 +1324,10 @@ static int fmeter_getrate(struct fmeter *fmp)
1324static cpumask_var_t cpus_attach; 1324static cpumask_var_t cpus_attach;
1325 1325
1326/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */ 1326/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
1327static int cpuset_can_attach(struct cgroup_subsys *ss, 1327static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1328 struct cgroup *cont, struct task_struct *tsk) 1328 struct task_struct *tsk, bool threadgroup)
1329{ 1329{
1330 int ret;
1330 struct cpuset *cs = cgroup_cs(cont); 1331 struct cpuset *cs = cgroup_cs(cont);
1331 1332
1332 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) 1333 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
@@ -1343,18 +1344,51 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
1343 if (tsk->flags & PF_THREAD_BOUND) 1344 if (tsk->flags & PF_THREAD_BOUND)
1344 return -EINVAL; 1345 return -EINVAL;
1345 1346
1346 return security_task_setscheduler(tsk, 0, NULL); 1347 ret = security_task_setscheduler(tsk, 0, NULL);
1348 if (ret)
1349 return ret;
1350 if (threadgroup) {
1351 struct task_struct *c;
1352
1353 rcu_read_lock();
1354 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
1355 ret = security_task_setscheduler(c, 0, NULL);
1356 if (ret) {
1357 rcu_read_unlock();
1358 return ret;
1359 }
1360 }
1361 rcu_read_unlock();
1362 }
1363 return 0;
1364}
1365
1366static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to,
1367 struct cpuset *cs)
1368{
1369 int err;
1370 /*
1371 * can_attach beforehand should guarantee that this doesn't fail.
1372 * TODO: have a better way to handle failure here
1373 */
1374 err = set_cpus_allowed_ptr(tsk, cpus_attach);
1375 WARN_ON_ONCE(err);
1376
1377 task_lock(tsk);
1378 cpuset_change_task_nodemask(tsk, to);
1379 task_unlock(tsk);
1380 cpuset_update_task_spread_flag(cs, tsk);
1381
1347} 1382}
1348 1383
1349static void cpuset_attach(struct cgroup_subsys *ss, 1384static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1350 struct cgroup *cont, struct cgroup *oldcont, 1385 struct cgroup *oldcont, struct task_struct *tsk,
1351 struct task_struct *tsk) 1386 bool threadgroup)
1352{ 1387{
1353 nodemask_t from, to; 1388 nodemask_t from, to;
1354 struct mm_struct *mm; 1389 struct mm_struct *mm;
1355 struct cpuset *cs = cgroup_cs(cont); 1390 struct cpuset *cs = cgroup_cs(cont);
1356 struct cpuset *oldcs = cgroup_cs(oldcont); 1391 struct cpuset *oldcs = cgroup_cs(oldcont);
1357 int err;
1358 1392
1359 if (cs == &top_cpuset) { 1393 if (cs == &top_cpuset) {
1360 cpumask_copy(cpus_attach, cpu_possible_mask); 1394 cpumask_copy(cpus_attach, cpu_possible_mask);
@@ -1363,15 +1397,19 @@ static void cpuset_attach(struct cgroup_subsys *ss,
1363 guarantee_online_cpus(cs, cpus_attach); 1397 guarantee_online_cpus(cs, cpus_attach);
1364 guarantee_online_mems(cs, &to); 1398 guarantee_online_mems(cs, &to);
1365 } 1399 }
1366 err = set_cpus_allowed_ptr(tsk, cpus_attach);
1367 if (err)
1368 return;
1369 1400
1370 task_lock(tsk); 1401 /* do per-task migration stuff possibly for each in the threadgroup */
1371 cpuset_change_task_nodemask(tsk, &to); 1402 cpuset_attach_task(tsk, &to, cs);
1372 task_unlock(tsk); 1403 if (threadgroup) {
1373 cpuset_update_task_spread_flag(cs, tsk); 1404 struct task_struct *c;
1405 rcu_read_lock();
1406 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
1407 cpuset_attach_task(c, &to, cs);
1408 }
1409 rcu_read_unlock();
1410 }
1374 1411
1412 /* change mm; only needs to be done once even if threadgroup */
1375 from = oldcs->mems_allowed; 1413 from = oldcs->mems_allowed;
1376 to = cs->mems_allowed; 1414 to = cs->mems_allowed;
1377 mm = get_task_mm(tsk); 1415 mm = get_task_mm(tsk);
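
The new threadgroup path repeats the per-task work (CPU affinity, nodemask, spread flags) for every thread instead of only the leader, while the mm migration below still happens once. The userspace analogue of that loop, shown here as a hedged sketch, is walking /proc/<pid>/task and applying a per-thread call such as sched_setaffinity() to each TID:

#define _GNU_SOURCE
#include <dirent.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char dirpath[64];
	DIR *dir;
	struct dirent *de;
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* pin every thread of this process to CPU 0 */

	snprintf(dirpath, sizeof(dirpath), "/proc/%d/task", getpid());
	dir = opendir(dirpath);
	if (!dir) {
		perror(dirpath);
		return 1;
	}
	while ((de = readdir(dir)) != NULL) {
		pid_t tid = (pid_t)atoi(de->d_name);

		if (tid <= 0)	/* skips "." and ".." */
			continue;
		if (sched_setaffinity(tid, sizeof(set), &set))
			perror("sched_setaffinity");
	}
	closedir(dir);
	return 0;
}
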
diff --git a/kernel/cred.c b/kernel/cred.c
index d7f7a01082eb..dd76cfe5f5b0 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -782,6 +782,25 @@ EXPORT_SYMBOL(set_create_files_as);
782 782
783#ifdef CONFIG_DEBUG_CREDENTIALS 783#ifdef CONFIG_DEBUG_CREDENTIALS
784 784
785bool creds_are_invalid(const struct cred *cred)
786{
787 if (cred->magic != CRED_MAGIC)
788 return true;
789 if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
790 return true;
791#ifdef CONFIG_SECURITY_SELINUX
792 if (selinux_is_enabled()) {
793 if ((unsigned long) cred->security < PAGE_SIZE)
794 return true;
795 if ((*(u32 *)cred->security & 0xffffff00) ==
796 (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))
797 return true;
798 }
799#endif
800 return false;
801}
802EXPORT_SYMBOL(creds_are_invalid);
803
785/* 804/*
786 * dump invalid credentials 805 * dump invalid credentials
787 */ 806 */
diff --git a/kernel/exit.c b/kernel/exit.c
index bc2b1fdfc354..e61891f80123 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -47,7 +47,7 @@
47#include <linux/tracehook.h> 47#include <linux/tracehook.h>
48#include <linux/fs_struct.h> 48#include <linux/fs_struct.h>
49#include <linux/init_task.h> 49#include <linux/init_task.h>
50#include <linux/perf_counter.h> 50#include <linux/perf_event.h>
51#include <trace/events/sched.h> 51#include <trace/events/sched.h>
52 52
53#include <asm/uaccess.h> 53#include <asm/uaccess.h>
@@ -154,8 +154,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
154{ 154{
155 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 155 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
156 156
157#ifdef CONFIG_PERF_COUNTERS 157#ifdef CONFIG_PERF_EVENTS
158 WARN_ON_ONCE(tsk->perf_counter_ctxp); 158 WARN_ON_ONCE(tsk->perf_event_ctxp);
159#endif 159#endif
160 trace_sched_process_free(tsk); 160 trace_sched_process_free(tsk);
161 put_task_struct(tsk); 161 put_task_struct(tsk);
@@ -359,8 +359,10 @@ void __set_special_pids(struct pid *pid)
359{ 359{
360 struct task_struct *curr = current->group_leader; 360 struct task_struct *curr = current->group_leader;
361 361
362 if (task_session(curr) != pid) 362 if (task_session(curr) != pid) {
363 change_pid(curr, PIDTYPE_SID, pid); 363 change_pid(curr, PIDTYPE_SID, pid);
364 proc_sid_connector(curr);
365 }
364 366
365 if (task_pgrp(curr) != pid) 367 if (task_pgrp(curr) != pid)
366 change_pid(curr, PIDTYPE_PGID, pid); 368 change_pid(curr, PIDTYPE_PGID, pid);
@@ -945,6 +947,8 @@ NORET_TYPE void do_exit(long code)
945 if (group_dead) { 947 if (group_dead) {
946 hrtimer_cancel(&tsk->signal->real_timer); 948 hrtimer_cancel(&tsk->signal->real_timer);
947 exit_itimers(tsk->signal); 949 exit_itimers(tsk->signal);
950 if (tsk->mm)
951 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
948 } 952 }
949 acct_collect(code, group_dead); 953 acct_collect(code, group_dead);
950 if (group_dead) 954 if (group_dead)
@@ -972,8 +976,6 @@ NORET_TYPE void do_exit(long code)
972 disassociate_ctty(1); 976 disassociate_ctty(1);
973 977
974 module_put(task_thread_info(tsk)->exec_domain->module); 978 module_put(task_thread_info(tsk)->exec_domain->module);
975 if (tsk->binfmt)
976 module_put(tsk->binfmt->module);
977 979
978 proc_exit_connector(tsk); 980 proc_exit_connector(tsk);
979 981
@@ -981,7 +983,7 @@ NORET_TYPE void do_exit(long code)
981 * Flush inherited counters to the parent - before the parent 983 * Flush inherited counters to the parent - before the parent
982 * gets woken up by child-exit notifications. 984 * gets woken up by child-exit notifications.
983 */ 985 */
984 perf_counter_exit_task(tsk); 986 perf_event_exit_task(tsk);
985 987
986 exit_notify(tsk, group_dead); 988 exit_notify(tsk, group_dead);
987#ifdef CONFIG_NUMA 989#ifdef CONFIG_NUMA
@@ -1091,28 +1093,28 @@ struct wait_opts {
1091 int __user *wo_stat; 1093 int __user *wo_stat;
1092 struct rusage __user *wo_rusage; 1094 struct rusage __user *wo_rusage;
1093 1095
1096 wait_queue_t child_wait;
1094 int notask_error; 1097 int notask_error;
1095}; 1098};
1096 1099
1097static struct pid *task_pid_type(struct task_struct *task, enum pid_type type) 1100static inline
1101struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
1098{ 1102{
1099 struct pid *pid = NULL; 1103 if (type != PIDTYPE_PID)
1100 if (type == PIDTYPE_PID) 1104 task = task->group_leader;
1101 pid = task->pids[type].pid; 1105 return task->pids[type].pid;
1102 else if (type < PIDTYPE_MAX)
1103 pid = task->group_leader->pids[type].pid;
1104 return pid;
1105} 1106}
1106 1107
1107static int eligible_child(struct wait_opts *wo, struct task_struct *p) 1108static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
1108{ 1109{
1109 int err; 1110 return wo->wo_type == PIDTYPE_MAX ||
1110 1111 task_pid_type(p, wo->wo_type) == wo->wo_pid;
1111 if (wo->wo_type < PIDTYPE_MAX) { 1112}
1112 if (task_pid_type(p, wo->wo_type) != wo->wo_pid)
1113 return 0;
1114 }
1115 1113
1114static int eligible_child(struct wait_opts *wo, struct task_struct *p)
1115{
1116 if (!eligible_pid(wo, p))
1117 return 0;
1116 /* Wait for all children (clone and not) if __WALL is set; 1118 /* Wait for all children (clone and not) if __WALL is set;
1117 * otherwise, wait for clone children *only* if __WCLONE is 1119 * otherwise, wait for clone children *only* if __WCLONE is
1118 * set; otherwise, wait for non-clone children *only*. (Note: 1120 * set; otherwise, wait for non-clone children *only*. (Note:
@@ -1122,10 +1124,6 @@ static int eligible_child(struct wait_opts *wo, struct task_struct *p)
1122 && !(wo->wo_flags & __WALL)) 1124 && !(wo->wo_flags & __WALL))
1123 return 0; 1125 return 0;
1124 1126
1125 err = security_task_wait(p);
1126 if (err)
1127 return err;
1128
1129 return 1; 1127 return 1;
1130} 1128}
1131 1129
@@ -1138,18 +1136,20 @@ static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
1138 1136
1139 put_task_struct(p); 1137 put_task_struct(p);
1140 infop = wo->wo_info; 1138 infop = wo->wo_info;
1141 if (!retval) 1139 if (infop) {
1142 retval = put_user(SIGCHLD, &infop->si_signo); 1140 if (!retval)
1143 if (!retval) 1141 retval = put_user(SIGCHLD, &infop->si_signo);
1144 retval = put_user(0, &infop->si_errno); 1142 if (!retval)
1145 if (!retval) 1143 retval = put_user(0, &infop->si_errno);
1146 retval = put_user((short)why, &infop->si_code); 1144 if (!retval)
1147 if (!retval) 1145 retval = put_user((short)why, &infop->si_code);
1148 retval = put_user(pid, &infop->si_pid); 1146 if (!retval)
1149 if (!retval) 1147 retval = put_user(pid, &infop->si_pid);
1150 retval = put_user(uid, &infop->si_uid); 1148 if (!retval)
1151 if (!retval) 1149 retval = put_user(uid, &infop->si_uid);
1152 retval = put_user(status, &infop->si_status); 1150 if (!retval)
1151 retval = put_user(status, &infop->si_status);
1152 }
1153 if (!retval) 1153 if (!retval)
1154 retval = pid; 1154 retval = pid;
1155 return retval; 1155 return retval;
@@ -1206,6 +1206,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1206 if (likely(!traced) && likely(!task_detached(p))) { 1206 if (likely(!traced) && likely(!task_detached(p))) {
1207 struct signal_struct *psig; 1207 struct signal_struct *psig;
1208 struct signal_struct *sig; 1208 struct signal_struct *sig;
1209 unsigned long maxrss;
1209 1210
1210 /* 1211 /*
1211 * The resource counters for the group leader are in its 1212 * The resource counters for the group leader are in its
@@ -1254,6 +1255,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1254 psig->coublock += 1255 psig->coublock +=
1255 task_io_get_oublock(p) + 1256 task_io_get_oublock(p) +
1256 sig->oublock + sig->coublock; 1257 sig->oublock + sig->coublock;
1258 maxrss = max(sig->maxrss, sig->cmaxrss);
1259 if (psig->cmaxrss < maxrss)
1260 psig->cmaxrss = maxrss;
1257 task_io_accounting_add(&psig->ioac, &p->ioac); 1261 task_io_accounting_add(&psig->ioac, &p->ioac);
1258 task_io_accounting_add(&psig->ioac, &sig->ioac); 1262 task_io_accounting_add(&psig->ioac, &sig->ioac);
1259 spin_unlock_irq(&p->real_parent->sighand->siglock); 1263 spin_unlock_irq(&p->real_parent->sighand->siglock);
@@ -1475,13 +1479,14 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
1475 * then ->notask_error is 0 if @p is an eligible child, 1479 * then ->notask_error is 0 if @p is an eligible child,
1476 * or another error from security_task_wait(), or still -ECHILD. 1480 * or another error from security_task_wait(), or still -ECHILD.
1477 */ 1481 */
1478static int wait_consider_task(struct wait_opts *wo, struct task_struct *parent, 1482static int wait_consider_task(struct wait_opts *wo, int ptrace,
1479 int ptrace, struct task_struct *p) 1483 struct task_struct *p)
1480{ 1484{
1481 int ret = eligible_child(wo, p); 1485 int ret = eligible_child(wo, p);
1482 if (!ret) 1486 if (!ret)
1483 return ret; 1487 return ret;
1484 1488
1489 ret = security_task_wait(p);
1485 if (unlikely(ret < 0)) { 1490 if (unlikely(ret < 0)) {
1486 /* 1491 /*
1487 * If we have not yet seen any eligible child, 1492 * If we have not yet seen any eligible child,
@@ -1543,7 +1548,7 @@ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
1543 * Do not consider detached threads. 1548 * Do not consider detached threads.
1544 */ 1549 */
1545 if (!task_detached(p)) { 1550 if (!task_detached(p)) {
1546 int ret = wait_consider_task(wo, tsk, 0, p); 1551 int ret = wait_consider_task(wo, 0, p);
1547 if (ret) 1552 if (ret)
1548 return ret; 1553 return ret;
1549 } 1554 }
@@ -1557,7 +1562,7 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
1557 struct task_struct *p; 1562 struct task_struct *p;
1558 1563
1559 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { 1564 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
1560 int ret = wait_consider_task(wo, tsk, 1, p); 1565 int ret = wait_consider_task(wo, 1, p);
1561 if (ret) 1566 if (ret)
1562 return ret; 1567 return ret;
1563 } 1568 }
@@ -1565,15 +1570,38 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
1565 return 0; 1570 return 0;
1566} 1571}
1567 1572
1573static int child_wait_callback(wait_queue_t *wait, unsigned mode,
1574 int sync, void *key)
1575{
1576 struct wait_opts *wo = container_of(wait, struct wait_opts,
1577 child_wait);
1578 struct task_struct *p = key;
1579
1580 if (!eligible_pid(wo, p))
1581 return 0;
1582
1583 if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
1584 return 0;
1585
1586 return default_wake_function(wait, mode, sync, key);
1587}
1588
1589void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
1590{
1591 __wake_up_sync_key(&parent->signal->wait_chldexit,
1592 TASK_INTERRUPTIBLE, 1, p);
1593}
1594
1568static long do_wait(struct wait_opts *wo) 1595static long do_wait(struct wait_opts *wo)
1569{ 1596{
1570 DECLARE_WAITQUEUE(wait, current);
1571 struct task_struct *tsk; 1597 struct task_struct *tsk;
1572 int retval; 1598 int retval;
1573 1599
1574 trace_sched_process_wait(wo->wo_pid); 1600 trace_sched_process_wait(wo->wo_pid);
1575 1601
1576 add_wait_queue(&current->signal->wait_chldexit,&wait); 1602 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
1603 wo->child_wait.private = current;
1604 add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1577repeat: 1605repeat:
1578 /* 1606 /*
1579 * If there is nothing that can match our criteria just get out. 1607 * If there is nothing that can match our criteria just get out.
@@ -1614,32 +1642,7 @@ notask:
1614 } 1642 }
1615end: 1643end:
1616 __set_current_state(TASK_RUNNING); 1644 __set_current_state(TASK_RUNNING);
1617 remove_wait_queue(&current->signal->wait_chldexit,&wait); 1645 remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1618 if (wo->wo_info) {
1619 struct siginfo __user *infop = wo->wo_info;
1620
1621 if (retval > 0)
1622 retval = 0;
1623 else {
1624 /*
1625 * For a WNOHANG return, clear out all the fields
1626 * we would set so the user can easily tell the
1627 * difference.
1628 */
1629 if (!retval)
1630 retval = put_user(0, &infop->si_signo);
1631 if (!retval)
1632 retval = put_user(0, &infop->si_errno);
1633 if (!retval)
1634 retval = put_user(0, &infop->si_code);
1635 if (!retval)
1636 retval = put_user(0, &infop->si_pid);
1637 if (!retval)
1638 retval = put_user(0, &infop->si_uid);
1639 if (!retval)
1640 retval = put_user(0, &infop->si_status);
1641 }
1642 }
1643 return retval; 1646 return retval;
1644} 1647}
1645 1648
@@ -1684,6 +1687,29 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
1684 wo.wo_stat = NULL; 1687 wo.wo_stat = NULL;
1685 wo.wo_rusage = ru; 1688 wo.wo_rusage = ru;
1686 ret = do_wait(&wo); 1689 ret = do_wait(&wo);
1690
1691 if (ret > 0) {
1692 ret = 0;
1693 } else if (infop) {
1694 /*
1695 * For a WNOHANG return, clear out all the fields
1696 * we would set so the user can easily tell the
1697 * difference.
1698 */
1699 if (!ret)
1700 ret = put_user(0, &infop->si_signo);
1701 if (!ret)
1702 ret = put_user(0, &infop->si_errno);
1703 if (!ret)
1704 ret = put_user(0, &infop->si_code);
1705 if (!ret)
1706 ret = put_user(0, &infop->si_pid);
1707 if (!ret)
1708 ret = put_user(0, &infop->si_uid);
1709 if (!ret)
1710 ret = put_user(0, &infop->si_status);
1711 }
1712
1687 put_pid(pid); 1713 put_pid(pid);
1688 1714
1689 /* avoid REGPARM breakage on x86: */ 1715 /* avoid REGPARM breakage on x86: */
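
Moving the zero-fill here keeps the WNOHANG case unambiguous for callers: waitid() still returns 0, but si_pid (and the other fields) read back as 0 when no child has changed state. A small userspace demonstration:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	siginfo_t info;

	if (fork() == 0) {
		sleep(1);	/* child outlives the first, non-blocking poll */
		_exit(7);
	}

	/* Non-blocking poll: nothing has exited yet, so si_pid stays 0. */
	if (waitid(P_ALL, 0, &info, WEXITED | WNOHANG) == 0 && info.si_pid == 0)
		printf("no child has exited yet\n");

	/* Blocking wait: reports the real exit status. */
	if (waitid(P_ALL, 0, &info, WEXITED) == 0)
		printf("pid %d exited with status %d\n",
		       (int)info.si_pid, info.si_status);
	return 0;
}
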
diff --git a/kernel/fork.c b/kernel/fork.c
index 341965b0ab1c..4c20fff8c13a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -49,6 +49,7 @@
49#include <linux/ftrace.h> 49#include <linux/ftrace.h>
50#include <linux/profile.h> 50#include <linux/profile.h>
51#include <linux/rmap.h> 51#include <linux/rmap.h>
52#include <linux/ksm.h>
52#include <linux/acct.h> 53#include <linux/acct.h>
53#include <linux/tsacct_kern.h> 54#include <linux/tsacct_kern.h>
54#include <linux/cn_proc.h> 55#include <linux/cn_proc.h>
@@ -61,7 +62,8 @@
61#include <linux/blkdev.h> 62#include <linux/blkdev.h>
62#include <linux/fs_struct.h> 63#include <linux/fs_struct.h>
63#include <linux/magic.h> 64#include <linux/magic.h>
64#include <linux/perf_counter.h> 65#include <linux/perf_event.h>
66#include <linux/posix-timers.h>
65 67
66#include <asm/pgtable.h> 68#include <asm/pgtable.h>
67#include <asm/pgalloc.h> 69#include <asm/pgalloc.h>
@@ -136,9 +138,17 @@ struct kmem_cache *vm_area_cachep;
136/* SLAB cache for mm_struct structures (tsk->mm) */ 138/* SLAB cache for mm_struct structures (tsk->mm) */
137static struct kmem_cache *mm_cachep; 139static struct kmem_cache *mm_cachep;
138 140
141static void account_kernel_stack(struct thread_info *ti, int account)
142{
143 struct zone *zone = page_zone(virt_to_page(ti));
144
145 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
146}
147
139void free_task(struct task_struct *tsk) 148void free_task(struct task_struct *tsk)
140{ 149{
141 prop_local_destroy_single(&tsk->dirties); 150 prop_local_destroy_single(&tsk->dirties);
151 account_kernel_stack(tsk->stack, -1);
142 free_thread_info(tsk->stack); 152 free_thread_info(tsk->stack);
143 rt_mutex_debug_task_free(tsk); 153 rt_mutex_debug_task_free(tsk);
144 ftrace_graph_exit_task(tsk); 154 ftrace_graph_exit_task(tsk);
@@ -253,6 +263,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
253 tsk->btrace_seq = 0; 263 tsk->btrace_seq = 0;
254#endif 264#endif
255 tsk->splice_pipe = NULL; 265 tsk->splice_pipe = NULL;
266
267 account_kernel_stack(ti, 1);
268
256 return tsk; 269 return tsk;
257 270
258out: 271out:
@@ -288,6 +301,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
288 rb_link = &mm->mm_rb.rb_node; 301 rb_link = &mm->mm_rb.rb_node;
289 rb_parent = NULL; 302 rb_parent = NULL;
290 pprev = &mm->mmap; 303 pprev = &mm->mmap;
304 retval = ksm_fork(mm, oldmm);
305 if (retval)
306 goto out;
291 307
292 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { 308 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
293 struct file *file; 309 struct file *file;
@@ -418,22 +434,30 @@ __setup("coredump_filter=", coredump_filter_setup);
418 434
419#include <linux/init_task.h> 435#include <linux/init_task.h>
420 436
437static void mm_init_aio(struct mm_struct *mm)
438{
439#ifdef CONFIG_AIO
440 spin_lock_init(&mm->ioctx_lock);
441 INIT_HLIST_HEAD(&mm->ioctx_list);
442#endif
443}
444
421static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) 445static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
422{ 446{
423 atomic_set(&mm->mm_users, 1); 447 atomic_set(&mm->mm_users, 1);
424 atomic_set(&mm->mm_count, 1); 448 atomic_set(&mm->mm_count, 1);
425 init_rwsem(&mm->mmap_sem); 449 init_rwsem(&mm->mmap_sem);
426 INIT_LIST_HEAD(&mm->mmlist); 450 INIT_LIST_HEAD(&mm->mmlist);
427 mm->flags = (current->mm) ? current->mm->flags : default_dump_filter; 451 mm->flags = (current->mm) ?
452 (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
428 mm->core_state = NULL; 453 mm->core_state = NULL;
429 mm->nr_ptes = 0; 454 mm->nr_ptes = 0;
430 set_mm_counter(mm, file_rss, 0); 455 set_mm_counter(mm, file_rss, 0);
431 set_mm_counter(mm, anon_rss, 0); 456 set_mm_counter(mm, anon_rss, 0);
432 spin_lock_init(&mm->page_table_lock); 457 spin_lock_init(&mm->page_table_lock);
433 spin_lock_init(&mm->ioctx_lock);
434 INIT_HLIST_HEAD(&mm->ioctx_list);
435 mm->free_area_cache = TASK_UNMAPPED_BASE; 458 mm->free_area_cache = TASK_UNMAPPED_BASE;
436 mm->cached_hole_size = ~0UL; 459 mm->cached_hole_size = ~0UL;
460 mm_init_aio(mm);
437 mm_init_owner(mm, p); 461 mm_init_owner(mm, p);
438 462
439 if (likely(!mm_alloc_pgd(mm))) { 463 if (likely(!mm_alloc_pgd(mm))) {
@@ -485,6 +509,7 @@ void mmput(struct mm_struct *mm)
485 509
486 if (atomic_dec_and_test(&mm->mm_users)) { 510 if (atomic_dec_and_test(&mm->mm_users)) {
487 exit_aio(mm); 511 exit_aio(mm);
512 ksm_exit(mm);
488 exit_mmap(mm); 513 exit_mmap(mm);
489 set_mm_exe_file(mm, NULL); 514 set_mm_exe_file(mm, NULL);
490 if (!list_empty(&mm->mmlist)) { 515 if (!list_empty(&mm->mmlist)) {
@@ -493,6 +518,8 @@ void mmput(struct mm_struct *mm)
493 spin_unlock(&mmlist_lock); 518 spin_unlock(&mmlist_lock);
494 } 519 }
495 put_swap_token(mm); 520 put_swap_token(mm);
521 if (mm->binfmt)
522 module_put(mm->binfmt->module);
496 mmdrop(mm); 523 mmdrop(mm);
497 } 524 }
498} 525}
@@ -624,9 +651,14 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
624 mm->hiwater_rss = get_mm_rss(mm); 651 mm->hiwater_rss = get_mm_rss(mm);
625 mm->hiwater_vm = mm->total_vm; 652 mm->hiwater_vm = mm->total_vm;
626 653
654 if (mm->binfmt && !try_module_get(mm->binfmt->module))
655 goto free_pt;
656
627 return mm; 657 return mm;
628 658
629free_pt: 659free_pt:
660 /* don't put binfmt in mmput, we haven't got module yet */
661 mm->binfmt = NULL;
630 mmput(mm); 662 mmput(mm);
631 663
632fail_nomem: 664fail_nomem:
@@ -794,10 +826,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
794 thread_group_cputime_init(sig); 826 thread_group_cputime_init(sig);
795 827
796 /* Expiration times and increments. */ 828 /* Expiration times and increments. */
797 sig->it_virt_expires = cputime_zero; 829 sig->it[CPUCLOCK_PROF].expires = cputime_zero;
798 sig->it_virt_incr = cputime_zero; 830 sig->it[CPUCLOCK_PROF].incr = cputime_zero;
799 sig->it_prof_expires = cputime_zero; 831 sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
800 sig->it_prof_incr = cputime_zero; 832 sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
801 833
802 /* Cached expiration times. */ 834 /* Cached expiration times. */
803 sig->cputime_expires.prof_exp = cputime_zero; 835 sig->cputime_expires.prof_exp = cputime_zero;
@@ -855,6 +887,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
855 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; 887 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
856 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; 888 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
857 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; 889 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
890 sig->maxrss = sig->cmaxrss = 0;
858 task_io_accounting_init(&sig->ioac); 891 task_io_accounting_init(&sig->ioac);
859 sig->sum_sched_runtime = 0; 892 sig->sum_sched_runtime = 0;
860 taskstats_tgid_init(sig); 893 taskstats_tgid_init(sig);
@@ -869,6 +902,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
869 902
870 tty_audit_fork(sig); 903 tty_audit_fork(sig);
871 904
905 sig->oom_adj = current->signal->oom_adj;
906
872 return 0; 907 return 0;
873} 908}
874 909
@@ -964,6 +999,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
964 if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) 999 if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
965 return ERR_PTR(-EINVAL); 1000 return ERR_PTR(-EINVAL);
966 1001
1002 /*
1003 * Siblings of global init remain as zombies on exit since they are
1004 * not reaped by their parent (swapper). To solve this and to avoid
1005 * multi-rooted process trees, prevent global and container-inits
1006 * from creating siblings.
1007 */
1008 if ((clone_flags & CLONE_PARENT) &&
1009 current->signal->flags & SIGNAL_UNKILLABLE)
1010 return ERR_PTR(-EINVAL);
1011
967 retval = security_task_create(clone_flags); 1012 retval = security_task_create(clone_flags);
968 if (retval) 1013 if (retval)
969 goto fork_out; 1014 goto fork_out;
@@ -1005,9 +1050,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1005 if (!try_module_get(task_thread_info(p)->exec_domain->module)) 1050 if (!try_module_get(task_thread_info(p)->exec_domain->module))
1006 goto bad_fork_cleanup_count; 1051 goto bad_fork_cleanup_count;
1007 1052
1008 if (p->binfmt && !try_module_get(p->binfmt->module))
1009 goto bad_fork_cleanup_put_domain;
1010
1011 p->did_exec = 0; 1053 p->did_exec = 0;
1012 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 1054 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
1013 copy_flags(clone_flags, p); 1055 copy_flags(clone_flags, p);
@@ -1081,10 +1123,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1081 1123
1082 p->bts = NULL; 1124 p->bts = NULL;
1083 1125
1126 p->stack_start = stack_start;
1127
1084 /* Perform scheduler related setup. Assign this task to a CPU. */ 1128 /* Perform scheduler related setup. Assign this task to a CPU. */
1085 sched_fork(p, clone_flags); 1129 sched_fork(p, clone_flags);
1086 1130
1087 retval = perf_counter_init_task(p); 1131 retval = perf_event_init_task(p);
1088 if (retval) 1132 if (retval)
1089 goto bad_fork_cleanup_policy; 1133 goto bad_fork_cleanup_policy;
1090 1134
@@ -1259,7 +1303,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1259 write_unlock_irq(&tasklist_lock); 1303 write_unlock_irq(&tasklist_lock);
1260 proc_fork_connector(p); 1304 proc_fork_connector(p);
1261 cgroup_post_fork(p); 1305 cgroup_post_fork(p);
1262 perf_counter_fork(p); 1306 perf_event_fork(p);
1263 return p; 1307 return p;
1264 1308
1265bad_fork_free_pid: 1309bad_fork_free_pid:
@@ -1286,16 +1330,13 @@ bad_fork_cleanup_semundo:
1286bad_fork_cleanup_audit: 1330bad_fork_cleanup_audit:
1287 audit_free(p); 1331 audit_free(p);
1288bad_fork_cleanup_policy: 1332bad_fork_cleanup_policy:
1289 perf_counter_free_task(p); 1333 perf_event_free_task(p);
1290#ifdef CONFIG_NUMA 1334#ifdef CONFIG_NUMA
1291 mpol_put(p->mempolicy); 1335 mpol_put(p->mempolicy);
1292bad_fork_cleanup_cgroup: 1336bad_fork_cleanup_cgroup:
1293#endif 1337#endif
1294 cgroup_exit(p, cgroup_callbacks_done); 1338 cgroup_exit(p, cgroup_callbacks_done);
1295 delayacct_tsk_free(p); 1339 delayacct_tsk_free(p);
1296 if (p->binfmt)
1297 module_put(p->binfmt->module);
1298bad_fork_cleanup_put_domain:
1299 module_put(task_thread_info(p)->exec_domain->module); 1340 module_put(task_thread_info(p)->exec_domain->module);
1300bad_fork_cleanup_count: 1341bad_fork_cleanup_count:
1301 atomic_dec(&p->cred->user->processes); 1342 atomic_dec(&p->cred->user->processes);
diff --git a/kernel/futex.c b/kernel/futex.c
index c3bb2fce11ba..4949d336d88d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1656,6 +1656,12 @@ out:
1656static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, 1656static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1657 struct hrtimer_sleeper *timeout) 1657 struct hrtimer_sleeper *timeout)
1658{ 1658{
1659 /*
1660 * The task state is guaranteed to be set before another task can
1661 * wake it. set_current_state() is implemented using set_mb() and
1662 * queue_me() calls spin_unlock() upon completion, both serializing
1663 * access to the hash list and forcing another memory barrier.
1664 */
1659 set_current_state(TASK_INTERRUPTIBLE); 1665 set_current_state(TASK_INTERRUPTIBLE);
1660 queue_me(q, hb); 1666 queue_me(q, hb);
1661 1667
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 654efd09f6a9..70a298d6da71 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -34,7 +34,7 @@ config GCOV_KERNEL
34config GCOV_PROFILE_ALL 34config GCOV_PROFILE_ALL
35 bool "Profile entire Kernel" 35 bool "Profile entire Kernel"
36 depends on GCOV_KERNEL 36 depends on GCOV_KERNEL
37 depends on S390 || X86 || (PPC && EXPERIMENTAL) 37 depends on S390 || X86 || (PPC && EXPERIMENTAL) || MICROBLAZE
38 default n 38 default n
39 ---help--- 39 ---help---
40 This option activates profiling for the entire kernel. 40 This option activates profiling for the entire kernel.
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c03f221fee44..3e1c36e7998f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -48,6 +48,8 @@
48 48
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#include <trace/events/timer.h>
52
51/* 53/*
52 * The timer bases: 54 * The timer bases:
53 * 55 *
@@ -442,6 +444,26 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
442static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } 444static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
443#endif 445#endif
444 446
447static inline void
448debug_init(struct hrtimer *timer, clockid_t clockid,
449 enum hrtimer_mode mode)
450{
451 debug_hrtimer_init(timer);
452 trace_hrtimer_init(timer, clockid, mode);
453}
454
455static inline void debug_activate(struct hrtimer *timer)
456{
457 debug_hrtimer_activate(timer);
458 trace_hrtimer_start(timer);
459}
460
461static inline void debug_deactivate(struct hrtimer *timer)
462{
463 debug_hrtimer_deactivate(timer);
464 trace_hrtimer_cancel(timer);
465}
466
445/* High resolution timer related functions */ 467/* High resolution timer related functions */
446#ifdef CONFIG_HIGH_RES_TIMERS 468#ifdef CONFIG_HIGH_RES_TIMERS
447 469
@@ -487,13 +509,14 @@ static inline int hrtimer_hres_active(void)
487 * next event 509 * next event
488 * Called with interrupts disabled and base->lock held 510 * Called with interrupts disabled and base->lock held
489 */ 511 */
490static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base) 512static void
513hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
491{ 514{
492 int i; 515 int i;
493 struct hrtimer_clock_base *base = cpu_base->clock_base; 516 struct hrtimer_clock_base *base = cpu_base->clock_base;
494 ktime_t expires; 517 ktime_t expires, expires_next;
495 518
496 cpu_base->expires_next.tv64 = KTIME_MAX; 519 expires_next.tv64 = KTIME_MAX;
497 520
498 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { 521 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
499 struct hrtimer *timer; 522 struct hrtimer *timer;
@@ -509,10 +532,15 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
509 */ 532 */
510 if (expires.tv64 < 0) 533 if (expires.tv64 < 0)
511 expires.tv64 = 0; 534 expires.tv64 = 0;
512 if (expires.tv64 < cpu_base->expires_next.tv64) 535 if (expires.tv64 < expires_next.tv64)
513 cpu_base->expires_next = expires; 536 expires_next = expires;
514 } 537 }
515 538
539 if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
540 return;
541
542 cpu_base->expires_next.tv64 = expires_next.tv64;
543
516 if (cpu_base->expires_next.tv64 != KTIME_MAX) 544 if (cpu_base->expires_next.tv64 != KTIME_MAX)
517 tick_program_event(cpu_base->expires_next, 1); 545 tick_program_event(cpu_base->expires_next, 1);
518} 546}
@@ -595,7 +623,7 @@ static void retrigger_next_event(void *arg)
595 base->clock_base[CLOCK_REALTIME].offset = 623 base->clock_base[CLOCK_REALTIME].offset =
596 timespec_to_ktime(realtime_offset); 624 timespec_to_ktime(realtime_offset);
597 625
598 hrtimer_force_reprogram(base); 626 hrtimer_force_reprogram(base, 0);
599 spin_unlock(&base->lock); 627 spin_unlock(&base->lock);
600} 628}
601 629
@@ -698,8 +726,6 @@ static int hrtimer_switch_to_hres(void)
698 /* "Retrigger" the interrupt to get things going */ 726 /* "Retrigger" the interrupt to get things going */
699 retrigger_next_event(NULL); 727 retrigger_next_event(NULL);
700 local_irq_restore(flags); 728 local_irq_restore(flags);
701 printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
702 smp_processor_id());
703 return 1; 729 return 1;
704} 730}
705 731
@@ -708,7 +734,8 @@ static int hrtimer_switch_to_hres(void)
708static inline int hrtimer_hres_active(void) { return 0; } 734static inline int hrtimer_hres_active(void) { return 0; }
709static inline int hrtimer_is_hres_enabled(void) { return 0; } 735static inline int hrtimer_is_hres_enabled(void) { return 0; }
710static inline int hrtimer_switch_to_hres(void) { return 0; } 736static inline int hrtimer_switch_to_hres(void) { return 0; }
711static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } 737static inline void
738hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
712static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, 739static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
713 struct hrtimer_clock_base *base, 740 struct hrtimer_clock_base *base,
714 int wakeup) 741 int wakeup)
@@ -798,7 +825,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
798 struct hrtimer *entry; 825 struct hrtimer *entry;
799 int leftmost = 1; 826 int leftmost = 1;
800 827
801 debug_hrtimer_activate(timer); 828 debug_activate(timer);
802 829
803 /* 830 /*
804 * Find the right place in the rbtree: 831 * Find the right place in the rbtree:
@@ -851,19 +878,29 @@ static void __remove_hrtimer(struct hrtimer *timer,
851 struct hrtimer_clock_base *base, 878 struct hrtimer_clock_base *base,
852 unsigned long newstate, int reprogram) 879 unsigned long newstate, int reprogram)
853{ 880{
854 if (timer->state & HRTIMER_STATE_ENQUEUED) { 881 if (!(timer->state & HRTIMER_STATE_ENQUEUED))
855 /* 882 goto out;
856 * Remove the timer from the rbtree and replace the 883
857 * first entry pointer if necessary. 884 /*
858 */ 885 * Remove the timer from the rbtree and replace the first
859 if (base->first == &timer->node) { 886 * entry pointer if necessary.
860 base->first = rb_next(&timer->node); 887 */
861 /* Reprogram the clock event device. if enabled */ 888 if (base->first == &timer->node) {
862 if (reprogram && hrtimer_hres_active()) 889 base->first = rb_next(&timer->node);
863 hrtimer_force_reprogram(base->cpu_base); 890#ifdef CONFIG_HIGH_RES_TIMERS
891 /* Reprogram the clock event device, if enabled */
892 if (reprogram && hrtimer_hres_active()) {
893 ktime_t expires;
894
895 expires = ktime_sub(hrtimer_get_expires(timer),
896 base->offset);
897 if (base->cpu_base->expires_next.tv64 == expires.tv64)
898 hrtimer_force_reprogram(base->cpu_base, 1);
864 } 899 }
865 rb_erase(&timer->node, &base->active); 900#endif
866 } 901 }
902 rb_erase(&timer->node, &base->active);
903out:
867 timer->state = newstate; 904 timer->state = newstate;
868} 905}
869 906
@@ -884,7 +921,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
884 * reprogramming happens in the interrupt handler. This is a 921 * reprogramming happens in the interrupt handler. This is a
885 * rare case and less expensive than a smp call. 922 * rare case and less expensive than a smp call.
886 */ 923 */
887 debug_hrtimer_deactivate(timer); 924 debug_deactivate(timer);
888 timer_stats_hrtimer_clear_start_info(timer); 925 timer_stats_hrtimer_clear_start_info(timer);
889 reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); 926 reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
890 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 927 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
@@ -1117,7 +1154,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1117void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, 1154void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1118 enum hrtimer_mode mode) 1155 enum hrtimer_mode mode)
1119{ 1156{
1120 debug_hrtimer_init(timer); 1157 debug_init(timer, clock_id, mode);
1121 __hrtimer_init(timer, clock_id, mode); 1158 __hrtimer_init(timer, clock_id, mode);
1122} 1159}
1123EXPORT_SYMBOL_GPL(hrtimer_init); 1160EXPORT_SYMBOL_GPL(hrtimer_init);
@@ -1141,7 +1178,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
1141} 1178}
1142EXPORT_SYMBOL_GPL(hrtimer_get_res); 1179EXPORT_SYMBOL_GPL(hrtimer_get_res);
1143 1180
1144static void __run_hrtimer(struct hrtimer *timer) 1181static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
1145{ 1182{
1146 struct hrtimer_clock_base *base = timer->base; 1183 struct hrtimer_clock_base *base = timer->base;
1147 struct hrtimer_cpu_base *cpu_base = base->cpu_base; 1184 struct hrtimer_cpu_base *cpu_base = base->cpu_base;
@@ -1150,7 +1187,7 @@ static void __run_hrtimer(struct hrtimer *timer)
1150 1187
1151 WARN_ON(!irqs_disabled()); 1188 WARN_ON(!irqs_disabled());
1152 1189
1153 debug_hrtimer_deactivate(timer); 1190 debug_deactivate(timer);
1154 __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); 1191 __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
1155 timer_stats_account_hrtimer(timer); 1192 timer_stats_account_hrtimer(timer);
1156 fn = timer->function; 1193 fn = timer->function;
@@ -1161,7 +1198,9 @@ static void __run_hrtimer(struct hrtimer *timer)
1161 * the timer base. 1198 * the timer base.
1162 */ 1199 */
1163 spin_unlock(&cpu_base->lock); 1200 spin_unlock(&cpu_base->lock);
1201 trace_hrtimer_expire_entry(timer, now);
1164 restart = fn(timer); 1202 restart = fn(timer);
1203 trace_hrtimer_expire_exit(timer);
1165 spin_lock(&cpu_base->lock); 1204 spin_lock(&cpu_base->lock);
1166 1205
1167 /* 1206 /*
@@ -1272,7 +1311,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1272 break; 1311 break;
1273 } 1312 }
1274 1313
1275 __run_hrtimer(timer); 1314 __run_hrtimer(timer, &basenow);
1276 } 1315 }
1277 base++; 1316 base++;
1278 } 1317 }
@@ -1394,7 +1433,7 @@ void hrtimer_run_queues(void)
1394 hrtimer_get_expires_tv64(timer)) 1433 hrtimer_get_expires_tv64(timer))
1395 break; 1434 break;
1396 1435
1397 __run_hrtimer(timer); 1436 __run_hrtimer(timer, &base->softirq_time);
1398 } 1437 }
1399 spin_unlock(&cpu_base->lock); 1438 spin_unlock(&cpu_base->lock);
1400 } 1439 }
@@ -1571,7 +1610,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
1571 while ((node = rb_first(&old_base->active))) { 1610 while ((node = rb_first(&old_base->active))) {
1572 timer = rb_entry(node, struct hrtimer, node); 1611 timer = rb_entry(node, struct hrtimer, node);
1573 BUG_ON(hrtimer_callback_running(timer)); 1612 BUG_ON(hrtimer_callback_running(timer));
1574 debug_hrtimer_deactivate(timer); 1613 debug_deactivate(timer);
1575 1614
1576 /* 1615 /*
1577 * Mark it as STATE_MIGRATE not INACTIVE otherwise the 1616 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
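[Editor's note] The hrtimer hunks above change __remove_hrtimer() so the clock event device is only reprogrammed when the removed timer was the one actually programmed to fire next, and hrtimer_force_reprogram() gains a skip_equal argument for that case. Below is a minimal userspace sketch of that decision; the struct and field names are illustrative stand-ins, not the kernel's hrtimer types.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_timer    { int64_t expires_ns; };      /* stand-in for struct hrtimer          */
    struct toy_cpu_base { int64_t expires_next_ns; };  /* stand-in for struct hrtimer_cpu_base */

    /* Mirror the new check: removing a timer only forces a reprogram when its
     * expiry equals the value currently programmed into the event device. */
    static int removal_needs_reprogram(const struct toy_timer *t,
                                       const struct toy_cpu_base *b)
    {
            return t->expires_ns == b->expires_next_ns;
    }

    int main(void)
    {
            struct toy_cpu_base base = { .expires_next_ns = 1000000 };
            struct toy_timer head = { .expires_ns = 1000000 };
            struct toy_timer tail = { .expires_ns = 9000000 };

            printf("remove head timer:  reprogram=%d\n", removal_needs_reprogram(&head, &base));
            printf("remove later timer: reprogram=%d\n", removal_needs_reprogram(&tail, &base));
            return 0;
    }

Removing a timer that is not the next to expire therefore no longer touches the event device, which is the point of the skip_equal parameter.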
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 022a4927b785..d4e841747400 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -171,12 +171,12 @@ static unsigned long timeout_jiffies(unsigned long timeout)
171 * Process updating of timeout sysctl 171 * Process updating of timeout sysctl
172 */ 172 */
173int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, 173int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
174 struct file *filp, void __user *buffer, 174 void __user *buffer,
175 size_t *lenp, loff_t *ppos) 175 size_t *lenp, loff_t *ppos)
176{ 176{
177 int ret; 177 int ret;
178 178
179 ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos); 179 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
180 180
181 if (ret || !write) 181 if (ret || !write)
182 goto out; 182 goto out;
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 58762f7077ec..b03451ede528 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -12,6 +12,7 @@
12#include <linux/time.h> 12#include <linux/time.h>
13#include <linux/posix-timers.h> 13#include <linux/posix-timers.h>
14#include <linux/hrtimer.h> 14#include <linux/hrtimer.h>
15#include <trace/events/timer.h>
15 16
16#include <asm/uaccess.h> 17#include <asm/uaccess.h>
17 18
@@ -41,10 +42,43 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
41 return ktime_to_timeval(rem); 42 return ktime_to_timeval(rem);
42} 43}
43 44
45static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
46 struct itimerval *const value)
47{
48 cputime_t cval, cinterval;
49 struct cpu_itimer *it = &tsk->signal->it[clock_id];
50
51 spin_lock_irq(&tsk->sighand->siglock);
52
53 cval = it->expires;
54 cinterval = it->incr;
55 if (!cputime_eq(cval, cputime_zero)) {
56 struct task_cputime cputime;
57 cputime_t t;
58
59 thread_group_cputimer(tsk, &cputime);
60 if (clock_id == CPUCLOCK_PROF)
61 t = cputime_add(cputime.utime, cputime.stime);
62 else
63 /* CPUCLOCK_VIRT */
64 t = cputime.utime;
65
66 if (cputime_le(cval, t))
67 /* about to fire */
68 cval = cputime_one_jiffy;
69 else
70 cval = cputime_sub(cval, t);
71 }
72
73 spin_unlock_irq(&tsk->sighand->siglock);
74
75 cputime_to_timeval(cval, &value->it_value);
76 cputime_to_timeval(cinterval, &value->it_interval);
77}
78
44int do_getitimer(int which, struct itimerval *value) 79int do_getitimer(int which, struct itimerval *value)
45{ 80{
46 struct task_struct *tsk = current; 81 struct task_struct *tsk = current;
47 cputime_t cinterval, cval;
48 82
49 switch (which) { 83 switch (which) {
50 case ITIMER_REAL: 84 case ITIMER_REAL:
@@ -55,44 +89,10 @@ int do_getitimer(int which, struct itimerval *value)
55 spin_unlock_irq(&tsk->sighand->siglock); 89 spin_unlock_irq(&tsk->sighand->siglock);
56 break; 90 break;
57 case ITIMER_VIRTUAL: 91 case ITIMER_VIRTUAL:
58 spin_lock_irq(&tsk->sighand->siglock); 92 get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
59 cval = tsk->signal->it_virt_expires;
60 cinterval = tsk->signal->it_virt_incr;
61 if (!cputime_eq(cval, cputime_zero)) {
62 struct task_cputime cputime;
63 cputime_t utime;
64
65 thread_group_cputimer(tsk, &cputime);
66 utime = cputime.utime;
67 if (cputime_le(cval, utime)) { /* about to fire */
68 cval = jiffies_to_cputime(1);
69 } else {
70 cval = cputime_sub(cval, utime);
71 }
72 }
73 spin_unlock_irq(&tsk->sighand->siglock);
74 cputime_to_timeval(cval, &value->it_value);
75 cputime_to_timeval(cinterval, &value->it_interval);
76 break; 93 break;
77 case ITIMER_PROF: 94 case ITIMER_PROF:
78 spin_lock_irq(&tsk->sighand->siglock); 95 get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
79 cval = tsk->signal->it_prof_expires;
80 cinterval = tsk->signal->it_prof_incr;
81 if (!cputime_eq(cval, cputime_zero)) {
82 struct task_cputime times;
83 cputime_t ptime;
84
85 thread_group_cputimer(tsk, &times);
86 ptime = cputime_add(times.utime, times.stime);
87 if (cputime_le(cval, ptime)) { /* about to fire */
88 cval = jiffies_to_cputime(1);
89 } else {
90 cval = cputime_sub(cval, ptime);
91 }
92 }
93 spin_unlock_irq(&tsk->sighand->siglock);
94 cputime_to_timeval(cval, &value->it_value);
95 cputime_to_timeval(cinterval, &value->it_interval);
96 break; 96 break;
97 default: 97 default:
98 return(-EINVAL); 98 return(-EINVAL);
@@ -123,11 +123,62 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
123 struct signal_struct *sig = 123 struct signal_struct *sig =
124 container_of(timer, struct signal_struct, real_timer); 124 container_of(timer, struct signal_struct, real_timer);
125 125
126 trace_itimer_expire(ITIMER_REAL, sig->leader_pid, 0);
126 kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid); 127 kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid);
127 128
128 return HRTIMER_NORESTART; 129 return HRTIMER_NORESTART;
129} 130}
130 131
132static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
133{
134 struct timespec ts;
135 s64 cpu_ns;
136
137 cputime_to_timespec(ct, &ts);
138 cpu_ns = timespec_to_ns(&ts);
139
140 return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
141}
142
143static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
144 const struct itimerval *const value,
145 struct itimerval *const ovalue)
146{
147 cputime_t cval, nval, cinterval, ninterval;
148 s64 ns_ninterval, ns_nval;
149 struct cpu_itimer *it = &tsk->signal->it[clock_id];
150
151 nval = timeval_to_cputime(&value->it_value);
152 ns_nval = timeval_to_ns(&value->it_value);
153 ninterval = timeval_to_cputime(&value->it_interval);
154 ns_ninterval = timeval_to_ns(&value->it_interval);
155
156 it->incr_error = cputime_sub_ns(ninterval, ns_ninterval);
157 it->error = cputime_sub_ns(nval, ns_nval);
158
159 spin_lock_irq(&tsk->sighand->siglock);
160
161 cval = it->expires;
162 cinterval = it->incr;
163 if (!cputime_eq(cval, cputime_zero) ||
164 !cputime_eq(nval, cputime_zero)) {
165 if (cputime_gt(nval, cputime_zero))
166 nval = cputime_add(nval, cputime_one_jiffy);
167 set_process_cpu_timer(tsk, clock_id, &nval, &cval);
168 }
169 it->expires = nval;
170 it->incr = ninterval;
171 trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
172 ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
173
174 spin_unlock_irq(&tsk->sighand->siglock);
175
176 if (ovalue) {
177 cputime_to_timeval(cval, &ovalue->it_value);
178 cputime_to_timeval(cinterval, &ovalue->it_interval);
179 }
180}
181
131/* 182/*
132 * Returns true if the timeval is in canonical form 183 * Returns true if the timeval is in canonical form
133 */ 184 */
@@ -139,7 +190,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
139 struct task_struct *tsk = current; 190 struct task_struct *tsk = current;
140 struct hrtimer *timer; 191 struct hrtimer *timer;
141 ktime_t expires; 192 ktime_t expires;
142 cputime_t cval, cinterval, nval, ninterval;
143 193
144 /* 194 /*
145 * Validate the timevals in value. 195 * Validate the timevals in value.
@@ -171,51 +221,14 @@ again:
171 } else 221 } else
172 tsk->signal->it_real_incr.tv64 = 0; 222 tsk->signal->it_real_incr.tv64 = 0;
173 223
224 trace_itimer_state(ITIMER_REAL, value, 0);
174 spin_unlock_irq(&tsk->sighand->siglock); 225 spin_unlock_irq(&tsk->sighand->siglock);
175 break; 226 break;
176 case ITIMER_VIRTUAL: 227 case ITIMER_VIRTUAL:
177 nval = timeval_to_cputime(&value->it_value); 228 set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
178 ninterval = timeval_to_cputime(&value->it_interval);
179 spin_lock_irq(&tsk->sighand->siglock);
180 cval = tsk->signal->it_virt_expires;
181 cinterval = tsk->signal->it_virt_incr;
182 if (!cputime_eq(cval, cputime_zero) ||
183 !cputime_eq(nval, cputime_zero)) {
184 if (cputime_gt(nval, cputime_zero))
185 nval = cputime_add(nval,
186 jiffies_to_cputime(1));
187 set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
188 &nval, &cval);
189 }
190 tsk->signal->it_virt_expires = nval;
191 tsk->signal->it_virt_incr = ninterval;
192 spin_unlock_irq(&tsk->sighand->siglock);
193 if (ovalue) {
194 cputime_to_timeval(cval, &ovalue->it_value);
195 cputime_to_timeval(cinterval, &ovalue->it_interval);
196 }
197 break; 229 break;
198 case ITIMER_PROF: 230 case ITIMER_PROF:
199 nval = timeval_to_cputime(&value->it_value); 231 set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
200 ninterval = timeval_to_cputime(&value->it_interval);
201 spin_lock_irq(&tsk->sighand->siglock);
202 cval = tsk->signal->it_prof_expires;
203 cinterval = tsk->signal->it_prof_incr;
204 if (!cputime_eq(cval, cputime_zero) ||
205 !cputime_eq(nval, cputime_zero)) {
206 if (cputime_gt(nval, cputime_zero))
207 nval = cputime_add(nval,
208 jiffies_to_cputime(1));
209 set_process_cpu_timer(tsk, CPUCLOCK_PROF,
210 &nval, &cval);
211 }
212 tsk->signal->it_prof_expires = nval;
213 tsk->signal->it_prof_incr = ninterval;
214 spin_unlock_irq(&tsk->sighand->siglock);
215 if (ovalue) {
216 cputime_to_timeval(cval, &ovalue->it_value);
217 cputime_to_timeval(cinterval, &ovalue->it_interval);
218 }
219 break; 232 break;
220 default: 233 default:
221 return -EINVAL; 234 return -EINVAL;
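[Editor's note] The itimer changes above fold the duplicated ITIMER_VIRTUAL/ITIMER_PROF branches into get_cpu_itimer()/set_cpu_itimer() and, via cputime_sub_ns(), start recording the error introduced when a requested interval is rounded up to cputime granularity. A rough userspace model of that bookkeeping follows; the 10 ms tick is an assumption standing in for the kernel's cputime unit.

    #include <stdio.h>

    #define TICK_NS 10000000LL      /* assumed 10 ms granularity, i.e. HZ=100 */

    /* Round a requested interval up to the coarser tick, roughly what
     * timeval_to_cputime() does to a user-supplied itimerval. */
    static long long round_up_to_tick(long long ns)
    {
            return ((ns + TICK_NS - 1) / TICK_NS) * TICK_NS;
    }

    /* Model of cputime_sub_ns(): by how many nanoseconds the rounded value
     * overshoots the request (0 if it does not overshoot). */
    static long long rounding_error_ns(long long requested_ns)
    {
            long long granted_ns = round_up_to_tick(requested_ns);
            return granted_ns <= requested_ns ? 0 : granted_ns - requested_ns;
    }

    int main(void)
    {
            printf("request 25 ms -> error %lld ns\n", rounding_error_ns(25000000LL));
            printf("request 30 ms -> error %lld ns\n", rounding_error_ns(30000000LL));
            return 0;
    }

set_cpu_itimer() stores this per-timer error (it->error, it->incr_error) so that later expiry processing can compensate for the coarse granularity.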
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 3a29dbe7898e..8b6b8b697c68 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -59,7 +59,8 @@ static inline int is_kernel_inittext(unsigned long addr)
59 59
60static inline int is_kernel_text(unsigned long addr) 60static inline int is_kernel_text(unsigned long addr)
61{ 61{
62 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) 62 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63 arch_is_kernel_text(addr))
63 return 1; 64 return 1;
64 return in_gate_area_no_task(addr); 65 return in_gate_area_no_task(addr);
65} 66}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ef177d653b2c..5240d75f4c60 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1321,7 +1321,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
1321 return 0; 1321 return 0;
1322} 1322}
1323 1323
1324static struct seq_operations kprobes_seq_ops = { 1324static const struct seq_operations kprobes_seq_ops = {
1325 .start = kprobe_seq_start, 1325 .start = kprobe_seq_start,
1326 .next = kprobe_seq_next, 1326 .next = kprobe_seq_next,
1327 .stop = kprobe_seq_stop, 1327 .stop = kprobe_seq_stop,
@@ -1333,7 +1333,7 @@ static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
1333 return seq_open(filp, &kprobes_seq_ops); 1333 return seq_open(filp, &kprobes_seq_ops);
1334} 1334}
1335 1335
1336static struct file_operations debugfs_kprobes_operations = { 1336static const struct file_operations debugfs_kprobes_operations = {
1337 .open = kprobes_open, 1337 .open = kprobes_open,
1338 .read = seq_read, 1338 .read = seq_read,
1339 .llseek = seq_lseek, 1339 .llseek = seq_lseek,
@@ -1515,7 +1515,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
1515 return count; 1515 return count;
1516} 1516}
1517 1517
1518static struct file_operations fops_kp = { 1518static const struct file_operations fops_kp = {
1519 .read = read_enabled_file_bool, 1519 .read = read_enabled_file_bool,
1520 .write = write_enabled_file_bool, 1520 .write = write_enabled_file_bool,
1521}; 1521};
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f74d2d7aa605..3815ac1d58b2 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -578,6 +578,9 @@ static int static_obj(void *obj)
578 if ((addr >= start) && (addr < end)) 578 if ((addr >= start) && (addr < end))
579 return 1; 579 return 1;
580 580
581 if (arch_is_kernel_data(addr))
582 return 1;
583
581#ifdef CONFIG_SMP 584#ifdef CONFIG_SMP
582 /* 585 /*
583 * percpu var? 586 * percpu var?
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d4b3dbc79fdb..d4aba4f3584c 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -594,7 +594,7 @@ static int ls_show(struct seq_file *m, void *v)
594 return 0; 594 return 0;
595} 595}
596 596
597static struct seq_operations lockstat_ops = { 597static const struct seq_operations lockstat_ops = {
598 .start = ls_start, 598 .start = ls_start,
599 .next = ls_next, 599 .next = ls_next,
600 .stop = ls_stop, 600 .stop = ls_stop,
diff --git a/kernel/module.c b/kernel/module.c
index b6ee424245dd..8b7d8805819d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -47,6 +47,7 @@
47#include <linux/rculist.h> 47#include <linux/rculist.h>
48#include <asm/uaccess.h> 48#include <asm/uaccess.h>
49#include <asm/cacheflush.h> 49#include <asm/cacheflush.h>
50#include <asm/mmu_context.h>
50#include <linux/license.h> 51#include <linux/license.h>
51#include <asm/sections.h> 52#include <asm/sections.h>
52#include <linux/tracepoint.h> 53#include <linux/tracepoint.h>
@@ -1535,6 +1536,10 @@ static void free_module(struct module *mod)
1535 1536
1536 /* Finally, free the core (containing the module structure) */ 1537 /* Finally, free the core (containing the module structure) */
1537 module_free(mod, mod->module_core); 1538 module_free(mod, mod->module_core);
1539
1540#ifdef CONFIG_MPU
1541 update_protections(current->mm);
1542#endif
1538} 1543}
1539 1544
1540void *__symbol_get(const char *symbol) 1545void *__symbol_get(const char *symbol)
@@ -1792,6 +1797,17 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
1792 } 1797 }
1793} 1798}
1794 1799
1800static void free_modinfo(struct module *mod)
1801{
1802 struct module_attribute *attr;
1803 int i;
1804
1805 for (i = 0; (attr = modinfo_attrs[i]); i++) {
1806 if (attr->free)
1807 attr->free(mod);
1808 }
1809}
1810
1795#ifdef CONFIG_KALLSYMS 1811#ifdef CONFIG_KALLSYMS
1796 1812
1797/* lookup symbol in given range of kernel_symbols */ 1813/* lookup symbol in given range of kernel_symbols */
@@ -1857,13 +1873,93 @@ static char elf_type(const Elf_Sym *sym,
1857 return '?'; 1873 return '?';
1858} 1874}
1859 1875
1876static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
1877 unsigned int shnum)
1878{
1879 const Elf_Shdr *sec;
1880
1881 if (src->st_shndx == SHN_UNDEF
1882 || src->st_shndx >= shnum
1883 || !src->st_name)
1884 return false;
1885
1886 sec = sechdrs + src->st_shndx;
1887 if (!(sec->sh_flags & SHF_ALLOC)
1888#ifndef CONFIG_KALLSYMS_ALL
1889 || !(sec->sh_flags & SHF_EXECINSTR)
1890#endif
1891 || (sec->sh_entsize & INIT_OFFSET_MASK))
1892 return false;
1893
1894 return true;
1895}
1896
1897static unsigned long layout_symtab(struct module *mod,
1898 Elf_Shdr *sechdrs,
1899 unsigned int symindex,
1900 unsigned int strindex,
1901 const Elf_Ehdr *hdr,
1902 const char *secstrings,
1903 unsigned long *pstroffs,
1904 unsigned long *strmap)
1905{
1906 unsigned long symoffs;
1907 Elf_Shdr *symsect = sechdrs + symindex;
1908 Elf_Shdr *strsect = sechdrs + strindex;
1909 const Elf_Sym *src;
1910 const char *strtab;
1911 unsigned int i, nsrc, ndst;
1912
1913 /* Put symbol section at end of init part of module. */
1914 symsect->sh_flags |= SHF_ALLOC;
1915 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
1916 symindex) | INIT_OFFSET_MASK;
1917 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
1918
1919 src = (void *)hdr + symsect->sh_offset;
1920 nsrc = symsect->sh_size / sizeof(*src);
1921 strtab = (void *)hdr + strsect->sh_offset;
1922 for (ndst = i = 1; i < nsrc; ++i, ++src)
1923 if (is_core_symbol(src, sechdrs, hdr->e_shnum)) {
1924 unsigned int j = src->st_name;
1925
1926 while(!__test_and_set_bit(j, strmap) && strtab[j])
1927 ++j;
1928 ++ndst;
1929 }
1930
1931 /* Append room for core symbols at end of core part. */
1932 symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
1933 mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
1934
1935 /* Put string table section at end of init part of module. */
1936 strsect->sh_flags |= SHF_ALLOC;
1937 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
1938 strindex) | INIT_OFFSET_MASK;
1939 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
1940
1941 /* Append room for core symbols' strings at end of core part. */
1942 *pstroffs = mod->core_size;
1943 __set_bit(0, strmap);
1944 mod->core_size += bitmap_weight(strmap, strsect->sh_size);
1945
1946 return symoffs;
1947}
1948
1860static void add_kallsyms(struct module *mod, 1949static void add_kallsyms(struct module *mod,
1861 Elf_Shdr *sechdrs, 1950 Elf_Shdr *sechdrs,
1951 unsigned int shnum,
1862 unsigned int symindex, 1952 unsigned int symindex,
1863 unsigned int strindex, 1953 unsigned int strindex,
1864 const char *secstrings) 1954 unsigned long symoffs,
1955 unsigned long stroffs,
1956 const char *secstrings,
1957 unsigned long *strmap)
1865{ 1958{
1866 unsigned int i; 1959 unsigned int i, ndst;
1960 const Elf_Sym *src;
1961 Elf_Sym *dst;
1962 char *s;
1867 1963
1868 mod->symtab = (void *)sechdrs[symindex].sh_addr; 1964 mod->symtab = (void *)sechdrs[symindex].sh_addr;
1869 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym); 1965 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
@@ -1873,13 +1969,46 @@ static void add_kallsyms(struct module *mod,
1873 for (i = 0; i < mod->num_symtab; i++) 1969 for (i = 0; i < mod->num_symtab; i++)
1874 mod->symtab[i].st_info 1970 mod->symtab[i].st_info
1875 = elf_type(&mod->symtab[i], sechdrs, secstrings, mod); 1971 = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
1972
1973 mod->core_symtab = dst = mod->module_core + symoffs;
1974 src = mod->symtab;
1975 *dst = *src;
1976 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
1977 if (!is_core_symbol(src, sechdrs, shnum))
1978 continue;
1979 dst[ndst] = *src;
1980 dst[ndst].st_name = bitmap_weight(strmap, dst[ndst].st_name);
1981 ++ndst;
1982 }
1983 mod->core_num_syms = ndst;
1984
1985 mod->core_strtab = s = mod->module_core + stroffs;
1986 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
1987 if (test_bit(i, strmap))
1988 *++s = mod->strtab[i];
1876} 1989}
1877#else 1990#else
1991static inline unsigned long layout_symtab(struct module *mod,
1992 Elf_Shdr *sechdrs,
1993 unsigned int symindex,
1994 unsigned int strindex,
1995 const Elf_Ehdr *hdr,
1996 const char *secstrings,
1997 unsigned long *pstroffs,
1998 unsigned long *strmap)
1999{
2000 return 0;
2001}
2002
1878static inline void add_kallsyms(struct module *mod, 2003static inline void add_kallsyms(struct module *mod,
1879 Elf_Shdr *sechdrs, 2004 Elf_Shdr *sechdrs,
2005 unsigned int shnum,
1880 unsigned int symindex, 2006 unsigned int symindex,
1881 unsigned int strindex, 2007 unsigned int strindex,
1882 const char *secstrings) 2008 unsigned long symoffs,
2009 unsigned long stroffs,
2010 const char *secstrings,
2011 const unsigned long *strmap)
1883{ 2012{
1884} 2013}
1885#endif /* CONFIG_KALLSYMS */ 2014#endif /* CONFIG_KALLSYMS */
@@ -1954,6 +2083,8 @@ static noinline struct module *load_module(void __user *umod,
1954 struct module *mod; 2083 struct module *mod;
1955 long err = 0; 2084 long err = 0;
1956 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ 2085 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
2086 unsigned long symoffs, stroffs, *strmap;
2087
1957 mm_segment_t old_fs; 2088 mm_segment_t old_fs;
1958 2089
1959 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", 2090 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
@@ -2035,11 +2166,6 @@ static noinline struct module *load_module(void __user *umod,
2035 /* Don't keep modinfo and version sections. */ 2166 /* Don't keep modinfo and version sections. */
2036 sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC; 2167 sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
2037 sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC; 2168 sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
2038#ifdef CONFIG_KALLSYMS
2039 /* Keep symbol and string tables for decoding later. */
2040 sechdrs[symindex].sh_flags |= SHF_ALLOC;
2041 sechdrs[strindex].sh_flags |= SHF_ALLOC;
2042#endif
2043 2169
2044 /* Check module struct version now, before we try to use module. */ 2170 /* Check module struct version now, before we try to use module. */
2045 if (!check_modstruct_version(sechdrs, versindex, mod)) { 2171 if (!check_modstruct_version(sechdrs, versindex, mod)) {
@@ -2075,6 +2201,13 @@ static noinline struct module *load_module(void __user *umod,
2075 goto free_hdr; 2201 goto free_hdr;
2076 } 2202 }
2077 2203
2204 strmap = kzalloc(BITS_TO_LONGS(sechdrs[strindex].sh_size)
2205 * sizeof(long), GFP_KERNEL);
2206 if (!strmap) {
2207 err = -ENOMEM;
2208 goto free_mod;
2209 }
2210
2078 if (find_module(mod->name)) { 2211 if (find_module(mod->name)) {
2079 err = -EEXIST; 2212 err = -EEXIST;
2080 goto free_mod; 2213 goto free_mod;
@@ -2104,6 +2237,8 @@ static noinline struct module *load_module(void __user *umod,
2104 this is done generically; there doesn't appear to be any 2237 this is done generically; there doesn't appear to be any
2105 special cases for the architectures. */ 2238 special cases for the architectures. */
2106 layout_sections(mod, hdr, sechdrs, secstrings); 2239 layout_sections(mod, hdr, sechdrs, secstrings);
2240 symoffs = layout_symtab(mod, sechdrs, symindex, strindex, hdr,
2241 secstrings, &stroffs, strmap);
2107 2242
2108 /* Do the allocs. */ 2243 /* Do the allocs. */
2109 ptr = module_alloc_update_bounds(mod->core_size); 2244 ptr = module_alloc_update_bounds(mod->core_size);
@@ -2308,7 +2443,10 @@ static noinline struct module *load_module(void __user *umod,
2308 percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr, 2443 percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
2309 sechdrs[pcpuindex].sh_size); 2444 sechdrs[pcpuindex].sh_size);
2310 2445
2311 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings); 2446 add_kallsyms(mod, sechdrs, hdr->e_shnum, symindex, strindex,
2447 symoffs, stroffs, secstrings, strmap);
2448 kfree(strmap);
2449 strmap = NULL;
2312 2450
2313 if (!mod->taints) { 2451 if (!mod->taints) {
2314 struct _ddebug *debug; 2452 struct _ddebug *debug;
@@ -2380,13 +2518,14 @@ static noinline struct module *load_module(void __user *umod,
2380 synchronize_sched(); 2518 synchronize_sched();
2381 module_arch_cleanup(mod); 2519 module_arch_cleanup(mod);
2382 cleanup: 2520 cleanup:
2521 free_modinfo(mod);
2383 kobject_del(&mod->mkobj.kobj); 2522 kobject_del(&mod->mkobj.kobj);
2384 kobject_put(&mod->mkobj.kobj); 2523 kobject_put(&mod->mkobj.kobj);
2385 free_unload: 2524 free_unload:
2386 module_unload_free(mod); 2525 module_unload_free(mod);
2387#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) 2526#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
2388 free_init:
2389 percpu_modfree(mod->refptr); 2527 percpu_modfree(mod->refptr);
2528 free_init:
2390#endif 2529#endif
2391 module_free(mod, mod->module_init); 2530 module_free(mod, mod->module_init);
2392 free_core: 2531 free_core:
@@ -2397,6 +2536,7 @@ static noinline struct module *load_module(void __user *umod,
2397 percpu_modfree(percpu); 2536 percpu_modfree(percpu);
2398 free_mod: 2537 free_mod:
2399 kfree(args); 2538 kfree(args);
2539 kfree(strmap);
2400 free_hdr: 2540 free_hdr:
2401 vfree(hdr); 2541 vfree(hdr);
2402 return ERR_PTR(err); 2542 return ERR_PTR(err);
@@ -2486,6 +2626,11 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
2486 /* Drop initial reference. */ 2626 /* Drop initial reference. */
2487 module_put(mod); 2627 module_put(mod);
2488 trim_init_extable(mod); 2628 trim_init_extable(mod);
2629#ifdef CONFIG_KALLSYMS
2630 mod->num_symtab = mod->core_num_syms;
2631 mod->symtab = mod->core_symtab;
2632 mod->strtab = mod->core_strtab;
2633#endif
2489 module_free(mod, mod->module_init); 2634 module_free(mod, mod->module_init);
2490 mod->module_init = NULL; 2635 mod->module_init = NULL;
2491 mod->init_size = 0; 2636 mod->init_size = 0;
@@ -2947,7 +3092,6 @@ void module_layout(struct module *mod,
2947 struct modversion_info *ver, 3092 struct modversion_info *ver,
2948 struct kernel_param *kp, 3093 struct kernel_param *kp,
2949 struct kernel_symbol *ks, 3094 struct kernel_symbol *ks,
2950 struct marker *marker,
2951 struct tracepoint *tp) 3095 struct tracepoint *tp)
2952{ 3096{
2953} 3097}
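[Editor's note] The module.c changes above stop keeping the full ELF symbol and string tables resident: layout_symtab() marks, in a bitmap over the string table, the bytes belonging to "core" symbols, and add_kallsyms() later builds compacted copies, recomputing each kept symbol's st_name as the number of marked bits before its old offset. Below is a small userspace sketch of that offset remapping; the string table contents and symbol names are made up for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    static const char strtab[] = "\0init_helper\0core_func\0other";
    static bool marked[sizeof(strtab)];          /* stands in for the strmap bitmap */

    /* Mark every byte of the string at 'off', including its terminating NUL,
     * roughly what the __test_and_set_bit() loop in layout_symtab() does. */
    static void mark_string(unsigned int off)
    {
            while (strtab[off] && !marked[off])
                    marked[off++] = true;
            marked[off] = true;
    }

    /* New st_name = number of marked bytes before the old offset, i.e. the
     * bitmap_weight(strmap, st_name) computation in add_kallsyms(). */
    static unsigned int new_offset(unsigned int old_off)
    {
            unsigned int i, w = 0;

            for (i = 0; i < old_off; i++)
                    w += marked[i];
            return w;
    }

    int main(void)
    {
            unsigned int core_func = 13;         /* old st_name of the kept symbol */

            marked[0] = true;                    /* like __set_bit(0, strmap) */
            mark_string(core_func);
            printf("core_func moves from offset %u to %u\n",
                   core_func, new_offset(core_func));
            return 0;
    }

Only the compacted copies (core_symtab/core_strtab) survive once the init sections are freed, which is what the init_module() hunk switches mod->symtab and mod->strtab over to.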
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 5aa854f9e5ae..2a5dfec8efe0 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -42,8 +42,8 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
42 * (hence either you are in the same cgroup as task, or in an 42 * (hence either you are in the same cgroup as task, or in an
43 * ancestor cgroup thereof) 43 * ancestor cgroup thereof)
44 */ 44 */
45static int ns_can_attach(struct cgroup_subsys *ss, 45static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup,
46 struct cgroup *new_cgroup, struct task_struct *task) 46 struct task_struct *task, bool threadgroup)
47{ 47{
48 if (current != task) { 48 if (current != task) {
49 if (!capable(CAP_SYS_ADMIN)) 49 if (!capable(CAP_SYS_ADMIN))
@@ -56,6 +56,18 @@ static int ns_can_attach(struct cgroup_subsys *ss,
56 if (!cgroup_is_descendant(new_cgroup, task)) 56 if (!cgroup_is_descendant(new_cgroup, task))
57 return -EPERM; 57 return -EPERM;
58 58
59 if (threadgroup) {
60 struct task_struct *c;
61 rcu_read_lock();
62 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
63 if (!cgroup_is_descendant(new_cgroup, c)) {
64 rcu_read_unlock();
65 return -EPERM;
66 }
67 }
68 rcu_read_unlock();
69 }
70
59 return 0; 71 return 0;
60} 72}
61 73
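[Editor's note] ns_can_attach() above gains a threadgroup parameter: when a whole thread group is attached, every thread must pass the same descendancy test as the task named in the call, not just that task. A toy model of the all-or-nothing check; the types and the boolean predicate are invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_thread {
            const char *comm;
            bool in_descendant_cgroup;   /* stand-in for cgroup_is_descendant() */
    };

    /* Reject the attach if any thread of the group fails the check. */
    static int can_attach_threadgroup(const struct toy_thread *threads, int nr)
    {
            int i;

            for (i = 0; i < nr; i++)
                    if (!threads[i].in_descendant_cgroup)
                            return -1;   /* models returning -EPERM */
            return 0;
    }

    int main(void)
    {
            struct toy_thread group[] = {
                    { "leader", true },
                    { "worker", false },  /* one offending thread vetoes the move */
            };

            printf("attach whole group: %d\n", can_attach_threadgroup(group, 2));
            return 0;
    }

In the kernel the iteration walks task->thread_group under rcu_read_lock(), as shown in the hunk.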
diff --git a/kernel/panic.c b/kernel/panic.c
index bc4dcb6a389b..96b45d0b4ba5 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -178,7 +178,7 @@ static const struct tnt tnts[] = {
178 * 'W' - Taint on warning. 178 * 'W' - Taint on warning.
179 * 'C' - modules from drivers/staging are loaded. 179 * 'C' - modules from drivers/staging are loaded.
180 * 180 *
181 * The string is overwritten by the next call to print_taint(). 181 * The string is overwritten by the next call to print_tainted().
182 */ 182 */
183const char *print_tainted(void) 183const char *print_tainted(void)
184{ 184{
diff --git a/kernel/params.c b/kernel/params.c
index 7f6912ced2ba..9da58eabdcb2 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -23,6 +23,7 @@
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/ctype.h>
26 27
27#if 0 28#if 0
28#define DEBUGP printk 29#define DEBUGP printk
@@ -87,7 +88,7 @@ static char *next_arg(char *args, char **param, char **val)
87 } 88 }
88 89
89 for (i = 0; args[i]; i++) { 90 for (i = 0; args[i]; i++) {
90 if (args[i] == ' ' && !in_quote) 91 if (isspace(args[i]) && !in_quote)
91 break; 92 break;
92 if (equals == 0) { 93 if (equals == 0) {
93 if (args[i] == '=') 94 if (args[i] == '=')
@@ -121,7 +122,7 @@ static char *next_arg(char *args, char **param, char **val)
121 next = args + i; 122 next = args + i;
122 123
123 /* Chew up trailing spaces. */ 124 /* Chew up trailing spaces. */
124 while (*next == ' ') 125 while (isspace(*next))
125 next++; 126 next++;
126 return next; 127 return next;
127} 128}
@@ -138,7 +139,7 @@ int parse_args(const char *name,
138 DEBUGP("Parsing ARGS: %s\n", args); 139 DEBUGP("Parsing ARGS: %s\n", args);
139 140
140 /* Chew leading spaces */ 141 /* Chew leading spaces */
141 while (*args == ' ') 142 while (isspace(*args))
142 args++; 143 args++;
143 144
144 while (*args) { 145 while (*args) {
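[Editor's note] The params.c hunks above switch next_arg()/parse_args() from matching only ' ' to using isspace(), so tabs and newlines in the parameter string also delimit arguments. A minimal userspace sketch of that splitting behaviour; the quote and '=' handling of the real parser is deliberately left out.

    #include <ctype.h>
    #include <stdio.h>

    /* Split on any whitespace, mirroring the isspace() checks now used when
     * chewing leading/trailing space and finding the end of an argument. */
    static void split_args(char *args)
    {
            while (*args) {
                    char *start;

                    while (isspace((unsigned char)*args))
                            args++;
                    if (!*args)
                            break;
                    start = args;
                    while (*args && !isspace((unsigned char)*args))
                            args++;
                    if (*args)
                            *args++ = '\0';
                    printf("param: \"%s\"\n", start);
            }
    }

    int main(void)
    {
            char cmdline[] = "console=ttyS0\tquiet\nroot=/dev/sda1";

            split_args(cmdline);
            return 0;
    }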
diff --git a/kernel/perf_counter.c b/kernel/perf_event.c
index cc768ab81ac8..9d0b5c665883 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_event.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * Performance counter core code 2 * Performance events core code:
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 * 8 *
9 * For licensing details see kernel-base/COPYING 9 * For licensing details see kernel-base/COPYING
10 */ 10 */
11 11
12#include <linux/fs.h> 12#include <linux/fs.h>
@@ -20,72 +20,73 @@
20#include <linux/percpu.h> 20#include <linux/percpu.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <linux/vmstat.h> 22#include <linux/vmstat.h>
23#include <linux/vmalloc.h>
23#include <linux/hardirq.h> 24#include <linux/hardirq.h>
24#include <linux/rculist.h> 25#include <linux/rculist.h>
25#include <linux/uaccess.h> 26#include <linux/uaccess.h>
26#include <linux/syscalls.h> 27#include <linux/syscalls.h>
27#include <linux/anon_inodes.h> 28#include <linux/anon_inodes.h>
28#include <linux/kernel_stat.h> 29#include <linux/kernel_stat.h>
29#include <linux/perf_counter.h> 30#include <linux/perf_event.h>
30 31
31#include <asm/irq_regs.h> 32#include <asm/irq_regs.h>
32 33
33/* 34/*
34 * Each CPU has a list of per CPU counters: 35 * Each CPU has a list of per CPU events:
35 */ 36 */
36DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); 37DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37 38
38int perf_max_counters __read_mostly = 1; 39int perf_max_events __read_mostly = 1;
39static int perf_reserved_percpu __read_mostly; 40static int perf_reserved_percpu __read_mostly;
40static int perf_overcommit __read_mostly = 1; 41static int perf_overcommit __read_mostly = 1;
41 42
42static atomic_t nr_counters __read_mostly; 43static atomic_t nr_events __read_mostly;
43static atomic_t nr_mmap_counters __read_mostly; 44static atomic_t nr_mmap_events __read_mostly;
44static atomic_t nr_comm_counters __read_mostly; 45static atomic_t nr_comm_events __read_mostly;
45static atomic_t nr_task_counters __read_mostly; 46static atomic_t nr_task_events __read_mostly;
46 47
47/* 48/*
48 * perf counter paranoia level: 49 * perf event paranoia level:
49 * -1 - not paranoid at all 50 * -1 - not paranoid at all
50 * 0 - disallow raw tracepoint access for unpriv 51 * 0 - disallow raw tracepoint access for unpriv
51 * 1 - disallow cpu counters for unpriv 52 * 1 - disallow cpu events for unpriv
52 * 2 - disallow kernel profiling for unpriv 53 * 2 - disallow kernel profiling for unpriv
53 */ 54 */
54int sysctl_perf_counter_paranoid __read_mostly = 1; 55int sysctl_perf_event_paranoid __read_mostly = 1;
55 56
56static inline bool perf_paranoid_tracepoint_raw(void) 57static inline bool perf_paranoid_tracepoint_raw(void)
57{ 58{
58 return sysctl_perf_counter_paranoid > -1; 59 return sysctl_perf_event_paranoid > -1;
59} 60}
60 61
61static inline bool perf_paranoid_cpu(void) 62static inline bool perf_paranoid_cpu(void)
62{ 63{
63 return sysctl_perf_counter_paranoid > 0; 64 return sysctl_perf_event_paranoid > 0;
64} 65}
65 66
66static inline bool perf_paranoid_kernel(void) 67static inline bool perf_paranoid_kernel(void)
67{ 68{
68 return sysctl_perf_counter_paranoid > 1; 69 return sysctl_perf_event_paranoid > 1;
69} 70}
70 71
71int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ 72int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
72 73
73/* 74/*
74 * max perf counter sample rate 75 * max perf event sample rate
75 */ 76 */
76int sysctl_perf_counter_sample_rate __read_mostly = 100000; 77int sysctl_perf_event_sample_rate __read_mostly = 100000;
77 78
78static atomic64_t perf_counter_id; 79static atomic64_t perf_event_id;
79 80
80/* 81/*
81 * Lock for (sysadmin-configurable) counter reservations: 82 * Lock for (sysadmin-configurable) event reservations:
82 */ 83 */
83static DEFINE_SPINLOCK(perf_resource_lock); 84static DEFINE_SPINLOCK(perf_resource_lock);
84 85
85/* 86/*
86 * Architecture provided APIs - weak aliases: 87 * Architecture provided APIs - weak aliases:
87 */ 88 */
88extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter) 89extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
89{ 90{
90 return NULL; 91 return NULL;
91} 92}
@@ -93,18 +94,18 @@ extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counte
93void __weak hw_perf_disable(void) { barrier(); } 94void __weak hw_perf_disable(void) { barrier(); }
94void __weak hw_perf_enable(void) { barrier(); } 95void __weak hw_perf_enable(void) { barrier(); }
95 96
96void __weak hw_perf_counter_setup(int cpu) { barrier(); } 97void __weak hw_perf_event_setup(int cpu) { barrier(); }
97void __weak hw_perf_counter_setup_online(int cpu) { barrier(); } 98void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
98 99
99int __weak 100int __weak
100hw_perf_group_sched_in(struct perf_counter *group_leader, 101hw_perf_group_sched_in(struct perf_event *group_leader,
101 struct perf_cpu_context *cpuctx, 102 struct perf_cpu_context *cpuctx,
102 struct perf_counter_context *ctx, int cpu) 103 struct perf_event_context *ctx, int cpu)
103{ 104{
104 return 0; 105 return 0;
105} 106}
106 107
107void __weak perf_counter_print_debug(void) { } 108void __weak perf_event_print_debug(void) { }
108 109
109static DEFINE_PER_CPU(int, perf_disable_count); 110static DEFINE_PER_CPU(int, perf_disable_count);
110 111
@@ -130,20 +131,20 @@ void perf_enable(void)
130 hw_perf_enable(); 131 hw_perf_enable();
131} 132}
132 133
133static void get_ctx(struct perf_counter_context *ctx) 134static void get_ctx(struct perf_event_context *ctx)
134{ 135{
135 WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); 136 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
136} 137}
137 138
138static void free_ctx(struct rcu_head *head) 139static void free_ctx(struct rcu_head *head)
139{ 140{
140 struct perf_counter_context *ctx; 141 struct perf_event_context *ctx;
141 142
142 ctx = container_of(head, struct perf_counter_context, rcu_head); 143 ctx = container_of(head, struct perf_event_context, rcu_head);
143 kfree(ctx); 144 kfree(ctx);
144} 145}
145 146
146static void put_ctx(struct perf_counter_context *ctx) 147static void put_ctx(struct perf_event_context *ctx)
147{ 148{
148 if (atomic_dec_and_test(&ctx->refcount)) { 149 if (atomic_dec_and_test(&ctx->refcount)) {
149 if (ctx->parent_ctx) 150 if (ctx->parent_ctx)
@@ -154,7 +155,7 @@ static void put_ctx(struct perf_counter_context *ctx)
154 } 155 }
155} 156}
156 157
157static void unclone_ctx(struct perf_counter_context *ctx) 158static void unclone_ctx(struct perf_event_context *ctx)
158{ 159{
159 if (ctx->parent_ctx) { 160 if (ctx->parent_ctx) {
160 put_ctx(ctx->parent_ctx); 161 put_ctx(ctx->parent_ctx);
@@ -163,37 +164,37 @@ static void unclone_ctx(struct perf_counter_context *ctx)
163} 164}
164 165
165/* 166/*
166 * If we inherit counters we want to return the parent counter id 167 * If we inherit events we want to return the parent event id
167 * to userspace. 168 * to userspace.
168 */ 169 */
169static u64 primary_counter_id(struct perf_counter *counter) 170static u64 primary_event_id(struct perf_event *event)
170{ 171{
171 u64 id = counter->id; 172 u64 id = event->id;
172 173
173 if (counter->parent) 174 if (event->parent)
174 id = counter->parent->id; 175 id = event->parent->id;
175 176
176 return id; 177 return id;
177} 178}
178 179
179/* 180/*
180 * Get the perf_counter_context for a task and lock it. 181 * Get the perf_event_context for a task and lock it.
181 * This has to cope with the fact that until it is locked, 182 * This has to cope with the fact that until it is locked,
182 * the context could get moved to another task. 183 * the context could get moved to another task.
183 */ 184 */
184static struct perf_counter_context * 185static struct perf_event_context *
185perf_lock_task_context(struct task_struct *task, unsigned long *flags) 186perf_lock_task_context(struct task_struct *task, unsigned long *flags)
186{ 187{
187 struct perf_counter_context *ctx; 188 struct perf_event_context *ctx;
188 189
189 rcu_read_lock(); 190 rcu_read_lock();
190 retry: 191 retry:
191 ctx = rcu_dereference(task->perf_counter_ctxp); 192 ctx = rcu_dereference(task->perf_event_ctxp);
192 if (ctx) { 193 if (ctx) {
193 /* 194 /*
194 * If this context is a clone of another, it might 195 * If this context is a clone of another, it might
195 * get swapped for another underneath us by 196 * get swapped for another underneath us by
196 * perf_counter_task_sched_out, though the 197 * perf_event_task_sched_out, though the
197 * rcu_read_lock() protects us from any context 198 * rcu_read_lock() protects us from any context
198 * getting freed. Lock the context and check if it 199 * getting freed. Lock the context and check if it
199 * got swapped before we could get the lock, and retry 200 * got swapped before we could get the lock, and retry
@@ -201,7 +202,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
201 * can't get swapped on us any more. 202 * can't get swapped on us any more.
202 */ 203 */
203 spin_lock_irqsave(&ctx->lock, *flags); 204 spin_lock_irqsave(&ctx->lock, *flags);
204 if (ctx != rcu_dereference(task->perf_counter_ctxp)) { 205 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
205 spin_unlock_irqrestore(&ctx->lock, *flags); 206 spin_unlock_irqrestore(&ctx->lock, *flags);
206 goto retry; 207 goto retry;
207 } 208 }
@@ -220,9 +221,9 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
220 * can't get swapped to another task. This also increments its 221 * can't get swapped to another task. This also increments its
221 * reference count so that the context can't get freed. 222 * reference count so that the context can't get freed.
222 */ 223 */
223static struct perf_counter_context *perf_pin_task_context(struct task_struct *task) 224static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
224{ 225{
225 struct perf_counter_context *ctx; 226 struct perf_event_context *ctx;
226 unsigned long flags; 227 unsigned long flags;
227 228
228 ctx = perf_lock_task_context(task, &flags); 229 ctx = perf_lock_task_context(task, &flags);
@@ -233,7 +234,7 @@ static struct perf_counter_context *perf_pin_task_context(struct task_struct *ta
233 return ctx; 234 return ctx;
234} 235}
235 236
236static void perf_unpin_context(struct perf_counter_context *ctx) 237static void perf_unpin_context(struct perf_event_context *ctx)
237{ 238{
238 unsigned long flags; 239 unsigned long flags;
239 240
@@ -244,123 +245,122 @@ static void perf_unpin_context(struct perf_counter_context *ctx)
244} 245}
245 246
246/* 247/*
247 * Add a counter from the lists for its context. 248 * Add an event to the lists for its context.
248 * Must be called with ctx->mutex and ctx->lock held. 249 * Must be called with ctx->mutex and ctx->lock held.
249 */ 250 */
250static void 251static void
251list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) 252list_add_event(struct perf_event *event, struct perf_event_context *ctx)
252{ 253{
253 struct perf_counter *group_leader = counter->group_leader; 254 struct perf_event *group_leader = event->group_leader;
254 255
255 /* 256 /*
256 * Depending on whether it is a standalone or sibling counter, 257 * Depending on whether it is a standalone or sibling event,
257 * add it straight to the context's counter list, or to the group 258 * add it straight to the context's event list, or to the group
258 * leader's sibling list: 259 * leader's sibling list:
259 */ 260 */
260 if (group_leader == counter) 261 if (group_leader == event)
261 list_add_tail(&counter->list_entry, &ctx->counter_list); 262 list_add_tail(&event->group_entry, &ctx->group_list);
262 else { 263 else {
263 list_add_tail(&counter->list_entry, &group_leader->sibling_list); 264 list_add_tail(&event->group_entry, &group_leader->sibling_list);
264 group_leader->nr_siblings++; 265 group_leader->nr_siblings++;
265 } 266 }
266 267
267 list_add_rcu(&counter->event_entry, &ctx->event_list); 268 list_add_rcu(&event->event_entry, &ctx->event_list);
268 ctx->nr_counters++; 269 ctx->nr_events++;
269 if (counter->attr.inherit_stat) 270 if (event->attr.inherit_stat)
270 ctx->nr_stat++; 271 ctx->nr_stat++;
271} 272}
272 273
273/* 274/*
274 * Remove a counter from the lists for its context. 275 * Remove an event from the lists for its context.
275 * Must be called with ctx->mutex and ctx->lock held. 276 * Must be called with ctx->mutex and ctx->lock held.
276 */ 277 */
277static void 278static void
278list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) 279list_del_event(struct perf_event *event, struct perf_event_context *ctx)
279{ 280{
280 struct perf_counter *sibling, *tmp; 281 struct perf_event *sibling, *tmp;
281 282
282 if (list_empty(&counter->list_entry)) 283 if (list_empty(&event->group_entry))
283 return; 284 return;
284 ctx->nr_counters--; 285 ctx->nr_events--;
285 if (counter->attr.inherit_stat) 286 if (event->attr.inherit_stat)
286 ctx->nr_stat--; 287 ctx->nr_stat--;
287 288
288 list_del_init(&counter->list_entry); 289 list_del_init(&event->group_entry);
289 list_del_rcu(&counter->event_entry); 290 list_del_rcu(&event->event_entry);
290 291
291 if (counter->group_leader != counter) 292 if (event->group_leader != event)
292 counter->group_leader->nr_siblings--; 293 event->group_leader->nr_siblings--;
293 294
294 /* 295 /*
295 * If this was a group counter with sibling counters then 296 * If this was a group event with sibling events then
296 * upgrade the siblings to singleton counters by adding them 297 * upgrade the siblings to singleton events by adding them
297 * to the context list directly: 298 * to the context list directly:
298 */ 299 */
299 list_for_each_entry_safe(sibling, tmp, 300 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
300 &counter->sibling_list, list_entry) {
301 301
302 list_move_tail(&sibling->list_entry, &ctx->counter_list); 302 list_move_tail(&sibling->group_entry, &ctx->group_list);
303 sibling->group_leader = sibling; 303 sibling->group_leader = sibling;
304 } 304 }
305} 305}
306 306
307static void 307static void
308counter_sched_out(struct perf_counter *counter, 308event_sched_out(struct perf_event *event,
309 struct perf_cpu_context *cpuctx, 309 struct perf_cpu_context *cpuctx,
310 struct perf_counter_context *ctx) 310 struct perf_event_context *ctx)
311{ 311{
312 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 312 if (event->state != PERF_EVENT_STATE_ACTIVE)
313 return; 313 return;
314 314
315 counter->state = PERF_COUNTER_STATE_INACTIVE; 315 event->state = PERF_EVENT_STATE_INACTIVE;
316 if (counter->pending_disable) { 316 if (event->pending_disable) {
317 counter->pending_disable = 0; 317 event->pending_disable = 0;
318 counter->state = PERF_COUNTER_STATE_OFF; 318 event->state = PERF_EVENT_STATE_OFF;
319 } 319 }
320 counter->tstamp_stopped = ctx->time; 320 event->tstamp_stopped = ctx->time;
321 counter->pmu->disable(counter); 321 event->pmu->disable(event);
322 counter->oncpu = -1; 322 event->oncpu = -1;
323 323
324 if (!is_software_counter(counter)) 324 if (!is_software_event(event))
325 cpuctx->active_oncpu--; 325 cpuctx->active_oncpu--;
326 ctx->nr_active--; 326 ctx->nr_active--;
327 if (counter->attr.exclusive || !cpuctx->active_oncpu) 327 if (event->attr.exclusive || !cpuctx->active_oncpu)
328 cpuctx->exclusive = 0; 328 cpuctx->exclusive = 0;
329} 329}
330 330
331static void 331static void
332group_sched_out(struct perf_counter *group_counter, 332group_sched_out(struct perf_event *group_event,
333 struct perf_cpu_context *cpuctx, 333 struct perf_cpu_context *cpuctx,
334 struct perf_counter_context *ctx) 334 struct perf_event_context *ctx)
335{ 335{
336 struct perf_counter *counter; 336 struct perf_event *event;
337 337
338 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) 338 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
339 return; 339 return;
340 340
341 counter_sched_out(group_counter, cpuctx, ctx); 341 event_sched_out(group_event, cpuctx, ctx);
342 342
343 /* 343 /*
344 * Schedule out siblings (if any): 344 * Schedule out siblings (if any):
345 */ 345 */
346 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) 346 list_for_each_entry(event, &group_event->sibling_list, group_entry)
347 counter_sched_out(counter, cpuctx, ctx); 347 event_sched_out(event, cpuctx, ctx);
348 348
349 if (group_counter->attr.exclusive) 349 if (group_event->attr.exclusive)
350 cpuctx->exclusive = 0; 350 cpuctx->exclusive = 0;
351} 351}
352 352
353/* 353/*
354 * Cross CPU call to remove a performance counter 354 * Cross CPU call to remove a performance event
355 * 355 *
356 * We disable the counter on the hardware level first. After that we 356 * We disable the event on the hardware level first. After that we
357 * remove it from the context list. 357 * remove it from the context list.
358 */ 358 */
359static void __perf_counter_remove_from_context(void *info) 359static void __perf_event_remove_from_context(void *info)
360{ 360{
361 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 361 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
362 struct perf_counter *counter = info; 362 struct perf_event *event = info;
363 struct perf_counter_context *ctx = counter->ctx; 363 struct perf_event_context *ctx = event->ctx;
364 364
365 /* 365 /*
366 * If this is a task context, we need to check whether it is 366 * If this is a task context, we need to check whether it is
@@ -373,22 +373,22 @@ static void __perf_counter_remove_from_context(void *info)
373 spin_lock(&ctx->lock); 373 spin_lock(&ctx->lock);
374 /* 374 /*
375 * Protect the list operation against NMI by disabling the 375 * Protect the list operation against NMI by disabling the
376 * counters on a global level. 376 * events on a global level.
377 */ 377 */
378 perf_disable(); 378 perf_disable();
379 379
380 counter_sched_out(counter, cpuctx, ctx); 380 event_sched_out(event, cpuctx, ctx);
381 381
382 list_del_counter(counter, ctx); 382 list_del_event(event, ctx);
383 383
384 if (!ctx->task) { 384 if (!ctx->task) {
385 /* 385 /*
386 * Allow more per task counters with respect to the 386 * Allow more per task events with respect to the
387 * reservation: 387 * reservation:
388 */ 388 */
389 cpuctx->max_pertask = 389 cpuctx->max_pertask =
390 min(perf_max_counters - ctx->nr_counters, 390 min(perf_max_events - ctx->nr_events,
391 perf_max_counters - perf_reserved_percpu); 391 perf_max_events - perf_reserved_percpu);
392 } 392 }
393 393
394 perf_enable(); 394 perf_enable();
@@ -397,56 +397,56 @@ static void __perf_counter_remove_from_context(void *info)
397 397
398 398
399/* 399/*
400 * Remove the counter from a task's (or a CPU's) list of counters. 400 * Remove the event from a task's (or a CPU's) list of events.
401 * 401 *
402 * Must be called with ctx->mutex held. 402 * Must be called with ctx->mutex held.
403 * 403 *
404 * CPU counters are removed with a smp call. For task counters we only 404 * CPU events are removed with a smp call. For task events we only
405 * call when the task is on a CPU. 405 * call when the task is on a CPU.
406 * 406 *
407 * If counter->ctx is a cloned context, callers must make sure that 407 * If event->ctx is a cloned context, callers must make sure that
408 * every task struct that counter->ctx->task could possibly point to 408 * every task struct that event->ctx->task could possibly point to
409 * remains valid. This is OK when called from perf_release since 409 * remains valid. This is OK when called from perf_release since
410 * that only calls us on the top-level context, which can't be a clone. 410 * that only calls us on the top-level context, which can't be a clone.
411 * When called from perf_counter_exit_task, it's OK because the 411 * When called from perf_event_exit_task, it's OK because the
412 * context has been detached from its task. 412 * context has been detached from its task.
413 */ 413 */
414static void perf_counter_remove_from_context(struct perf_counter *counter) 414static void perf_event_remove_from_context(struct perf_event *event)
415{ 415{
416 struct perf_counter_context *ctx = counter->ctx; 416 struct perf_event_context *ctx = event->ctx;
417 struct task_struct *task = ctx->task; 417 struct task_struct *task = ctx->task;
418 418
419 if (!task) { 419 if (!task) {
420 /* 420 /*
421 * Per cpu counters are removed via an smp call and 421 * Per cpu events are removed via an smp call and
422 * the removal is always successful. 422 * the removal is always successful.
423 */ 423 */
424 smp_call_function_single(counter->cpu, 424 smp_call_function_single(event->cpu,
425 __perf_counter_remove_from_context, 425 __perf_event_remove_from_context,
426 counter, 1); 426 event, 1);
427 return; 427 return;
428 } 428 }
429 429
430retry: 430retry:
431 task_oncpu_function_call(task, __perf_counter_remove_from_context, 431 task_oncpu_function_call(task, __perf_event_remove_from_context,
432 counter); 432 event);
433 433
434 spin_lock_irq(&ctx->lock); 434 spin_lock_irq(&ctx->lock);
435 /* 435 /*
436 * If the context is active we need to retry the smp call. 436 * If the context is active we need to retry the smp call.
437 */ 437 */
438 if (ctx->nr_active && !list_empty(&counter->list_entry)) { 438 if (ctx->nr_active && !list_empty(&event->group_entry)) {
439 spin_unlock_irq(&ctx->lock); 439 spin_unlock_irq(&ctx->lock);
440 goto retry; 440 goto retry;
441 } 441 }
442 442
443 /* 443 /*
444 * The lock prevents this context from being scheduled in, so we 444 * The lock prevents this context from being scheduled in, so we
445 * can remove the counter safely, if the call above did not 445 * can remove the event safely, if the call above did not
446 * succeed. 446 * succeed.
447 */ 447 */
448 if (!list_empty(&counter->list_entry)) { 448 if (!list_empty(&event->group_entry)) {
449 list_del_counter(counter, ctx); 449 list_del_event(event, ctx);
450 } 450 }
451 spin_unlock_irq(&ctx->lock); 451 spin_unlock_irq(&ctx->lock);
452} 452}
@@ -459,7 +459,7 @@ static inline u64 perf_clock(void)
459/* 459/*
460 * Update the record of the current time in a context. 460 * Update the record of the current time in a context.
461 */ 461 */
462static void update_context_time(struct perf_counter_context *ctx) 462static void update_context_time(struct perf_event_context *ctx)
463{ 463{
464 u64 now = perf_clock(); 464 u64 now = perf_clock();
465 465
@@ -468,51 +468,51 @@ static void update_context_time(struct perf_counter_context *ctx)
468} 468}
469 469
470/* 470/*
471 * Update the total_time_enabled and total_time_running fields for a counter. 471 * Update the total_time_enabled and total_time_running fields for an event.
472 */ 472 */
473static void update_counter_times(struct perf_counter *counter) 473static void update_event_times(struct perf_event *event)
474{ 474{
475 struct perf_counter_context *ctx = counter->ctx; 475 struct perf_event_context *ctx = event->ctx;
476 u64 run_end; 476 u64 run_end;
477 477
478 if (counter->state < PERF_COUNTER_STATE_INACTIVE || 478 if (event->state < PERF_EVENT_STATE_INACTIVE ||
479 counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE) 479 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
480 return; 480 return;
481 481
482 counter->total_time_enabled = ctx->time - counter->tstamp_enabled; 482 event->total_time_enabled = ctx->time - event->tstamp_enabled;
483 483
484 if (counter->state == PERF_COUNTER_STATE_INACTIVE) 484 if (event->state == PERF_EVENT_STATE_INACTIVE)
485 run_end = counter->tstamp_stopped; 485 run_end = event->tstamp_stopped;
486 else 486 else
487 run_end = ctx->time; 487 run_end = ctx->time;
488 488
489 counter->total_time_running = run_end - counter->tstamp_running; 489 event->total_time_running = run_end - event->tstamp_running;
490} 490}
491 491
492/* 492/*
493 * Update total_time_enabled and total_time_running for all counters in a group. 493 * Update total_time_enabled and total_time_running for all events in a group.
494 */ 494 */
495static void update_group_times(struct perf_counter *leader) 495static void update_group_times(struct perf_event *leader)
496{ 496{
497 struct perf_counter *counter; 497 struct perf_event *event;
498 498
499 update_counter_times(leader); 499 update_event_times(leader);
500 list_for_each_entry(counter, &leader->sibling_list, list_entry) 500 list_for_each_entry(event, &leader->sibling_list, group_entry)
501 update_counter_times(counter); 501 update_event_times(event);
502} 502}
503 503
504/* 504/*
505 * Cross CPU call to disable a performance counter 505 * Cross CPU call to disable a performance event
506 */ 506 */
507static void __perf_counter_disable(void *info) 507static void __perf_event_disable(void *info)
508{ 508{
509 struct perf_counter *counter = info; 509 struct perf_event *event = info;
510 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 510 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
511 struct perf_counter_context *ctx = counter->ctx; 511 struct perf_event_context *ctx = event->ctx;
512 512
513 /* 513 /*
514 * If this is a per-task counter, need to check whether this 514 * If this is a per-task event, need to check whether this
515 * counter's task is the current task on this cpu. 515 * event's task is the current task on this cpu.
516 */ 516 */
517 if (ctx->task && cpuctx->task_ctx != ctx) 517 if (ctx->task && cpuctx->task_ctx != ctx)
518 return; 518 return;
@@ -520,57 +520,57 @@ static void __perf_counter_disable(void *info)
520 spin_lock(&ctx->lock); 520 spin_lock(&ctx->lock);
521 521
522 /* 522 /*
523 * If the counter is on, turn it off. 523 * If the event is on, turn it off.
524 * If it is in error state, leave it in error state. 524 * If it is in error state, leave it in error state.
525 */ 525 */
526 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { 526 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
527 update_context_time(ctx); 527 update_context_time(ctx);
528 update_group_times(counter); 528 update_group_times(event);
529 if (counter == counter->group_leader) 529 if (event == event->group_leader)
530 group_sched_out(counter, cpuctx, ctx); 530 group_sched_out(event, cpuctx, ctx);
531 else 531 else
532 counter_sched_out(counter, cpuctx, ctx); 532 event_sched_out(event, cpuctx, ctx);
533 counter->state = PERF_COUNTER_STATE_OFF; 533 event->state = PERF_EVENT_STATE_OFF;
534 } 534 }
535 535
536 spin_unlock(&ctx->lock); 536 spin_unlock(&ctx->lock);
537} 537}
538 538
539/* 539/*
540 * Disable a counter. 540 * Disable an event.
541 * 541 *
542 * If counter->ctx is a cloned context, callers must make sure that 542 * If event->ctx is a cloned context, callers must make sure that
543 * every task struct that counter->ctx->task could possibly point to 543 * every task struct that event->ctx->task could possibly point to
544 * remains valid. This condition is satisfied when called through 544 * remains valid. This condition is satisfied when called through
545 * perf_counter_for_each_child or perf_counter_for_each because they 545 * perf_event_for_each_child or perf_event_for_each because they
546 * hold the top-level counter's child_mutex, so any descendant that 546 * hold the top-level event's child_mutex, so any descendant that
547 * goes to exit will block in sync_child_counter. 547 * goes to exit will block in sync_child_event.
548 * When called from perf_pending_counter it's OK because counter->ctx 548 * When called from perf_pending_event it's OK because event->ctx
549 * is the current context on this CPU and preemption is disabled, 549 * is the current context on this CPU and preemption is disabled,
550 * hence we can't get into perf_counter_task_sched_out for this context. 550 * hence we can't get into perf_event_task_sched_out for this context.
551 */ 551 */
552static void perf_counter_disable(struct perf_counter *counter) 552static void perf_event_disable(struct perf_event *event)
553{ 553{
554 struct perf_counter_context *ctx = counter->ctx; 554 struct perf_event_context *ctx = event->ctx;
555 struct task_struct *task = ctx->task; 555 struct task_struct *task = ctx->task;
556 556
557 if (!task) { 557 if (!task) {
558 /* 558 /*
559 * Disable the counter on the cpu that it's on 559 * Disable the event on the cpu that it's on
560 */ 560 */
561 smp_call_function_single(counter->cpu, __perf_counter_disable, 561 smp_call_function_single(event->cpu, __perf_event_disable,
562 counter, 1); 562 event, 1);
563 return; 563 return;
564 } 564 }
565 565
566 retry: 566 retry:
567 task_oncpu_function_call(task, __perf_counter_disable, counter); 567 task_oncpu_function_call(task, __perf_event_disable, event);
568 568
569 spin_lock_irq(&ctx->lock); 569 spin_lock_irq(&ctx->lock);
570 /* 570 /*
571 * If the counter is still active, we need to retry the cross-call. 571 * If the event is still active, we need to retry the cross-call.
572 */ 572 */
573 if (counter->state == PERF_COUNTER_STATE_ACTIVE) { 573 if (event->state == PERF_EVENT_STATE_ACTIVE) {
574 spin_unlock_irq(&ctx->lock); 574 spin_unlock_irq(&ctx->lock);
575 goto retry; 575 goto retry;
576 } 576 }
@@ -579,73 +579,73 @@ static void perf_counter_disable(struct perf_counter *counter)
579 * Since we have the lock this context can't be scheduled 579 * Since we have the lock this context can't be scheduled
580 * in, so we can change the state safely. 580 * in, so we can change the state safely.
581 */ 581 */
582 if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 582 if (event->state == PERF_EVENT_STATE_INACTIVE) {
583 update_group_times(counter); 583 update_group_times(event);
584 counter->state = PERF_COUNTER_STATE_OFF; 584 event->state = PERF_EVENT_STATE_OFF;
585 } 585 }
586 586
587 spin_unlock_irq(&ctx->lock); 587 spin_unlock_irq(&ctx->lock);
588} 588}
589 589
590static int 590static int
591counter_sched_in(struct perf_counter *counter, 591event_sched_in(struct perf_event *event,
592 struct perf_cpu_context *cpuctx, 592 struct perf_cpu_context *cpuctx,
593 struct perf_counter_context *ctx, 593 struct perf_event_context *ctx,
594 int cpu) 594 int cpu)
595{ 595{
596 if (counter->state <= PERF_COUNTER_STATE_OFF) 596 if (event->state <= PERF_EVENT_STATE_OFF)
597 return 0; 597 return 0;
598 598
599 counter->state = PERF_COUNTER_STATE_ACTIVE; 599 event->state = PERF_EVENT_STATE_ACTIVE;
600 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ 600 event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
601 /* 601 /*
602 * The new state must be visible before we turn it on in the hardware: 602 * The new state must be visible before we turn it on in the hardware:
603 */ 603 */
604 smp_wmb(); 604 smp_wmb();
605 605
606 if (counter->pmu->enable(counter)) { 606 if (event->pmu->enable(event)) {
607 counter->state = PERF_COUNTER_STATE_INACTIVE; 607 event->state = PERF_EVENT_STATE_INACTIVE;
608 counter->oncpu = -1; 608 event->oncpu = -1;
609 return -EAGAIN; 609 return -EAGAIN;
610 } 610 }
611 611
612 counter->tstamp_running += ctx->time - counter->tstamp_stopped; 612 event->tstamp_running += ctx->time - event->tstamp_stopped;
613 613
614 if (!is_software_counter(counter)) 614 if (!is_software_event(event))
615 cpuctx->active_oncpu++; 615 cpuctx->active_oncpu++;
616 ctx->nr_active++; 616 ctx->nr_active++;
617 617
618 if (counter->attr.exclusive) 618 if (event->attr.exclusive)
619 cpuctx->exclusive = 1; 619 cpuctx->exclusive = 1;
620 620
621 return 0; 621 return 0;
622} 622}
623 623
624static int 624static int
625group_sched_in(struct perf_counter *group_counter, 625group_sched_in(struct perf_event *group_event,
626 struct perf_cpu_context *cpuctx, 626 struct perf_cpu_context *cpuctx,
627 struct perf_counter_context *ctx, 627 struct perf_event_context *ctx,
628 int cpu) 628 int cpu)
629{ 629{
630 struct perf_counter *counter, *partial_group; 630 struct perf_event *event, *partial_group;
631 int ret; 631 int ret;
632 632
633 if (group_counter->state == PERF_COUNTER_STATE_OFF) 633 if (group_event->state == PERF_EVENT_STATE_OFF)
634 return 0; 634 return 0;
635 635
636 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu); 636 ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
637 if (ret) 637 if (ret)
638 return ret < 0 ? ret : 0; 638 return ret < 0 ? ret : 0;
639 639
640 if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) 640 if (event_sched_in(group_event, cpuctx, ctx, cpu))
641 return -EAGAIN; 641 return -EAGAIN;
642 642
643 /* 643 /*
644 * Schedule in siblings as one group (if any): 644 * Schedule in siblings as one group (if any):
645 */ 645 */
646 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { 646 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
647 if (counter_sched_in(counter, cpuctx, ctx, cpu)) { 647 if (event_sched_in(event, cpuctx, ctx, cpu)) {
648 partial_group = counter; 648 partial_group = event;
649 goto group_error; 649 goto group_error;
650 } 650 }
651 } 651 }
@@ -657,57 +657,57 @@ group_error:
657 * Groups can be scheduled in as one unit only, so undo any 657 * Groups can be scheduled in as one unit only, so undo any
658 * partial group before returning: 658 * partial group before returning:
659 */ 659 */
660 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { 660 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
661 if (counter == partial_group) 661 if (event == partial_group)
662 break; 662 break;
663 counter_sched_out(counter, cpuctx, ctx); 663 event_sched_out(event, cpuctx, ctx);
664 } 664 }
665 counter_sched_out(group_counter, cpuctx, ctx); 665 event_sched_out(group_event, cpuctx, ctx);
666 666
667 return -EAGAIN; 667 return -EAGAIN;
668} 668}
669 669
670/* 670/*
671 * Return 1 for a group consisting entirely of software counters, 671 * Return 1 for a group consisting entirely of software events,
672 * 0 if the group contains any hardware counters. 672 * 0 if the group contains any hardware events.
673 */ 673 */
674static int is_software_only_group(struct perf_counter *leader) 674static int is_software_only_group(struct perf_event *leader)
675{ 675{
676 struct perf_counter *counter; 676 struct perf_event *event;
677 677
678 if (!is_software_counter(leader)) 678 if (!is_software_event(leader))
679 return 0; 679 return 0;
680 680
681 list_for_each_entry(counter, &leader->sibling_list, list_entry) 681 list_for_each_entry(event, &leader->sibling_list, group_entry)
682 if (!is_software_counter(counter)) 682 if (!is_software_event(event))
683 return 0; 683 return 0;
684 684
685 return 1; 685 return 1;
686} 686}
687 687
688/* 688/*
689 * Work out whether we can put this counter group on the CPU now. 689 * Work out whether we can put this event group on the CPU now.
690 */ 690 */
691static int group_can_go_on(struct perf_counter *counter, 691static int group_can_go_on(struct perf_event *event,
692 struct perf_cpu_context *cpuctx, 692 struct perf_cpu_context *cpuctx,
693 int can_add_hw) 693 int can_add_hw)
694{ 694{
695 /* 695 /*
696 * Groups consisting entirely of software counters can always go on. 696 * Groups consisting entirely of software events can always go on.
697 */ 697 */
698 if (is_software_only_group(counter)) 698 if (is_software_only_group(event))
699 return 1; 699 return 1;
700 /* 700 /*
701 * If an exclusive group is already on, no other hardware 701 * If an exclusive group is already on, no other hardware
702 * counters can go on. 702 * events can go on.
703 */ 703 */
704 if (cpuctx->exclusive) 704 if (cpuctx->exclusive)
705 return 0; 705 return 0;
706 /* 706 /*
707 * If this group is exclusive and there are already 707 * If this group is exclusive and there are already
708 * counters on the CPU, it can't go on. 708 * events on the CPU, it can't go on.
709 */ 709 */
710 if (counter->attr.exclusive && cpuctx->active_oncpu) 710 if (event->attr.exclusive && cpuctx->active_oncpu)
711 return 0; 711 return 0;
712 /* 712 /*
713 * Otherwise, try to add it if all previous groups were able 713 * Otherwise, try to add it if all previous groups were able
@@ -716,26 +716,26 @@ static int group_can_go_on(struct perf_counter *counter,
716 return can_add_hw; 716 return can_add_hw;
717} 717}
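/*
 * Editor's note, not part of the commit: the scheduling policy encoded
 * by group_can_go_on() above, rewritten as a standalone function over
 * plain flags.  The toy_* name and boolean parameters are hypothetical;
 * the four ordered checks mirror the diff.
 */
static int toy_group_can_go_on(int software_only_group,
			       int cpu_has_exclusive_group,
			       int group_is_exclusive,
			       int cpu_has_active_events,
			       int can_add_hw)
{
	if (software_only_group)		/* software-only groups always fit */
		return 1;
	if (cpu_has_exclusive_group)		/* an exclusive group owns the PMU */
		return 0;
	if (group_is_exclusive && cpu_has_active_events)
		return 0;			/* exclusive groups need an idle PMU */
	return can_add_hw;			/* otherwise, if earlier groups fit */
}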
718 718
719static void add_counter_to_ctx(struct perf_counter *counter, 719static void add_event_to_ctx(struct perf_event *event,
720 struct perf_counter_context *ctx) 720 struct perf_event_context *ctx)
721{ 721{
722 list_add_counter(counter, ctx); 722 list_add_event(event, ctx);
723 counter->tstamp_enabled = ctx->time; 723 event->tstamp_enabled = ctx->time;
724 counter->tstamp_running = ctx->time; 724 event->tstamp_running = ctx->time;
725 counter->tstamp_stopped = ctx->time; 725 event->tstamp_stopped = ctx->time;
726} 726}
727 727
728/* 728/*
729 * Cross CPU call to install and enable a performance counter 729 * Cross CPU call to install and enable a performance event
730 * 730 *
731 * Must be called with ctx->mutex held 731 * Must be called with ctx->mutex held
732 */ 732 */
733static void __perf_install_in_context(void *info) 733static void __perf_install_in_context(void *info)
734{ 734{
735 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 735 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
736 struct perf_counter *counter = info; 736 struct perf_event *event = info;
737 struct perf_counter_context *ctx = counter->ctx; 737 struct perf_event_context *ctx = event->ctx;
738 struct perf_counter *leader = counter->group_leader; 738 struct perf_event *leader = event->group_leader;
739 int cpu = smp_processor_id(); 739 int cpu = smp_processor_id();
740 int err; 740 int err;
741 741
@@ -744,7 +744,7 @@ static void __perf_install_in_context(void *info)
744 * the current task context of this cpu. If not it has been 744 * the current task context of this cpu. If not it has been
745 * scheduled out before the smp call arrived. 745 * scheduled out before the smp call arrived.
746 * Or possibly this is the right context but it isn't 746 * Or possibly this is the right context but it isn't
747 * on this cpu because it had no counters. 747 * on this cpu because it had no events.
748 */ 748 */
749 if (ctx->task && cpuctx->task_ctx != ctx) { 749 if (ctx->task && cpuctx->task_ctx != ctx) {
750 if (cpuctx->task_ctx || ctx->task != current) 750 if (cpuctx->task_ctx || ctx->task != current)
@@ -758,41 +758,41 @@ static void __perf_install_in_context(void *info)
758 758
759 /* 759 /*
760 * Protect the list operation against NMI by disabling the 760 * Protect the list operation against NMI by disabling the
761 * counters on a global level. NOP for non-NMI-based counters. 761 * events on a global level. NOP for non-NMI-based events.
762 */ 762 */
763 perf_disable(); 763 perf_disable();
764 764
765 add_counter_to_ctx(counter, ctx); 765 add_event_to_ctx(event, ctx);
766 766
767 /* 767 /*
768 * Don't put the counter on if it is disabled or if 768 * Don't put the event on if it is disabled or if
769 * it is in a group and the group isn't on. 769 * it is in a group and the group isn't on.
770 */ 770 */
771 if (counter->state != PERF_COUNTER_STATE_INACTIVE || 771 if (event->state != PERF_EVENT_STATE_INACTIVE ||
772 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)) 772 (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
773 goto unlock; 773 goto unlock;
774 774
775 /* 775 /*
776 * An exclusive counter can't go on if there are already active 776 * An exclusive event can't go on if there are already active
777 * hardware counters, and no hardware counter can go on if there 777 * hardware events, and no hardware event can go on if there
778 * is already an exclusive counter on. 778 * is already an exclusive event on.
779 */ 779 */
780 if (!group_can_go_on(counter, cpuctx, 1)) 780 if (!group_can_go_on(event, cpuctx, 1))
781 err = -EEXIST; 781 err = -EEXIST;
782 else 782 else
783 err = counter_sched_in(counter, cpuctx, ctx, cpu); 783 err = event_sched_in(event, cpuctx, ctx, cpu);
784 784
785 if (err) { 785 if (err) {
786 /* 786 /*
787 * This counter couldn't go on. If it is in a group 787 * This event couldn't go on. If it is in a group
788 * then we have to pull the whole group off. 788 * then we have to pull the whole group off.
789 * If the counter group is pinned then put it in error state. 789 * If the event group is pinned then put it in error state.
790 */ 790 */
791 if (leader != counter) 791 if (leader != event)
792 group_sched_out(leader, cpuctx, ctx); 792 group_sched_out(leader, cpuctx, ctx);
793 if (leader->attr.pinned) { 793 if (leader->attr.pinned) {
794 update_group_times(leader); 794 update_group_times(leader);
795 leader->state = PERF_COUNTER_STATE_ERROR; 795 leader->state = PERF_EVENT_STATE_ERROR;
796 } 796 }
797 } 797 }
798 798
@@ -806,92 +806,92 @@ static void __perf_install_in_context(void *info)
806} 806}
807 807
808/* 808/*
809 * Attach a performance counter to a context 809 * Attach a performance event to a context
810 * 810 *
811 * First we add the counter to the list with the hardware enable bit 811 * First we add the event to the list with the hardware enable bit
812 * in counter->hw_config cleared. 812 * in event->hw_config cleared.
813 * 813 *
814 * If the counter is attached to a task which is on a CPU we use a smp 814 * If the event is attached to a task which is on a CPU we use a smp
815 * call to enable it in the task context. The task might have been 815 * call to enable it in the task context. The task might have been
816 * scheduled away, but we check this in the smp call again. 816 * scheduled away, but we check this in the smp call again.
817 * 817 *
818 * Must be called with ctx->mutex held. 818 * Must be called with ctx->mutex held.
819 */ 819 */
820static void 820static void
821perf_install_in_context(struct perf_counter_context *ctx, 821perf_install_in_context(struct perf_event_context *ctx,
822 struct perf_counter *counter, 822 struct perf_event *event,
823 int cpu) 823 int cpu)
824{ 824{
825 struct task_struct *task = ctx->task; 825 struct task_struct *task = ctx->task;
826 826
827 if (!task) { 827 if (!task) {
828 /* 828 /*
829 * Per cpu counters are installed via an smp call and 829 * Per cpu events are installed via an smp call and
830 * the install is always successful. 830 * the install is always successful.
831 */ 831 */
832 smp_call_function_single(cpu, __perf_install_in_context, 832 smp_call_function_single(cpu, __perf_install_in_context,
833 counter, 1); 833 event, 1);
834 return; 834 return;
835 } 835 }
836 836
837retry: 837retry:
838 task_oncpu_function_call(task, __perf_install_in_context, 838 task_oncpu_function_call(task, __perf_install_in_context,
839 counter); 839 event);
840 840
841 spin_lock_irq(&ctx->lock); 841 spin_lock_irq(&ctx->lock);
842 /* 842 /*
843 * we need to retry the smp call. 843 * we need to retry the smp call.
844 */ 844 */
845 if (ctx->is_active && list_empty(&counter->list_entry)) { 845 if (ctx->is_active && list_empty(&event->group_entry)) {
846 spin_unlock_irq(&ctx->lock); 846 spin_unlock_irq(&ctx->lock);
847 goto retry; 847 goto retry;
848 } 848 }
849 849
850 /* 850 /*
851 * The lock prevents this context from being scheduled in, so we 851 * The lock prevents this context from being scheduled in, so we
852 * can add the counter safely if the call above did not 852 * can add the event safely if the call above did not
853 * succeed. 853 * succeed.
854 */ 854 */
855 if (list_empty(&counter->list_entry)) 855 if (list_empty(&event->group_entry))
856 add_counter_to_ctx(counter, ctx); 856 add_event_to_ctx(event, ctx);
857 spin_unlock_irq(&ctx->lock); 857 spin_unlock_irq(&ctx->lock);
858} 858}
859 859
860/* 860/*
861 * Put a counter into inactive state and update time fields. 861 * Put an event into inactive state and update time fields.
862 * Enabling the leader of a group effectively enables all 862 * Enabling the leader of a group effectively enables all
863 * the group members that aren't explicitly disabled, so we 863 * the group members that aren't explicitly disabled, so we
864 * have to update their ->tstamp_enabled also. 864 * have to update their ->tstamp_enabled also.
865 * Note: this works for group members as well as group leaders 865 * Note: this works for group members as well as group leaders
866 * since the non-leader members' sibling_lists will be empty. 866 * since the non-leader members' sibling_lists will be empty.
867 */ 867 */
868static void __perf_counter_mark_enabled(struct perf_counter *counter, 868static void __perf_event_mark_enabled(struct perf_event *event,
869 struct perf_counter_context *ctx) 869 struct perf_event_context *ctx)
870{ 870{
871 struct perf_counter *sub; 871 struct perf_event *sub;
872 872
873 counter->state = PERF_COUNTER_STATE_INACTIVE; 873 event->state = PERF_EVENT_STATE_INACTIVE;
874 counter->tstamp_enabled = ctx->time - counter->total_time_enabled; 874 event->tstamp_enabled = ctx->time - event->total_time_enabled;
875 list_for_each_entry(sub, &counter->sibling_list, list_entry) 875 list_for_each_entry(sub, &event->sibling_list, group_entry)
876 if (sub->state >= PERF_COUNTER_STATE_INACTIVE) 876 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
877 sub->tstamp_enabled = 877 sub->tstamp_enabled =
878 ctx->time - sub->total_time_enabled; 878 ctx->time - sub->total_time_enabled;
879} 879}
880 880
881/* 881/*
882 * Cross CPU call to enable a performance counter 882 * Cross CPU call to enable a performance event
883 */ 883 */
884static void __perf_counter_enable(void *info) 884static void __perf_event_enable(void *info)
885{ 885{
886 struct perf_counter *counter = info; 886 struct perf_event *event = info;
887 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 887 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
888 struct perf_counter_context *ctx = counter->ctx; 888 struct perf_event_context *ctx = event->ctx;
889 struct perf_counter *leader = counter->group_leader; 889 struct perf_event *leader = event->group_leader;
890 int err; 890 int err;
891 891
892 /* 892 /*
893 * If this is a per-task counter, need to check whether this 893 * If this is a per-task event, need to check whether this
894 * counter's task is the current task on this cpu. 894 * event's task is the current task on this cpu.
895 */ 895 */
896 if (ctx->task && cpuctx->task_ctx != ctx) { 896 if (ctx->task && cpuctx->task_ctx != ctx) {
897 if (cpuctx->task_ctx || ctx->task != current) 897 if (cpuctx->task_ctx || ctx->task != current)
@@ -903,40 +903,40 @@ static void __perf_counter_enable(void *info)
903 ctx->is_active = 1; 903 ctx->is_active = 1;
904 update_context_time(ctx); 904 update_context_time(ctx);
905 905
906 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 906 if (event->state >= PERF_EVENT_STATE_INACTIVE)
907 goto unlock; 907 goto unlock;
908 __perf_counter_mark_enabled(counter, ctx); 908 __perf_event_mark_enabled(event, ctx);
909 909
910 /* 910 /*
911 * If the counter is in a group and isn't the group leader, 911 * If the event is in a group and isn't the group leader,
912 * then don't put it on unless the group is on. 912 * then don't put it on unless the group is on.
913 */ 913 */
914 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE) 914 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
915 goto unlock; 915 goto unlock;
916 916
917 if (!group_can_go_on(counter, cpuctx, 1)) { 917 if (!group_can_go_on(event, cpuctx, 1)) {
918 err = -EEXIST; 918 err = -EEXIST;
919 } else { 919 } else {
920 perf_disable(); 920 perf_disable();
921 if (counter == leader) 921 if (event == leader)
922 err = group_sched_in(counter, cpuctx, ctx, 922 err = group_sched_in(event, cpuctx, ctx,
923 smp_processor_id()); 923 smp_processor_id());
924 else 924 else
925 err = counter_sched_in(counter, cpuctx, ctx, 925 err = event_sched_in(event, cpuctx, ctx,
926 smp_processor_id()); 926 smp_processor_id());
927 perf_enable(); 927 perf_enable();
928 } 928 }
929 929
930 if (err) { 930 if (err) {
931 /* 931 /*
932 * If this counter can't go on and it's part of a 932 * If this event can't go on and it's part of a
933 * group, then the whole group has to come off. 933 * group, then the whole group has to come off.
934 */ 934 */
935 if (leader != counter) 935 if (leader != event)
936 group_sched_out(leader, cpuctx, ctx); 936 group_sched_out(leader, cpuctx, ctx);
937 if (leader->attr.pinned) { 937 if (leader->attr.pinned) {
938 update_group_times(leader); 938 update_group_times(leader);
939 leader->state = PERF_COUNTER_STATE_ERROR; 939 leader->state = PERF_EVENT_STATE_ERROR;
940 } 940 }
941 } 941 }
942 942
@@ -945,100 +945,96 @@ static void __perf_counter_enable(void *info)
945} 945}
946 946
947/* 947/*
948 * Enable a counter. 948 * Enable an event.
949 * 949 *
950 * If counter->ctx is a cloned context, callers must make sure that 950 * If event->ctx is a cloned context, callers must make sure that
951 * every task struct that counter->ctx->task could possibly point to 951 * every task struct that event->ctx->task could possibly point to
952 * remains valid. This condition is satisfied when called through 952 * remains valid. This condition is satisfied when called through
953 * perf_counter_for_each_child or perf_counter_for_each as described 953 * perf_event_for_each_child or perf_event_for_each as described
954 * for perf_counter_disable. 954 * for perf_event_disable.
955 */ 955 */
956static void perf_counter_enable(struct perf_counter *counter) 956static void perf_event_enable(struct perf_event *event)
957{ 957{
958 struct perf_counter_context *ctx = counter->ctx; 958 struct perf_event_context *ctx = event->ctx;
959 struct task_struct *task = ctx->task; 959 struct task_struct *task = ctx->task;
960 960
961 if (!task) { 961 if (!task) {
962 /* 962 /*
963 * Enable the counter on the cpu that it's on 963 * Enable the event on the cpu that it's on
964 */ 964 */
965 smp_call_function_single(counter->cpu, __perf_counter_enable, 965 smp_call_function_single(event->cpu, __perf_event_enable,
966 counter, 1); 966 event, 1);
967 return; 967 return;
968 } 968 }
969 969
970 spin_lock_irq(&ctx->lock); 970 spin_lock_irq(&ctx->lock);
971 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 971 if (event->state >= PERF_EVENT_STATE_INACTIVE)
972 goto out; 972 goto out;
973 973
974 /* 974 /*
975 * If the counter is in error state, clear that first. 975 * If the event is in error state, clear that first.
976 * That way, if we see the counter in error state below, we 976 * That way, if we see the event in error state below, we
977 * know that it has gone back into error state, as distinct 977 * know that it has gone back into error state, as distinct
978 * from the task having been scheduled away before the 978 * from the task having been scheduled away before the
979 * cross-call arrived. 979 * cross-call arrived.
980 */ 980 */
981 if (counter->state == PERF_COUNTER_STATE_ERROR) 981 if (event->state == PERF_EVENT_STATE_ERROR)
982 counter->state = PERF_COUNTER_STATE_OFF; 982 event->state = PERF_EVENT_STATE_OFF;
983 983
984 retry: 984 retry:
985 spin_unlock_irq(&ctx->lock); 985 spin_unlock_irq(&ctx->lock);
986 task_oncpu_function_call(task, __perf_counter_enable, counter); 986 task_oncpu_function_call(task, __perf_event_enable, event);
987 987
988 spin_lock_irq(&ctx->lock); 988 spin_lock_irq(&ctx->lock);
989 989
990 /* 990 /*
991 * If the context is active and the counter is still off, 991 * If the context is active and the event is still off,
992 * we need to retry the cross-call. 992 * we need to retry the cross-call.
993 */ 993 */
994 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF) 994 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
995 goto retry; 995 goto retry;
996 996
997 /* 997 /*
998 * Since we have the lock this context can't be scheduled 998 * Since we have the lock this context can't be scheduled
999 * in, so we can change the state safely. 999 * in, so we can change the state safely.
1000 */ 1000 */
1001 if (counter->state == PERF_COUNTER_STATE_OFF) 1001 if (event->state == PERF_EVENT_STATE_OFF)
1002 __perf_counter_mark_enabled(counter, ctx); 1002 __perf_event_mark_enabled(event, ctx);
1003 1003
1004 out: 1004 out:
1005 spin_unlock_irq(&ctx->lock); 1005 spin_unlock_irq(&ctx->lock);
1006} 1006}
1007 1007
1008static int perf_counter_refresh(struct perf_counter *counter, int refresh) 1008static int perf_event_refresh(struct perf_event *event, int refresh)
1009{ 1009{
1010 /* 1010 /*
1011 * not supported on inherited counters 1011 * not supported on inherited events
1012 */ 1012 */
1013 if (counter->attr.inherit) 1013 if (event->attr.inherit)
1014 return -EINVAL; 1014 return -EINVAL;
1015 1015
1016 atomic_add(refresh, &counter->event_limit); 1016 atomic_add(refresh, &event->event_limit);
1017 perf_counter_enable(counter); 1017 perf_event_enable(event);
1018 1018
1019 return 0; 1019 return 0;
1020} 1020}
1021 1021
1022void __perf_counter_sched_out(struct perf_counter_context *ctx, 1022void __perf_event_sched_out(struct perf_event_context *ctx,
1023 struct perf_cpu_context *cpuctx) 1023 struct perf_cpu_context *cpuctx)
1024{ 1024{
1025 struct perf_counter *counter; 1025 struct perf_event *event;
1026 1026
1027 spin_lock(&ctx->lock); 1027 spin_lock(&ctx->lock);
1028 ctx->is_active = 0; 1028 ctx->is_active = 0;
1029 if (likely(!ctx->nr_counters)) 1029 if (likely(!ctx->nr_events))
1030 goto out; 1030 goto out;
1031 update_context_time(ctx); 1031 update_context_time(ctx);
1032 1032
1033 perf_disable(); 1033 perf_disable();
1034 if (ctx->nr_active) { 1034 if (ctx->nr_active)
1035 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1035 list_for_each_entry(event, &ctx->group_list, group_entry)
1036 if (counter != counter->group_leader) 1036 group_sched_out(event, cpuctx, ctx);
1037 counter_sched_out(counter, cpuctx, ctx); 1037
1038 else
1039 group_sched_out(counter, cpuctx, ctx);
1040 }
1041 }
1042 perf_enable(); 1038 perf_enable();
1043 out: 1039 out:
1044 spin_unlock(&ctx->lock); 1040 spin_unlock(&ctx->lock);
@@ -1047,46 +1043,46 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
1047/* 1043/*
1048 * Test whether two contexts are equivalent, i.e. whether they 1044 * Test whether two contexts are equivalent, i.e. whether they
1049 * have both been cloned from the same version of the same context 1045 * have both been cloned from the same version of the same context
1050 * and they both have the same number of enabled counters. 1046 * and they both have the same number of enabled events.
1051 * If the number of enabled counters is the same, then the set 1047 * If the number of enabled events is the same, then the set
1052 * of enabled counters should be the same, because these are both 1048 * of enabled events should be the same, because these are both
1053 * inherited contexts, therefore we can't access individual counters 1049 * inherited contexts, therefore we can't access individual events
1054 * in them directly with an fd; we can only enable/disable all 1050 * in them directly with an fd; we can only enable/disable all
1055 * counters via prctl, or enable/disable all counters in a family 1051 * events via prctl, or enable/disable all events in a family
1056 * via ioctl, which will have the same effect on both contexts. 1052 * via ioctl, which will have the same effect on both contexts.
1057 */ 1053 */
1058static int context_equiv(struct perf_counter_context *ctx1, 1054static int context_equiv(struct perf_event_context *ctx1,
1059 struct perf_counter_context *ctx2) 1055 struct perf_event_context *ctx2)
1060{ 1056{
1061 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx 1057 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1062 && ctx1->parent_gen == ctx2->parent_gen 1058 && ctx1->parent_gen == ctx2->parent_gen
1063 && !ctx1->pin_count && !ctx2->pin_count; 1059 && !ctx1->pin_count && !ctx2->pin_count;
1064} 1060}
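/*
 * Editor's note, not part of the commit: context_equiv() above in
 * isolation, using a cut-down hypothetical struct.  Two task contexts
 * are treated as interchangeable on context switch only if both were
 * cloned from the same parent at the same generation and neither is
 * pinned by a concurrent user.
 */
struct toy_ctx {
	struct toy_ctx *parent_ctx;
	unsigned long long parent_gen;
	int pin_count;
};

static int toy_context_equiv(const struct toy_ctx *a, const struct toy_ctx *b)
{
	return a->parent_ctx && a->parent_ctx == b->parent_ctx
		&& a->parent_gen == b->parent_gen
		&& !a->pin_count && !b->pin_count;
}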
1065 1061
1066static void __perf_counter_read(void *counter); 1062static void __perf_event_read(void *event);
1067 1063
1068static void __perf_counter_sync_stat(struct perf_counter *counter, 1064static void __perf_event_sync_stat(struct perf_event *event,
1069 struct perf_counter *next_counter) 1065 struct perf_event *next_event)
1070{ 1066{
1071 u64 value; 1067 u64 value;
1072 1068
1073 if (!counter->attr.inherit_stat) 1069 if (!event->attr.inherit_stat)
1074 return; 1070 return;
1075 1071
1076 /* 1072 /*
1077 * Update the counter value; we cannot use perf_counter_read() 1073 * Update the event value; we cannot use perf_event_read()
1078 * because we're in the middle of a context switch and have IRQs 1074 * because we're in the middle of a context switch and have IRQs
1079 * disabled, which upsets smp_call_function_single(), however 1075 * disabled, which upsets smp_call_function_single(), however
1080 * we know the counter must be on the current CPU, therefore we 1076 * we know the event must be on the current CPU, therefore we
1081 * don't need to use it. 1077 * don't need to use it.
1082 */ 1078 */
1083 switch (counter->state) { 1079 switch (event->state) {
1084 case PERF_COUNTER_STATE_ACTIVE: 1080 case PERF_EVENT_STATE_ACTIVE:
1085 __perf_counter_read(counter); 1081 __perf_event_read(event);
1086 break; 1082 break;
1087 1083
1088 case PERF_COUNTER_STATE_INACTIVE: 1084 case PERF_EVENT_STATE_INACTIVE:
1089 update_counter_times(counter); 1085 update_event_times(event);
1090 break; 1086 break;
1091 1087
1092 default: 1088 default:
@@ -1094,73 +1090,73 @@ static void __perf_counter_sync_stat(struct perf_counter *counter,
1094 } 1090 }
1095 1091
1096 /* 1092 /*
1097 * In order to keep per-task stats reliable we need to flip the counter 1093 * In order to keep per-task stats reliable we need to flip the event
1098 * values when we flip the contexts. 1094 * values when we flip the contexts.
1099 */ 1095 */
1100 value = atomic64_read(&next_counter->count); 1096 value = atomic64_read(&next_event->count);
1101 value = atomic64_xchg(&counter->count, value); 1097 value = atomic64_xchg(&event->count, value);
1102 atomic64_set(&next_counter->count, value); 1098 atomic64_set(&next_event->count, value);
1103 1099
1104 swap(counter->total_time_enabled, next_counter->total_time_enabled); 1100 swap(event->total_time_enabled, next_event->total_time_enabled);
1105 swap(counter->total_time_running, next_counter->total_time_running); 1101 swap(event->total_time_running, next_event->total_time_running);
1106 1102
1107 /* 1103 /*
1108 * Since we swizzled the values, update the user visible data too. 1104 * Since we swizzled the values, update the user visible data too.
1109 */ 1105 */
1110 perf_counter_update_userpage(counter); 1106 perf_event_update_userpage(event);
1111 perf_counter_update_userpage(next_counter); 1107 perf_event_update_userpage(next_event);
1112} 1108}
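/*
 * Editor's note, not part of the commit: the value flip performed by
 * __perf_event_sync_stat() above, modelled with plain integers.  The
 * kernel uses atomic64_xchg()/swap(); this hypothetical toy_* helper
 * only shows the net effect: each context keeps the totals that were
 * accumulated on behalf of its own task.
 */
static void toy_sync_stat(unsigned long long *count,
			  unsigned long long *next_count,
			  unsigned long long *time_enabled,
			  unsigned long long *next_time_enabled)
{
	unsigned long long tmp;

	tmp = *next_count;		/* value from the incoming context */
	*next_count = *count;
	*count = tmp;

	tmp = *next_time_enabled;	/* keep the time totals with the task */
	*next_time_enabled = *time_enabled;
	*time_enabled = tmp;
}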
1113 1109
1114#define list_next_entry(pos, member) \ 1110#define list_next_entry(pos, member) \
1115 list_entry(pos->member.next, typeof(*pos), member) 1111 list_entry(pos->member.next, typeof(*pos), member)
1116 1112
1117static void perf_counter_sync_stat(struct perf_counter_context *ctx, 1113static void perf_event_sync_stat(struct perf_event_context *ctx,
1118 struct perf_counter_context *next_ctx) 1114 struct perf_event_context *next_ctx)
1119{ 1115{
1120 struct perf_counter *counter, *next_counter; 1116 struct perf_event *event, *next_event;
1121 1117
1122 if (!ctx->nr_stat) 1118 if (!ctx->nr_stat)
1123 return; 1119 return;
1124 1120
1125 counter = list_first_entry(&ctx->event_list, 1121 event = list_first_entry(&ctx->event_list,
1126 struct perf_counter, event_entry); 1122 struct perf_event, event_entry);
1127 1123
1128 next_counter = list_first_entry(&next_ctx->event_list, 1124 next_event = list_first_entry(&next_ctx->event_list,
1129 struct perf_counter, event_entry); 1125 struct perf_event, event_entry);
1130 1126
1131 while (&counter->event_entry != &ctx->event_list && 1127 while (&event->event_entry != &ctx->event_list &&
1132 &next_counter->event_entry != &next_ctx->event_list) { 1128 &next_event->event_entry != &next_ctx->event_list) {
1133 1129
1134 __perf_counter_sync_stat(counter, next_counter); 1130 __perf_event_sync_stat(event, next_event);
1135 1131
1136 counter = list_next_entry(counter, event_entry); 1132 event = list_next_entry(event, event_entry);
1137 next_counter = list_next_entry(next_counter, event_entry); 1133 next_event = list_next_entry(next_event, event_entry);
1138 } 1134 }
1139} 1135}
1140 1136
1141/* 1137/*
1142 * Called from scheduler to remove the counters of the current task, 1138 * Called from scheduler to remove the events of the current task,
1143 * with interrupts disabled. 1139 * with interrupts disabled.
1144 * 1140 *
1145 * We stop each counter and update the counter value in counter->count. 1141 * We stop each event and update the event value in event->count.
1146 * 1142 *
1147 * This does not protect us against NMI, but disable() 1143 * This does not protect us against NMI, but disable()
1148 * sets the disabled bit in the control field of counter _before_ 1144 * sets the disabled bit in the control field of event _before_
1149 * accessing the counter control register. If an NMI hits, then it will 1145 * accessing the event control register. If an NMI hits, then it will
1150 * not restart the counter. 1146 * not restart the event.
1151 */ 1147 */
1152void perf_counter_task_sched_out(struct task_struct *task, 1148void perf_event_task_sched_out(struct task_struct *task,
1153 struct task_struct *next, int cpu) 1149 struct task_struct *next, int cpu)
1154{ 1150{
1155 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1151 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1156 struct perf_counter_context *ctx = task->perf_counter_ctxp; 1152 struct perf_event_context *ctx = task->perf_event_ctxp;
1157 struct perf_counter_context *next_ctx; 1153 struct perf_event_context *next_ctx;
1158 struct perf_counter_context *parent; 1154 struct perf_event_context *parent;
1159 struct pt_regs *regs; 1155 struct pt_regs *regs;
1160 int do_switch = 1; 1156 int do_switch = 1;
1161 1157
1162 regs = task_pt_regs(task); 1158 regs = task_pt_regs(task);
1163 perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); 1159 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1164 1160
1165 if (likely(!ctx || !cpuctx->task_ctx)) 1161 if (likely(!ctx || !cpuctx->task_ctx))
1166 return; 1162 return;
@@ -1169,7 +1165,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1169 1165
1170 rcu_read_lock(); 1166 rcu_read_lock();
1171 parent = rcu_dereference(ctx->parent_ctx); 1167 parent = rcu_dereference(ctx->parent_ctx);
1172 next_ctx = next->perf_counter_ctxp; 1168 next_ctx = next->perf_event_ctxp;
1173 if (parent && next_ctx && 1169 if (parent && next_ctx &&
1174 rcu_dereference(next_ctx->parent_ctx) == parent) { 1170 rcu_dereference(next_ctx->parent_ctx) == parent) {
1175 /* 1171 /*
@@ -1186,15 +1182,15 @@ void perf_counter_task_sched_out(struct task_struct *task,
1186 if (context_equiv(ctx, next_ctx)) { 1182 if (context_equiv(ctx, next_ctx)) {
1187 /* 1183 /*
1188 * XXX do we need a memory barrier of sorts 1184 * XXX do we need a memory barrier of sorts
1189 * wrt rcu_dereference() of perf_counter_ctxp 1185 * wrt rcu_dereference() of perf_event_ctxp
1190 */ 1186 */
1191 task->perf_counter_ctxp = next_ctx; 1187 task->perf_event_ctxp = next_ctx;
1192 next->perf_counter_ctxp = ctx; 1188 next->perf_event_ctxp = ctx;
1193 ctx->task = next; 1189 ctx->task = next;
1194 next_ctx->task = task; 1190 next_ctx->task = task;
1195 do_switch = 0; 1191 do_switch = 0;
1196 1192
1197 perf_counter_sync_stat(ctx, next_ctx); 1193 perf_event_sync_stat(ctx, next_ctx);
1198 } 1194 }
1199 spin_unlock(&next_ctx->lock); 1195 spin_unlock(&next_ctx->lock);
1200 spin_unlock(&ctx->lock); 1196 spin_unlock(&ctx->lock);
@@ -1202,7 +1198,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1202 rcu_read_unlock(); 1198 rcu_read_unlock();
1203 1199
1204 if (do_switch) { 1200 if (do_switch) {
1205 __perf_counter_sched_out(ctx, cpuctx); 1201 __perf_event_sched_out(ctx, cpuctx);
1206 cpuctx->task_ctx = NULL; 1202 cpuctx->task_ctx = NULL;
1207 } 1203 }
1208} 1204}
@@ -1210,7 +1206,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1210/* 1206/*
1211 * Called with IRQs disabled 1207 * Called with IRQs disabled
1212 */ 1208 */
1213static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) 1209static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1214{ 1210{
1215 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1211 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1216 1212
@@ -1220,28 +1216,28 @@ static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1220 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 1216 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1221 return; 1217 return;
1222 1218
1223 __perf_counter_sched_out(ctx, cpuctx); 1219 __perf_event_sched_out(ctx, cpuctx);
1224 cpuctx->task_ctx = NULL; 1220 cpuctx->task_ctx = NULL;
1225} 1221}
1226 1222
1227/* 1223/*
1228 * Called with IRQs disabled 1224 * Called with IRQs disabled
1229 */ 1225 */
1230static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) 1226static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
1231{ 1227{
1232 __perf_counter_sched_out(&cpuctx->ctx, cpuctx); 1228 __perf_event_sched_out(&cpuctx->ctx, cpuctx);
1233} 1229}
1234 1230
1235static void 1231static void
1236__perf_counter_sched_in(struct perf_counter_context *ctx, 1232__perf_event_sched_in(struct perf_event_context *ctx,
1237 struct perf_cpu_context *cpuctx, int cpu) 1233 struct perf_cpu_context *cpuctx, int cpu)
1238{ 1234{
1239 struct perf_counter *counter; 1235 struct perf_event *event;
1240 int can_add_hw = 1; 1236 int can_add_hw = 1;
1241 1237
1242 spin_lock(&ctx->lock); 1238 spin_lock(&ctx->lock);
1243 ctx->is_active = 1; 1239 ctx->is_active = 1;
1244 if (likely(!ctx->nr_counters)) 1240 if (likely(!ctx->nr_events))
1245 goto out; 1241 goto out;
1246 1242
1247 ctx->timestamp = perf_clock(); 1243 ctx->timestamp = perf_clock();
@@ -1252,55 +1248,45 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
1252 * First go through the list and put on any pinned groups 1248 * First go through the list and put on any pinned groups
1253 * in order to give them the best chance of going on. 1249 * in order to give them the best chance of going on.
1254 */ 1250 */
1255 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1251 list_for_each_entry(event, &ctx->group_list, group_entry) {
1256 if (counter->state <= PERF_COUNTER_STATE_OFF || 1252 if (event->state <= PERF_EVENT_STATE_OFF ||
1257 !counter->attr.pinned) 1253 !event->attr.pinned)
1258 continue; 1254 continue;
1259 if (counter->cpu != -1 && counter->cpu != cpu) 1255 if (event->cpu != -1 && event->cpu != cpu)
1260 continue; 1256 continue;
1261 1257
1262 if (counter != counter->group_leader) 1258 if (group_can_go_on(event, cpuctx, 1))
1263 counter_sched_in(counter, cpuctx, ctx, cpu); 1259 group_sched_in(event, cpuctx, ctx, cpu);
1264 else {
1265 if (group_can_go_on(counter, cpuctx, 1))
1266 group_sched_in(counter, cpuctx, ctx, cpu);
1267 }
1268 1260
1269 /* 1261 /*
1270 * If this pinned group hasn't been scheduled, 1262 * If this pinned group hasn't been scheduled,
1271 * put it in error state. 1263 * put it in error state.
1272 */ 1264 */
1273 if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 1265 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1274 update_group_times(counter); 1266 update_group_times(event);
1275 counter->state = PERF_COUNTER_STATE_ERROR; 1267 event->state = PERF_EVENT_STATE_ERROR;
1276 } 1268 }
1277 } 1269 }
1278 1270
1279 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1271 list_for_each_entry(event, &ctx->group_list, group_entry) {
1280 /* 1272 /*
1281 * Ignore counters in OFF or ERROR state, and 1273 * Ignore events in OFF or ERROR state, and
1282 * ignore pinned counters since we did them already. 1274 * ignore pinned events since we did them already.
1283 */ 1275 */
1284 if (counter->state <= PERF_COUNTER_STATE_OFF || 1276 if (event->state <= PERF_EVENT_STATE_OFF ||
1285 counter->attr.pinned) 1277 event->attr.pinned)
1286 continue; 1278 continue;
1287 1279
1288 /* 1280 /*
1289 * Listen to the 'cpu' scheduling filter constraint 1281 * Listen to the 'cpu' scheduling filter constraint
1290 * of counters: 1282 * of events:
1291 */ 1283 */
1292 if (counter->cpu != -1 && counter->cpu != cpu) 1284 if (event->cpu != -1 && event->cpu != cpu)
1293 continue; 1285 continue;
1294 1286
1295 if (counter != counter->group_leader) { 1287 if (group_can_go_on(event, cpuctx, can_add_hw))
1296 if (counter_sched_in(counter, cpuctx, ctx, cpu)) 1288 if (group_sched_in(event, cpuctx, ctx, cpu))
1297 can_add_hw = 0; 1289 can_add_hw = 0;
1298 } else {
1299 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1300 if (group_sched_in(counter, cpuctx, ctx, cpu))
1301 can_add_hw = 0;
1302 }
1303 }
1304 } 1290 }
1305 perf_enable(); 1291 perf_enable();
1306 out: 1292 out:
@@ -1308,48 +1294,48 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
1308} 1294}
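/*
 * Editor's note, not part of the commit: a skeleton of the two-pass
 * policy in __perf_event_sched_in() above.  Everything here (toy_*
 * names, the "slots" stand-in for PMU capacity) is hypothetical; the
 * point is only the ordering: pinned groups get first pick and are put
 * into an error state if they do not fit, then flexible groups are
 * packed in while the hardware still has room.
 */
enum toy_state { TOY_ERROR = -1, TOY_OFF = 0, TOY_INACTIVE = 1, TOY_ACTIVE = 2 };

struct toy_group {
	int pinned;
	enum toy_state state;
};

static int toy_try_sched_in(struct toy_group *g, int *slots)
{
	if (*slots == 0)
		return -1;		/* no room left on the PMU */
	(*slots)--;
	g->state = TOY_ACTIVE;
	return 0;
}

static void toy_sched_in(struct toy_group *g, int n, int slots)
{
	int can_add_hw = 1, i;

	for (i = 0; i < n; i++)		/* pass 1: pinned groups first */
		if (g[i].pinned && g[i].state == TOY_INACTIVE &&
		    toy_try_sched_in(&g[i], &slots) != 0)
			g[i].state = TOY_ERROR;

	for (i = 0; i < n; i++)		/* pass 2: flexible groups, room permitting */
		if (!g[i].pinned && g[i].state == TOY_INACTIVE &&
		    can_add_hw && toy_try_sched_in(&g[i], &slots) != 0)
			can_add_hw = 0;
}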
1309 1295
1310/* 1296/*
1311 * Called from scheduler to add the counters of the current task 1297 * Called from scheduler to add the events of the current task
1312 * with interrupts disabled. 1298 * with interrupts disabled.
1313 * 1299 *
1314 * We restore the counter value and then enable it. 1300 * We restore the event value and then enable it.
1315 * 1301 *
1316 * This does not protect us against NMI, but enable() 1302 * This does not protect us against NMI, but enable()
1317 * sets the enabled bit in the control field of counter _before_ 1303 * sets the enabled bit in the control field of event _before_
1318 * accessing the counter control register. If an NMI hits, then it will 1304 * accessing the event control register. If an NMI hits, then it will
1319 * keep the counter running. 1305 * keep the event running.
1320 */ 1306 */
1321void perf_counter_task_sched_in(struct task_struct *task, int cpu) 1307void perf_event_task_sched_in(struct task_struct *task, int cpu)
1322{ 1308{
1323 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1309 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1324 struct perf_counter_context *ctx = task->perf_counter_ctxp; 1310 struct perf_event_context *ctx = task->perf_event_ctxp;
1325 1311
1326 if (likely(!ctx)) 1312 if (likely(!ctx))
1327 return; 1313 return;
1328 if (cpuctx->task_ctx == ctx) 1314 if (cpuctx->task_ctx == ctx)
1329 return; 1315 return;
1330 __perf_counter_sched_in(ctx, cpuctx, cpu); 1316 __perf_event_sched_in(ctx, cpuctx, cpu);
1331 cpuctx->task_ctx = ctx; 1317 cpuctx->task_ctx = ctx;
1332} 1318}
1333 1319
1334static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) 1320static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1335{ 1321{
1336 struct perf_counter_context *ctx = &cpuctx->ctx; 1322 struct perf_event_context *ctx = &cpuctx->ctx;
1337 1323
1338 __perf_counter_sched_in(ctx, cpuctx, cpu); 1324 __perf_event_sched_in(ctx, cpuctx, cpu);
1339} 1325}
1340 1326
1341#define MAX_INTERRUPTS (~0ULL) 1327#define MAX_INTERRUPTS (~0ULL)
1342 1328
1343static void perf_log_throttle(struct perf_counter *counter, int enable); 1329static void perf_log_throttle(struct perf_event *event, int enable);
1344 1330
1345static void perf_adjust_period(struct perf_counter *counter, u64 events) 1331static void perf_adjust_period(struct perf_event *event, u64 events)
1346{ 1332{
1347 struct hw_perf_counter *hwc = &counter->hw; 1333 struct hw_perf_event *hwc = &event->hw;
1348 u64 period, sample_period; 1334 u64 period, sample_period;
1349 s64 delta; 1335 s64 delta;
1350 1336
1351 events *= hwc->sample_period; 1337 events *= hwc->sample_period;
1352 period = div64_u64(events, counter->attr.sample_freq); 1338 period = div64_u64(events, event->attr.sample_freq);
1353 1339
1354 delta = (s64)(period - hwc->sample_period); 1340 delta = (s64)(period - hwc->sample_period);
1355 delta = (delta + 7) / 8; /* low pass filter */ 1341 delta = (delta + 7) / 8; /* low pass filter */
@@ -1362,39 +1348,39 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
1362 hwc->sample_period = sample_period; 1348 hwc->sample_period = sample_period;
1363} 1349}
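/*
 * Editor's note, not part of the commit: the arithmetic inside
 * perf_adjust_period() above as a standalone userspace function.  The
 * toy_* name and parameters are hypothetical; the period estimate and
 * the divide-by-8 low-pass filter mirror the diff.  interrupts_per_sec
 * is the caller's estimate of the current interrupt rate.
 */
#include <stdio.h>

static unsigned long long
toy_adjust_period(unsigned long long sample_period,
		  unsigned long long interrupts_per_sec,
		  unsigned long long sample_freq)
{
	unsigned long long events, period;
	long long delta;

	events = interrupts_per_sec * sample_period;	/* events counted per second */
	period = events / sample_freq;			/* period that would hit sample_freq */

	delta = (long long)(period - sample_period);
	delta = (delta + 7) / 8;			/* low pass filter */

	sample_period += delta;
	if (!sample_period)
		sample_period = 1;

	return sample_period;
}

int main(void)
{
	/* 10000 interrupts/s at period 1000, target 4000 samples/s */
	printf("new period = %llu\n", toy_adjust_period(1000, 10000, 4000));
	return 0;
}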
1364 1350
1365static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) 1351static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1366{ 1352{
1367 struct perf_counter *counter; 1353 struct perf_event *event;
1368 struct hw_perf_counter *hwc; 1354 struct hw_perf_event *hwc;
1369 u64 interrupts, freq; 1355 u64 interrupts, freq;
1370 1356
1371 spin_lock(&ctx->lock); 1357 spin_lock(&ctx->lock);
1372 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1358 list_for_each_entry(event, &ctx->group_list, group_entry) {
1373 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 1359 if (event->state != PERF_EVENT_STATE_ACTIVE)
1374 continue; 1360 continue;
1375 1361
1376 hwc = &counter->hw; 1362 hwc = &event->hw;
1377 1363
1378 interrupts = hwc->interrupts; 1364 interrupts = hwc->interrupts;
1379 hwc->interrupts = 0; 1365 hwc->interrupts = 0;
1380 1366
1381 /* 1367 /*
1382 * unthrottle counters on the tick 1368 * unthrottle events on the tick
1383 */ 1369 */
1384 if (interrupts == MAX_INTERRUPTS) { 1370 if (interrupts == MAX_INTERRUPTS) {
1385 perf_log_throttle(counter, 1); 1371 perf_log_throttle(event, 1);
1386 counter->pmu->unthrottle(counter); 1372 event->pmu->unthrottle(event);
1387 interrupts = 2*sysctl_perf_counter_sample_rate/HZ; 1373 interrupts = 2*sysctl_perf_event_sample_rate/HZ;
1388 } 1374 }
1389 1375
1390 if (!counter->attr.freq || !counter->attr.sample_freq) 1376 if (!event->attr.freq || !event->attr.sample_freq)
1391 continue; 1377 continue;
1392 1378
1393 /* 1379 /*
1394 * if the specified freq < HZ then we need to skip ticks 1380 * if the specified freq < HZ then we need to skip ticks
1395 */ 1381 */
1396 if (counter->attr.sample_freq < HZ) { 1382 if (event->attr.sample_freq < HZ) {
1397 freq = counter->attr.sample_freq; 1383 freq = event->attr.sample_freq;
1398 1384
1399 hwc->freq_count += freq; 1385 hwc->freq_count += freq;
1400 hwc->freq_interrupts += interrupts; 1386 hwc->freq_interrupts += interrupts;
@@ -1408,7 +1394,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1408 } else 1394 } else
1409 freq = HZ; 1395 freq = HZ;
1410 1396
1411 perf_adjust_period(counter, freq * interrupts); 1397 perf_adjust_period(event, freq * interrupts);
1412 1398
1413 /* 1399 /*
1414 * In order to avoid being stalled by an (accidental) huge 1400 * In order to avoid being stalled by an (accidental) huge
@@ -1417,9 +1403,9 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1417 */ 1403 */
1418 if (!interrupts) { 1404 if (!interrupts) {
1419 perf_disable(); 1405 perf_disable();
1420 counter->pmu->disable(counter); 1406 event->pmu->disable(event);
1421 atomic64_set(&hwc->period_left, 0); 1407 atomic64_set(&hwc->period_left, 0);
1422 counter->pmu->enable(counter); 1408 event->pmu->enable(event);
1423 perf_enable(); 1409 perf_enable();
1424 } 1410 }
1425 } 1411 }
@@ -1427,22 +1413,22 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1427} 1413}
1428 1414
1429/* 1415/*
1430 * Round-robin a context's counters: 1416 * Round-robin a context's events:
1431 */ 1417 */
1432static void rotate_ctx(struct perf_counter_context *ctx) 1418static void rotate_ctx(struct perf_event_context *ctx)
1433{ 1419{
1434 struct perf_counter *counter; 1420 struct perf_event *event;
1435 1421
1436 if (!ctx->nr_counters) 1422 if (!ctx->nr_events)
1437 return; 1423 return;
1438 1424
1439 spin_lock(&ctx->lock); 1425 spin_lock(&ctx->lock);
1440 /* 1426 /*
1441 * Rotate the first entry last (works just fine for group counters too): 1427 * Rotate the first entry last (works just fine for group events too):
1442 */ 1428 */
1443 perf_disable(); 1429 perf_disable();
1444 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1430 list_for_each_entry(event, &ctx->group_list, group_entry) {
1445 list_move_tail(&counter->list_entry, &ctx->counter_list); 1431 list_move_tail(&event->group_entry, &ctx->group_list);
1446 break; 1432 break;
1447 } 1433 }
1448 perf_enable(); 1434 perf_enable();
@@ -1450,93 +1436,93 @@ static void rotate_ctx(struct perf_counter_context *ctx)
1450 spin_unlock(&ctx->lock); 1436 spin_unlock(&ctx->lock);
1451} 1437}
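/*
 * Editor's note, not part of the commit: rotate_ctx() above simply
 * moves the first group to the tail of the context's group list so
 * that, tick by tick, every group gets a turn at scarce hardware
 * counters.  The same round-robin step on a plain array, with a
 * hypothetical toy_* name:
 */
static void toy_rotate(int *groups, int n)
{
	int first, i;

	if (n < 2)
		return;

	first = groups[0];
	for (i = 1; i < n; i++)		/* shift everything left by one */
		groups[i - 1] = groups[i];
	groups[n - 1] = first;		/* the old head becomes the tail */
}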
1452 1438
1453void perf_counter_task_tick(struct task_struct *curr, int cpu) 1439void perf_event_task_tick(struct task_struct *curr, int cpu)
1454{ 1440{
1455 struct perf_cpu_context *cpuctx; 1441 struct perf_cpu_context *cpuctx;
1456 struct perf_counter_context *ctx; 1442 struct perf_event_context *ctx;
1457 1443
1458 if (!atomic_read(&nr_counters)) 1444 if (!atomic_read(&nr_events))
1459 return; 1445 return;
1460 1446
1461 cpuctx = &per_cpu(perf_cpu_context, cpu); 1447 cpuctx = &per_cpu(perf_cpu_context, cpu);
1462 ctx = curr->perf_counter_ctxp; 1448 ctx = curr->perf_event_ctxp;
1463 1449
1464 perf_ctx_adjust_freq(&cpuctx->ctx); 1450 perf_ctx_adjust_freq(&cpuctx->ctx);
1465 if (ctx) 1451 if (ctx)
1466 perf_ctx_adjust_freq(ctx); 1452 perf_ctx_adjust_freq(ctx);
1467 1453
1468 perf_counter_cpu_sched_out(cpuctx); 1454 perf_event_cpu_sched_out(cpuctx);
1469 if (ctx) 1455 if (ctx)
1470 __perf_counter_task_sched_out(ctx); 1456 __perf_event_task_sched_out(ctx);
1471 1457
1472 rotate_ctx(&cpuctx->ctx); 1458 rotate_ctx(&cpuctx->ctx);
1473 if (ctx) 1459 if (ctx)
1474 rotate_ctx(ctx); 1460 rotate_ctx(ctx);
1475 1461
1476 perf_counter_cpu_sched_in(cpuctx, cpu); 1462 perf_event_cpu_sched_in(cpuctx, cpu);
1477 if (ctx) 1463 if (ctx)
1478 perf_counter_task_sched_in(curr, cpu); 1464 perf_event_task_sched_in(curr, cpu);
1479} 1465}
1480 1466
1481/* 1467/*
1482 * Enable all of a task's counters that have been marked enable-on-exec. 1468 * Enable all of a task's events that have been marked enable-on-exec.
1483 * This expects task == current. 1469 * This expects task == current.
1484 */ 1470 */
1485static void perf_counter_enable_on_exec(struct task_struct *task) 1471static void perf_event_enable_on_exec(struct task_struct *task)
1486{ 1472{
1487 struct perf_counter_context *ctx; 1473 struct perf_event_context *ctx;
1488 struct perf_counter *counter; 1474 struct perf_event *event;
1489 unsigned long flags; 1475 unsigned long flags;
1490 int enabled = 0; 1476 int enabled = 0;
1491 1477
1492 local_irq_save(flags); 1478 local_irq_save(flags);
1493 ctx = task->perf_counter_ctxp; 1479 ctx = task->perf_event_ctxp;
1494 if (!ctx || !ctx->nr_counters) 1480 if (!ctx || !ctx->nr_events)
1495 goto out; 1481 goto out;
1496 1482
1497 __perf_counter_task_sched_out(ctx); 1483 __perf_event_task_sched_out(ctx);
1498 1484
1499 spin_lock(&ctx->lock); 1485 spin_lock(&ctx->lock);
1500 1486
1501 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1487 list_for_each_entry(event, &ctx->group_list, group_entry) {
1502 if (!counter->attr.enable_on_exec) 1488 if (!event->attr.enable_on_exec)
1503 continue; 1489 continue;
1504 counter->attr.enable_on_exec = 0; 1490 event->attr.enable_on_exec = 0;
1505 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 1491 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1506 continue; 1492 continue;
1507 __perf_counter_mark_enabled(counter, ctx); 1493 __perf_event_mark_enabled(event, ctx);
1508 enabled = 1; 1494 enabled = 1;
1509 } 1495 }
1510 1496
1511 /* 1497 /*
1512 * Unclone this context if we enabled any counter. 1498 * Unclone this context if we enabled any event.
1513 */ 1499 */
1514 if (enabled) 1500 if (enabled)
1515 unclone_ctx(ctx); 1501 unclone_ctx(ctx);
1516 1502
1517 spin_unlock(&ctx->lock); 1503 spin_unlock(&ctx->lock);
1518 1504
1519 perf_counter_task_sched_in(task, smp_processor_id()); 1505 perf_event_task_sched_in(task, smp_processor_id());
1520 out: 1506 out:
1521 local_irq_restore(flags); 1507 local_irq_restore(flags);
1522} 1508}
1523 1509
1524/* 1510/*
1525 * Cross CPU call to read the hardware counter 1511 * Cross CPU call to read the hardware event
1526 */ 1512 */
1527static void __perf_counter_read(void *info) 1513static void __perf_event_read(void *info)
1528{ 1514{
1529 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1515 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1530 struct perf_counter *counter = info; 1516 struct perf_event *event = info;
1531 struct perf_counter_context *ctx = counter->ctx; 1517 struct perf_event_context *ctx = event->ctx;
1532 unsigned long flags; 1518 unsigned long flags;
1533 1519
1534 /* 1520 /*
1535 * If this is a task context, we need to check whether it is 1521 * If this is a task context, we need to check whether it is
1536 * the current task context of this cpu. If not it has been 1522 * the current task context of this cpu. If not it has been
1537 * scheduled out before the smp call arrived. In that case 1523 * scheduled out before the smp call arrived. In that case
1538 * counter->count would have been updated to a recent sample 1524 * event->count would have been updated to a recent sample
1539 * when the counter was scheduled out. 1525 * when the event was scheduled out.
1540 */ 1526 */
1541 if (ctx->task && cpuctx->task_ctx != ctx) 1527 if (ctx->task && cpuctx->task_ctx != ctx)
1542 return; 1528 return;
@@ -1544,56 +1530,56 @@ static void __perf_counter_read(void *info)
1544 local_irq_save(flags); 1530 local_irq_save(flags);
1545 if (ctx->is_active) 1531 if (ctx->is_active)
1546 update_context_time(ctx); 1532 update_context_time(ctx);
1547 counter->pmu->read(counter); 1533 event->pmu->read(event);
1548 update_counter_times(counter); 1534 update_event_times(event);
1549 local_irq_restore(flags); 1535 local_irq_restore(flags);
1550} 1536}
1551 1537
1552static u64 perf_counter_read(struct perf_counter *counter) 1538static u64 perf_event_read(struct perf_event *event)
1553{ 1539{
1554 /* 1540 /*
1555 * If counter is enabled and currently active on a CPU, update the 1541 * If event is enabled and currently active on a CPU, update the
1556 * value in the counter structure: 1542 * value in the event structure:
1557 */ 1543 */
1558 if (counter->state == PERF_COUNTER_STATE_ACTIVE) { 1544 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1559 smp_call_function_single(counter->oncpu, 1545 smp_call_function_single(event->oncpu,
1560 __perf_counter_read, counter, 1); 1546 __perf_event_read, event, 1);
1561 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 1547 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1562 update_counter_times(counter); 1548 update_event_times(event);
1563 } 1549 }
1564 1550
1565 return atomic64_read(&counter->count); 1551 return atomic64_read(&event->count);
1566} 1552}
1567 1553
1568/* 1554/*
1569 * Initialize the perf_counter context in a task_struct: 1555 * Initialize the perf_event context in a task_struct:
1570 */ 1556 */
1571static void 1557static void
1572__perf_counter_init_context(struct perf_counter_context *ctx, 1558__perf_event_init_context(struct perf_event_context *ctx,
1573 struct task_struct *task) 1559 struct task_struct *task)
1574{ 1560{
1575 memset(ctx, 0, sizeof(*ctx)); 1561 memset(ctx, 0, sizeof(*ctx));
1576 spin_lock_init(&ctx->lock); 1562 spin_lock_init(&ctx->lock);
1577 mutex_init(&ctx->mutex); 1563 mutex_init(&ctx->mutex);
1578 INIT_LIST_HEAD(&ctx->counter_list); 1564 INIT_LIST_HEAD(&ctx->group_list);
1579 INIT_LIST_HEAD(&ctx->event_list); 1565 INIT_LIST_HEAD(&ctx->event_list);
1580 atomic_set(&ctx->refcount, 1); 1566 atomic_set(&ctx->refcount, 1);
1581 ctx->task = task; 1567 ctx->task = task;
1582} 1568}
1583 1569
1584static struct perf_counter_context *find_get_context(pid_t pid, int cpu) 1570static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1585{ 1571{
1586 struct perf_counter_context *ctx; 1572 struct perf_event_context *ctx;
1587 struct perf_cpu_context *cpuctx; 1573 struct perf_cpu_context *cpuctx;
1588 struct task_struct *task; 1574 struct task_struct *task;
1589 unsigned long flags; 1575 unsigned long flags;
1590 int err; 1576 int err;
1591 1577
1592 /* 1578 /*
1593 * If cpu is not a wildcard then this is a percpu counter: 1579 * If cpu is not a wildcard then this is a percpu event:
1594 */ 1580 */
1595 if (cpu != -1) { 1581 if (cpu != -1) {
1596 /* Must be root to operate on a CPU counter: */ 1582 /* Must be root to operate on a CPU event: */
1597 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 1583 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1598 return ERR_PTR(-EACCES); 1584 return ERR_PTR(-EACCES);
1599 1585
@@ -1601,7 +1587,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1601 return ERR_PTR(-EINVAL); 1587 return ERR_PTR(-EINVAL);
1602 1588
1603 /* 1589 /*
1604 * We could be clever and allow to attach a counter to an 1590 * We could be clever and allow to attach an event to an
1605 * offline CPU and activate it when the CPU comes up, but 1591 * offline CPU and activate it when the CPU comes up, but
1606 * that's for later. 1592 * that's for later.
1607 */ 1593 */
@@ -1628,7 +1614,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1628 return ERR_PTR(-ESRCH); 1614 return ERR_PTR(-ESRCH);
1629 1615
1630 /* 1616 /*
1631 * Can't attach counters to a dying task. 1617 * Can't attach events to a dying task.
1632 */ 1618 */
1633 err = -ESRCH; 1619 err = -ESRCH;
1634 if (task->flags & PF_EXITING) 1620 if (task->flags & PF_EXITING)
@@ -1647,13 +1633,13 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1647 } 1633 }
1648 1634
1649 if (!ctx) { 1635 if (!ctx) {
1650 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); 1636 ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
1651 err = -ENOMEM; 1637 err = -ENOMEM;
1652 if (!ctx) 1638 if (!ctx)
1653 goto errout; 1639 goto errout;
1654 __perf_counter_init_context(ctx, task); 1640 __perf_event_init_context(ctx, task);
1655 get_ctx(ctx); 1641 get_ctx(ctx);
1656 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) { 1642 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1657 /* 1643 /*
1658 * We raced with some other task; use 1644 * We raced with some other task; use
1659 * the context they set. 1645 * the context they set.
@@ -1672,42 +1658,42 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1672 return ERR_PTR(err); 1658 return ERR_PTR(err);
1673} 1659}
1674 1660
1675static void free_counter_rcu(struct rcu_head *head) 1661static void free_event_rcu(struct rcu_head *head)
1676{ 1662{
1677 struct perf_counter *counter; 1663 struct perf_event *event;
1678 1664
1679 counter = container_of(head, struct perf_counter, rcu_head); 1665 event = container_of(head, struct perf_event, rcu_head);
1680 if (counter->ns) 1666 if (event->ns)
1681 put_pid_ns(counter->ns); 1667 put_pid_ns(event->ns);
1682 kfree(counter); 1668 kfree(event);
1683} 1669}
1684 1670
1685static void perf_pending_sync(struct perf_counter *counter); 1671static void perf_pending_sync(struct perf_event *event);
1686 1672
1687static void free_counter(struct perf_counter *counter) 1673static void free_event(struct perf_event *event)
1688{ 1674{
1689 perf_pending_sync(counter); 1675 perf_pending_sync(event);
1690 1676
1691 if (!counter->parent) { 1677 if (!event->parent) {
1692 atomic_dec(&nr_counters); 1678 atomic_dec(&nr_events);
1693 if (counter->attr.mmap) 1679 if (event->attr.mmap)
1694 atomic_dec(&nr_mmap_counters); 1680 atomic_dec(&nr_mmap_events);
1695 if (counter->attr.comm) 1681 if (event->attr.comm)
1696 atomic_dec(&nr_comm_counters); 1682 atomic_dec(&nr_comm_events);
1697 if (counter->attr.task) 1683 if (event->attr.task)
1698 atomic_dec(&nr_task_counters); 1684 atomic_dec(&nr_task_events);
1699 } 1685 }
1700 1686
1701 if (counter->output) { 1687 if (event->output) {
1702 fput(counter->output->filp); 1688 fput(event->output->filp);
1703 counter->output = NULL; 1689 event->output = NULL;
1704 } 1690 }
1705 1691
1706 if (counter->destroy) 1692 if (event->destroy)
1707 counter->destroy(counter); 1693 event->destroy(event);
1708 1694
1709 put_ctx(counter->ctx); 1695 put_ctx(event->ctx);
1710 call_rcu(&counter->rcu_head, free_counter_rcu); 1696 call_rcu(&event->rcu_head, free_event_rcu);
1711} 1697}
1712 1698
1713/* 1699/*
@@ -1715,43 +1701,43 @@ static void free_counter(struct perf_counter *counter)
1715 */ 1701 */
1716static int perf_release(struct inode *inode, struct file *file) 1702static int perf_release(struct inode *inode, struct file *file)
1717{ 1703{
1718 struct perf_counter *counter = file->private_data; 1704 struct perf_event *event = file->private_data;
1719 struct perf_counter_context *ctx = counter->ctx; 1705 struct perf_event_context *ctx = event->ctx;
1720 1706
1721 file->private_data = NULL; 1707 file->private_data = NULL;
1722 1708
1723 WARN_ON_ONCE(ctx->parent_ctx); 1709 WARN_ON_ONCE(ctx->parent_ctx);
1724 mutex_lock(&ctx->mutex); 1710 mutex_lock(&ctx->mutex);
1725 perf_counter_remove_from_context(counter); 1711 perf_event_remove_from_context(event);
1726 mutex_unlock(&ctx->mutex); 1712 mutex_unlock(&ctx->mutex);
1727 1713
1728 mutex_lock(&counter->owner->perf_counter_mutex); 1714 mutex_lock(&event->owner->perf_event_mutex);
1729 list_del_init(&counter->owner_entry); 1715 list_del_init(&event->owner_entry);
1730 mutex_unlock(&counter->owner->perf_counter_mutex); 1716 mutex_unlock(&event->owner->perf_event_mutex);
1731 put_task_struct(counter->owner); 1717 put_task_struct(event->owner);
1732 1718
1733 free_counter(counter); 1719 free_event(event);
1734 1720
1735 return 0; 1721 return 0;
1736} 1722}
1737 1723
1738static int perf_counter_read_size(struct perf_counter *counter) 1724static int perf_event_read_size(struct perf_event *event)
1739{ 1725{
1740 int entry = sizeof(u64); /* value */ 1726 int entry = sizeof(u64); /* value */
1741 int size = 0; 1727 int size = 0;
1742 int nr = 1; 1728 int nr = 1;
1743 1729
1744 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1730 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1745 size += sizeof(u64); 1731 size += sizeof(u64);
1746 1732
1747 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1733 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1748 size += sizeof(u64); 1734 size += sizeof(u64);
1749 1735
1750 if (counter->attr.read_format & PERF_FORMAT_ID) 1736 if (event->attr.read_format & PERF_FORMAT_ID)
1751 entry += sizeof(u64); 1737 entry += sizeof(u64);
1752 1738
1753 if (counter->attr.read_format & PERF_FORMAT_GROUP) { 1739 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1754 nr += counter->group_leader->nr_siblings; 1740 nr += event->group_leader->nr_siblings;
1755 size += sizeof(u64); 1741 size += sizeof(u64);
1756 } 1742 }
1757 1743
@@ -1760,27 +1746,27 @@ static int perf_counter_read_size(struct perf_counter *counter)
1760 return size; 1746 return size;
1761} 1747}
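
A worked example of the sizing above (illustrative numbers, not part of the patch): with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID and a group leader that has two siblings, each per-event entry is 16 bytes (value + id), nr is 3, and the group flag contributes another 8 bytes, so a read() needs room for at least 8 + 3 * 16 = 56 bytes; perf_read_hw() further down returns -ENOSPC for anything smaller.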
1762 1748
1763static u64 perf_counter_read_value(struct perf_counter *counter) 1749static u64 perf_event_read_value(struct perf_event *event)
1764{ 1750{
1765 struct perf_counter *child; 1751 struct perf_event *child;
1766 u64 total = 0; 1752 u64 total = 0;
1767 1753
1768 total += perf_counter_read(counter); 1754 total += perf_event_read(event);
1769 list_for_each_entry(child, &counter->child_list, child_list) 1755 list_for_each_entry(child, &event->child_list, child_list)
1770 total += perf_counter_read(child); 1756 total += perf_event_read(child);
1771 1757
1772 return total; 1758 return total;
1773} 1759}
1774 1760
1775static int perf_counter_read_entry(struct perf_counter *counter, 1761static int perf_event_read_entry(struct perf_event *event,
1776 u64 read_format, char __user *buf) 1762 u64 read_format, char __user *buf)
1777{ 1763{
1778 int n = 0, count = 0; 1764 int n = 0, count = 0;
1779 u64 values[2]; 1765 u64 values[2];
1780 1766
1781 values[n++] = perf_counter_read_value(counter); 1767 values[n++] = perf_event_read_value(event);
1782 if (read_format & PERF_FORMAT_ID) 1768 if (read_format & PERF_FORMAT_ID)
1783 values[n++] = primary_counter_id(counter); 1769 values[n++] = primary_event_id(event);
1784 1770
1785 count = n * sizeof(u64); 1771 count = n * sizeof(u64);
1786 1772
@@ -1790,10 +1776,10 @@ static int perf_counter_read_entry(struct perf_counter *counter,
1790 return count; 1776 return count;
1791} 1777}
1792 1778
1793static int perf_counter_read_group(struct perf_counter *counter, 1779static int perf_event_read_group(struct perf_event *event,
1794 u64 read_format, char __user *buf) 1780 u64 read_format, char __user *buf)
1795{ 1781{
1796 struct perf_counter *leader = counter->group_leader, *sub; 1782 struct perf_event *leader = event->group_leader, *sub;
1797 int n = 0, size = 0, err = -EFAULT; 1783 int n = 0, size = 0, err = -EFAULT;
1798 u64 values[3]; 1784 u64 values[3];
1799 1785
@@ -1812,14 +1798,14 @@ static int perf_counter_read_group(struct perf_counter *counter,
1812 if (copy_to_user(buf, values, size)) 1798 if (copy_to_user(buf, values, size))
1813 return -EFAULT; 1799 return -EFAULT;
1814 1800
1815 err = perf_counter_read_entry(leader, read_format, buf + size); 1801 err = perf_event_read_entry(leader, read_format, buf + size);
1816 if (err < 0) 1802 if (err < 0)
1817 return err; 1803 return err;
1818 1804
1819 size += err; 1805 size += err;
1820 1806
1821 list_for_each_entry(sub, &leader->sibling_list, list_entry) { 1807 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1822 err = perf_counter_read_entry(sub, read_format, 1808 err = perf_event_read_entry(sub, read_format,
1823 buf + size); 1809 buf + size);
1824 if (err < 0) 1810 if (err < 0)
1825 return err; 1811 return err;
@@ -1830,23 +1816,23 @@ static int perf_counter_read_group(struct perf_counter *counter,
1830 return size; 1816 return size;
1831} 1817}
1832 1818
1833static int perf_counter_read_one(struct perf_counter *counter, 1819static int perf_event_read_one(struct perf_event *event,
1834 u64 read_format, char __user *buf) 1820 u64 read_format, char __user *buf)
1835{ 1821{
1836 u64 values[4]; 1822 u64 values[4];
1837 int n = 0; 1823 int n = 0;
1838 1824
1839 values[n++] = perf_counter_read_value(counter); 1825 values[n++] = perf_event_read_value(event);
1840 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1826 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1841 values[n++] = counter->total_time_enabled + 1827 values[n++] = event->total_time_enabled +
1842 atomic64_read(&counter->child_total_time_enabled); 1828 atomic64_read(&event->child_total_time_enabled);
1843 } 1829 }
1844 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1830 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1845 values[n++] = counter->total_time_running + 1831 values[n++] = event->total_time_running +
1846 atomic64_read(&counter->child_total_time_running); 1832 atomic64_read(&event->child_total_time_running);
1847 } 1833 }
1848 if (read_format & PERF_FORMAT_ID) 1834 if (read_format & PERF_FORMAT_ID)
1849 values[n++] = primary_counter_id(counter); 1835 values[n++] = primary_event_id(event);
1850 1836
1851 if (copy_to_user(buf, values, n * sizeof(u64))) 1837 if (copy_to_user(buf, values, n * sizeof(u64)))
1852 return -EFAULT; 1838 return -EFAULT;
@@ -1855,32 +1841,32 @@ static int perf_counter_read_one(struct perf_counter *counter,
1855} 1841}
1856 1842
1857/* 1843/*
1858 * Read the performance counter - simple non blocking version for now 1844 * Read the performance event - simple non-blocking version for now
1859 */ 1845 */
1860static ssize_t 1846static ssize_t
1861perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) 1847perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
1862{ 1848{
1863 u64 read_format = counter->attr.read_format; 1849 u64 read_format = event->attr.read_format;
1864 int ret; 1850 int ret;
1865 1851
1866 /* 1852 /*
1867 * Return end-of-file for a read on a counter that is in 1853 * Return end-of-file for a read on an event that is in
1868 * error state (i.e. because it was pinned but it couldn't be 1854 * error state (i.e. because it was pinned but it couldn't be
1869 * scheduled on to the CPU at some point). 1855 * scheduled on to the CPU at some point).
1870 */ 1856 */
1871 if (counter->state == PERF_COUNTER_STATE_ERROR) 1857 if (event->state == PERF_EVENT_STATE_ERROR)
1872 return 0; 1858 return 0;
1873 1859
1874 if (count < perf_counter_read_size(counter)) 1860 if (count < perf_event_read_size(event))
1875 return -ENOSPC; 1861 return -ENOSPC;
1876 1862
1877 WARN_ON_ONCE(counter->ctx->parent_ctx); 1863 WARN_ON_ONCE(event->ctx->parent_ctx);
1878 mutex_lock(&counter->child_mutex); 1864 mutex_lock(&event->child_mutex);
1879 if (read_format & PERF_FORMAT_GROUP) 1865 if (read_format & PERF_FORMAT_GROUP)
1880 ret = perf_counter_read_group(counter, read_format, buf); 1866 ret = perf_event_read_group(event, read_format, buf);
1881 else 1867 else
1882 ret = perf_counter_read_one(counter, read_format, buf); 1868 ret = perf_event_read_one(event, read_format, buf);
1883 mutex_unlock(&counter->child_mutex); 1869 mutex_unlock(&event->child_mutex);
1884 1870
1885 return ret; 1871 return ret;
1886} 1872}
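
For orientation, a hedged userspace sketch of the single-event layout that perf_event_read_one() produces; it is not part of this patch, the hardware event choice and the lack of error handling are purely illustrative, and it assumes the post-rename <linux/perf_event.h> plus the __NR_perf_event_open syscall number:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t buf[3];	/* value, time_enabled, time_running */
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	/* measure the calling task on any CPU, no group leader */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, buf, sizeof(buf)) == sizeof(buf))
		printf("%llu instructions, running %llu of %llu ns\n",
		       (unsigned long long)buf[0],
		       (unsigned long long)buf[2],
		       (unsigned long long)buf[1]);
	close(fd);
	return 0;
}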
@@ -1888,79 +1874,79 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1888static ssize_t 1874static ssize_t
1889perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 1875perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1890{ 1876{
1891 struct perf_counter *counter = file->private_data; 1877 struct perf_event *event = file->private_data;
1892 1878
1893 return perf_read_hw(counter, buf, count); 1879 return perf_read_hw(event, buf, count);
1894} 1880}
1895 1881
1896static unsigned int perf_poll(struct file *file, poll_table *wait) 1882static unsigned int perf_poll(struct file *file, poll_table *wait)
1897{ 1883{
1898 struct perf_counter *counter = file->private_data; 1884 struct perf_event *event = file->private_data;
1899 struct perf_mmap_data *data; 1885 struct perf_mmap_data *data;
1900 unsigned int events = POLL_HUP; 1886 unsigned int events = POLL_HUP;
1901 1887
1902 rcu_read_lock(); 1888 rcu_read_lock();
1903 data = rcu_dereference(counter->data); 1889 data = rcu_dereference(event->data);
1904 if (data) 1890 if (data)
1905 events = atomic_xchg(&data->poll, 0); 1891 events = atomic_xchg(&data->poll, 0);
1906 rcu_read_unlock(); 1892 rcu_read_unlock();
1907 1893
1908 poll_wait(file, &counter->waitq, wait); 1894 poll_wait(file, &event->waitq, wait);
1909 1895
1910 return events; 1896 return events;
1911} 1897}
1912 1898
1913static void perf_counter_reset(struct perf_counter *counter) 1899static void perf_event_reset(struct perf_event *event)
1914{ 1900{
1915 (void)perf_counter_read(counter); 1901 (void)perf_event_read(event);
1916 atomic64_set(&counter->count, 0); 1902 atomic64_set(&event->count, 0);
1917 perf_counter_update_userpage(counter); 1903 perf_event_update_userpage(event);
1918} 1904}
1919 1905
1920/* 1906/*
1921 * Holding the top-level counter's child_mutex means that any 1907 * Holding the top-level event's child_mutex means that any
1922 * descendant process that has inherited this counter will block 1908 * descendant process that has inherited this event will block
1923 * in sync_child_counter if it goes to exit, thus satisfying the 1909 * in sync_child_event if it goes to exit, thus satisfying the
1924 * task existence requirements of perf_counter_enable/disable. 1910 * task existence requirements of perf_event_enable/disable.
1925 */ 1911 */
1926static void perf_counter_for_each_child(struct perf_counter *counter, 1912static void perf_event_for_each_child(struct perf_event *event,
1927 void (*func)(struct perf_counter *)) 1913 void (*func)(struct perf_event *))
1928{ 1914{
1929 struct perf_counter *child; 1915 struct perf_event *child;
1930 1916
1931 WARN_ON_ONCE(counter->ctx->parent_ctx); 1917 WARN_ON_ONCE(event->ctx->parent_ctx);
1932 mutex_lock(&counter->child_mutex); 1918 mutex_lock(&event->child_mutex);
1933 func(counter); 1919 func(event);
1934 list_for_each_entry(child, &counter->child_list, child_list) 1920 list_for_each_entry(child, &event->child_list, child_list)
1935 func(child); 1921 func(child);
1936 mutex_unlock(&counter->child_mutex); 1922 mutex_unlock(&event->child_mutex);
1937} 1923}
1938 1924
1939static void perf_counter_for_each(struct perf_counter *counter, 1925static void perf_event_for_each(struct perf_event *event,
1940 void (*func)(struct perf_counter *)) 1926 void (*func)(struct perf_event *))
1941{ 1927{
1942 struct perf_counter_context *ctx = counter->ctx; 1928 struct perf_event_context *ctx = event->ctx;
1943 struct perf_counter *sibling; 1929 struct perf_event *sibling;
1944 1930
1945 WARN_ON_ONCE(ctx->parent_ctx); 1931 WARN_ON_ONCE(ctx->parent_ctx);
1946 mutex_lock(&ctx->mutex); 1932 mutex_lock(&ctx->mutex);
1947 counter = counter->group_leader; 1933 event = event->group_leader;
1948 1934
1949 perf_counter_for_each_child(counter, func); 1935 perf_event_for_each_child(event, func);
1950 func(counter); 1936 func(event);
1951 list_for_each_entry(sibling, &counter->sibling_list, list_entry) 1937 list_for_each_entry(sibling, &event->sibling_list, group_entry)
1952 perf_counter_for_each_child(counter, func); 1938 perf_event_for_each_child(event, func);
1953 mutex_unlock(&ctx->mutex); 1939 mutex_unlock(&ctx->mutex);
1954} 1940}
1955 1941
1956static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) 1942static int perf_event_period(struct perf_event *event, u64 __user *arg)
1957{ 1943{
1958 struct perf_counter_context *ctx = counter->ctx; 1944 struct perf_event_context *ctx = event->ctx;
1959 unsigned long size; 1945 unsigned long size;
1960 int ret = 0; 1946 int ret = 0;
1961 u64 value; 1947 u64 value;
1962 1948
1963 if (!counter->attr.sample_period) 1949 if (!event->attr.sample_period)
1964 return -EINVAL; 1950 return -EINVAL;
1965 1951
1966 size = copy_from_user(&value, arg, sizeof(value)); 1952 size = copy_from_user(&value, arg, sizeof(value));
@@ -1971,16 +1957,16 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1971 return -EINVAL; 1957 return -EINVAL;
1972 1958
1973 spin_lock_irq(&ctx->lock); 1959 spin_lock_irq(&ctx->lock);
1974 if (counter->attr.freq) { 1960 if (event->attr.freq) {
1975 if (value > sysctl_perf_counter_sample_rate) { 1961 if (value > sysctl_perf_event_sample_rate) {
1976 ret = -EINVAL; 1962 ret = -EINVAL;
1977 goto unlock; 1963 goto unlock;
1978 } 1964 }
1979 1965
1980 counter->attr.sample_freq = value; 1966 event->attr.sample_freq = value;
1981 } else { 1967 } else {
1982 counter->attr.sample_period = value; 1968 event->attr.sample_period = value;
1983 counter->hw.sample_period = value; 1969 event->hw.sample_period = value;
1984 } 1970 }
1985unlock: 1971unlock:
1986 spin_unlock_irq(&ctx->lock); 1972 spin_unlock_irq(&ctx->lock);
@@ -1988,80 +1974,80 @@ unlock:
1988 return ret; 1974 return ret;
1989} 1975}
1990 1976
1991int perf_counter_set_output(struct perf_counter *counter, int output_fd); 1977int perf_event_set_output(struct perf_event *event, int output_fd);
1992 1978
1993static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1979static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1994{ 1980{
1995 struct perf_counter *counter = file->private_data; 1981 struct perf_event *event = file->private_data;
1996 void (*func)(struct perf_counter *); 1982 void (*func)(struct perf_event *);
1997 u32 flags = arg; 1983 u32 flags = arg;
1998 1984
1999 switch (cmd) { 1985 switch (cmd) {
2000 case PERF_COUNTER_IOC_ENABLE: 1986 case PERF_EVENT_IOC_ENABLE:
2001 func = perf_counter_enable; 1987 func = perf_event_enable;
2002 break; 1988 break;
2003 case PERF_COUNTER_IOC_DISABLE: 1989 case PERF_EVENT_IOC_DISABLE:
2004 func = perf_counter_disable; 1990 func = perf_event_disable;
2005 break; 1991 break;
2006 case PERF_COUNTER_IOC_RESET: 1992 case PERF_EVENT_IOC_RESET:
2007 func = perf_counter_reset; 1993 func = perf_event_reset;
2008 break; 1994 break;
2009 1995
2010 case PERF_COUNTER_IOC_REFRESH: 1996 case PERF_EVENT_IOC_REFRESH:
2011 return perf_counter_refresh(counter, arg); 1997 return perf_event_refresh(event, arg);
2012 1998
2013 case PERF_COUNTER_IOC_PERIOD: 1999 case PERF_EVENT_IOC_PERIOD:
2014 return perf_counter_period(counter, (u64 __user *)arg); 2000 return perf_event_period(event, (u64 __user *)arg);
2015 2001
2016 case PERF_COUNTER_IOC_SET_OUTPUT: 2002 case PERF_EVENT_IOC_SET_OUTPUT:
2017 return perf_counter_set_output(counter, arg); 2003 return perf_event_set_output(event, arg);
2018 2004
2019 default: 2005 default:
2020 return -ENOTTY; 2006 return -ENOTTY;
2021 } 2007 }
2022 2008
2023 if (flags & PERF_IOC_FLAG_GROUP) 2009 if (flags & PERF_IOC_FLAG_GROUP)
2024 perf_counter_for_each(counter, func); 2010 perf_event_for_each(event, func);
2025 else 2011 else
2026 perf_counter_for_each_child(counter, func); 2012 perf_event_for_each_child(event, func);
2027 2013
2028 return 0; 2014 return 0;
2029} 2015}
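
To make the flag handling above concrete, a hedged userspace sketch of how a whole group is retuned and restarted; retune_group() and its parameter names are hypothetical, only the PERF_EVENT_IOC_* and PERF_IOC_FLAG_GROUP names come from the interface itself:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* group_fd is assumed to be the group leader's file descriptor */
static int retune_group(int group_fd, uint64_t new_period)
{
	/* PERF_EVENT_IOC_PERIOD takes a pointer to a u64 */
	if (ioctl(group_fd, PERF_EVENT_IOC_PERIOD, &new_period))
		return -1;
	/* PERF_IOC_FLAG_GROUP fans RESET/ENABLE out over all siblings */
	if (ioctl(group_fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP))
		return -1;
	return ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}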
2030 2016
2031int perf_counter_task_enable(void) 2017int perf_event_task_enable(void)
2032{ 2018{
2033 struct perf_counter *counter; 2019 struct perf_event *event;
2034 2020
2035 mutex_lock(&current->perf_counter_mutex); 2021 mutex_lock(&current->perf_event_mutex);
2036 list_for_each_entry(counter, &current->perf_counter_list, owner_entry) 2022 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2037 perf_counter_for_each_child(counter, perf_counter_enable); 2023 perf_event_for_each_child(event, perf_event_enable);
2038 mutex_unlock(&current->perf_counter_mutex); 2024 mutex_unlock(&current->perf_event_mutex);
2039 2025
2040 return 0; 2026 return 0;
2041} 2027}
2042 2028
2043int perf_counter_task_disable(void) 2029int perf_event_task_disable(void)
2044{ 2030{
2045 struct perf_counter *counter; 2031 struct perf_event *event;
2046 2032
2047 mutex_lock(&current->perf_counter_mutex); 2033 mutex_lock(&current->perf_event_mutex);
2048 list_for_each_entry(counter, &current->perf_counter_list, owner_entry) 2034 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2049 perf_counter_for_each_child(counter, perf_counter_disable); 2035 perf_event_for_each_child(event, perf_event_disable);
2050 mutex_unlock(&current->perf_counter_mutex); 2036 mutex_unlock(&current->perf_event_mutex);
2051 2037
2052 return 0; 2038 return 0;
2053} 2039}
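
perf_event_task_enable() and perf_event_task_disable() walk the list of events the calling task created (owner_entry); from userspace they are reached through prctl(). A minimal, hedged sketch, assuming the renamed PR_TASK_PERF_EVENTS_* options from this same series are available in <linux/prctl.h>:

#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* pause every event this task has created */
	prctl(PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
	/* ... code that should not be measured ... */
	prctl(PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0);
	return 0;
}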
2054 2040
2055#ifndef PERF_COUNTER_INDEX_OFFSET 2041#ifndef PERF_EVENT_INDEX_OFFSET
2056# define PERF_COUNTER_INDEX_OFFSET 0 2042# define PERF_EVENT_INDEX_OFFSET 0
2057#endif 2043#endif
2058 2044
2059static int perf_counter_index(struct perf_counter *counter) 2045static int perf_event_index(struct perf_event *event)
2060{ 2046{
2061 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 2047 if (event->state != PERF_EVENT_STATE_ACTIVE)
2062 return 0; 2048 return 0;
2063 2049
2064 return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; 2050 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2065} 2051}
2066 2052
2067/* 2053/*
@@ -2069,13 +2055,13 @@ static int perf_counter_index(struct perf_counter *counter)
2069 * the seqlock logic goes bad. We can not serialize this because the arch 2055 * the seqlock logic goes bad. We can not serialize this because the arch
2070 * code calls this from NMI context. 2056 * code calls this from NMI context.
2071 */ 2057 */
2072void perf_counter_update_userpage(struct perf_counter *counter) 2058void perf_event_update_userpage(struct perf_event *event)
2073{ 2059{
2074 struct perf_counter_mmap_page *userpg; 2060 struct perf_event_mmap_page *userpg;
2075 struct perf_mmap_data *data; 2061 struct perf_mmap_data *data;
2076 2062
2077 rcu_read_lock(); 2063 rcu_read_lock();
2078 data = rcu_dereference(counter->data); 2064 data = rcu_dereference(event->data);
2079 if (!data) 2065 if (!data)
2080 goto unlock; 2066 goto unlock;
2081 2067
@@ -2088,16 +2074,16 @@ void perf_counter_update_userpage(struct perf_counter *counter)
2088 preempt_disable(); 2074 preempt_disable();
2089 ++userpg->lock; 2075 ++userpg->lock;
2090 barrier(); 2076 barrier();
2091 userpg->index = perf_counter_index(counter); 2077 userpg->index = perf_event_index(event);
2092 userpg->offset = atomic64_read(&counter->count); 2078 userpg->offset = atomic64_read(&event->count);
2093 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 2079 if (event->state == PERF_EVENT_STATE_ACTIVE)
2094 userpg->offset -= atomic64_read(&counter->hw.prev_count); 2080 userpg->offset -= atomic64_read(&event->hw.prev_count);
2095 2081
2096 userpg->time_enabled = counter->total_time_enabled + 2082 userpg->time_enabled = event->total_time_enabled +
2097 atomic64_read(&counter->child_total_time_enabled); 2083 atomic64_read(&event->child_total_time_enabled);
2098 2084
2099 userpg->time_running = counter->total_time_running + 2085 userpg->time_running = event->total_time_running +
2100 atomic64_read(&counter->child_total_time_running); 2086 atomic64_read(&event->child_total_time_running);
2101 2087
2102 barrier(); 2088 barrier();
2103 ++userpg->lock; 2089 ++userpg->lock;
@@ -2106,55 +2092,37 @@ unlock:
2106 rcu_read_unlock(); 2092 rcu_read_unlock();
2107} 2093}
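
The comment above implies a matching read side: a self-monitoring task maps the event and spins on the lock field, seqlock-style, until it observes a stable snapshot. A hedged sketch (not from the patch; pmc_read() stands in for the architecture's user-level counter read, e.g. rdpmc on x86):

#include <linux/perf_event.h>
#include <stdint.h>

#define barrier()	__asm__ __volatile__("" ::: "memory")

/* hypothetical wrapper around the raw hardware counter read */
extern uint64_t pmc_read(int idx);

static uint64_t read_self_monitored(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq;
	uint64_t count;

	do {
		seq = pc->lock;			/* snapshot the sequence counter */
		barrier();
		count = pc->offset;		/* kernel-maintained base value */
		if (pc->index)			/* counter live on this CPU? */
			count += pmc_read(pc->index - 1);
		barrier();
	} while (pc->lock != seq);		/* retry if an update raced with us */

	return count;
}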
2108 2094
2109static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2095static unsigned long perf_data_size(struct perf_mmap_data *data)
2110{ 2096{
2111 struct perf_counter *counter = vma->vm_file->private_data; 2097 return data->nr_pages << (PAGE_SHIFT + data->data_order);
2112 struct perf_mmap_data *data; 2098}
2113 int ret = VM_FAULT_SIGBUS;
2114
2115 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2116 if (vmf->pgoff == 0)
2117 ret = 0;
2118 return ret;
2119 }
2120
2121 rcu_read_lock();
2122 data = rcu_dereference(counter->data);
2123 if (!data)
2124 goto unlock;
2125
2126 if (vmf->pgoff == 0) {
2127 vmf->page = virt_to_page(data->user_page);
2128 } else {
2129 int nr = vmf->pgoff - 1;
2130
2131 if ((unsigned)nr > data->nr_pages)
2132 goto unlock;
2133 2099
2134 if (vmf->flags & FAULT_FLAG_WRITE) 2100#ifndef CONFIG_PERF_USE_VMALLOC
2135 goto unlock;
2136 2101
2137 vmf->page = virt_to_page(data->data_pages[nr]); 2102/*
2138 } 2103 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2104 */
2139 2105
2140 get_page(vmf->page); 2106static struct page *
2141 vmf->page->mapping = vma->vm_file->f_mapping; 2107perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2142 vmf->page->index = vmf->pgoff; 2108{
2109 if (pgoff > data->nr_pages)
2110 return NULL;
2143 2111
2144 ret = 0; 2112 if (pgoff == 0)
2145unlock: 2113 return virt_to_page(data->user_page);
2146 rcu_read_unlock();
2147 2114
2148 return ret; 2115 return virt_to_page(data->data_pages[pgoff - 1]);
2149} 2116}
2150 2117
2151static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) 2118static struct perf_mmap_data *
2119perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2152{ 2120{
2153 struct perf_mmap_data *data; 2121 struct perf_mmap_data *data;
2154 unsigned long size; 2122 unsigned long size;
2155 int i; 2123 int i;
2156 2124
2157 WARN_ON(atomic_read(&counter->mmap_count)); 2125 WARN_ON(atomic_read(&event->mmap_count));
2158 2126
2159 size = sizeof(struct perf_mmap_data); 2127 size = sizeof(struct perf_mmap_data);
2160 size += nr_pages * sizeof(void *); 2128 size += nr_pages * sizeof(void *);
@@ -2173,19 +2141,10 @@ static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
2173 goto fail_data_pages; 2141 goto fail_data_pages;
2174 } 2142 }
2175 2143
2144 data->data_order = 0;
2176 data->nr_pages = nr_pages; 2145 data->nr_pages = nr_pages;
2177 atomic_set(&data->lock, -1);
2178
2179 if (counter->attr.watermark) {
2180 data->watermark = min_t(long, PAGE_SIZE * nr_pages,
2181 counter->attr.wakeup_watermark);
2182 }
2183 if (!data->watermark)
2184 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
2185
2186 rcu_assign_pointer(counter->data, data);
2187 2146
2188 return 0; 2147 return data;
2189 2148
2190fail_data_pages: 2149fail_data_pages:
2191 for (i--; i >= 0; i--) 2150 for (i--; i >= 0; i--)
@@ -2197,7 +2156,7 @@ fail_user_page:
2197 kfree(data); 2156 kfree(data);
2198 2157
2199fail: 2158fail:
2200 return -ENOMEM; 2159 return NULL;
2201} 2160}
2202 2161
2203static void perf_mmap_free_page(unsigned long addr) 2162static void perf_mmap_free_page(unsigned long addr)
@@ -2208,53 +2167,195 @@ static void perf_mmap_free_page(unsigned long addr)
2208 __free_page(page); 2167 __free_page(page);
2209} 2168}
2210 2169
2211static void __perf_mmap_data_free(struct rcu_head *rcu_head) 2170static void perf_mmap_data_free(struct perf_mmap_data *data)
2212{ 2171{
2213 struct perf_mmap_data *data;
2214 int i; 2172 int i;
2215 2173
2216 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2217
2218 perf_mmap_free_page((unsigned long)data->user_page); 2174 perf_mmap_free_page((unsigned long)data->user_page);
2219 for (i = 0; i < data->nr_pages; i++) 2175 for (i = 0; i < data->nr_pages; i++)
2220 perf_mmap_free_page((unsigned long)data->data_pages[i]); 2176 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2177}
2178
2179#else
2180
2181/*
2182 * Back perf_mmap() with vmalloc memory.
2183 *
2184 * Required for architectures that have d-cache aliasing issues.
2185 */
2186
2187static struct page *
2188perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2189{
2190 if (pgoff > (1UL << data->data_order))
2191 return NULL;
2192
2193 return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
2194}
2195
2196static void perf_mmap_unmark_page(void *addr)
2197{
2198 struct page *page = vmalloc_to_page(addr);
2221 2199
2200 page->mapping = NULL;
2201}
2202
2203static void perf_mmap_data_free_work(struct work_struct *work)
2204{
2205 struct perf_mmap_data *data;
2206 void *base;
2207 int i, nr;
2208
2209 data = container_of(work, struct perf_mmap_data, work);
2210 nr = 1 << data->data_order;
2211
2212 base = data->user_page;
2213 for (i = 0; i < nr + 1; i++)
2214 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2215
2216 vfree(base);
2217}
2218
2219static void perf_mmap_data_free(struct perf_mmap_data *data)
2220{
2221 schedule_work(&data->work);
2222}
2223
2224static struct perf_mmap_data *
2225perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2226{
2227 struct perf_mmap_data *data;
2228 unsigned long size;
2229 void *all_buf;
2230
2231 WARN_ON(atomic_read(&event->mmap_count));
2232
2233 size = sizeof(struct perf_mmap_data);
2234 size += sizeof(void *);
2235
2236 data = kzalloc(size, GFP_KERNEL);
2237 if (!data)
2238 goto fail;
2239
2240 INIT_WORK(&data->work, perf_mmap_data_free_work);
2241
2242 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2243 if (!all_buf)
2244 goto fail_all_buf;
2245
2246 data->user_page = all_buf;
2247 data->data_pages[0] = all_buf + PAGE_SIZE;
2248 data->data_order = ilog2(nr_pages);
2249 data->nr_pages = 1;
2250
2251 return data;
2252
2253fail_all_buf:
2222 kfree(data); 2254 kfree(data);
2255
2256fail:
2257 return NULL;
2223} 2258}
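
Both allocation paths feed the same perf_data_size() calculation introduced above: the page-backed variant keeps data_order at 0 with nr_pages separate pages, giving nr_pages << PAGE_SHIFT bytes, while the vmalloc variant records a single contiguous area as nr_pages = 1 with data_order = ilog2(nr_pages), giving 1 << (PAGE_SHIFT + data_order) bytes. Since perf_mmap() only accepts power-of-two data page counts, the two work out to the same buffer size; for example, eight 4 KiB data pages yield 32 KiB either way.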
2224 2259
2225static void perf_mmap_data_free(struct perf_counter *counter) 2260#endif
2261
2262static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2226{ 2263{
2227 struct perf_mmap_data *data = counter->data; 2264 struct perf_event *event = vma->vm_file->private_data;
2265 struct perf_mmap_data *data;
2266 int ret = VM_FAULT_SIGBUS;
2228 2267
2229 WARN_ON(atomic_read(&counter->mmap_count)); 2268 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2269 if (vmf->pgoff == 0)
2270 ret = 0;
2271 return ret;
2272 }
2230 2273
2231 rcu_assign_pointer(counter->data, NULL); 2274 rcu_read_lock();
2232 call_rcu(&data->rcu_head, __perf_mmap_data_free); 2275 data = rcu_dereference(event->data);
2276 if (!data)
2277 goto unlock;
2278
2279 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2280 goto unlock;
2281
2282 vmf->page = perf_mmap_to_page(data, vmf->pgoff);
2283 if (!vmf->page)
2284 goto unlock;
2285
2286 get_page(vmf->page);
2287 vmf->page->mapping = vma->vm_file->f_mapping;
2288 vmf->page->index = vmf->pgoff;
2289
2290 ret = 0;
2291unlock:
2292 rcu_read_unlock();
2293
2294 return ret;
2295}
2296
2297static void
2298perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2299{
2300 long max_size = perf_data_size(data);
2301
2302 atomic_set(&data->lock, -1);
2303
2304 if (event->attr.watermark) {
2305 data->watermark = min_t(long, max_size,
2306 event->attr.wakeup_watermark);
2307 }
2308
2309 if (!data->watermark)
2310 data->watermark = max_t(long, PAGE_SIZE, max_size / 2);
2311
2312
2313 rcu_assign_pointer(event->data, data);
2314}
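
A worked example of the watermark default (assuming 4 KiB pages, attr.watermark unset, and the page-backed allocator): with eight data pages max_size is 32 KiB, so data->watermark becomes max(PAGE_SIZE, 32768 / 2) = 16 KiB, i.e. the default wakeup threshold is half the buffer.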
2315
2316static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
2317{
2318 struct perf_mmap_data *data;
2319
2320 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2321 perf_mmap_data_free(data);
2322 kfree(data);
2323}
2324
2325static void perf_mmap_data_release(struct perf_event *event)
2326{
2327 struct perf_mmap_data *data = event->data;
2328
2329 WARN_ON(atomic_read(&event->mmap_count));
2330
2331 rcu_assign_pointer(event->data, NULL);
2332 call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
2233} 2333}
2234 2334
2235static void perf_mmap_open(struct vm_area_struct *vma) 2335static void perf_mmap_open(struct vm_area_struct *vma)
2236{ 2336{
2237 struct perf_counter *counter = vma->vm_file->private_data; 2337 struct perf_event *event = vma->vm_file->private_data;
2238 2338
2239 atomic_inc(&counter->mmap_count); 2339 atomic_inc(&event->mmap_count);
2240} 2340}
2241 2341
2242static void perf_mmap_close(struct vm_area_struct *vma) 2342static void perf_mmap_close(struct vm_area_struct *vma)
2243{ 2343{
2244 struct perf_counter *counter = vma->vm_file->private_data; 2344 struct perf_event *event = vma->vm_file->private_data;
2245 2345
2246 WARN_ON_ONCE(counter->ctx->parent_ctx); 2346 WARN_ON_ONCE(event->ctx->parent_ctx);
2247 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { 2347 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2348 unsigned long size = perf_data_size(event->data);
2248 struct user_struct *user = current_user(); 2349 struct user_struct *user = current_user();
2249 2350
2250 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); 2351 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2251 vma->vm_mm->locked_vm -= counter->data->nr_locked; 2352 vma->vm_mm->locked_vm -= event->data->nr_locked;
2252 perf_mmap_data_free(counter); 2353 perf_mmap_data_release(event);
2253 mutex_unlock(&counter->mmap_mutex); 2354 mutex_unlock(&event->mmap_mutex);
2254 } 2355 }
2255} 2356}
2256 2357
2257static struct vm_operations_struct perf_mmap_vmops = { 2358static const struct vm_operations_struct perf_mmap_vmops = {
2258 .open = perf_mmap_open, 2359 .open = perf_mmap_open,
2259 .close = perf_mmap_close, 2360 .close = perf_mmap_close,
2260 .fault = perf_mmap_fault, 2361 .fault = perf_mmap_fault,
@@ -2263,10 +2364,11 @@ static struct vm_operations_struct perf_mmap_vmops = {
2263 2364
2264static int perf_mmap(struct file *file, struct vm_area_struct *vma) 2365static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2265{ 2366{
2266 struct perf_counter *counter = file->private_data; 2367 struct perf_event *event = file->private_data;
2267 unsigned long user_locked, user_lock_limit; 2368 unsigned long user_locked, user_lock_limit;
2268 struct user_struct *user = current_user(); 2369 struct user_struct *user = current_user();
2269 unsigned long locked, lock_limit; 2370 unsigned long locked, lock_limit;
2371 struct perf_mmap_data *data;
2270 unsigned long vma_size; 2372 unsigned long vma_size;
2271 unsigned long nr_pages; 2373 unsigned long nr_pages;
2272 long user_extra, extra; 2374 long user_extra, extra;
@@ -2291,21 +2393,21 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2291 if (vma->vm_pgoff != 0) 2393 if (vma->vm_pgoff != 0)
2292 return -EINVAL; 2394 return -EINVAL;
2293 2395
2294 WARN_ON_ONCE(counter->ctx->parent_ctx); 2396 WARN_ON_ONCE(event->ctx->parent_ctx);
2295 mutex_lock(&counter->mmap_mutex); 2397 mutex_lock(&event->mmap_mutex);
2296 if (counter->output) { 2398 if (event->output) {
2297 ret = -EINVAL; 2399 ret = -EINVAL;
2298 goto unlock; 2400 goto unlock;
2299 } 2401 }
2300 2402
2301 if (atomic_inc_not_zero(&counter->mmap_count)) { 2403 if (atomic_inc_not_zero(&event->mmap_count)) {
2302 if (nr_pages != counter->data->nr_pages) 2404 if (nr_pages != event->data->nr_pages)
2303 ret = -EINVAL; 2405 ret = -EINVAL;
2304 goto unlock; 2406 goto unlock;
2305 } 2407 }
2306 2408
2307 user_extra = nr_pages + 1; 2409 user_extra = nr_pages + 1;
2308 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); 2410 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2309 2411
2310 /* 2412 /*
2311 * Increase the limit linearly with more CPUs: 2413 * Increase the limit linearly with more CPUs:
@@ -2328,20 +2430,25 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2328 goto unlock; 2430 goto unlock;
2329 } 2431 }
2330 2432
2331 WARN_ON(counter->data); 2433 WARN_ON(event->data);
2332 ret = perf_mmap_data_alloc(counter, nr_pages); 2434
2333 if (ret) 2435 data = perf_mmap_data_alloc(event, nr_pages);
2436 ret = -ENOMEM;
2437 if (!data)
2334 goto unlock; 2438 goto unlock;
2335 2439
2336 atomic_set(&counter->mmap_count, 1); 2440 ret = 0;
2441 perf_mmap_data_init(event, data);
2442
2443 atomic_set(&event->mmap_count, 1);
2337 atomic_long_add(user_extra, &user->locked_vm); 2444 atomic_long_add(user_extra, &user->locked_vm);
2338 vma->vm_mm->locked_vm += extra; 2445 vma->vm_mm->locked_vm += extra;
2339 counter->data->nr_locked = extra; 2446 event->data->nr_locked = extra;
2340 if (vma->vm_flags & VM_WRITE) 2447 if (vma->vm_flags & VM_WRITE)
2341 counter->data->writable = 1; 2448 event->data->writable = 1;
2342 2449
2343unlock: 2450unlock:
2344 mutex_unlock(&counter->mmap_mutex); 2451 mutex_unlock(&event->mmap_mutex);
2345 2452
2346 vma->vm_flags |= VM_RESERVED; 2453 vma->vm_flags |= VM_RESERVED;
2347 vma->vm_ops = &perf_mmap_vmops; 2454 vma->vm_ops = &perf_mmap_vmops;
@@ -2352,11 +2459,11 @@ unlock:
2352static int perf_fasync(int fd, struct file *filp, int on) 2459static int perf_fasync(int fd, struct file *filp, int on)
2353{ 2460{
2354 struct inode *inode = filp->f_path.dentry->d_inode; 2461 struct inode *inode = filp->f_path.dentry->d_inode;
2355 struct perf_counter *counter = filp->private_data; 2462 struct perf_event *event = filp->private_data;
2356 int retval; 2463 int retval;
2357 2464
2358 mutex_lock(&inode->i_mutex); 2465 mutex_lock(&inode->i_mutex);
2359 retval = fasync_helper(fd, filp, on, &counter->fasync); 2466 retval = fasync_helper(fd, filp, on, &event->fasync);
2360 mutex_unlock(&inode->i_mutex); 2467 mutex_unlock(&inode->i_mutex);
2361 2468
2362 if (retval < 0) 2469 if (retval < 0)
@@ -2376,19 +2483,19 @@ static const struct file_operations perf_fops = {
2376}; 2483};
2377 2484
2378/* 2485/*
2379 * Perf counter wakeup 2486 * Perf event wakeup
2380 * 2487 *
2381 * If there's data, ensure we set the poll() state and publish everything 2488 * If there's data, ensure we set the poll() state and publish everything
2382 * to user-space before waking everybody up. 2489 * to user-space before waking everybody up.
2383 */ 2490 */
2384 2491
2385void perf_counter_wakeup(struct perf_counter *counter) 2492void perf_event_wakeup(struct perf_event *event)
2386{ 2493{
2387 wake_up_all(&counter->waitq); 2494 wake_up_all(&event->waitq);
2388 2495
2389 if (counter->pending_kill) { 2496 if (event->pending_kill) {
2390 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill); 2497 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2391 counter->pending_kill = 0; 2498 event->pending_kill = 0;
2392 } 2499 }
2393} 2500}
2394 2501
@@ -2401,19 +2508,19 @@ void perf_counter_wakeup(struct perf_counter *counter)
2401 * single linked list and use cmpxchg() to add entries lockless. 2508 * single linked list and use cmpxchg() to add entries lockless.
2402 */ 2509 */
2403 2510
2404static void perf_pending_counter(struct perf_pending_entry *entry) 2511static void perf_pending_event(struct perf_pending_entry *entry)
2405{ 2512{
2406 struct perf_counter *counter = container_of(entry, 2513 struct perf_event *event = container_of(entry,
2407 struct perf_counter, pending); 2514 struct perf_event, pending);
2408 2515
2409 if (counter->pending_disable) { 2516 if (event->pending_disable) {
2410 counter->pending_disable = 0; 2517 event->pending_disable = 0;
2411 __perf_counter_disable(counter); 2518 __perf_event_disable(event);
2412 } 2519 }
2413 2520
2414 if (counter->pending_wakeup) { 2521 if (event->pending_wakeup) {
2415 counter->pending_wakeup = 0; 2522 event->pending_wakeup = 0;
2416 perf_counter_wakeup(counter); 2523 perf_event_wakeup(event);
2417 } 2524 }
2418} 2525}
2419 2526
@@ -2439,7 +2546,7 @@ static void perf_pending_queue(struct perf_pending_entry *entry,
2439 entry->next = *head; 2546 entry->next = *head;
2440 } while (cmpxchg(head, entry->next, entry) != entry->next); 2547 } while (cmpxchg(head, entry->next, entry) != entry->next);
2441 2548
2442 set_perf_counter_pending(); 2549 set_perf_event_pending();
2443 2550
2444 put_cpu_var(perf_pending_head); 2551 put_cpu_var(perf_pending_head);
2445} 2552}
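
Because this path must also work from NMI context it cannot take a lock; the pattern is a cmpxchg()-based push onto a per-CPU singly linked list. A simplified, hedged restatement of just that push (kernel-style sketch, not a second copy of the code above):

static void pending_push(struct perf_pending_entry **head,
			 struct perf_pending_entry *entry)
{
	do {
		entry->next = *head;	/* link to the current head */
	} while (cmpxchg(head, entry->next, entry) != entry->next);
	/* an NMI landing between the load and the cmpxchg only forces a retry */
}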
@@ -2472,7 +2579,7 @@ static int __perf_pending_run(void)
2472 return nr; 2579 return nr;
2473} 2580}
2474 2581
2475static inline int perf_not_pending(struct perf_counter *counter) 2582static inline int perf_not_pending(struct perf_event *event)
2476{ 2583{
2477 /* 2584 /*
2478 * If we flush on whatever cpu we run, there is a chance we don't 2585 * If we flush on whatever cpu we run, there is a chance we don't
@@ -2487,15 +2594,15 @@ static inline int perf_not_pending(struct perf_counter *counter)
2487 * so that we do not miss the wakeup. -- see perf_pending_handle() 2594 * so that we do not miss the wakeup. -- see perf_pending_handle()
2488 */ 2595 */
2489 smp_rmb(); 2596 smp_rmb();
2490 return counter->pending.next == NULL; 2597 return event->pending.next == NULL;
2491} 2598}
2492 2599
2493static void perf_pending_sync(struct perf_counter *counter) 2600static void perf_pending_sync(struct perf_event *event)
2494{ 2601{
2495 wait_event(counter->waitq, perf_not_pending(counter)); 2602 wait_event(event->waitq, perf_not_pending(event));
2496} 2603}
2497 2604
2498void perf_counter_do_pending(void) 2605void perf_event_do_pending(void)
2499{ 2606{
2500 __perf_pending_run(); 2607 __perf_pending_run();
2501} 2608}
@@ -2520,7 +2627,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2520 if (!data->writable) 2627 if (!data->writable)
2521 return true; 2628 return true;
2522 2629
2523 mask = (data->nr_pages << PAGE_SHIFT) - 1; 2630 mask = perf_data_size(data) - 1;
2524 2631
2525 offset = (offset - tail) & mask; 2632 offset = (offset - tail) & mask;
2526 head = (head - tail) & mask; 2633 head = (head - tail) & mask;
@@ -2536,25 +2643,25 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
2536 atomic_set(&handle->data->poll, POLL_IN); 2643 atomic_set(&handle->data->poll, POLL_IN);
2537 2644
2538 if (handle->nmi) { 2645 if (handle->nmi) {
2539 handle->counter->pending_wakeup = 1; 2646 handle->event->pending_wakeup = 1;
2540 perf_pending_queue(&handle->counter->pending, 2647 perf_pending_queue(&handle->event->pending,
2541 perf_pending_counter); 2648 perf_pending_event);
2542 } else 2649 } else
2543 perf_counter_wakeup(handle->counter); 2650 perf_event_wakeup(handle->event);
2544} 2651}
2545 2652
2546/* 2653/*
2547 * Curious locking construct. 2654 * Curious locking construct.
2548 * 2655 *
2549 * We need to ensure a later event doesn't publish a head when a former 2656 * We need to ensure a later event_id doesn't publish a head when a former
2550 * event isn't done writing. However since we need to deal with NMIs we 2657 * event_id isn't done writing. However since we need to deal with NMIs we
2551 * cannot fully serialize things. 2658 * cannot fully serialize things.
2552 * 2659 *
2553 * What we do is serialize between CPUs so we only have to deal with NMI 2660 * What we do is serialize between CPUs so we only have to deal with NMI
2554 * nesting on a single CPU. 2661 * nesting on a single CPU.
2555 * 2662 *
2556 * We only publish the head (and generate a wakeup) when the outer-most 2663 * We only publish the head (and generate a wakeup) when the outer-most
2557 * event completes. 2664 * event_id completes.
2558 */ 2665 */
2559static void perf_output_lock(struct perf_output_handle *handle) 2666static void perf_output_lock(struct perf_output_handle *handle)
2560{ 2667{
@@ -2625,7 +2732,7 @@ void perf_output_copy(struct perf_output_handle *handle,
2625 const void *buf, unsigned int len) 2732 const void *buf, unsigned int len)
2626{ 2733{
2627 unsigned int pages_mask; 2734 unsigned int pages_mask;
2628 unsigned int offset; 2735 unsigned long offset;
2629 unsigned int size; 2736 unsigned int size;
2630 void **pages; 2737 void **pages;
2631 2738
@@ -2634,12 +2741,14 @@ void perf_output_copy(struct perf_output_handle *handle,
2634 pages = handle->data->data_pages; 2741 pages = handle->data->data_pages;
2635 2742
2636 do { 2743 do {
2637 unsigned int page_offset; 2744 unsigned long page_offset;
2745 unsigned long page_size;
2638 int nr; 2746 int nr;
2639 2747
2640 nr = (offset >> PAGE_SHIFT) & pages_mask; 2748 nr = (offset >> PAGE_SHIFT) & pages_mask;
2641 page_offset = offset & (PAGE_SIZE - 1); 2749 page_size = 1UL << (handle->data->data_order + PAGE_SHIFT);
2642 size = min_t(unsigned int, PAGE_SIZE - page_offset, len); 2750 page_offset = offset & (page_size - 1);
2751 size = min_t(unsigned int, page_size - page_offset, len);
2643 2752
2644 memcpy(pages[nr] + page_offset, buf, size); 2753 memcpy(pages[nr] + page_offset, buf, size);
2645 2754
@@ -2658,10 +2767,10 @@ void perf_output_copy(struct perf_output_handle *handle,
2658} 2767}
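
A worked example of the wrap-around arithmetic (assuming 4 KiB pages, the order-0 backend and four data pages, so pages_mask is 3): for offset = 9000 and a 1000-byte record, nr = (9000 >> 12) & 3 = 2, page_offset = 9000 & 4095 = 808, and size = min(4096 - 808, 1000) = 1000, so the whole record lands in data_pages[2] in one memcpy(); only a record that straddles a page boundary makes the loop take a second pass.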
2659 2768
2660int perf_output_begin(struct perf_output_handle *handle, 2769int perf_output_begin(struct perf_output_handle *handle,
2661 struct perf_counter *counter, unsigned int size, 2770 struct perf_event *event, unsigned int size,
2662 int nmi, int sample) 2771 int nmi, int sample)
2663{ 2772{
2664 struct perf_counter *output_counter; 2773 struct perf_event *output_event;
2665 struct perf_mmap_data *data; 2774 struct perf_mmap_data *data;
2666 unsigned long tail, offset, head; 2775 unsigned long tail, offset, head;
2667 int have_lost; 2776 int have_lost;
@@ -2673,21 +2782,21 @@ int perf_output_begin(struct perf_output_handle *handle,
2673 2782
2674 rcu_read_lock(); 2783 rcu_read_lock();
2675 /* 2784 /*
2676 * For inherited counters we send all the output towards the parent. 2785 * For inherited events we send all the output towards the parent.
2677 */ 2786 */
2678 if (counter->parent) 2787 if (event->parent)
2679 counter = counter->parent; 2788 event = event->parent;
2680 2789
2681 output_counter = rcu_dereference(counter->output); 2790 output_event = rcu_dereference(event->output);
2682 if (output_counter) 2791 if (output_event)
2683 counter = output_counter; 2792 event = output_event;
2684 2793
2685 data = rcu_dereference(counter->data); 2794 data = rcu_dereference(event->data);
2686 if (!data) 2795 if (!data)
2687 goto out; 2796 goto out;
2688 2797
2689 handle->data = data; 2798 handle->data = data;
2690 handle->counter = counter; 2799 handle->event = event;
2691 handle->nmi = nmi; 2800 handle->nmi = nmi;
2692 handle->sample = sample; 2801 handle->sample = sample;
2693 2802
@@ -2721,10 +2830,10 @@ int perf_output_begin(struct perf_output_handle *handle,
2721 atomic_set(&data->wakeup, 1); 2830 atomic_set(&data->wakeup, 1);
2722 2831
2723 if (have_lost) { 2832 if (have_lost) {
2724 lost_event.header.type = PERF_EVENT_LOST; 2833 lost_event.header.type = PERF_RECORD_LOST;
2725 lost_event.header.misc = 0; 2834 lost_event.header.misc = 0;
2726 lost_event.header.size = sizeof(lost_event); 2835 lost_event.header.size = sizeof(lost_event);
2727 lost_event.id = counter->id; 2836 lost_event.id = event->id;
2728 lost_event.lost = atomic_xchg(&data->lost, 0); 2837 lost_event.lost = atomic_xchg(&data->lost, 0);
2729 2838
2730 perf_output_put(handle, lost_event); 2839 perf_output_put(handle, lost_event);
@@ -2743,10 +2852,10 @@ out:
2743 2852
2744void perf_output_end(struct perf_output_handle *handle) 2853void perf_output_end(struct perf_output_handle *handle)
2745{ 2854{
2746 struct perf_counter *counter = handle->counter; 2855 struct perf_event *event = handle->event;
2747 struct perf_mmap_data *data = handle->data; 2856 struct perf_mmap_data *data = handle->data;
2748 2857
2749 int wakeup_events = counter->attr.wakeup_events; 2858 int wakeup_events = event->attr.wakeup_events;
2750 2859
2751 if (handle->sample && wakeup_events) { 2860 if (handle->sample && wakeup_events) {
2752 int events = atomic_inc_return(&data->events); 2861 int events = atomic_inc_return(&data->events);
@@ -2760,58 +2869,58 @@ void perf_output_end(struct perf_output_handle *handle)
2760 rcu_read_unlock(); 2869 rcu_read_unlock();
2761} 2870}
2762 2871
2763static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p) 2872static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
2764{ 2873{
2765 /* 2874 /*
2766 * only top level counters have the pid namespace they were created in 2875 * only top level events have the pid namespace they were created in
2767 */ 2876 */
2768 if (counter->parent) 2877 if (event->parent)
2769 counter = counter->parent; 2878 event = event->parent;
2770 2879
2771 return task_tgid_nr_ns(p, counter->ns); 2880 return task_tgid_nr_ns(p, event->ns);
2772} 2881}
2773 2882
2774static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) 2883static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
2775{ 2884{
2776 /* 2885 /*
2777 * only top level counters have the pid namespace they were created in 2886 * only top level events have the pid namespace they were created in
2778 */ 2887 */
2779 if (counter->parent) 2888 if (event->parent)
2780 counter = counter->parent; 2889 event = event->parent;
2781 2890
2782 return task_pid_nr_ns(p, counter->ns); 2891 return task_pid_nr_ns(p, event->ns);
2783} 2892}
2784 2893
2785static void perf_output_read_one(struct perf_output_handle *handle, 2894static void perf_output_read_one(struct perf_output_handle *handle,
2786 struct perf_counter *counter) 2895 struct perf_event *event)
2787{ 2896{
2788 u64 read_format = counter->attr.read_format; 2897 u64 read_format = event->attr.read_format;
2789 u64 values[4]; 2898 u64 values[4];
2790 int n = 0; 2899 int n = 0;
2791 2900
2792 values[n++] = atomic64_read(&counter->count); 2901 values[n++] = atomic64_read(&event->count);
2793 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 2902 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2794 values[n++] = counter->total_time_enabled + 2903 values[n++] = event->total_time_enabled +
2795 atomic64_read(&counter->child_total_time_enabled); 2904 atomic64_read(&event->child_total_time_enabled);
2796 } 2905 }
2797 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 2906 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2798 values[n++] = counter->total_time_running + 2907 values[n++] = event->total_time_running +
2799 atomic64_read(&counter->child_total_time_running); 2908 atomic64_read(&event->child_total_time_running);
2800 } 2909 }
2801 if (read_format & PERF_FORMAT_ID) 2910 if (read_format & PERF_FORMAT_ID)
2802 values[n++] = primary_counter_id(counter); 2911 values[n++] = primary_event_id(event);
2803 2912
2804 perf_output_copy(handle, values, n * sizeof(u64)); 2913 perf_output_copy(handle, values, n * sizeof(u64));
2805} 2914}
2806 2915
2807/* 2916/*
2808 * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult. 2917 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
2809 */ 2918 */
2810static void perf_output_read_group(struct perf_output_handle *handle, 2919static void perf_output_read_group(struct perf_output_handle *handle,
2811 struct perf_counter *counter) 2920 struct perf_event *event)
2812{ 2921{
2813 struct perf_counter *leader = counter->group_leader, *sub; 2922 struct perf_event *leader = event->group_leader, *sub;
2814 u64 read_format = counter->attr.read_format; 2923 u64 read_format = event->attr.read_format;
2815 u64 values[5]; 2924 u64 values[5];
2816 int n = 0; 2925 int n = 0;
2817 2926
@@ -2823,42 +2932,42 @@ static void perf_output_read_group(struct perf_output_handle *handle,
2823 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 2932 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2824 values[n++] = leader->total_time_running; 2933 values[n++] = leader->total_time_running;
2825 2934
2826 if (leader != counter) 2935 if (leader != event)
2827 leader->pmu->read(leader); 2936 leader->pmu->read(leader);
2828 2937
2829 values[n++] = atomic64_read(&leader->count); 2938 values[n++] = atomic64_read(&leader->count);
2830 if (read_format & PERF_FORMAT_ID) 2939 if (read_format & PERF_FORMAT_ID)
2831 values[n++] = primary_counter_id(leader); 2940 values[n++] = primary_event_id(leader);
2832 2941
2833 perf_output_copy(handle, values, n * sizeof(u64)); 2942 perf_output_copy(handle, values, n * sizeof(u64));
2834 2943
2835 list_for_each_entry(sub, &leader->sibling_list, list_entry) { 2944 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2836 n = 0; 2945 n = 0;
2837 2946
2838 if (sub != counter) 2947 if (sub != event)
2839 sub->pmu->read(sub); 2948 sub->pmu->read(sub);
2840 2949
2841 values[n++] = atomic64_read(&sub->count); 2950 values[n++] = atomic64_read(&sub->count);
2842 if (read_format & PERF_FORMAT_ID) 2951 if (read_format & PERF_FORMAT_ID)
2843 values[n++] = primary_counter_id(sub); 2952 values[n++] = primary_event_id(sub);
2844 2953
2845 perf_output_copy(handle, values, n * sizeof(u64)); 2954 perf_output_copy(handle, values, n * sizeof(u64));
2846 } 2955 }
2847} 2956}
2848 2957
2849static void perf_output_read(struct perf_output_handle *handle, 2958static void perf_output_read(struct perf_output_handle *handle,
2850 struct perf_counter *counter) 2959 struct perf_event *event)
2851{ 2960{
2852 if (counter->attr.read_format & PERF_FORMAT_GROUP) 2961 if (event->attr.read_format & PERF_FORMAT_GROUP)
2853 perf_output_read_group(handle, counter); 2962 perf_output_read_group(handle, event);
2854 else 2963 else
2855 perf_output_read_one(handle, counter); 2964 perf_output_read_one(handle, event);
2856} 2965}
2857 2966
2858void perf_output_sample(struct perf_output_handle *handle, 2967void perf_output_sample(struct perf_output_handle *handle,
2859 struct perf_event_header *header, 2968 struct perf_event_header *header,
2860 struct perf_sample_data *data, 2969 struct perf_sample_data *data,
2861 struct perf_counter *counter) 2970 struct perf_event *event)
2862{ 2971{
2863 u64 sample_type = data->type; 2972 u64 sample_type = data->type;
2864 2973
@@ -2889,7 +2998,7 @@ void perf_output_sample(struct perf_output_handle *handle,
2889 perf_output_put(handle, data->period); 2998 perf_output_put(handle, data->period);
2890 2999
2891 if (sample_type & PERF_SAMPLE_READ) 3000 if (sample_type & PERF_SAMPLE_READ)
2892 perf_output_read(handle, counter); 3001 perf_output_read(handle, event);
2893 3002
2894 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 3003 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2895 if (data->callchain) { 3004 if (data->callchain) {
@@ -2927,14 +3036,14 @@ void perf_output_sample(struct perf_output_handle *handle,
2927 3036
2928void perf_prepare_sample(struct perf_event_header *header, 3037void perf_prepare_sample(struct perf_event_header *header,
2929 struct perf_sample_data *data, 3038 struct perf_sample_data *data,
2930 struct perf_counter *counter, 3039 struct perf_event *event,
2931 struct pt_regs *regs) 3040 struct pt_regs *regs)
2932{ 3041{
2933 u64 sample_type = counter->attr.sample_type; 3042 u64 sample_type = event->attr.sample_type;
2934 3043
2935 data->type = sample_type; 3044 data->type = sample_type;
2936 3045
2937 header->type = PERF_EVENT_SAMPLE; 3046 header->type = PERF_RECORD_SAMPLE;
2938 header->size = sizeof(*header); 3047 header->size = sizeof(*header);
2939 3048
2940 header->misc = 0; 3049 header->misc = 0;
@@ -2948,8 +3057,8 @@ void perf_prepare_sample(struct perf_event_header *header,
2948 3057
2949 if (sample_type & PERF_SAMPLE_TID) { 3058 if (sample_type & PERF_SAMPLE_TID) {
2950 /* namespace issues */ 3059 /* namespace issues */
2951 data->tid_entry.pid = perf_counter_pid(counter, current); 3060 data->tid_entry.pid = perf_event_pid(event, current);
2952 data->tid_entry.tid = perf_counter_tid(counter, current); 3061 data->tid_entry.tid = perf_event_tid(event, current);
2953 3062
2954 header->size += sizeof(data->tid_entry); 3063 header->size += sizeof(data->tid_entry);
2955 } 3064 }
@@ -2964,13 +3073,13 @@ void perf_prepare_sample(struct perf_event_header *header,
2964 header->size += sizeof(data->addr); 3073 header->size += sizeof(data->addr);
2965 3074
2966 if (sample_type & PERF_SAMPLE_ID) { 3075 if (sample_type & PERF_SAMPLE_ID) {
2967 data->id = primary_counter_id(counter); 3076 data->id = primary_event_id(event);
2968 3077
2969 header->size += sizeof(data->id); 3078 header->size += sizeof(data->id);
2970 } 3079 }
2971 3080
2972 if (sample_type & PERF_SAMPLE_STREAM_ID) { 3081 if (sample_type & PERF_SAMPLE_STREAM_ID) {
2973 data->stream_id = counter->id; 3082 data->stream_id = event->id;
2974 3083
2975 header->size += sizeof(data->stream_id); 3084 header->size += sizeof(data->stream_id);
2976 } 3085 }
@@ -2986,7 +3095,7 @@ void perf_prepare_sample(struct perf_event_header *header,
2986 header->size += sizeof(data->period); 3095 header->size += sizeof(data->period);
2987 3096
2988 if (sample_type & PERF_SAMPLE_READ) 3097 if (sample_type & PERF_SAMPLE_READ)
2989 header->size += perf_counter_read_size(counter); 3098 header->size += perf_event_read_size(event);
2990 3099
2991 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 3100 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2992 int size = 1; 3101 int size = 1;
@@ -3012,25 +3121,25 @@ void perf_prepare_sample(struct perf_event_header *header,
3012 } 3121 }
3013} 3122}
3014 3123
3015static void perf_counter_output(struct perf_counter *counter, int nmi, 3124static void perf_event_output(struct perf_event *event, int nmi,
3016 struct perf_sample_data *data, 3125 struct perf_sample_data *data,
3017 struct pt_regs *regs) 3126 struct pt_regs *regs)
3018{ 3127{
3019 struct perf_output_handle handle; 3128 struct perf_output_handle handle;
3020 struct perf_event_header header; 3129 struct perf_event_header header;
3021 3130
3022 perf_prepare_sample(&header, data, counter, regs); 3131 perf_prepare_sample(&header, data, event, regs);
3023 3132
3024 if (perf_output_begin(&handle, counter, header.size, nmi, 1)) 3133 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3025 return; 3134 return;
3026 3135
3027 perf_output_sample(&handle, &header, data, counter); 3136 perf_output_sample(&handle, &header, data, event);
3028 3137
3029 perf_output_end(&handle); 3138 perf_output_end(&handle);
3030} 3139}
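perf_event_output() commits each sample into the event's mmap()ed ring buffer; a consumer walks that buffer by following data_head and stepping over struct perf_event_header records. A simplified, non-wrapping sketch of such a reader, assuming the mmap control page exposes data_head/data_tail as in the renamed header and that the caller mapped the event as one control page plus 2^n data pages:

	#include <stdio.h>
	#include <stdint.h>
	#include <linux/perf_event.h>

	/* One pass over newly produced records; 'base' is the start of the
	 * mmap()ed area, so the data area begins one page in. Records that
	 * wrap around the end of the buffer are not handled here. */
	static void drain(void *base, size_t page_size, size_t data_size)
	{
		struct perf_event_mmap_page *meta = base;
		uint64_t head = meta->data_head;
		uint64_t tail = meta->data_tail;
		char *data = (char *)base + page_size;

		__sync_synchronize();	/* pair with the kernel's write barrier */

		while (tail < head) {
			struct perf_event_header *hdr =
				(void *)(data + (tail % data_size));

			switch (hdr->type) {
			case PERF_RECORD_SAMPLE:
			case PERF_RECORD_MMAP:
			case PERF_RECORD_COMM:
			case PERF_RECORD_FORK:
			case PERF_RECORD_EXIT:
				printf("record type %u, %u bytes\n",
				       hdr->type, hdr->size);
				break;
			}
			tail += hdr->size;
		}
		meta->data_tail = tail;	/* tell the kernel we consumed it */
	}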
3031 3140
3032/* 3141/*
3033 * read event 3142 * read event_id
3034 */ 3143 */
3035 3144
3036struct perf_read_event { 3145struct perf_read_event {
@@ -3041,27 +3150,27 @@ struct perf_read_event {
3041}; 3150};
3042 3151
3043static void 3152static void
3044perf_counter_read_event(struct perf_counter *counter, 3153perf_event_read_event(struct perf_event *event,
3045 struct task_struct *task) 3154 struct task_struct *task)
3046{ 3155{
3047 struct perf_output_handle handle; 3156 struct perf_output_handle handle;
3048 struct perf_read_event event = { 3157 struct perf_read_event read_event = {
3049 .header = { 3158 .header = {
3050 .type = PERF_EVENT_READ, 3159 .type = PERF_RECORD_READ,
3051 .misc = 0, 3160 .misc = 0,
3052 .size = sizeof(event) + perf_counter_read_size(counter), 3161 .size = sizeof(read_event) + perf_event_read_size(event),
3053 }, 3162 },
3054 .pid = perf_counter_pid(counter, task), 3163 .pid = perf_event_pid(event, task),
3055 .tid = perf_counter_tid(counter, task), 3164 .tid = perf_event_tid(event, task),
3056 }; 3165 };
3057 int ret; 3166 int ret;
3058 3167
3059 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); 3168 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3060 if (ret) 3169 if (ret)
3061 return; 3170 return;
3062 3171
3063 perf_output_put(&handle, event); 3172 perf_output_put(&handle, read_event);
3064 perf_output_read(&handle, counter); 3173 perf_output_read(&handle, event);
3065 3174
3066 perf_output_end(&handle); 3175 perf_output_end(&handle);
3067} 3176}
@@ -3074,7 +3183,7 @@ perf_counter_read_event(struct perf_counter *counter,
3074 3183
3075struct perf_task_event { 3184struct perf_task_event {
3076 struct task_struct *task; 3185 struct task_struct *task;
3077 struct perf_counter_context *task_ctx; 3186 struct perf_event_context *task_ctx;
3078 3187
3079 struct { 3188 struct {
3080 struct perf_event_header header; 3189 struct perf_event_header header;
@@ -3084,10 +3193,10 @@ struct perf_task_event {
3084 u32 tid; 3193 u32 tid;
3085 u32 ptid; 3194 u32 ptid;
3086 u64 time; 3195 u64 time;
3087 } event; 3196 } event_id;
3088}; 3197};
3089 3198
3090static void perf_counter_task_output(struct perf_counter *counter, 3199static void perf_event_task_output(struct perf_event *event,
3091 struct perf_task_event *task_event) 3200 struct perf_task_event *task_event)
3092{ 3201{
3093 struct perf_output_handle handle; 3202 struct perf_output_handle handle;
@@ -3095,85 +3204,85 @@ static void perf_counter_task_output(struct perf_counter *counter,
3095 struct task_struct *task = task_event->task; 3204 struct task_struct *task = task_event->task;
3096 int ret; 3205 int ret;
3097 3206
3098 size = task_event->event.header.size; 3207 size = task_event->event_id.header.size;
3099 ret = perf_output_begin(&handle, counter, size, 0, 0); 3208 ret = perf_output_begin(&handle, event, size, 0, 0);
3100 3209
3101 if (ret) 3210 if (ret)
3102 return; 3211 return;
3103 3212
3104 task_event->event.pid = perf_counter_pid(counter, task); 3213 task_event->event_id.pid = perf_event_pid(event, task);
3105 task_event->event.ppid = perf_counter_pid(counter, current); 3214 task_event->event_id.ppid = perf_event_pid(event, current);
3106 3215
3107 task_event->event.tid = perf_counter_tid(counter, task); 3216 task_event->event_id.tid = perf_event_tid(event, task);
3108 task_event->event.ptid = perf_counter_tid(counter, current); 3217 task_event->event_id.ptid = perf_event_tid(event, current);
3109 3218
3110 task_event->event.time = perf_clock(); 3219 task_event->event_id.time = perf_clock();
3111 3220
3112 perf_output_put(&handle, task_event->event); 3221 perf_output_put(&handle, task_event->event_id);
3113 3222
3114 perf_output_end(&handle); 3223 perf_output_end(&handle);
3115} 3224}
3116 3225
3117static int perf_counter_task_match(struct perf_counter *counter) 3226static int perf_event_task_match(struct perf_event *event)
3118{ 3227{
3119 if (counter->attr.comm || counter->attr.mmap || counter->attr.task) 3228 if (event->attr.comm || event->attr.mmap || event->attr.task)
3120 return 1; 3229 return 1;
3121 3230
3122 return 0; 3231 return 0;
3123} 3232}
3124 3233
3125static void perf_counter_task_ctx(struct perf_counter_context *ctx, 3234static void perf_event_task_ctx(struct perf_event_context *ctx,
3126 struct perf_task_event *task_event) 3235 struct perf_task_event *task_event)
3127{ 3236{
3128 struct perf_counter *counter; 3237 struct perf_event *event;
3129 3238
3130 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3239 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3131 return; 3240 return;
3132 3241
3133 rcu_read_lock(); 3242 rcu_read_lock();
3134 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3243 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3135 if (perf_counter_task_match(counter)) 3244 if (perf_event_task_match(event))
3136 perf_counter_task_output(counter, task_event); 3245 perf_event_task_output(event, task_event);
3137 } 3246 }
3138 rcu_read_unlock(); 3247 rcu_read_unlock();
3139} 3248}
3140 3249
3141static void perf_counter_task_event(struct perf_task_event *task_event) 3250static void perf_event_task_event(struct perf_task_event *task_event)
3142{ 3251{
3143 struct perf_cpu_context *cpuctx; 3252 struct perf_cpu_context *cpuctx;
3144 struct perf_counter_context *ctx = task_event->task_ctx; 3253 struct perf_event_context *ctx = task_event->task_ctx;
3145 3254
3146 cpuctx = &get_cpu_var(perf_cpu_context); 3255 cpuctx = &get_cpu_var(perf_cpu_context);
3147 perf_counter_task_ctx(&cpuctx->ctx, task_event); 3256 perf_event_task_ctx(&cpuctx->ctx, task_event);
3148 put_cpu_var(perf_cpu_context); 3257 put_cpu_var(perf_cpu_context);
3149 3258
3150 rcu_read_lock(); 3259 rcu_read_lock();
3151 if (!ctx) 3260 if (!ctx)
3152 ctx = rcu_dereference(task_event->task->perf_counter_ctxp); 3261 ctx = rcu_dereference(task_event->task->perf_event_ctxp);
3153 if (ctx) 3262 if (ctx)
3154 perf_counter_task_ctx(ctx, task_event); 3263 perf_event_task_ctx(ctx, task_event);
3155 rcu_read_unlock(); 3264 rcu_read_unlock();
3156} 3265}
3157 3266
3158static void perf_counter_task(struct task_struct *task, 3267static void perf_event_task(struct task_struct *task,
3159 struct perf_counter_context *task_ctx, 3268 struct perf_event_context *task_ctx,
3160 int new) 3269 int new)
3161{ 3270{
3162 struct perf_task_event task_event; 3271 struct perf_task_event task_event;
3163 3272
3164 if (!atomic_read(&nr_comm_counters) && 3273 if (!atomic_read(&nr_comm_events) &&
3165 !atomic_read(&nr_mmap_counters) && 3274 !atomic_read(&nr_mmap_events) &&
3166 !atomic_read(&nr_task_counters)) 3275 !atomic_read(&nr_task_events))
3167 return; 3276 return;
3168 3277
3169 task_event = (struct perf_task_event){ 3278 task_event = (struct perf_task_event){
3170 .task = task, 3279 .task = task,
3171 .task_ctx = task_ctx, 3280 .task_ctx = task_ctx,
3172 .event = { 3281 .event_id = {
3173 .header = { 3282 .header = {
3174 .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, 3283 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3175 .misc = 0, 3284 .misc = 0,
3176 .size = sizeof(task_event.event), 3285 .size = sizeof(task_event.event_id),
3177 }, 3286 },
3178 /* .pid */ 3287 /* .pid */
3179 /* .ppid */ 3288 /* .ppid */
@@ -3182,12 +3291,12 @@ static void perf_counter_task(struct task_struct *task,
3182 }, 3291 },
3183 }; 3292 };
3184 3293
3185 perf_counter_task_event(&task_event); 3294 perf_event_task_event(&task_event);
3186} 3295}
3187 3296
3188void perf_counter_fork(struct task_struct *task) 3297void perf_event_fork(struct task_struct *task)
3189{ 3298{
3190 perf_counter_task(task, NULL, 1); 3299 perf_event_task(task, NULL, 1);
3191} 3300}
3192 3301
3193/* 3302/*
@@ -3204,56 +3313,56 @@ struct perf_comm_event {
3204 3313
3205 u32 pid; 3314 u32 pid;
3206 u32 tid; 3315 u32 tid;
3207 } event; 3316 } event_id;
3208}; 3317};
3209 3318
3210static void perf_counter_comm_output(struct perf_counter *counter, 3319static void perf_event_comm_output(struct perf_event *event,
3211 struct perf_comm_event *comm_event) 3320 struct perf_comm_event *comm_event)
3212{ 3321{
3213 struct perf_output_handle handle; 3322 struct perf_output_handle handle;
3214 int size = comm_event->event.header.size; 3323 int size = comm_event->event_id.header.size;
3215 int ret = perf_output_begin(&handle, counter, size, 0, 0); 3324 int ret = perf_output_begin(&handle, event, size, 0, 0);
3216 3325
3217 if (ret) 3326 if (ret)
3218 return; 3327 return;
3219 3328
3220 comm_event->event.pid = perf_counter_pid(counter, comm_event->task); 3329 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3221 comm_event->event.tid = perf_counter_tid(counter, comm_event->task); 3330 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3222 3331
3223 perf_output_put(&handle, comm_event->event); 3332 perf_output_put(&handle, comm_event->event_id);
3224 perf_output_copy(&handle, comm_event->comm, 3333 perf_output_copy(&handle, comm_event->comm,
3225 comm_event->comm_size); 3334 comm_event->comm_size);
3226 perf_output_end(&handle); 3335 perf_output_end(&handle);
3227} 3336}
3228 3337
3229static int perf_counter_comm_match(struct perf_counter *counter) 3338static int perf_event_comm_match(struct perf_event *event)
3230{ 3339{
3231 if (counter->attr.comm) 3340 if (event->attr.comm)
3232 return 1; 3341 return 1;
3233 3342
3234 return 0; 3343 return 0;
3235} 3344}
3236 3345
3237static void perf_counter_comm_ctx(struct perf_counter_context *ctx, 3346static void perf_event_comm_ctx(struct perf_event_context *ctx,
3238 struct perf_comm_event *comm_event) 3347 struct perf_comm_event *comm_event)
3239{ 3348{
3240 struct perf_counter *counter; 3349 struct perf_event *event;
3241 3350
3242 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3351 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3243 return; 3352 return;
3244 3353
3245 rcu_read_lock(); 3354 rcu_read_lock();
3246 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3355 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3247 if (perf_counter_comm_match(counter)) 3356 if (perf_event_comm_match(event))
3248 perf_counter_comm_output(counter, comm_event); 3357 perf_event_comm_output(event, comm_event);
3249 } 3358 }
3250 rcu_read_unlock(); 3359 rcu_read_unlock();
3251} 3360}
3252 3361
3253static void perf_counter_comm_event(struct perf_comm_event *comm_event) 3362static void perf_event_comm_event(struct perf_comm_event *comm_event)
3254{ 3363{
3255 struct perf_cpu_context *cpuctx; 3364 struct perf_cpu_context *cpuctx;
3256 struct perf_counter_context *ctx; 3365 struct perf_event_context *ctx;
3257 unsigned int size; 3366 unsigned int size;
3258 char comm[TASK_COMM_LEN]; 3367 char comm[TASK_COMM_LEN];
3259 3368
@@ -3264,10 +3373,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3264 comm_event->comm = comm; 3373 comm_event->comm = comm;
3265 comm_event->comm_size = size; 3374 comm_event->comm_size = size;
3266 3375
3267 comm_event->event.header.size = sizeof(comm_event->event) + size; 3376 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3268 3377
3269 cpuctx = &get_cpu_var(perf_cpu_context); 3378 cpuctx = &get_cpu_var(perf_cpu_context);
3270 perf_counter_comm_ctx(&cpuctx->ctx, comm_event); 3379 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3271 put_cpu_var(perf_cpu_context); 3380 put_cpu_var(perf_cpu_context);
3272 3381
3273 rcu_read_lock(); 3382 rcu_read_lock();
@@ -3275,29 +3384,29 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3275 * doesn't really matter which of the child contexts the 3384 * doesn't really matter which of the child contexts the
3276 * events ends up in. 3385 * events ends up in.
3277 */ 3386 */
3278 ctx = rcu_dereference(current->perf_counter_ctxp); 3387 ctx = rcu_dereference(current->perf_event_ctxp);
3279 if (ctx) 3388 if (ctx)
3280 perf_counter_comm_ctx(ctx, comm_event); 3389 perf_event_comm_ctx(ctx, comm_event);
3281 rcu_read_unlock(); 3390 rcu_read_unlock();
3282} 3391}
3283 3392
3284void perf_counter_comm(struct task_struct *task) 3393void perf_event_comm(struct task_struct *task)
3285{ 3394{
3286 struct perf_comm_event comm_event; 3395 struct perf_comm_event comm_event;
3287 3396
3288 if (task->perf_counter_ctxp) 3397 if (task->perf_event_ctxp)
3289 perf_counter_enable_on_exec(task); 3398 perf_event_enable_on_exec(task);
3290 3399
3291 if (!atomic_read(&nr_comm_counters)) 3400 if (!atomic_read(&nr_comm_events))
3292 return; 3401 return;
3293 3402
3294 comm_event = (struct perf_comm_event){ 3403 comm_event = (struct perf_comm_event){
3295 .task = task, 3404 .task = task,
3296 /* .comm */ 3405 /* .comm */
3297 /* .comm_size */ 3406 /* .comm_size */
3298 .event = { 3407 .event_id = {
3299 .header = { 3408 .header = {
3300 .type = PERF_EVENT_COMM, 3409 .type = PERF_RECORD_COMM,
3301 .misc = 0, 3410 .misc = 0,
3302 /* .size */ 3411 /* .size */
3303 }, 3412 },
@@ -3306,7 +3415,7 @@ void perf_counter_comm(struct task_struct *task)
3306 }, 3415 },
3307 }; 3416 };
3308 3417
3309 perf_counter_comm_event(&comm_event); 3418 perf_event_comm_event(&comm_event);
3310} 3419}
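As a reader sees it, a comm record is the fixed part built above (header, pid, tid) followed by the task name; the name length is folded into header.size, so a consumer sizes the string from there. A hedged mirror declaration (the struct name is hypothetical, not kernel ABI):

	#include <stdint.h>
	#include <linux/perf_event.h>

	/* PERF_RECORD_COMM payload as written by perf_event_comm_output():
	 * the comm[] text fills header.size minus the fixed part. */
	struct comm_record {
		struct perf_event_header header;
		uint32_t pid;
		uint32_t tid;
		char comm[];		/* task name, padded by the kernel */
	};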
3311 3420
3312/* 3421/*
@@ -3327,57 +3436,57 @@ struct perf_mmap_event {
3327 u64 start; 3436 u64 start;
3328 u64 len; 3437 u64 len;
3329 u64 pgoff; 3438 u64 pgoff;
3330 } event; 3439 } event_id;
3331}; 3440};
3332 3441
3333static void perf_counter_mmap_output(struct perf_counter *counter, 3442static void perf_event_mmap_output(struct perf_event *event,
3334 struct perf_mmap_event *mmap_event) 3443 struct perf_mmap_event *mmap_event)
3335{ 3444{
3336 struct perf_output_handle handle; 3445 struct perf_output_handle handle;
3337 int size = mmap_event->event.header.size; 3446 int size = mmap_event->event_id.header.size;
3338 int ret = perf_output_begin(&handle, counter, size, 0, 0); 3447 int ret = perf_output_begin(&handle, event, size, 0, 0);
3339 3448
3340 if (ret) 3449 if (ret)
3341 return; 3450 return;
3342 3451
3343 mmap_event->event.pid = perf_counter_pid(counter, current); 3452 mmap_event->event_id.pid = perf_event_pid(event, current);
3344 mmap_event->event.tid = perf_counter_tid(counter, current); 3453 mmap_event->event_id.tid = perf_event_tid(event, current);
3345 3454
3346 perf_output_put(&handle, mmap_event->event); 3455 perf_output_put(&handle, mmap_event->event_id);
3347 perf_output_copy(&handle, mmap_event->file_name, 3456 perf_output_copy(&handle, mmap_event->file_name,
3348 mmap_event->file_size); 3457 mmap_event->file_size);
3349 perf_output_end(&handle); 3458 perf_output_end(&handle);
3350} 3459}
3351 3460
3352static int perf_counter_mmap_match(struct perf_counter *counter, 3461static int perf_event_mmap_match(struct perf_event *event,
3353 struct perf_mmap_event *mmap_event) 3462 struct perf_mmap_event *mmap_event)
3354{ 3463{
3355 if (counter->attr.mmap) 3464 if (event->attr.mmap)
3356 return 1; 3465 return 1;
3357 3466
3358 return 0; 3467 return 0;
3359} 3468}
3360 3469
3361static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, 3470static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3362 struct perf_mmap_event *mmap_event) 3471 struct perf_mmap_event *mmap_event)
3363{ 3472{
3364 struct perf_counter *counter; 3473 struct perf_event *event;
3365 3474
3366 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3475 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3367 return; 3476 return;
3368 3477
3369 rcu_read_lock(); 3478 rcu_read_lock();
3370 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3479 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3371 if (perf_counter_mmap_match(counter, mmap_event)) 3480 if (perf_event_mmap_match(event, mmap_event))
3372 perf_counter_mmap_output(counter, mmap_event); 3481 perf_event_mmap_output(event, mmap_event);
3373 } 3482 }
3374 rcu_read_unlock(); 3483 rcu_read_unlock();
3375} 3484}
3376 3485
3377static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) 3486static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3378{ 3487{
3379 struct perf_cpu_context *cpuctx; 3488 struct perf_cpu_context *cpuctx;
3380 struct perf_counter_context *ctx; 3489 struct perf_event_context *ctx;
3381 struct vm_area_struct *vma = mmap_event->vma; 3490 struct vm_area_struct *vma = mmap_event->vma;
3382 struct file *file = vma->vm_file; 3491 struct file *file = vma->vm_file;
3383 unsigned int size; 3492 unsigned int size;
@@ -3425,10 +3534,10 @@ got_name:
3425 mmap_event->file_name = name; 3534 mmap_event->file_name = name;
3426 mmap_event->file_size = size; 3535 mmap_event->file_size = size;
3427 3536
3428 mmap_event->event.header.size = sizeof(mmap_event->event) + size; 3537 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3429 3538
3430 cpuctx = &get_cpu_var(perf_cpu_context); 3539 cpuctx = &get_cpu_var(perf_cpu_context);
3431 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); 3540 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
3432 put_cpu_var(perf_cpu_context); 3541 put_cpu_var(perf_cpu_context);
3433 3542
3434 rcu_read_lock(); 3543 rcu_read_lock();
@@ -3436,28 +3545,28 @@ got_name:
3436 * doesn't really matter which of the child contexts the 3545 * doesn't really matter which of the child contexts the
3437 * events ends up in. 3546 * events ends up in.
3438 */ 3547 */
3439 ctx = rcu_dereference(current->perf_counter_ctxp); 3548 ctx = rcu_dereference(current->perf_event_ctxp);
3440 if (ctx) 3549 if (ctx)
3441 perf_counter_mmap_ctx(ctx, mmap_event); 3550 perf_event_mmap_ctx(ctx, mmap_event);
3442 rcu_read_unlock(); 3551 rcu_read_unlock();
3443 3552
3444 kfree(buf); 3553 kfree(buf);
3445} 3554}
3446 3555
3447void __perf_counter_mmap(struct vm_area_struct *vma) 3556void __perf_event_mmap(struct vm_area_struct *vma)
3448{ 3557{
3449 struct perf_mmap_event mmap_event; 3558 struct perf_mmap_event mmap_event;
3450 3559
3451 if (!atomic_read(&nr_mmap_counters)) 3560 if (!atomic_read(&nr_mmap_events))
3452 return; 3561 return;
3453 3562
3454 mmap_event = (struct perf_mmap_event){ 3563 mmap_event = (struct perf_mmap_event){
3455 .vma = vma, 3564 .vma = vma,
3456 /* .file_name */ 3565 /* .file_name */
3457 /* .file_size */ 3566 /* .file_size */
3458 .event = { 3567 .event_id = {
3459 .header = { 3568 .header = {
3460 .type = PERF_EVENT_MMAP, 3569 .type = PERF_RECORD_MMAP,
3461 .misc = 0, 3570 .misc = 0,
3462 /* .size */ 3571 /* .size */
3463 }, 3572 },
@@ -3469,14 +3578,14 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
3469 }, 3578 },
3470 }; 3579 };
3471 3580
3472 perf_counter_mmap_event(&mmap_event); 3581 perf_event_mmap_event(&mmap_event);
3473} 3582}
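The mmap record carries the mapping geometry plus the backing file name, which is what profilers use to resolve sampled IPs back to a DSO and offset. A hedged reader-side mirror (struct name hypothetical; field order assumed to match the event_id layout used above):

	#include <stdint.h>
	#include <linux/perf_event.h>

	/* PERF_RECORD_MMAP payload as written by perf_event_mmap_output(). */
	struct mmap_record {
		struct perf_event_header header;
		uint32_t pid;
		uint32_t tid;
		uint64_t start;		/* mapping start address */
		uint64_t len;		/* mapping length */
		uint64_t pgoff;		/* file offset */
		char filename[];	/* name text, padded to a u64 boundary */
	};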
3474 3583
3475/* 3584/*
3476 * IRQ throttle logging 3585 * IRQ throttle logging
3477 */ 3586 */
3478 3587
3479static void perf_log_throttle(struct perf_counter *counter, int enable) 3588static void perf_log_throttle(struct perf_event *event, int enable)
3480{ 3589{
3481 struct perf_output_handle handle; 3590 struct perf_output_handle handle;
3482 int ret; 3591 int ret;
@@ -3488,19 +3597,19 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
3488 u64 stream_id; 3597 u64 stream_id;
3489 } throttle_event = { 3598 } throttle_event = {
3490 .header = { 3599 .header = {
3491 .type = PERF_EVENT_THROTTLE, 3600 .type = PERF_RECORD_THROTTLE,
3492 .misc = 0, 3601 .misc = 0,
3493 .size = sizeof(throttle_event), 3602 .size = sizeof(throttle_event),
3494 }, 3603 },
3495 .time = perf_clock(), 3604 .time = perf_clock(),
3496 .id = primary_counter_id(counter), 3605 .id = primary_event_id(event),
3497 .stream_id = counter->id, 3606 .stream_id = event->id,
3498 }; 3607 };
3499 3608
3500 if (enable) 3609 if (enable)
3501 throttle_event.header.type = PERF_EVENT_UNTHROTTLE; 3610 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
3502 3611
3503 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); 3612 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
3504 if (ret) 3613 if (ret)
3505 return; 3614 return;
3506 3615
@@ -3509,18 +3618,18 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
3509} 3618}
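On the consumer side, the throttle/unthrottle record carries exactly the three payload fields assembled above. A mirror declaration a reader might use (struct name hypothetical):

	#include <stdint.h>
	#include <linux/perf_event.h>

	/* Layout of PERF_RECORD_THROTTLE / PERF_RECORD_UNTHROTTLE as written
	 * by perf_log_throttle(): header, then time, id, stream_id. */
	struct throttle_record {
		struct perf_event_header header;
		uint64_t time;		/* perf_clock() timestamp */
		uint64_t id;		/* primary_event_id(event) */
		uint64_t stream_id;	/* event->id */
	};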
3510 3619
3511/* 3620/*
3512 * Generic counter overflow handling, sampling. 3621 * Generic event overflow handling, sampling.
3513 */ 3622 */
3514 3623
3515static int __perf_counter_overflow(struct perf_counter *counter, int nmi, 3624static int __perf_event_overflow(struct perf_event *event, int nmi,
3516 int throttle, struct perf_sample_data *data, 3625 int throttle, struct perf_sample_data *data,
3517 struct pt_regs *regs) 3626 struct pt_regs *regs)
3518{ 3627{
3519 int events = atomic_read(&counter->event_limit); 3628 int events = atomic_read(&event->event_limit);
3520 struct hw_perf_counter *hwc = &counter->hw; 3629 struct hw_perf_event *hwc = &event->hw;
3521 int ret = 0; 3630 int ret = 0;
3522 3631
3523 throttle = (throttle && counter->pmu->unthrottle != NULL); 3632 throttle = (throttle && event->pmu->unthrottle != NULL);
3524 3633
3525 if (!throttle) { 3634 if (!throttle) {
3526 hwc->interrupts++; 3635 hwc->interrupts++;
@@ -3528,73 +3637,73 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
3528 if (hwc->interrupts != MAX_INTERRUPTS) { 3637 if (hwc->interrupts != MAX_INTERRUPTS) {
3529 hwc->interrupts++; 3638 hwc->interrupts++;
3530 if (HZ * hwc->interrupts > 3639 if (HZ * hwc->interrupts >
3531 (u64)sysctl_perf_counter_sample_rate) { 3640 (u64)sysctl_perf_event_sample_rate) {
3532 hwc->interrupts = MAX_INTERRUPTS; 3641 hwc->interrupts = MAX_INTERRUPTS;
3533 perf_log_throttle(counter, 0); 3642 perf_log_throttle(event, 0);
3534 ret = 1; 3643 ret = 1;
3535 } 3644 }
3536 } else { 3645 } else {
3537 /* 3646 /*
3538 * Keep re-disabling counters even though on the previous 3647 * Keep re-disabling events even though on the previous
3539 * pass we disabled it - just in case we raced with a 3648 * pass we disabled it - just in case we raced with a
3540 * sched-in and the counter got enabled again: 3649 * sched-in and the event got enabled again:
3541 */ 3650 */
3542 ret = 1; 3651 ret = 1;
3543 } 3652 }
3544 } 3653 }
3545 3654
3546 if (counter->attr.freq) { 3655 if (event->attr.freq) {
3547 u64 now = perf_clock(); 3656 u64 now = perf_clock();
3548 s64 delta = now - hwc->freq_stamp; 3657 s64 delta = now - hwc->freq_stamp;
3549 3658
3550 hwc->freq_stamp = now; 3659 hwc->freq_stamp = now;
3551 3660
3552 if (delta > 0 && delta < TICK_NSEC) 3661 if (delta > 0 && delta < TICK_NSEC)
3553 perf_adjust_period(counter, NSEC_PER_SEC / (int)delta); 3662 perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
3554 } 3663 }
3555 3664
3556 /* 3665 /*
3557 * XXX event_limit might not quite work as expected on inherited 3666 * XXX event_limit might not quite work as expected on inherited
3558 * counters 3667 * events
3559 */ 3668 */
3560 3669
3561 counter->pending_kill = POLL_IN; 3670 event->pending_kill = POLL_IN;
3562 if (events && atomic_dec_and_test(&counter->event_limit)) { 3671 if (events && atomic_dec_and_test(&event->event_limit)) {
3563 ret = 1; 3672 ret = 1;
3564 counter->pending_kill = POLL_HUP; 3673 event->pending_kill = POLL_HUP;
3565 if (nmi) { 3674 if (nmi) {
3566 counter->pending_disable = 1; 3675 event->pending_disable = 1;
3567 perf_pending_queue(&counter->pending, 3676 perf_pending_queue(&event->pending,
3568 perf_pending_counter); 3677 perf_pending_event);
3569 } else 3678 } else
3570 perf_counter_disable(counter); 3679 perf_event_disable(event);
3571 } 3680 }
3572 3681
3573 perf_counter_output(counter, nmi, data, regs); 3682 perf_event_output(event, nmi, data, regs);
3574 return ret; 3683 return ret;
3575} 3684}
3576 3685
3577int perf_counter_overflow(struct perf_counter *counter, int nmi, 3686int perf_event_overflow(struct perf_event *event, int nmi,
3578 struct perf_sample_data *data, 3687 struct perf_sample_data *data,
3579 struct pt_regs *regs) 3688 struct pt_regs *regs)
3580{ 3689{
3581 return __perf_counter_overflow(counter, nmi, 1, data, regs); 3690 return __perf_event_overflow(event, nmi, 1, data, regs);
3582} 3691}
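The throttle test above trips once HZ * interrupts outgrows the sysctl limit, i.e. after roughly sysctl_perf_event_sample_rate / HZ interrupts within one tick. A standalone back-of-the-envelope check of that arithmetic, with HZ and the sysctl default assumed rather than taken from a real system:

	#include <stdio.h>

	int main(void)
	{
		const unsigned long hz = 1000;			/* assumed CONFIG_HZ */
		const unsigned long sample_rate = 100000;	/* assumed sysctl value */
		unsigned long interrupts = 0;

		/* Mimic the check in __perf_event_overflow(): each overflow bumps
		 * the count, and throttling kicks in once HZ * interrupts exceeds
		 * the allowed sample rate. */
		do {
			interrupts++;
		} while (hz * interrupts <= sample_rate);

		printf("throttled on interrupt %lu within one tick\n", interrupts);
		return 0;
	}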
3583 3692
3584/* 3693/*
3585 * Generic software counter infrastructure 3694 * Generic software event infrastructure
3586 */ 3695 */
3587 3696
3588/* 3697/*
3589 * We directly increment counter->count and keep a second value in 3698 * We directly increment event->count and keep a second value in
3590 * counter->hw.period_left to count intervals. This period counter 3699 * event->hw.period_left to count intervals. This period event
3591 * is kept in the range [-sample_period, 0] so that we can use the 3700 * is kept in the range [-sample_period, 0] so that we can use the
3592 * sign as trigger. 3701 * sign as trigger.
3593 */ 3702 */
3594 3703
3595static u64 perf_swcounter_set_period(struct perf_counter *counter) 3704static u64 perf_swevent_set_period(struct perf_event *event)
3596{ 3705{
3597 struct hw_perf_counter *hwc = &counter->hw; 3706 struct hw_perf_event *hwc = &event->hw;
3598 u64 period = hwc->last_period; 3707 u64 period = hwc->last_period;
3599 u64 nr, offset; 3708 u64 nr, offset;
3600 s64 old, val; 3709 s64 old, val;
@@ -3615,22 +3724,22 @@ again:
3615 return nr; 3724 return nr;
3616} 3725}
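A simplified, single-threaded model of the period bookkeeping described above: period_left is kept in [-sample_period, 0], raw counts are added to it, and each time it goes non-negative another sample period has elapsed. The real helper does this with an atomic cmpxchg loop; this sketch assumes no concurrency:

	#include <stdio.h>
	#include <stdint.h>

	struct sw_hw {
		int64_t period_left;	/* kept in [-sample_period, 0] */
		uint64_t sample_period;
	};

	/* Add 'nr' raw counts; return how many sample periods elapsed. */
	static uint64_t add_and_count_overflows(struct sw_hw *hw, uint64_t nr)
	{
		uint64_t overflows = 0;

		hw->period_left += nr;
		while (hw->period_left >= 0) {	/* sign flip = overflow */
			overflows++;
			hw->period_left -= hw->sample_period;
		}
		return overflows;
	}

	int main(void)
	{
		struct sw_hw hw = { .period_left = -1000, .sample_period = 1000 };

		/* 2500 counts against a period of 1000: two periods elapse and
		 * period_left ends up at -500, i.e. 500 counts into the next. */
		printf("%llu\n",
		       (unsigned long long)add_and_count_overflows(&hw, 2500));
		return 0;
	}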
3617 3726
3618static void perf_swcounter_overflow(struct perf_counter *counter, 3727static void perf_swevent_overflow(struct perf_event *event,
3619 int nmi, struct perf_sample_data *data, 3728 int nmi, struct perf_sample_data *data,
3620 struct pt_regs *regs) 3729 struct pt_regs *regs)
3621{ 3730{
3622 struct hw_perf_counter *hwc = &counter->hw; 3731 struct hw_perf_event *hwc = &event->hw;
3623 int throttle = 0; 3732 int throttle = 0;
3624 u64 overflow; 3733 u64 overflow;
3625 3734
3626 data->period = counter->hw.last_period; 3735 data->period = event->hw.last_period;
3627 overflow = perf_swcounter_set_period(counter); 3736 overflow = perf_swevent_set_period(event);
3628 3737
3629 if (hwc->interrupts == MAX_INTERRUPTS) 3738 if (hwc->interrupts == MAX_INTERRUPTS)
3630 return; 3739 return;
3631 3740
3632 for (; overflow; overflow--) { 3741 for (; overflow; overflow--) {
3633 if (__perf_counter_overflow(counter, nmi, throttle, 3742 if (__perf_event_overflow(event, nmi, throttle,
3634 data, regs)) { 3743 data, regs)) {
3635 /* 3744 /*
3636 * We inhibit the overflow from happening when 3745 * We inhibit the overflow from happening when
@@ -3642,20 +3751,20 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
3642 } 3751 }
3643} 3752}
3644 3753
3645static void perf_swcounter_unthrottle(struct perf_counter *counter) 3754static void perf_swevent_unthrottle(struct perf_event *event)
3646{ 3755{
3647 /* 3756 /*
3648 * Nothing to do, we already reset hwc->interrupts. 3757 * Nothing to do, we already reset hwc->interrupts.
3649 */ 3758 */
3650} 3759}
3651 3760
3652static void perf_swcounter_add(struct perf_counter *counter, u64 nr, 3761static void perf_swevent_add(struct perf_event *event, u64 nr,
3653 int nmi, struct perf_sample_data *data, 3762 int nmi, struct perf_sample_data *data,
3654 struct pt_regs *regs) 3763 struct pt_regs *regs)
3655{ 3764{
3656 struct hw_perf_counter *hwc = &counter->hw; 3765 struct hw_perf_event *hwc = &event->hw;
3657 3766
3658 atomic64_add(nr, &counter->count); 3767 atomic64_add(nr, &event->count);
3659 3768
3660 if (!hwc->sample_period) 3769 if (!hwc->sample_period)
3661 return; 3770 return;
@@ -3664,29 +3773,29 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3664 return; 3773 return;
3665 3774
3666 if (!atomic64_add_negative(nr, &hwc->period_left)) 3775 if (!atomic64_add_negative(nr, &hwc->period_left))
3667 perf_swcounter_overflow(counter, nmi, data, regs); 3776 perf_swevent_overflow(event, nmi, data, regs);
3668} 3777}
3669 3778
3670static int perf_swcounter_is_counting(struct perf_counter *counter) 3779static int perf_swevent_is_counting(struct perf_event *event)
3671{ 3780{
3672 /* 3781 /*
3673 * The counter is active, we're good! 3782 * The event is active, we're good!
3674 */ 3783 */
3675 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 3784 if (event->state == PERF_EVENT_STATE_ACTIVE)
3676 return 1; 3785 return 1;
3677 3786
3678 /* 3787 /*
3679 * The counter is off/error, not counting. 3788 * The event is off/error, not counting.
3680 */ 3789 */
3681 if (counter->state != PERF_COUNTER_STATE_INACTIVE) 3790 if (event->state != PERF_EVENT_STATE_INACTIVE)
3682 return 0; 3791 return 0;
3683 3792
3684 /* 3793 /*
3685 * The counter is inactive, if the context is active 3794 * The event is inactive, if the context is active
3686 * we're part of a group that didn't make it on the 'pmu', 3795 * we're part of a group that didn't make it on the 'pmu',
3687 * not counting. 3796 * not counting.
3688 */ 3797 */
3689 if (counter->ctx->is_active) 3798 if (event->ctx->is_active)
3690 return 0; 3799 return 0;
3691 3800
3692 /* 3801 /*
@@ -3697,49 +3806,49 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
3697 return 1; 3806 return 1;
3698} 3807}
3699 3808
3700static int perf_swcounter_match(struct perf_counter *counter, 3809static int perf_swevent_match(struct perf_event *event,
3701 enum perf_type_id type, 3810 enum perf_type_id type,
3702 u32 event, struct pt_regs *regs) 3811 u32 event_id, struct pt_regs *regs)
3703{ 3812{
3704 if (!perf_swcounter_is_counting(counter)) 3813 if (!perf_swevent_is_counting(event))
3705 return 0; 3814 return 0;
3706 3815
3707 if (counter->attr.type != type) 3816 if (event->attr.type != type)
3708 return 0; 3817 return 0;
3709 if (counter->attr.config != event) 3818 if (event->attr.config != event_id)
3710 return 0; 3819 return 0;
3711 3820
3712 if (regs) { 3821 if (regs) {
3713 if (counter->attr.exclude_user && user_mode(regs)) 3822 if (event->attr.exclude_user && user_mode(regs))
3714 return 0; 3823 return 0;
3715 3824
3716 if (counter->attr.exclude_kernel && !user_mode(regs)) 3825 if (event->attr.exclude_kernel && !user_mode(regs))
3717 return 0; 3826 return 0;
3718 } 3827 }
3719 3828
3720 return 1; 3829 return 1;
3721} 3830}
3722 3831
3723static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, 3832static void perf_swevent_ctx_event(struct perf_event_context *ctx,
3724 enum perf_type_id type, 3833 enum perf_type_id type,
3725 u32 event, u64 nr, int nmi, 3834 u32 event_id, u64 nr, int nmi,
3726 struct perf_sample_data *data, 3835 struct perf_sample_data *data,
3727 struct pt_regs *regs) 3836 struct pt_regs *regs)
3728{ 3837{
3729 struct perf_counter *counter; 3838 struct perf_event *event;
3730 3839
3731 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3840 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3732 return; 3841 return;
3733 3842
3734 rcu_read_lock(); 3843 rcu_read_lock();
3735 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3844 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3736 if (perf_swcounter_match(counter, type, event, regs)) 3845 if (perf_swevent_match(event, type, event_id, regs))
3737 perf_swcounter_add(counter, nr, nmi, data, regs); 3846 perf_swevent_add(event, nr, nmi, data, regs);
3738 } 3847 }
3739 rcu_read_unlock(); 3848 rcu_read_unlock();
3740} 3849}
3741 3850
3742static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) 3851static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
3743{ 3852{
3744 if (in_nmi()) 3853 if (in_nmi())
3745 return &cpuctx->recursion[3]; 3854 return &cpuctx->recursion[3];
@@ -3753,14 +3862,14 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3753 return &cpuctx->recursion[0]; 3862 return &cpuctx->recursion[0];
3754} 3863}
3755 3864
3756static void do_perf_swcounter_event(enum perf_type_id type, u32 event, 3865static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
3757 u64 nr, int nmi, 3866 u64 nr, int nmi,
3758 struct perf_sample_data *data, 3867 struct perf_sample_data *data,
3759 struct pt_regs *regs) 3868 struct pt_regs *regs)
3760{ 3869{
3761 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); 3870 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3762 int *recursion = perf_swcounter_recursion_context(cpuctx); 3871 int *recursion = perf_swevent_recursion_context(cpuctx);
3763 struct perf_counter_context *ctx; 3872 struct perf_event_context *ctx;
3764 3873
3765 if (*recursion) 3874 if (*recursion)
3766 goto out; 3875 goto out;
@@ -3768,16 +3877,16 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
3768 (*recursion)++; 3877 (*recursion)++;
3769 barrier(); 3878 barrier();
3770 3879
3771 perf_swcounter_ctx_event(&cpuctx->ctx, type, event, 3880 perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
3772 nr, nmi, data, regs); 3881 nr, nmi, data, regs);
3773 rcu_read_lock(); 3882 rcu_read_lock();
3774 /* 3883 /*
3775 * doesn't really matter which of the child contexts the 3884 * doesn't really matter which of the child contexts the
3776 * events ends up in. 3885 * events ends up in.
3777 */ 3886 */
3778 ctx = rcu_dereference(current->perf_counter_ctxp); 3887 ctx = rcu_dereference(current->perf_event_ctxp);
3779 if (ctx) 3888 if (ctx)
3780 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs); 3889 perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
3781 rcu_read_unlock(); 3890 rcu_read_unlock();
3782 3891
3783 barrier(); 3892 barrier();
@@ -3787,57 +3896,57 @@ out:
3787 put_cpu_var(perf_cpu_context); 3896 put_cpu_var(perf_cpu_context);
3788} 3897}
3789 3898
3790void __perf_swcounter_event(u32 event, u64 nr, int nmi, 3899void __perf_sw_event(u32 event_id, u64 nr, int nmi,
3791 struct pt_regs *regs, u64 addr) 3900 struct pt_regs *regs, u64 addr)
3792{ 3901{
3793 struct perf_sample_data data = { 3902 struct perf_sample_data data = {
3794 .addr = addr, 3903 .addr = addr,
3795 }; 3904 };
3796 3905
3797 do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, 3906 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
3798 &data, regs); 3907 &data, regs);
3799} 3908}
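The counters fed by __perf_sw_event() are the same ones userspace reaches through PERF_TYPE_SOFTWARE. A minimal sketch counting page faults for the calling thread, assuming the renamed ABI and __NR_perf_event_open being available:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_PAGE_FAULTS;

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		/* Touch fresh memory so the fault handler hits the
		 * software-event hook a few times. */
		for (int i = 0; i < 64; i++) {
			char *p = malloc(1 << 20);
			p[0] = 1;
			free(p);
		}

		read(fd, &count, sizeof(count));
		printf("page faults: %llu\n", (unsigned long long)count);
		return 0;
	}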
3800 3909
3801static void perf_swcounter_read(struct perf_counter *counter) 3910static void perf_swevent_read(struct perf_event *event)
3802{ 3911{
3803} 3912}
3804 3913
3805static int perf_swcounter_enable(struct perf_counter *counter) 3914static int perf_swevent_enable(struct perf_event *event)
3806{ 3915{
3807 struct hw_perf_counter *hwc = &counter->hw; 3916 struct hw_perf_event *hwc = &event->hw;
3808 3917
3809 if (hwc->sample_period) { 3918 if (hwc->sample_period) {
3810 hwc->last_period = hwc->sample_period; 3919 hwc->last_period = hwc->sample_period;
3811 perf_swcounter_set_period(counter); 3920 perf_swevent_set_period(event);
3812 } 3921 }
3813 return 0; 3922 return 0;
3814} 3923}
3815 3924
3816static void perf_swcounter_disable(struct perf_counter *counter) 3925static void perf_swevent_disable(struct perf_event *event)
3817{ 3926{
3818} 3927}
3819 3928
3820static const struct pmu perf_ops_generic = { 3929static const struct pmu perf_ops_generic = {
3821 .enable = perf_swcounter_enable, 3930 .enable = perf_swevent_enable,
3822 .disable = perf_swcounter_disable, 3931 .disable = perf_swevent_disable,
3823 .read = perf_swcounter_read, 3932 .read = perf_swevent_read,
3824 .unthrottle = perf_swcounter_unthrottle, 3933 .unthrottle = perf_swevent_unthrottle,
3825}; 3934};
3826 3935
3827/* 3936/*
3828 * hrtimer based swcounter callback 3937 * hrtimer based swevent callback
3829 */ 3938 */
3830 3939
3831static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) 3940static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3832{ 3941{
3833 enum hrtimer_restart ret = HRTIMER_RESTART; 3942 enum hrtimer_restart ret = HRTIMER_RESTART;
3834 struct perf_sample_data data; 3943 struct perf_sample_data data;
3835 struct pt_regs *regs; 3944 struct pt_regs *regs;
3836 struct perf_counter *counter; 3945 struct perf_event *event;
3837 u64 period; 3946 u64 period;
3838 3947
3839 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); 3948 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
3840 counter->pmu->read(counter); 3949 event->pmu->read(event);
3841 3950
3842 data.addr = 0; 3951 data.addr = 0;
3843 regs = get_irq_regs(); 3952 regs = get_irq_regs();
@@ -3845,45 +3954,45 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3845 * In case we exclude kernel IPs or are somehow not in interrupt 3954 * In case we exclude kernel IPs or are somehow not in interrupt
3846 * context, provide the next best thing, the user IP. 3955 * context, provide the next best thing, the user IP.
3847 */ 3956 */
3848 if ((counter->attr.exclude_kernel || !regs) && 3957 if ((event->attr.exclude_kernel || !regs) &&
3849 !counter->attr.exclude_user) 3958 !event->attr.exclude_user)
3850 regs = task_pt_regs(current); 3959 regs = task_pt_regs(current);
3851 3960
3852 if (regs) { 3961 if (regs) {
3853 if (perf_counter_overflow(counter, 0, &data, regs)) 3962 if (perf_event_overflow(event, 0, &data, regs))
3854 ret = HRTIMER_NORESTART; 3963 ret = HRTIMER_NORESTART;
3855 } 3964 }
3856 3965
3857 period = max_t(u64, 10000, counter->hw.sample_period); 3966 period = max_t(u64, 10000, event->hw.sample_period);
3858 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 3967 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3859 3968
3860 return ret; 3969 return ret;
3861} 3970}
3862 3971
3863/* 3972/*
3864 * Software counter: cpu wall time clock 3973 * Software event: cpu wall time clock
3865 */ 3974 */
3866 3975
3867static void cpu_clock_perf_counter_update(struct perf_counter *counter) 3976static void cpu_clock_perf_event_update(struct perf_event *event)
3868{ 3977{
3869 int cpu = raw_smp_processor_id(); 3978 int cpu = raw_smp_processor_id();
3870 s64 prev; 3979 s64 prev;
3871 u64 now; 3980 u64 now;
3872 3981
3873 now = cpu_clock(cpu); 3982 now = cpu_clock(cpu);
3874 prev = atomic64_read(&counter->hw.prev_count); 3983 prev = atomic64_read(&event->hw.prev_count);
3875 atomic64_set(&counter->hw.prev_count, now); 3984 atomic64_set(&event->hw.prev_count, now);
3876 atomic64_add(now - prev, &counter->count); 3985 atomic64_add(now - prev, &event->count);
3877} 3986}
3878 3987
3879static int cpu_clock_perf_counter_enable(struct perf_counter *counter) 3988static int cpu_clock_perf_event_enable(struct perf_event *event)
3880{ 3989{
3881 struct hw_perf_counter *hwc = &counter->hw; 3990 struct hw_perf_event *hwc = &event->hw;
3882 int cpu = raw_smp_processor_id(); 3991 int cpu = raw_smp_processor_id();
3883 3992
3884 atomic64_set(&hwc->prev_count, cpu_clock(cpu)); 3993 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3885 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3994 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3886 hwc->hrtimer.function = perf_swcounter_hrtimer; 3995 hwc->hrtimer.function = perf_swevent_hrtimer;
3887 if (hwc->sample_period) { 3996 if (hwc->sample_period) {
3888 u64 period = max_t(u64, 10000, hwc->sample_period); 3997 u64 period = max_t(u64, 10000, hwc->sample_period);
3889 __hrtimer_start_range_ns(&hwc->hrtimer, 3998 __hrtimer_start_range_ns(&hwc->hrtimer,
@@ -3894,48 +4003,48 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3894 return 0; 4003 return 0;
3895} 4004}
3896 4005
3897static void cpu_clock_perf_counter_disable(struct perf_counter *counter) 4006static void cpu_clock_perf_event_disable(struct perf_event *event)
3898{ 4007{
3899 if (counter->hw.sample_period) 4008 if (event->hw.sample_period)
3900 hrtimer_cancel(&counter->hw.hrtimer); 4009 hrtimer_cancel(&event->hw.hrtimer);
3901 cpu_clock_perf_counter_update(counter); 4010 cpu_clock_perf_event_update(event);
3902} 4011}
3903 4012
3904static void cpu_clock_perf_counter_read(struct perf_counter *counter) 4013static void cpu_clock_perf_event_read(struct perf_event *event)
3905{ 4014{
3906 cpu_clock_perf_counter_update(counter); 4015 cpu_clock_perf_event_update(event);
3907} 4016}
3908 4017
3909static const struct pmu perf_ops_cpu_clock = { 4018static const struct pmu perf_ops_cpu_clock = {
3910 .enable = cpu_clock_perf_counter_enable, 4019 .enable = cpu_clock_perf_event_enable,
3911 .disable = cpu_clock_perf_counter_disable, 4020 .disable = cpu_clock_perf_event_disable,
3912 .read = cpu_clock_perf_counter_read, 4021 .read = cpu_clock_perf_event_read,
3913}; 4022};
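The cpu-clock pmu above accumulates time by swapping prev_count for the current clock value and adding the delta. A standalone model of that update pattern, using clock_gettime() in place of the kernel's cpu_clock():

	#include <stdio.h>
	#include <stdint.h>
	#include <time.h>

	static uint64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	struct clock_event {
		uint64_t prev_count;	/* last clock value accounted */
		uint64_t count;		/* accumulated nanoseconds */
	};

	/* Same shape as cpu_clock_perf_event_update(): read the clock, charge
	 * the delta since the previous reading, remember the new reading. */
	static void clock_event_update(struct clock_event *ev)
	{
		uint64_t now = now_ns();

		ev->count += now - ev->prev_count;
		ev->prev_count = now;
	}

	int main(void)
	{
		struct clock_event ev = { .prev_count = now_ns(), .count = 0 };

		for (volatile int i = 0; i < 10000000; i++)
			;
		clock_event_update(&ev);
		printf("elapsed: %llu ns\n", (unsigned long long)ev.count);
		return 0;
	}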
3914 4023
3915/* 4024/*
3916 * Software counter: task time clock 4025 * Software event: task time clock
3917 */ 4026 */
3918 4027
3919static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) 4028static void task_clock_perf_event_update(struct perf_event *event, u64 now)
3920{ 4029{
3921 u64 prev; 4030 u64 prev;
3922 s64 delta; 4031 s64 delta;
3923 4032
3924 prev = atomic64_xchg(&counter->hw.prev_count, now); 4033 prev = atomic64_xchg(&event->hw.prev_count, now);
3925 delta = now - prev; 4034 delta = now - prev;
3926 atomic64_add(delta, &counter->count); 4035 atomic64_add(delta, &event->count);
3927} 4036}
3928 4037
3929static int task_clock_perf_counter_enable(struct perf_counter *counter) 4038static int task_clock_perf_event_enable(struct perf_event *event)
3930{ 4039{
3931 struct hw_perf_counter *hwc = &counter->hw; 4040 struct hw_perf_event *hwc = &event->hw;
3932 u64 now; 4041 u64 now;
3933 4042
3934 now = counter->ctx->time; 4043 now = event->ctx->time;
3935 4044
3936 atomic64_set(&hwc->prev_count, now); 4045 atomic64_set(&hwc->prev_count, now);
3937 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4046 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3938 hwc->hrtimer.function = perf_swcounter_hrtimer; 4047 hwc->hrtimer.function = perf_swevent_hrtimer;
3939 if (hwc->sample_period) { 4048 if (hwc->sample_period) {
3940 u64 period = max_t(u64, 10000, hwc->sample_period); 4049 u64 period = max_t(u64, 10000, hwc->sample_period);
3941 __hrtimer_start_range_ns(&hwc->hrtimer, 4050 __hrtimer_start_range_ns(&hwc->hrtimer,
@@ -3946,38 +4055,38 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
3946 return 0; 4055 return 0;
3947} 4056}
3948 4057
3949static void task_clock_perf_counter_disable(struct perf_counter *counter) 4058static void task_clock_perf_event_disable(struct perf_event *event)
3950{ 4059{
3951 if (counter->hw.sample_period) 4060 if (event->hw.sample_period)
3952 hrtimer_cancel(&counter->hw.hrtimer); 4061 hrtimer_cancel(&event->hw.hrtimer);
3953 task_clock_perf_counter_update(counter, counter->ctx->time); 4062 task_clock_perf_event_update(event, event->ctx->time);
3954 4063
3955} 4064}
3956 4065
3957static void task_clock_perf_counter_read(struct perf_counter *counter) 4066static void task_clock_perf_event_read(struct perf_event *event)
3958{ 4067{
3959 u64 time; 4068 u64 time;
3960 4069
3961 if (!in_nmi()) { 4070 if (!in_nmi()) {
3962 update_context_time(counter->ctx); 4071 update_context_time(event->ctx);
3963 time = counter->ctx->time; 4072 time = event->ctx->time;
3964 } else { 4073 } else {
3965 u64 now = perf_clock(); 4074 u64 now = perf_clock();
3966 u64 delta = now - counter->ctx->timestamp; 4075 u64 delta = now - event->ctx->timestamp;
3967 time = counter->ctx->time + delta; 4076 time = event->ctx->time + delta;
3968 } 4077 }
3969 4078
3970 task_clock_perf_counter_update(counter, time); 4079 task_clock_perf_event_update(event, time);
3971} 4080}
3972 4081
3973static const struct pmu perf_ops_task_clock = { 4082static const struct pmu perf_ops_task_clock = {
3974 .enable = task_clock_perf_counter_enable, 4083 .enable = task_clock_perf_event_enable,
3975 .disable = task_clock_perf_counter_disable, 4084 .disable = task_clock_perf_event_disable,
3976 .read = task_clock_perf_counter_read, 4085 .read = task_clock_perf_event_read,
3977}; 4086};
3978 4087
3979#ifdef CONFIG_EVENT_PROFILE 4088#ifdef CONFIG_EVENT_PROFILE
3980void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, 4089void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
3981 int entry_size) 4090 int entry_size)
3982{ 4091{
3983 struct perf_raw_record raw = { 4092 struct perf_raw_record raw = {
@@ -3995,78 +4104,78 @@ void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
3995 if (!regs) 4104 if (!regs)
3996 regs = task_pt_regs(current); 4105 regs = task_pt_regs(current);
3997 4106
3998 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 4107 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
3999 &data, regs); 4108 &data, regs);
4000} 4109}
4001EXPORT_SYMBOL_GPL(perf_tpcounter_event); 4110EXPORT_SYMBOL_GPL(perf_tp_event);
4002 4111
4003extern int ftrace_profile_enable(int); 4112extern int ftrace_profile_enable(int);
4004extern void ftrace_profile_disable(int); 4113extern void ftrace_profile_disable(int);
4005 4114
4006static void tp_perf_counter_destroy(struct perf_counter *counter) 4115static void tp_perf_event_destroy(struct perf_event *event)
4007{ 4116{
4008 ftrace_profile_disable(counter->attr.config); 4117 ftrace_profile_disable(event->attr.config);
4009} 4118}
4010 4119
4011static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 4120static const struct pmu *tp_perf_event_init(struct perf_event *event)
4012{ 4121{
4013 /* 4122 /*
4014 * Raw tracepoint data is a severe data leak, only allow root to 4123 * Raw tracepoint data is a severe data leak, only allow root to
4015 * have these. 4124 * have these.
4016 */ 4125 */
4017 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && 4126 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4018 perf_paranoid_tracepoint_raw() && 4127 perf_paranoid_tracepoint_raw() &&
4019 !capable(CAP_SYS_ADMIN)) 4128 !capable(CAP_SYS_ADMIN))
4020 return ERR_PTR(-EPERM); 4129 return ERR_PTR(-EPERM);
4021 4130
4022 if (ftrace_profile_enable(counter->attr.config)) 4131 if (ftrace_profile_enable(event->attr.config))
4023 return NULL; 4132 return NULL;
4024 4133
4025 counter->destroy = tp_perf_counter_destroy; 4134 event->destroy = tp_perf_event_destroy;
4026 4135
4027 return &perf_ops_generic; 4136 return &perf_ops_generic;
4028} 4137}
4029#else 4138#else
4030static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 4139static const struct pmu *tp_perf_event_init(struct perf_event *event)
4031{ 4140{
4032 return NULL; 4141 return NULL;
4033} 4142}
4034#endif 4143#endif
4035 4144
4036atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; 4145atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4037 4146
4038static void sw_perf_counter_destroy(struct perf_counter *counter) 4147static void sw_perf_event_destroy(struct perf_event *event)
4039{ 4148{
4040 u64 event = counter->attr.config; 4149 u64 event_id = event->attr.config;
4041 4150
4042 WARN_ON(counter->parent); 4151 WARN_ON(event->parent);
4043 4152
4044 atomic_dec(&perf_swcounter_enabled[event]); 4153 atomic_dec(&perf_swevent_enabled[event_id]);
4045} 4154}
4046 4155
4047static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) 4156static const struct pmu *sw_perf_event_init(struct perf_event *event)
4048{ 4157{
4049 const struct pmu *pmu = NULL; 4158 const struct pmu *pmu = NULL;
4050 u64 event = counter->attr.config; 4159 u64 event_id = event->attr.config;
4051 4160
4052 /* 4161 /*
4053 * Software counters (currently) can't in general distinguish 4162 * Software events (currently) can't in general distinguish
4054 * between user, kernel and hypervisor events. 4163 * between user, kernel and hypervisor events.
4055 * However, context switches and cpu migrations are considered 4164 * However, context switches and cpu migrations are considered
4056 * to be kernel events, and page faults are never hypervisor 4165 * to be kernel events, and page faults are never hypervisor
4057 * events. 4166 * events.
4058 */ 4167 */
4059 switch (event) { 4168 switch (event_id) {
4060 case PERF_COUNT_SW_CPU_CLOCK: 4169 case PERF_COUNT_SW_CPU_CLOCK:
4061 pmu = &perf_ops_cpu_clock; 4170 pmu = &perf_ops_cpu_clock;
4062 4171
4063 break; 4172 break;
4064 case PERF_COUNT_SW_TASK_CLOCK: 4173 case PERF_COUNT_SW_TASK_CLOCK:
4065 /* 4174 /*
4066 * If the user instantiates this as a per-cpu counter, 4175 * If the user instantiates this as a per-cpu event,
4067 * use the cpu_clock counter instead. 4176 * use the cpu_clock event instead.
4068 */ 4177 */
4069 if (counter->ctx->task) 4178 if (event->ctx->task)
4070 pmu = &perf_ops_task_clock; 4179 pmu = &perf_ops_task_clock;
4071 else 4180 else
4072 pmu = &perf_ops_cpu_clock; 4181 pmu = &perf_ops_cpu_clock;
@@ -4077,9 +4186,9 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
4077 case PERF_COUNT_SW_PAGE_FAULTS_MAJ: 4186 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4078 case PERF_COUNT_SW_CONTEXT_SWITCHES: 4187 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4079 case PERF_COUNT_SW_CPU_MIGRATIONS: 4188 case PERF_COUNT_SW_CPU_MIGRATIONS:
4080 if (!counter->parent) { 4189 if (!event->parent) {
4081 atomic_inc(&perf_swcounter_enabled[event]); 4190 atomic_inc(&perf_swevent_enabled[event_id]);
4082 counter->destroy = sw_perf_counter_destroy; 4191 event->destroy = sw_perf_event_destroy;
4083 } 4192 }
4084 pmu = &perf_ops_generic; 4193 pmu = &perf_ops_generic;
4085 break; 4194 break;
@@ -4089,62 +4198,62 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
4089} 4198}
4090 4199
4091/* 4200/*
4092 * Allocate and initialize a counter structure 4201 * Allocate and initialize a event structure
4092 * Allocate and initialize a counter structure 4201 * Allocate and initialize an event structure
4093 */ 4202 */
4094static struct perf_counter * 4203static struct perf_event *
4095perf_counter_alloc(struct perf_counter_attr *attr, 4204perf_event_alloc(struct perf_event_attr *attr,
4096 int cpu, 4205 int cpu,
4097 struct perf_counter_context *ctx, 4206 struct perf_event_context *ctx,
4098 struct perf_counter *group_leader, 4207 struct perf_event *group_leader,
4099 struct perf_counter *parent_counter, 4208 struct perf_event *parent_event,
4100 gfp_t gfpflags) 4209 gfp_t gfpflags)
4101{ 4210{
4102 const struct pmu *pmu; 4211 const struct pmu *pmu;
4103 struct perf_counter *counter; 4212 struct perf_event *event;
4104 struct hw_perf_counter *hwc; 4213 struct hw_perf_event *hwc;
4105 long err; 4214 long err;
4106 4215
4107 counter = kzalloc(sizeof(*counter), gfpflags); 4216 event = kzalloc(sizeof(*event), gfpflags);
4108 if (!counter) 4217 if (!event)
4109 return ERR_PTR(-ENOMEM); 4218 return ERR_PTR(-ENOMEM);
4110 4219
4111 /* 4220 /*
4112 * Single counters are their own group leaders, with an 4221 * Single events are their own group leaders, with an
4113 * empty sibling list: 4222 * empty sibling list:
4114 */ 4223 */
4115 if (!group_leader) 4224 if (!group_leader)
4116 group_leader = counter; 4225 group_leader = event;
4117 4226
4118 mutex_init(&counter->child_mutex); 4227 mutex_init(&event->child_mutex);
4119 INIT_LIST_HEAD(&counter->child_list); 4228 INIT_LIST_HEAD(&event->child_list);
4120 4229
4121 INIT_LIST_HEAD(&counter->list_entry); 4230 INIT_LIST_HEAD(&event->group_entry);
4122 INIT_LIST_HEAD(&counter->event_entry); 4231 INIT_LIST_HEAD(&event->event_entry);
4123 INIT_LIST_HEAD(&counter->sibling_list); 4232 INIT_LIST_HEAD(&event->sibling_list);
4124 init_waitqueue_head(&counter->waitq); 4233 init_waitqueue_head(&event->waitq);
4125 4234
4126 mutex_init(&counter->mmap_mutex); 4235 mutex_init(&event->mmap_mutex);
4127 4236
4128 counter->cpu = cpu; 4237 event->cpu = cpu;
4129 counter->attr = *attr; 4238 event->attr = *attr;
4130 counter->group_leader = group_leader; 4239 event->group_leader = group_leader;
4131 counter->pmu = NULL; 4240 event->pmu = NULL;
4132 counter->ctx = ctx; 4241 event->ctx = ctx;
4133 counter->oncpu = -1; 4242 event->oncpu = -1;
4134 4243
4135 counter->parent = parent_counter; 4244 event->parent = parent_event;
4136 4245
4137 counter->ns = get_pid_ns(current->nsproxy->pid_ns); 4246 event->ns = get_pid_ns(current->nsproxy->pid_ns);
4138 counter->id = atomic64_inc_return(&perf_counter_id); 4247 event->id = atomic64_inc_return(&perf_event_id);
4139 4248
4140 counter->state = PERF_COUNTER_STATE_INACTIVE; 4249 event->state = PERF_EVENT_STATE_INACTIVE;
4141 4250
4142 if (attr->disabled) 4251 if (attr->disabled)
4143 counter->state = PERF_COUNTER_STATE_OFF; 4252 event->state = PERF_EVENT_STATE_OFF;
4144 4253
4145 pmu = NULL; 4254 pmu = NULL;
4146 4255
4147 hwc = &counter->hw; 4256 hwc = &event->hw;
4148 hwc->sample_period = attr->sample_period; 4257 hwc->sample_period = attr->sample_period;
4149 if (attr->freq && attr->sample_freq) 4258 if (attr->freq && attr->sample_freq)
4150 hwc->sample_period = 1; 4259 hwc->sample_period = 1;
@@ -4153,7 +4262,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
4153 atomic64_set(&hwc->period_left, hwc->sample_period); 4262 atomic64_set(&hwc->period_left, hwc->sample_period);
4154 4263
4155 /* 4264 /*
4156 * we currently do not support PERF_FORMAT_GROUP on inherited counters 4265 * we currently do not support PERF_FORMAT_GROUP on inherited events
4157 */ 4266 */
4158 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 4267 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
4159 goto done; 4268 goto done;
@@ -4162,15 +4271,15 @@ perf_counter_alloc(struct perf_counter_attr *attr,
4162 case PERF_TYPE_RAW: 4271 case PERF_TYPE_RAW:
4163 case PERF_TYPE_HARDWARE: 4272 case PERF_TYPE_HARDWARE:
4164 case PERF_TYPE_HW_CACHE: 4273 case PERF_TYPE_HW_CACHE:
4165 pmu = hw_perf_counter_init(counter); 4274 pmu = hw_perf_event_init(event);
4166 break; 4275 break;
4167 4276
4168 case PERF_TYPE_SOFTWARE: 4277 case PERF_TYPE_SOFTWARE:
4169 pmu = sw_perf_counter_init(counter); 4278 pmu = sw_perf_event_init(event);
4170 break; 4279 break;
4171 4280
4172 case PERF_TYPE_TRACEPOINT: 4281 case PERF_TYPE_TRACEPOINT:
4173 pmu = tp_perf_counter_init(counter); 4282 pmu = tp_perf_event_init(event);
4174 break; 4283 break;
4175 4284
4176 default: 4285 default:
@@ -4184,29 +4293,29 @@ done:
4184 err = PTR_ERR(pmu); 4293 err = PTR_ERR(pmu);
4185 4294
4186 if (err) { 4295 if (err) {
4187 if (counter->ns) 4296 if (event->ns)
4188 put_pid_ns(counter->ns); 4297 put_pid_ns(event->ns);
4189 kfree(counter); 4298 kfree(event);
4190 return ERR_PTR(err); 4299 return ERR_PTR(err);
4191 } 4300 }
4192 4301
4193 counter->pmu = pmu; 4302 event->pmu = pmu;
4194 4303
4195 if (!counter->parent) { 4304 if (!event->parent) {
4196 atomic_inc(&nr_counters); 4305 atomic_inc(&nr_events);
4197 if (counter->attr.mmap) 4306 if (event->attr.mmap)
4198 atomic_inc(&nr_mmap_counters); 4307 atomic_inc(&nr_mmap_events);
4199 if (counter->attr.comm) 4308 if (event->attr.comm)
4200 atomic_inc(&nr_comm_counters); 4309 atomic_inc(&nr_comm_events);
4201 if (counter->attr.task) 4310 if (event->attr.task)
4202 atomic_inc(&nr_task_counters); 4311 atomic_inc(&nr_task_events);
4203 } 4312 }
4204 4313
4205 return counter; 4314 return event;
4206} 4315}
4207 4316
4208static int perf_copy_attr(struct perf_counter_attr __user *uattr, 4317static int perf_copy_attr(struct perf_event_attr __user *uattr,
4209 struct perf_counter_attr *attr) 4318 struct perf_event_attr *attr)
4210{ 4319{
4211 u32 size; 4320 u32 size;
4212 int ret; 4321 int ret;
@@ -4285,11 +4394,11 @@ err_size:
4285 goto out; 4394 goto out;
4286} 4395}
4287 4396
4288int perf_counter_set_output(struct perf_counter *counter, int output_fd) 4397int perf_event_set_output(struct perf_event *event, int output_fd)
4289{ 4398{
4290 struct perf_counter *output_counter = NULL; 4399 struct perf_event *output_event = NULL;
4291 struct file *output_file = NULL; 4400 struct file *output_file = NULL;
4292 struct perf_counter *old_output; 4401 struct perf_event *old_output;
4293 int fput_needed = 0; 4402 int fput_needed = 0;
4294 int ret = -EINVAL; 4403 int ret = -EINVAL;
4295 4404
@@ -4303,28 +4412,28 @@ int perf_counter_set_output(struct perf_counter *counter, int output_fd)
4303 if (output_file->f_op != &perf_fops) 4412 if (output_file->f_op != &perf_fops)
4304 goto out; 4413 goto out;
4305 4414
4306 output_counter = output_file->private_data; 4415 output_event = output_file->private_data;
4307 4416
4308 /* Don't chain output fds */ 4417 /* Don't chain output fds */
4309 if (output_counter->output) 4418 if (output_event->output)
4310 goto out; 4419 goto out;
4311 4420
4312 /* Don't set an output fd when we already have an output channel */ 4421 /* Don't set an output fd when we already have an output channel */
4313 if (counter->data) 4422 if (event->data)
4314 goto out; 4423 goto out;
4315 4424
4316 atomic_long_inc(&output_file->f_count); 4425 atomic_long_inc(&output_file->f_count);
4317 4426
4318set: 4427set:
4319 mutex_lock(&counter->mmap_mutex); 4428 mutex_lock(&event->mmap_mutex);
4320 old_output = counter->output; 4429 old_output = event->output;
4321 rcu_assign_pointer(counter->output, output_counter); 4430 rcu_assign_pointer(event->output, output_event);
4322 mutex_unlock(&counter->mmap_mutex); 4431 mutex_unlock(&event->mmap_mutex);
4323 4432
4324 if (old_output) { 4433 if (old_output) {
4325 /* 4434 /*
4326 * we need to make sure no existing perf_output_*() 4435 * we need to make sure no existing perf_output_*()
4327 * is still referencing this counter. 4436 * is still referencing this event.
4328 */ 4437 */
4329 synchronize_rcu(); 4438 synchronize_rcu();
4330 fput(old_output->filp); 4439 fput(old_output->filp);
@@ -4337,21 +4446,21 @@ out:
4337} 4446}
4338 4447
4339/** 4448/**
4340 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu 4449 * sys_perf_event_open - open a performance event, associate it to a task/cpu
4341 * 4450 *
4342 * @attr_uptr: event type attributes for monitoring/sampling 4451 * @attr_uptr: event_id type attributes for monitoring/sampling
4343 * @pid: target pid 4452 * @pid: target pid
4344 * @cpu: target cpu 4453 * @cpu: target cpu
4345 * @group_fd: group leader counter fd 4454 * @group_fd: group leader event fd
4346 */ 4455 */
4347SYSCALL_DEFINE5(perf_counter_open, 4456SYSCALL_DEFINE5(perf_event_open,
4348 struct perf_counter_attr __user *, attr_uptr, 4457 struct perf_event_attr __user *, attr_uptr,
4349 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 4458 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
4350{ 4459{
4351 struct perf_counter *counter, *group_leader; 4460 struct perf_event *event, *group_leader;
4352 struct perf_counter_attr attr; 4461 struct perf_event_attr attr;
4353 struct perf_counter_context *ctx; 4462 struct perf_event_context *ctx;
4354 struct file *counter_file = NULL; 4463 struct file *event_file = NULL;
4355 struct file *group_file = NULL; 4464 struct file *group_file = NULL;
4356 int fput_needed = 0; 4465 int fput_needed = 0;
4357 int fput_needed2 = 0; 4466 int fput_needed2 = 0;
@@ -4371,7 +4480,7 @@ SYSCALL_DEFINE5(perf_counter_open,
4371 } 4480 }
4372 4481
4373 if (attr.freq) { 4482 if (attr.freq) {
4374 if (attr.sample_freq > sysctl_perf_counter_sample_rate) 4483 if (attr.sample_freq > sysctl_perf_event_sample_rate)
4375 return -EINVAL; 4484 return -EINVAL;
4376 } 4485 }
4377 4486
@@ -4383,7 +4492,7 @@ SYSCALL_DEFINE5(perf_counter_open,
4383 return PTR_ERR(ctx); 4492 return PTR_ERR(ctx);
4384 4493
4385 /* 4494 /*
4386 * Look up the group leader (we will attach this counter to it): 4495 * Look up the group leader (we will attach this event to it):
4387 */ 4496 */
4388 group_leader = NULL; 4497 group_leader = NULL;
4389 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { 4498 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
@@ -4414,45 +4523,45 @@ SYSCALL_DEFINE5(perf_counter_open,
4414 goto err_put_context; 4523 goto err_put_context;
4415 } 4524 }
4416 4525
4417 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, 4526 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
4418 NULL, GFP_KERNEL); 4527 NULL, GFP_KERNEL);
4419 err = PTR_ERR(counter); 4528 err = PTR_ERR(event);
4420 if (IS_ERR(counter)) 4529 if (IS_ERR(event))
4421 goto err_put_context; 4530 goto err_put_context;
4422 4531
4423 err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); 4532 err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
4424 if (err < 0) 4533 if (err < 0)
4425 goto err_free_put_context; 4534 goto err_free_put_context;
4426 4535
4427 counter_file = fget_light(err, &fput_needed2); 4536 event_file = fget_light(err, &fput_needed2);
4428 if (!counter_file) 4537 if (!event_file)
4429 goto err_free_put_context; 4538 goto err_free_put_context;
4430 4539
4431 if (flags & PERF_FLAG_FD_OUTPUT) { 4540 if (flags & PERF_FLAG_FD_OUTPUT) {
4432 err = perf_counter_set_output(counter, group_fd); 4541 err = perf_event_set_output(event, group_fd);
4433 if (err) 4542 if (err)
4434 goto err_fput_free_put_context; 4543 goto err_fput_free_put_context;
4435 } 4544 }
4436 4545
4437 counter->filp = counter_file; 4546 event->filp = event_file;
4438 WARN_ON_ONCE(ctx->parent_ctx); 4547 WARN_ON_ONCE(ctx->parent_ctx);
4439 mutex_lock(&ctx->mutex); 4548 mutex_lock(&ctx->mutex);
4440 perf_install_in_context(ctx, counter, cpu); 4549 perf_install_in_context(ctx, event, cpu);
4441 ++ctx->generation; 4550 ++ctx->generation;
4442 mutex_unlock(&ctx->mutex); 4551 mutex_unlock(&ctx->mutex);
4443 4552
4444 counter->owner = current; 4553 event->owner = current;
4445 get_task_struct(current); 4554 get_task_struct(current);
4446 mutex_lock(&current->perf_counter_mutex); 4555 mutex_lock(&current->perf_event_mutex);
4447 list_add_tail(&counter->owner_entry, &current->perf_counter_list); 4556 list_add_tail(&event->owner_entry, &current->perf_event_list);
4448 mutex_unlock(&current->perf_counter_mutex); 4557 mutex_unlock(&current->perf_event_mutex);
4449 4558
4450err_fput_free_put_context: 4559err_fput_free_put_context:
4451 fput_light(counter_file, fput_needed2); 4560 fput_light(event_file, fput_needed2);
4452 4561
4453err_free_put_context: 4562err_free_put_context:
4454 if (err < 0) 4563 if (err < 0)
4455 kfree(counter); 4564 kfree(event);
4456 4565
4457err_put_context: 4566err_put_context:
4458 if (err < 0) 4567 if (err < 0)
@@ -4464,88 +4573,88 @@ err_put_context:
4464} 4573}
4465 4574
4466/* 4575/*
4467 * inherit a counter from parent task to child task: 4576 * inherit an event from parent task to child task:
4468 */ 4577 */
4469static struct perf_counter * 4578static struct perf_event *
4470inherit_counter(struct perf_counter *parent_counter, 4579inherit_event(struct perf_event *parent_event,
4471 struct task_struct *parent, 4580 struct task_struct *parent,
4472 struct perf_counter_context *parent_ctx, 4581 struct perf_event_context *parent_ctx,
4473 struct task_struct *child, 4582 struct task_struct *child,
4474 struct perf_counter *group_leader, 4583 struct perf_event *group_leader,
4475 struct perf_counter_context *child_ctx) 4584 struct perf_event_context *child_ctx)
4476{ 4585{
4477 struct perf_counter *child_counter; 4586 struct perf_event *child_event;
4478 4587
4479 /* 4588 /*
4480 * Instead of creating recursive hierarchies of counters, 4589 * Instead of creating recursive hierarchies of events,
4481 * we link inherited counters back to the original parent, 4590 * we link inherited events back to the original parent,
4482 * which has a filp for sure, which we use as the reference 4591 * which has a filp for sure, which we use as the reference
4483 * count: 4592 * count:
4484 */ 4593 */
4485 if (parent_counter->parent) 4594 if (parent_event->parent)
4486 parent_counter = parent_counter->parent; 4595 parent_event = parent_event->parent;
4487 4596
4488 child_counter = perf_counter_alloc(&parent_counter->attr, 4597 child_event = perf_event_alloc(&parent_event->attr,
4489 parent_counter->cpu, child_ctx, 4598 parent_event->cpu, child_ctx,
4490 group_leader, parent_counter, 4599 group_leader, parent_event,
4491 GFP_KERNEL); 4600 GFP_KERNEL);
4492 if (IS_ERR(child_counter)) 4601 if (IS_ERR(child_event))
4493 return child_counter; 4602 return child_event;
4494 get_ctx(child_ctx); 4603 get_ctx(child_ctx);
4495 4604
4496 /* 4605 /*
4497 * Make the child state follow the state of the parent counter, 4606 * Make the child state follow the state of the parent event,
4498 * not its attr.disabled bit. We hold the parent's mutex, 4607 * not its attr.disabled bit. We hold the parent's mutex,
4499 * so we won't race with perf_counter_{en, dis}able_family. 4608 * so we won't race with perf_event_{en, dis}able_family.
4500 */ 4609 */
4501 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) 4610 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
4502 child_counter->state = PERF_COUNTER_STATE_INACTIVE; 4611 child_event->state = PERF_EVENT_STATE_INACTIVE;
4503 else 4612 else
4504 child_counter->state = PERF_COUNTER_STATE_OFF; 4613 child_event->state = PERF_EVENT_STATE_OFF;
4505 4614
4506 if (parent_counter->attr.freq) 4615 if (parent_event->attr.freq)
4507 child_counter->hw.sample_period = parent_counter->hw.sample_period; 4616 child_event->hw.sample_period = parent_event->hw.sample_period;
4508 4617
4509 /* 4618 /*
4510 * Link it up in the child's context: 4619 * Link it up in the child's context:
4511 */ 4620 */
4512 add_counter_to_ctx(child_counter, child_ctx); 4621 add_event_to_ctx(child_event, child_ctx);
4513 4622
4514 /* 4623 /*
4515 * Get a reference to the parent filp - we will fput it 4624 * Get a reference to the parent filp - we will fput it
4516 * when the child counter exits. This is safe to do because 4625 * when the child event exits. This is safe to do because
4517 * we are in the parent and we know that the filp still 4626 * we are in the parent and we know that the filp still
4518 * exists and has a nonzero count: 4627 * exists and has a nonzero count:
4519 */ 4628 */
4520 atomic_long_inc(&parent_counter->filp->f_count); 4629 atomic_long_inc(&parent_event->filp->f_count);
4521 4630
4522 /* 4631 /*
4523 * Link this into the parent counter's child list 4632 * Link this into the parent event's child list
4524 */ 4633 */
4525 WARN_ON_ONCE(parent_counter->ctx->parent_ctx); 4634 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4526 mutex_lock(&parent_counter->child_mutex); 4635 mutex_lock(&parent_event->child_mutex);
4527 list_add_tail(&child_counter->child_list, &parent_counter->child_list); 4636 list_add_tail(&child_event->child_list, &parent_event->child_list);
4528 mutex_unlock(&parent_counter->child_mutex); 4637 mutex_unlock(&parent_event->child_mutex);
4529 4638
4530 return child_counter; 4639 return child_event;
4531} 4640}
4532 4641
4533static int inherit_group(struct perf_counter *parent_counter, 4642static int inherit_group(struct perf_event *parent_event,
4534 struct task_struct *parent, 4643 struct task_struct *parent,
4535 struct perf_counter_context *parent_ctx, 4644 struct perf_event_context *parent_ctx,
4536 struct task_struct *child, 4645 struct task_struct *child,
4537 struct perf_counter_context *child_ctx) 4646 struct perf_event_context *child_ctx)
4538{ 4647{
4539 struct perf_counter *leader; 4648 struct perf_event *leader;
4540 struct perf_counter *sub; 4649 struct perf_event *sub;
4541 struct perf_counter *child_ctr; 4650 struct perf_event *child_ctr;
4542 4651
4543 leader = inherit_counter(parent_counter, parent, parent_ctx, 4652 leader = inherit_event(parent_event, parent, parent_ctx,
4544 child, NULL, child_ctx); 4653 child, NULL, child_ctx);
4545 if (IS_ERR(leader)) 4654 if (IS_ERR(leader))
4546 return PTR_ERR(leader); 4655 return PTR_ERR(leader);
4547 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { 4656 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
4548 child_ctr = inherit_counter(sub, parent, parent_ctx, 4657 child_ctr = inherit_event(sub, parent, parent_ctx,
4549 child, leader, child_ctx); 4658 child, leader, child_ctx);
4550 if (IS_ERR(child_ctr)) 4659 if (IS_ERR(child_ctr))
4551 return PTR_ERR(child_ctr); 4660 return PTR_ERR(child_ctr);
@@ -4553,74 +4662,74 @@ static int inherit_group(struct perf_counter *parent_counter,
4553 return 0; 4662 return 0;
4554} 4663}
4555 4664
4556static void sync_child_counter(struct perf_counter *child_counter, 4665static void sync_child_event(struct perf_event *child_event,
4557 struct task_struct *child) 4666 struct task_struct *child)
4558{ 4667{
4559 struct perf_counter *parent_counter = child_counter->parent; 4668 struct perf_event *parent_event = child_event->parent;
4560 u64 child_val; 4669 u64 child_val;
4561 4670
4562 if (child_counter->attr.inherit_stat) 4671 if (child_event->attr.inherit_stat)
4563 perf_counter_read_event(child_counter, child); 4672 perf_event_read_event(child_event, child);
4564 4673
4565 child_val = atomic64_read(&child_counter->count); 4674 child_val = atomic64_read(&child_event->count);
4566 4675
4567 /* 4676 /*
4568 * Add back the child's count to the parent's count: 4677 * Add back the child's count to the parent's count:
4569 */ 4678 */
4570 atomic64_add(child_val, &parent_counter->count); 4679 atomic64_add(child_val, &parent_event->count);
4571 atomic64_add(child_counter->total_time_enabled, 4680 atomic64_add(child_event->total_time_enabled,
4572 &parent_counter->child_total_time_enabled); 4681 &parent_event->child_total_time_enabled);
4573 atomic64_add(child_counter->total_time_running, 4682 atomic64_add(child_event->total_time_running,
4574 &parent_counter->child_total_time_running); 4683 &parent_event->child_total_time_running);
4575 4684
4576 /* 4685 /*
4577 * Remove this counter from the parent's list 4686 * Remove this event from the parent's list
4578 */ 4687 */
4579 WARN_ON_ONCE(parent_counter->ctx->parent_ctx); 4688 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4580 mutex_lock(&parent_counter->child_mutex); 4689 mutex_lock(&parent_event->child_mutex);
4581 list_del_init(&child_counter->child_list); 4690 list_del_init(&child_event->child_list);
4582 mutex_unlock(&parent_counter->child_mutex); 4691 mutex_unlock(&parent_event->child_mutex);
4583 4692
4584 /* 4693 /*
4585 * Release the parent counter, if this was the last 4694 * Release the parent event, if this was the last
4586 * reference to it. 4695 * reference to it.
4587 */ 4696 */
4588 fput(parent_counter->filp); 4697 fput(parent_event->filp);
4589} 4698}
4590 4699
4591static void 4700static void
4592__perf_counter_exit_task(struct perf_counter *child_counter, 4701__perf_event_exit_task(struct perf_event *child_event,
4593 struct perf_counter_context *child_ctx, 4702 struct perf_event_context *child_ctx,
4594 struct task_struct *child) 4703 struct task_struct *child)
4595{ 4704{
4596 struct perf_counter *parent_counter; 4705 struct perf_event *parent_event;
4597 4706
4598 update_counter_times(child_counter); 4707 update_event_times(child_event);
4599 perf_counter_remove_from_context(child_counter); 4708 perf_event_remove_from_context(child_event);
4600 4709
4601 parent_counter = child_counter->parent; 4710 parent_event = child_event->parent;
4602 /* 4711 /*
4603 * It can happen that parent exits first, and has counters 4712 * It can happen that parent exits first, and has events
4604 * that are still around due to the child reference. These 4713 * that are still around due to the child reference. These
4605 * counters need to be zapped - but otherwise linger. 4714 * events need to be zapped - but otherwise linger.
4606 */ 4715 */
4607 if (parent_counter) { 4716 if (parent_event) {
4608 sync_child_counter(child_counter, child); 4717 sync_child_event(child_event, child);
4609 free_counter(child_counter); 4718 free_event(child_event);
4610 } 4719 }
4611} 4720}
4612 4721
4613/* 4722/*
4614 * When a child task exits, feed back counter values to parent counters. 4723 * When a child task exits, feed back event values to parent events.
4615 */ 4724 */
4616void perf_counter_exit_task(struct task_struct *child) 4725void perf_event_exit_task(struct task_struct *child)
4617{ 4726{
4618 struct perf_counter *child_counter, *tmp; 4727 struct perf_event *child_event, *tmp;
4619 struct perf_counter_context *child_ctx; 4728 struct perf_event_context *child_ctx;
4620 unsigned long flags; 4729 unsigned long flags;
4621 4730
4622 if (likely(!child->perf_counter_ctxp)) { 4731 if (likely(!child->perf_event_ctxp)) {
4623 perf_counter_task(child, NULL, 0); 4732 perf_event_task(child, NULL, 0);
4624 return; 4733 return;
4625 } 4734 }
4626 4735
@@ -4631,37 +4740,37 @@ void perf_counter_exit_task(struct task_struct *child)
4631 * scheduled, so we are now safe from rescheduling changing 4740 * scheduled, so we are now safe from rescheduling changing
4632 * our context. 4741 * our context.
4633 */ 4742 */
4634 child_ctx = child->perf_counter_ctxp; 4743 child_ctx = child->perf_event_ctxp;
4635 __perf_counter_task_sched_out(child_ctx); 4744 __perf_event_task_sched_out(child_ctx);
4636 4745
4637 /* 4746 /*
4638 * Take the context lock here so that if find_get_context is 4747 * Take the context lock here so that if find_get_context is
4639 * reading child->perf_counter_ctxp, we wait until it has 4748 * reading child->perf_event_ctxp, we wait until it has
4640 * incremented the context's refcount before we do put_ctx below. 4749 * incremented the context's refcount before we do put_ctx below.
4641 */ 4750 */
4642 spin_lock(&child_ctx->lock); 4751 spin_lock(&child_ctx->lock);
4643 child->perf_counter_ctxp = NULL; 4752 child->perf_event_ctxp = NULL;
4644 /* 4753 /*
4645 * If this context is a clone; unclone it so it can't get 4754 * If this context is a clone; unclone it so it can't get
4646 * swapped to another process while we're removing all 4755 * swapped to another process while we're removing all
4647 * the counters from it. 4756 * the events from it.
4648 */ 4757 */
4649 unclone_ctx(child_ctx); 4758 unclone_ctx(child_ctx);
4650 spin_unlock_irqrestore(&child_ctx->lock, flags); 4759 spin_unlock_irqrestore(&child_ctx->lock, flags);
4651 4760
4652 /* 4761 /*
4653 * Report the task dead after unscheduling the counters so that we 4762 * Report the task dead after unscheduling the events so that we
4654 * won't get any samples after PERF_EVENT_EXIT. We can however still 4763 * won't get any samples after PERF_RECORD_EXIT. We can however still
4655 * get a few PERF_EVENT_READ events. 4764 * get a few PERF_RECORD_READ events.
4656 */ 4765 */
4657 perf_counter_task(child, child_ctx, 0); 4766 perf_event_task(child, child_ctx, 0);
4658 4767
4659 /* 4768 /*
4660 * We can recurse on the same lock type through: 4769 * We can recurse on the same lock type through:
4661 * 4770 *
4662 * __perf_counter_exit_task() 4771 * __perf_event_exit_task()
4663 * sync_child_counter() 4772 * sync_child_event()
4664 * fput(parent_counter->filp) 4773 * fput(parent_event->filp)
4665 * perf_release() 4774 * perf_release()
4666 * mutex_lock(&ctx->mutex) 4775 * mutex_lock(&ctx->mutex)
4667 * 4776 *
@@ -4670,16 +4779,16 @@ void perf_counter_exit_task(struct task_struct *child)
4670 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); 4779 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
4671 4780
4672again: 4781again:
4673 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, 4782 list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list,
4674 list_entry) 4783 group_entry)
4675 __perf_counter_exit_task(child_counter, child_ctx, child); 4784 __perf_event_exit_task(child_event, child_ctx, child);
4676 4785
4677 /* 4786 /*
4678 * If the last counter was a group counter, it will have appended all 4787 * If the last event was a group event, it will have appended all
4679 * its siblings to the list, but we obtained 'tmp' before that which 4788 * its siblings to the list, but we obtained 'tmp' before that which
4680 * will still point to the list head terminating the iteration. 4789 * will still point to the list head terminating the iteration.
4681 */ 4790 */
4682 if (!list_empty(&child_ctx->counter_list)) 4791 if (!list_empty(&child_ctx->group_list))
4683 goto again; 4792 goto again;
4684 4793
4685 mutex_unlock(&child_ctx->mutex); 4794 mutex_unlock(&child_ctx->mutex);
@@ -4691,33 +4800,33 @@ again:
4691 * free an unexposed, unused context as created by inheritance by 4800 * free an unexposed, unused context as created by inheritance by
4692 * init_task below, used by fork() in case of fail. 4801 * init_task below, used by fork() in case of fail.
4693 */ 4802 */
4694void perf_counter_free_task(struct task_struct *task) 4803void perf_event_free_task(struct task_struct *task)
4695{ 4804{
4696 struct perf_counter_context *ctx = task->perf_counter_ctxp; 4805 struct perf_event_context *ctx = task->perf_event_ctxp;
4697 struct perf_counter *counter, *tmp; 4806 struct perf_event *event, *tmp;
4698 4807
4699 if (!ctx) 4808 if (!ctx)
4700 return; 4809 return;
4701 4810
4702 mutex_lock(&ctx->mutex); 4811 mutex_lock(&ctx->mutex);
4703again: 4812again:
4704 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) { 4813 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) {
4705 struct perf_counter *parent = counter->parent; 4814 struct perf_event *parent = event->parent;
4706 4815
4707 if (WARN_ON_ONCE(!parent)) 4816 if (WARN_ON_ONCE(!parent))
4708 continue; 4817 continue;
4709 4818
4710 mutex_lock(&parent->child_mutex); 4819 mutex_lock(&parent->child_mutex);
4711 list_del_init(&counter->child_list); 4820 list_del_init(&event->child_list);
4712 mutex_unlock(&parent->child_mutex); 4821 mutex_unlock(&parent->child_mutex);
4713 4822
4714 fput(parent->filp); 4823 fput(parent->filp);
4715 4824
4716 list_del_counter(counter, ctx); 4825 list_del_event(event, ctx);
4717 free_counter(counter); 4826 free_event(event);
4718 } 4827 }
4719 4828
4720 if (!list_empty(&ctx->counter_list)) 4829 if (!list_empty(&ctx->group_list))
4721 goto again; 4830 goto again;
4722 4831
4723 mutex_unlock(&ctx->mutex); 4832 mutex_unlock(&ctx->mutex);
@@ -4726,37 +4835,37 @@ again:
4726} 4835}
4727 4836
4728/* 4837/*
4729 * Initialize the perf_counter context in task_struct 4838 * Initialize the perf_event context in task_struct
4730 */ 4839 */
4731int perf_counter_init_task(struct task_struct *child) 4840int perf_event_init_task(struct task_struct *child)
4732{ 4841{
4733 struct perf_counter_context *child_ctx, *parent_ctx; 4842 struct perf_event_context *child_ctx, *parent_ctx;
4734 struct perf_counter_context *cloned_ctx; 4843 struct perf_event_context *cloned_ctx;
4735 struct perf_counter *counter; 4844 struct perf_event *event;
4736 struct task_struct *parent = current; 4845 struct task_struct *parent = current;
4737 int inherited_all = 1; 4846 int inherited_all = 1;
4738 int ret = 0; 4847 int ret = 0;
4739 4848
4740 child->perf_counter_ctxp = NULL; 4849 child->perf_event_ctxp = NULL;
4741 4850
4742 mutex_init(&child->perf_counter_mutex); 4851 mutex_init(&child->perf_event_mutex);
4743 INIT_LIST_HEAD(&child->perf_counter_list); 4852 INIT_LIST_HEAD(&child->perf_event_list);
4744 4853
4745 if (likely(!parent->perf_counter_ctxp)) 4854 if (likely(!parent->perf_event_ctxp))
4746 return 0; 4855 return 0;
4747 4856
4748 /* 4857 /*
4749 * This is executed from the parent task context, so inherit 4858 * This is executed from the parent task context, so inherit
4750 * counters that have been marked for cloning. 4859 * events that have been marked for cloning.
4751 * First allocate and initialize a context for the child. 4860 * First allocate and initialize a context for the child.
4752 */ 4861 */
4753 4862
4754 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); 4863 child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4755 if (!child_ctx) 4864 if (!child_ctx)
4756 return -ENOMEM; 4865 return -ENOMEM;
4757 4866
4758 __perf_counter_init_context(child_ctx, child); 4867 __perf_event_init_context(child_ctx, child);
4759 child->perf_counter_ctxp = child_ctx; 4868 child->perf_event_ctxp = child_ctx;
4760 get_task_struct(child); 4869 get_task_struct(child);
4761 4870
4762 /* 4871 /*
@@ -4782,16 +4891,14 @@ int perf_counter_init_task(struct task_struct *child)
4782 * We don't have to disable NMIs - we are only looking at 4891
4783 * the list, not manipulating it: 4892 * the list, not manipulating it:
4784 */ 4893 */
4785 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) { 4894 list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
4786 if (counter != counter->group_leader)
4787 continue;
4788 4895
4789 if (!counter->attr.inherit) { 4896 if (!event->attr.inherit) {
4790 inherited_all = 0; 4897 inherited_all = 0;
4791 continue; 4898 continue;
4792 } 4899 }
4793 4900
4794 ret = inherit_group(counter, parent, parent_ctx, 4901 ret = inherit_group(event, parent, parent_ctx,
4795 child, child_ctx); 4902 child, child_ctx);
4796 if (ret) { 4903 if (ret) {
4797 inherited_all = 0; 4904 inherited_all = 0;
@@ -4805,7 +4912,7 @@ int perf_counter_init_task(struct task_struct *child)
4805 * context, or of whatever the parent is a clone of. 4912 * context, or of whatever the parent is a clone of.
4806 * Note that if the parent is a clone, it could get 4913 * Note that if the parent is a clone, it could get
4807 * uncloned at any point, but that doesn't matter 4914 * uncloned at any point, but that doesn't matter
4808 * because the list of counters and the generation 4915 * because the list of events and the generation
4809 * count can't have changed since we took the mutex. 4916 * count can't have changed since we took the mutex.
4810 */ 4917 */
4811 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); 4918 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
@@ -4826,41 +4933,41 @@ int perf_counter_init_task(struct task_struct *child)
4826 return ret; 4933 return ret;
4827} 4934}
4828 4935
4829static void __cpuinit perf_counter_init_cpu(int cpu) 4936static void __cpuinit perf_event_init_cpu(int cpu)
4830{ 4937{
4831 struct perf_cpu_context *cpuctx; 4938 struct perf_cpu_context *cpuctx;
4832 4939
4833 cpuctx = &per_cpu(perf_cpu_context, cpu); 4940 cpuctx = &per_cpu(perf_cpu_context, cpu);
4834 __perf_counter_init_context(&cpuctx->ctx, NULL); 4941 __perf_event_init_context(&cpuctx->ctx, NULL);
4835 4942
4836 spin_lock(&perf_resource_lock); 4943 spin_lock(&perf_resource_lock);
4837 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; 4944 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
4838 spin_unlock(&perf_resource_lock); 4945 spin_unlock(&perf_resource_lock);
4839 4946
4840 hw_perf_counter_setup(cpu); 4947 hw_perf_event_setup(cpu);
4841} 4948}
4842 4949
4843#ifdef CONFIG_HOTPLUG_CPU 4950#ifdef CONFIG_HOTPLUG_CPU
4844static void __perf_counter_exit_cpu(void *info) 4951static void __perf_event_exit_cpu(void *info)
4845{ 4952{
4846 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 4953 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4847 struct perf_counter_context *ctx = &cpuctx->ctx; 4954 struct perf_event_context *ctx = &cpuctx->ctx;
4848 struct perf_counter *counter, *tmp; 4955 struct perf_event *event, *tmp;
4849 4956
4850 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) 4957 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry)
4851 __perf_counter_remove_from_context(counter); 4958 __perf_event_remove_from_context(event);
4852} 4959}
4853static void perf_counter_exit_cpu(int cpu) 4960static void perf_event_exit_cpu(int cpu)
4854{ 4961{
4855 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 4962 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4856 struct perf_counter_context *ctx = &cpuctx->ctx; 4963 struct perf_event_context *ctx = &cpuctx->ctx;
4857 4964
4858 mutex_lock(&ctx->mutex); 4965 mutex_lock(&ctx->mutex);
4859 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); 4966 smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
4860 mutex_unlock(&ctx->mutex); 4967 mutex_unlock(&ctx->mutex);
4861} 4968}
4862#else 4969#else
4863static inline void perf_counter_exit_cpu(int cpu) { } 4970static inline void perf_event_exit_cpu(int cpu) { }
4864#endif 4971#endif
4865 4972
4866static int __cpuinit 4973static int __cpuinit
@@ -4872,17 +4979,17 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4872 4979
4873 case CPU_UP_PREPARE: 4980 case CPU_UP_PREPARE:
4874 case CPU_UP_PREPARE_FROZEN: 4981 case CPU_UP_PREPARE_FROZEN:
4875 perf_counter_init_cpu(cpu); 4982 perf_event_init_cpu(cpu);
4876 break; 4983 break;
4877 4984
4878 case CPU_ONLINE: 4985 case CPU_ONLINE:
4879 case CPU_ONLINE_FROZEN: 4986 case CPU_ONLINE_FROZEN:
4880 hw_perf_counter_setup_online(cpu); 4987 hw_perf_event_setup_online(cpu);
4881 break; 4988 break;
4882 4989
4883 case CPU_DOWN_PREPARE: 4990 case CPU_DOWN_PREPARE:
4884 case CPU_DOWN_PREPARE_FROZEN: 4991 case CPU_DOWN_PREPARE_FROZEN:
4885 perf_counter_exit_cpu(cpu); 4992 perf_event_exit_cpu(cpu);
4886 break; 4993 break;
4887 4994
4888 default: 4995 default:
@@ -4900,7 +5007,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
4900 .priority = 20, 5007 .priority = 20,
4901}; 5008};
4902 5009
4903void __init perf_counter_init(void) 5010void __init perf_event_init(void)
4904{ 5011{
4905 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, 5012 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4906 (void *)(long)smp_processor_id()); 5013 (void *)(long)smp_processor_id());
@@ -4926,7 +5033,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
4926 err = strict_strtoul(buf, 10, &val); 5033 err = strict_strtoul(buf, 10, &val);
4927 if (err) 5034 if (err)
4928 return err; 5035 return err;
4929 if (val > perf_max_counters) 5036 if (val > perf_max_events)
4930 return -EINVAL; 5037 return -EINVAL;
4931 5038
4932 spin_lock(&perf_resource_lock); 5039 spin_lock(&perf_resource_lock);
@@ -4934,8 +5041,8 @@ perf_set_reserve_percpu(struct sysdev_class *class,
4934 for_each_online_cpu(cpu) { 5041 for_each_online_cpu(cpu) {
4935 cpuctx = &per_cpu(perf_cpu_context, cpu); 5042 cpuctx = &per_cpu(perf_cpu_context, cpu);
4936 spin_lock_irq(&cpuctx->ctx.lock); 5043 spin_lock_irq(&cpuctx->ctx.lock);
4937 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters, 5044 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
4938 perf_max_counters - perf_reserved_percpu); 5045 perf_max_events - perf_reserved_percpu);
4939 cpuctx->max_pertask = mpt; 5046 cpuctx->max_pertask = mpt;
4940 spin_unlock_irq(&cpuctx->ctx.lock); 5047 spin_unlock_irq(&cpuctx->ctx.lock);
4941 } 5048 }
@@ -4990,12 +5097,12 @@ static struct attribute *perfclass_attrs[] = {
4990 5097
4991static struct attribute_group perfclass_attr_group = { 5098static struct attribute_group perfclass_attr_group = {
4992 .attrs = perfclass_attrs, 5099 .attrs = perfclass_attrs,
4993 .name = "perf_counters", 5100 .name = "perf_events",
4994}; 5101};
4995 5102
4996static int __init perf_counter_sysfs_init(void) 5103static int __init perf_event_sysfs_init(void)
4997{ 5104{
4998 return sysfs_create_group(&cpu_sysdev_class.kset.kobj, 5105 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4999 &perfclass_attr_group); 5106 &perfclass_attr_group);
5000} 5107}
5001device_initcall(perf_counter_sysfs_init); 5108device_initcall(perf_event_sysfs_init);
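
As an aside on the renamed syscall documented in the sys_perf_event_open() hunk above, a minimal user-space sketch of opening and reading one event could look as follows. This is an illustration, not part of the patch: the perf_event_open() wrapper, the choice of PERF_COUNT_HW_INSTRUCTIONS and the placeholder workload are assumptions of the example, while the attr layout and the PERF_EVENT_IOC_* ioctls come from the renamed <linux/perf_event.h>.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

/* glibc has no wrapper for this syscall, so provide a thin one. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;      /* starts in PERF_EVENT_STATE_OFF, as in perf_event_alloc() */

        fd = perf_event_open(&attr, 0, -1, -1, 0);      /* current task, any CPU, no group */
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload to measure goes here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("instructions: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}
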
diff --git a/kernel/pid.c b/kernel/pid.c
index 31310b5d3f50..d3f722d20f9c 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -40,7 +40,7 @@
40#define pid_hashfn(nr, ns) \ 40#define pid_hashfn(nr, ns) \
41 hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) 41 hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
42static struct hlist_head *pid_hash; 42static struct hlist_head *pid_hash;
43static int pidhash_shift; 43static unsigned int pidhash_shift = 4;
44struct pid init_struct_pid = INIT_STRUCT_PID; 44struct pid init_struct_pid = INIT_STRUCT_PID;
45 45
46int pid_max = PID_MAX_DEFAULT; 46int pid_max = PID_MAX_DEFAULT;
@@ -499,19 +499,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
499void __init pidhash_init(void) 499void __init pidhash_init(void)
500{ 500{
501 int i, pidhash_size; 501 int i, pidhash_size;
502 unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
503 502
504 pidhash_shift = max(4, fls(megabytes * 4)); 503 pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
505 pidhash_shift = min(12, pidhash_shift); 504 HASH_EARLY | HASH_SMALL,
505 &pidhash_shift, NULL, 4096);
506 pidhash_size = 1 << pidhash_shift; 506 pidhash_size = 1 << pidhash_shift;
507 507
508 printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
509 pidhash_size, pidhash_shift,
510 pidhash_size * sizeof(struct hlist_head));
511
512 pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
513 if (!pid_hash)
514 panic("Could not alloc pidhash!\n");
515 for (i = 0; i < pidhash_size; i++) 508 for (i = 0; i < pidhash_size; i++)
516 INIT_HLIST_HEAD(&pid_hash[i]); 509 INIT_HLIST_HEAD(&pid_hash[i]);
517} 510}
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 821722ae58a7..86b3796b0436 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -118,7 +118,7 @@ struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old
118{ 118{
119 if (!(flags & CLONE_NEWPID)) 119 if (!(flags & CLONE_NEWPID))
120 return get_pid_ns(old_ns); 120 return get_pid_ns(old_ns);
121 if (flags & CLONE_THREAD) 121 if (flags & (CLONE_THREAD|CLONE_PARENT))
122 return ERR_PTR(-EINVAL); 122 return ERR_PTR(-EINVAL);
123 return create_pid_namespace(old_ns); 123 return create_pid_namespace(old_ns);
124} 124}
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e33a21cb9407..5c9dc228747b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -8,17 +8,18 @@
8#include <linux/math64.h> 8#include <linux/math64.h>
9#include <asm/uaccess.h> 9#include <asm/uaccess.h>
10#include <linux/kernel_stat.h> 10#include <linux/kernel_stat.h>
11#include <trace/events/timer.h>
11 12
12/* 13/*
13 * Called after updating RLIMIT_CPU to set timer expiration if necessary. 14 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
14 */ 15 */
15void update_rlimit_cpu(unsigned long rlim_new) 16void update_rlimit_cpu(unsigned long rlim_new)
16{ 17{
17 cputime_t cputime; 18 cputime_t cputime = secs_to_cputime(rlim_new);
19 struct signal_struct *const sig = current->signal;
18 20
19 cputime = secs_to_cputime(rlim_new); 21 if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
20 if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || 22 cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
21 cputime_gt(current->signal->it_prof_expires, cputime)) {
22 spin_lock_irq(&current->sighand->siglock); 23 spin_lock_irq(&current->sighand->siglock);
23 set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); 24 set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
24 spin_unlock_irq(&current->sighand->siglock); 25 spin_unlock_irq(&current->sighand->siglock);
@@ -542,6 +543,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
542 now); 543 now);
543} 544}
544 545
546static inline int expires_gt(cputime_t expires, cputime_t new_exp)
547{
548 return cputime_eq(expires, cputime_zero) ||
549 cputime_gt(expires, new_exp);
550}
551
552static inline int expires_le(cputime_t expires, cputime_t new_exp)
553{
554 return !cputime_eq(expires, cputime_zero) &&
555 cputime_le(expires, new_exp);
556}
545/* 557/*
546 * Insert the timer on the appropriate list before any timers that 558 * Insert the timer on the appropriate list before any timers that
547 * expire later. This must be called with the tasklist_lock held 559 * expire later. This must be called with the tasklist_lock held
@@ -586,34 +598,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
586 */ 598 */
587 599
588 if (CPUCLOCK_PERTHREAD(timer->it_clock)) { 600 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
601 union cpu_time_count *exp = &nt->expires;
602
589 switch (CPUCLOCK_WHICH(timer->it_clock)) { 603 switch (CPUCLOCK_WHICH(timer->it_clock)) {
590 default: 604 default:
591 BUG(); 605 BUG();
592 case CPUCLOCK_PROF: 606 case CPUCLOCK_PROF:
593 if (cputime_eq(p->cputime_expires.prof_exp, 607 if (expires_gt(p->cputime_expires.prof_exp,
594 cputime_zero) || 608 exp->cpu))
595 cputime_gt(p->cputime_expires.prof_exp, 609 p->cputime_expires.prof_exp = exp->cpu;
596 nt->expires.cpu))
597 p->cputime_expires.prof_exp =
598 nt->expires.cpu;
599 break; 610 break;
600 case CPUCLOCK_VIRT: 611 case CPUCLOCK_VIRT:
601 if (cputime_eq(p->cputime_expires.virt_exp, 612 if (expires_gt(p->cputime_expires.virt_exp,
602 cputime_zero) || 613 exp->cpu))
603 cputime_gt(p->cputime_expires.virt_exp, 614 p->cputime_expires.virt_exp = exp->cpu;
604 nt->expires.cpu))
605 p->cputime_expires.virt_exp =
606 nt->expires.cpu;
607 break; 615 break;
608 case CPUCLOCK_SCHED: 616 case CPUCLOCK_SCHED:
609 if (p->cputime_expires.sched_exp == 0 || 617 if (p->cputime_expires.sched_exp == 0 ||
610 p->cputime_expires.sched_exp > 618 p->cputime_expires.sched_exp > exp->sched)
611 nt->expires.sched)
612 p->cputime_expires.sched_exp = 619 p->cputime_expires.sched_exp =
613 nt->expires.sched; 620 exp->sched;
614 break; 621 break;
615 } 622 }
616 } else { 623 } else {
624 struct signal_struct *const sig = p->signal;
625 union cpu_time_count *exp = &timer->it.cpu.expires;
626
617 /* 627 /*
618 * For a process timer, set the cached expiration time. 628 * For a process timer, set the cached expiration time.
619 */ 629 */
@@ -621,30 +631,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
621 default: 631 default:
622 BUG(); 632 BUG();
623 case CPUCLOCK_VIRT: 633 case CPUCLOCK_VIRT:
624 if (!cputime_eq(p->signal->it_virt_expires, 634 if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
625 cputime_zero) && 635 exp->cpu))
626 cputime_lt(p->signal->it_virt_expires,
627 timer->it.cpu.expires.cpu))
628 break; 636 break;
629 p->signal->cputime_expires.virt_exp = 637 sig->cputime_expires.virt_exp = exp->cpu;
630 timer->it.cpu.expires.cpu;
631 break; 638 break;
632 case CPUCLOCK_PROF: 639 case CPUCLOCK_PROF:
633 if (!cputime_eq(p->signal->it_prof_expires, 640 if (expires_le(sig->it[CPUCLOCK_PROF].expires,
634 cputime_zero) && 641 exp->cpu))
635 cputime_lt(p->signal->it_prof_expires,
636 timer->it.cpu.expires.cpu))
637 break; 642 break;
638 i = p->signal->rlim[RLIMIT_CPU].rlim_cur; 643 i = sig->rlim[RLIMIT_CPU].rlim_cur;
639 if (i != RLIM_INFINITY && 644 if (i != RLIM_INFINITY &&
640 i <= cputime_to_secs(timer->it.cpu.expires.cpu)) 645 i <= cputime_to_secs(exp->cpu))
641 break; 646 break;
642 p->signal->cputime_expires.prof_exp = 647 sig->cputime_expires.prof_exp = exp->cpu;
643 timer->it.cpu.expires.cpu;
644 break; 648 break;
645 case CPUCLOCK_SCHED: 649 case CPUCLOCK_SCHED:
646 p->signal->cputime_expires.sched_exp = 650 sig->cputime_expires.sched_exp = exp->sched;
647 timer->it.cpu.expires.sched;
648 break; 651 break;
649 } 652 }
650 } 653 }
@@ -1071,6 +1074,40 @@ static void stop_process_timers(struct task_struct *tsk)
1071 spin_unlock_irqrestore(&cputimer->lock, flags); 1074 spin_unlock_irqrestore(&cputimer->lock, flags);
1072} 1075}
1073 1076
1077static u32 onecputick;
1078
1079static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
1080 cputime_t *expires, cputime_t cur_time, int signo)
1081{
1082 if (cputime_eq(it->expires, cputime_zero))
1083 return;
1084
1085 if (cputime_ge(cur_time, it->expires)) {
1086 if (!cputime_eq(it->incr, cputime_zero)) {
1087 it->expires = cputime_add(it->expires, it->incr);
1088 it->error += it->incr_error;
1089 if (it->error >= onecputick) {
1090 it->expires = cputime_sub(it->expires,
1091 cputime_one_jiffy);
1092 it->error -= onecputick;
1093 }
1094 } else {
1095 it->expires = cputime_zero;
1096 }
1097
1098 trace_itimer_expire(signo == SIGPROF ?
1099 ITIMER_PROF : ITIMER_VIRTUAL,
1100 tsk->signal->leader_pid, cur_time);
1101 __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
1102 }
1103
1104 if (!cputime_eq(it->expires, cputime_zero) &&
1105 (cputime_eq(*expires, cputime_zero) ||
1106 cputime_lt(it->expires, *expires))) {
1107 *expires = it->expires;
1108 }
1109}
1110
1074/* 1111/*
1075 * Check for any per-thread CPU timers that have fired and move them 1112 * Check for any per-thread CPU timers that have fired and move them
1076 * off the tsk->*_timers list onto the firing list. Per-thread timers 1113 * off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -1090,10 +1127,10 @@ static void check_process_timers(struct task_struct *tsk,
1090 * Don't sample the current process CPU clocks if there are no timers. 1127 * Don't sample the current process CPU clocks if there are no timers.
1091 */ 1128 */
1092 if (list_empty(&timers[CPUCLOCK_PROF]) && 1129 if (list_empty(&timers[CPUCLOCK_PROF]) &&
1093 cputime_eq(sig->it_prof_expires, cputime_zero) && 1130 cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
1094 sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY && 1131 sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
1095 list_empty(&timers[CPUCLOCK_VIRT]) && 1132 list_empty(&timers[CPUCLOCK_VIRT]) &&
1096 cputime_eq(sig->it_virt_expires, cputime_zero) && 1133 cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
1097 list_empty(&timers[CPUCLOCK_SCHED])) { 1134 list_empty(&timers[CPUCLOCK_SCHED])) {
1098 stop_process_timers(tsk); 1135 stop_process_timers(tsk);
1099 return; 1136 return;
@@ -1153,38 +1190,11 @@ static void check_process_timers(struct task_struct *tsk,
1153 /* 1190 /*
1154 * Check for the special case process timers. 1191 * Check for the special case process timers.
1155 */ 1192 */
1156 if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { 1193 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
1157 if (cputime_ge(ptime, sig->it_prof_expires)) { 1194 SIGPROF);
1158 /* ITIMER_PROF fires and reloads. */ 1195 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
1159 sig->it_prof_expires = sig->it_prof_incr; 1196 SIGVTALRM);
1160 if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { 1197
1161 sig->it_prof_expires = cputime_add(
1162 sig->it_prof_expires, ptime);
1163 }
1164 __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
1165 }
1166 if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
1167 (cputime_eq(prof_expires, cputime_zero) ||
1168 cputime_lt(sig->it_prof_expires, prof_expires))) {
1169 prof_expires = sig->it_prof_expires;
1170 }
1171 }
1172 if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
1173 if (cputime_ge(utime, sig->it_virt_expires)) {
1174 /* ITIMER_VIRTUAL fires and reloads. */
1175 sig->it_virt_expires = sig->it_virt_incr;
1176 if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
1177 sig->it_virt_expires = cputime_add(
1178 sig->it_virt_expires, utime);
1179 }
1180 __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
1181 }
1182 if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
1183 (cputime_eq(virt_expires, cputime_zero) ||
1184 cputime_lt(sig->it_virt_expires, virt_expires))) {
1185 virt_expires = sig->it_virt_expires;
1186 }
1187 }
1188 if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { 1198 if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
1189 unsigned long psecs = cputime_to_secs(ptime); 1199 unsigned long psecs = cputime_to_secs(ptime);
1190 cputime_t x; 1200 cputime_t x;
@@ -1457,7 +1467,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1457 if (!cputime_eq(*oldval, cputime_zero)) { 1467 if (!cputime_eq(*oldval, cputime_zero)) {
1458 if (cputime_le(*oldval, now.cpu)) { 1468 if (cputime_le(*oldval, now.cpu)) {
1459 /* Just about to fire. */ 1469 /* Just about to fire. */
1460 *oldval = jiffies_to_cputime(1); 1470 *oldval = cputime_one_jiffy;
1461 } else { 1471 } else {
1462 *oldval = cputime_sub(*oldval, now.cpu); 1472 *oldval = cputime_sub(*oldval, now.cpu);
1463 } 1473 }
@@ -1703,10 +1713,15 @@ static __init int init_posix_cpu_timers(void)
1703 .nsleep = thread_cpu_nsleep, 1713 .nsleep = thread_cpu_nsleep,
1704 .nsleep_restart = thread_cpu_nsleep_restart, 1714 .nsleep_restart = thread_cpu_nsleep_restart,
1705 }; 1715 };
1716 struct timespec ts;
1706 1717
1707 register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); 1718 register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
1708 register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); 1719 register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
1709 1720
1721 cputime_to_timespec(cputime_one_jiffy, &ts);
1722 onecputick = ts.tv_nsec;
1723 WARN_ON(ts.tv_sec != 0);
1724
1710 return 0; 1725 return 0;
1711} 1726}
1712__initcall(init_posix_cpu_timers); 1727__initcall(init_posix_cpu_timers);
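
The sig->it[CPUCLOCK_PROF] / sig->it[CPUCLOCK_VIRT] bookkeeping reworked above backs the classic process-wide interval timers, and check_cpu_itimer() is what fires and reloads them. A short user-space sketch of the interface being serviced may help; the 100 ms period, the busy loop and the tick counter are assumptions of the example, not values taken from the patch.

#include <sys/time.h>
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t prof_ticks;

static void on_sigprof(int sig)
{
        (void)sig;
        prof_ticks++;           /* one SIGPROF per expiry of the profiling timer */
}

int main(void)
{
        struct itimerval it = {
                .it_value    = { 0, 100000 },   /* first expiry after 100 ms of CPU time */
                .it_interval = { 0, 100000 },   /* reload value (what the patch keeps in it->incr) */
        };

        signal(SIGPROF, on_sigprof);
        setitimer(ITIMER_PROF, &it, NULL);

        while (prof_ticks < 10)
                ;                       /* burn CPU so the CPUCLOCK_PROF clock advances */

        printf("received %d SIGPROF ticks\n", (int)prof_ticks);
        return 0;
}
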
diff --git a/kernel/power/process.c b/kernel/power/process.c
index da2072d73811..cc2e55373b68 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -9,6 +9,7 @@
9#undef DEBUG 9#undef DEBUG
10 10
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/oom.h>
12#include <linux/suspend.h> 13#include <linux/suspend.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/syscalls.h> 15#include <linux/syscalls.h>
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 97955b0e44f4..36cb168e4330 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -619,7 +619,7 @@ __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
619 BUG_ON(!region); 619 BUG_ON(!region);
620 } else 620 } else
621 /* This allocation cannot fail */ 621 /* This allocation cannot fail */
622 region = alloc_bootmem_low(sizeof(struct nosave_region)); 622 region = alloc_bootmem(sizeof(struct nosave_region));
623 region->start_pfn = start_pfn; 623 region->start_pfn = start_pfn;
624 region->end_pfn = end_pfn; 624 region->end_pfn = end_pfn;
625 list_add_tail(&region->list, &nosave_regions); 625 list_add_tail(&region->list, &nosave_regions);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8ba052c86d48..b101cdc4df3f 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -13,7 +13,6 @@
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/file.h> 15#include <linux/file.h>
16#include <linux/utsname.h>
17#include <linux/delay.h> 16#include <linux/delay.h>
18#include <linux/bitops.h> 17#include <linux/bitops.h>
19#include <linux/genhd.h> 18#include <linux/genhd.h>
diff --git a/kernel/printk.c b/kernel/printk.c
index 602033acd6c7..f38b07f78a4e 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -206,12 +206,11 @@ __setup("log_buf_len=", log_buf_len_setup);
206#ifdef CONFIG_BOOT_PRINTK_DELAY 206#ifdef CONFIG_BOOT_PRINTK_DELAY
207 207
208static unsigned int boot_delay; /* msecs delay after each printk during bootup */ 208static unsigned int boot_delay; /* msecs delay after each printk during bootup */
209static unsigned long long printk_delay_msec; /* per msec, based on boot_delay */ 209static unsigned long long loops_per_msec; /* based on boot_delay */
210 210
211static int __init boot_delay_setup(char *str) 211static int __init boot_delay_setup(char *str)
212{ 212{
213 unsigned long lpj; 213 unsigned long lpj;
214 unsigned long long loops_per_msec;
215 214
216 lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */ 215 lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
217 loops_per_msec = (unsigned long long)lpj / 1000 * HZ; 216 loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
@@ -220,10 +219,9 @@ static int __init boot_delay_setup(char *str)
220 if (boot_delay > 10 * 1000) 219 if (boot_delay > 10 * 1000)
221 boot_delay = 0; 220 boot_delay = 0;
222 221
223 printk_delay_msec = loops_per_msec; 222 pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
224 printk(KERN_DEBUG "boot_delay: %u, preset_lpj: %ld, lpj: %lu, " 223 "HZ: %d, loops_per_msec: %llu\n",
225 "HZ: %d, printk_delay_msec: %llu\n", 224 boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
226 boot_delay, preset_lpj, lpj, HZ, printk_delay_msec);
227 return 1; 225 return 1;
228} 226}
229__setup("boot_delay=", boot_delay_setup); 227__setup("boot_delay=", boot_delay_setup);
@@ -236,7 +234,7 @@ static void boot_delay_msec(void)
236 if (boot_delay == 0 || system_state != SYSTEM_BOOTING) 234 if (boot_delay == 0 || system_state != SYSTEM_BOOTING)
237 return; 235 return;
238 236
239 k = (unsigned long long)printk_delay_msec * boot_delay; 237 k = (unsigned long long)loops_per_msec * boot_delay;
240 238
241 timeout = jiffies + msecs_to_jiffies(boot_delay); 239 timeout = jiffies + msecs_to_jiffies(boot_delay);
242 while (k) { 240 while (k) {
@@ -655,6 +653,20 @@ static int recursion_bug;
655static int new_text_line = 1; 653static int new_text_line = 1;
656static char printk_buf[1024]; 654static char printk_buf[1024];
657 655
656int printk_delay_msec __read_mostly;
657
658static inline void printk_delay(void)
659{
660 if (unlikely(printk_delay_msec)) {
661 int m = printk_delay_msec;
662
663 while (m--) {
664 mdelay(1);
665 touch_nmi_watchdog();
666 }
667 }
668}
669
658asmlinkage int vprintk(const char *fmt, va_list args) 670asmlinkage int vprintk(const char *fmt, va_list args)
659{ 671{
660 int printed_len = 0; 672 int printed_len = 0;
@@ -664,6 +676,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
664 char *p; 676 char *p;
665 677
666 boot_delay_msec(); 678 boot_delay_msec();
679 printk_delay();
667 680
668 preempt_disable(); 681 preempt_disable();
669 /* This stops the holder of console_sem just where we want him */ 682 /* This stops the holder of console_sem just where we want him */
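
printk_delay_msec added above is meant to be tuned at run time. Assuming the sysctl.c hunk elsewhere in this series exposes it as /proc/sys/kernel/printk_delay (milliseconds of delay per message), a small sketch of setting it could be:

#include <stdio.h>

int main(void)
{
        /* Path and unit are assumptions based on the companion sysctl change. */
        FILE *f = fopen("/proc/sys/kernel/printk_delay", "w");

        if (!f) {
                perror("printk_delay");
                return 1;
        }
        fprintf(f, "50\n");     /* delay each printk by roughly 50 ms */
        fclose(f);
        return 0;
}
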
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 307c285af59e..23bd09cd042e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -266,9 +266,10 @@ static int ignoring_children(struct sighand_struct *sigh)
266 * or self-reaping. Do notification now if it would have happened earlier. 266 * or self-reaping. Do notification now if it would have happened earlier.
267 * If it should reap itself, return true. 267 * If it should reap itself, return true.
268 * 268 *
269 * If it's our own child, there is no notification to do. 269 * If it's our own child, there is no notification to do. But if our normal
270 * But if our normal children self-reap, then this child 270 * children self-reap, then this child was prevented by ptrace and we must
271 * was prevented by ptrace and we must reap it now. 271 * reap it now; in that case we must also wake up sub-threads sleeping in
272 * do_wait().
272 */ 273 */
273static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) 274static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
274{ 275{
@@ -278,8 +279,10 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
278 if (!task_detached(p) && thread_group_empty(p)) { 279 if (!task_detached(p) && thread_group_empty(p)) {
279 if (!same_thread_group(p->real_parent, tracer)) 280 if (!same_thread_group(p->real_parent, tracer))
280 do_notify_parent(p, p->exit_signal); 281 do_notify_parent(p, p->exit_signal);
281 else if (ignoring_children(tracer->sighand)) 282 else if (ignoring_children(tracer->sighand)) {
283 __wake_up_parent(p, tracer);
282 p->exit_signal = -1; 284 p->exit_signal = -1;
285 }
283 } 286 }
284 if (task_detached(p)) { 287 if (task_detached(p)) {
285 /* Mark it as in the process of being reaped. */ 288 /* Mark it as in the process of being reaped. */
diff --git a/kernel/relay.c b/kernel/relay.c
index bc188549788f..760c26209a3c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -60,7 +60,7 @@ static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
60/* 60/*
61 * vm_ops for relay file mappings. 61 * vm_ops for relay file mappings.
62 */ 62 */
63static struct vm_operations_struct relay_file_mmap_ops = { 63static const struct vm_operations_struct relay_file_mmap_ops = {
64 .fault = relay_buf_fault, 64 .fault = relay_buf_fault,
65 .close = relay_file_mmap_close, 65 .close = relay_file_mmap_close,
66}; 66};
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index e1338f074314..bcdabf37c40b 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -19,6 +19,7 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent)
19{ 19{
20 spin_lock_init(&counter->lock); 20 spin_lock_init(&counter->lock);
21 counter->limit = RESOURCE_MAX; 21 counter->limit = RESOURCE_MAX;
22 counter->soft_limit = RESOURCE_MAX;
22 counter->parent = parent; 23 counter->parent = parent;
23} 24}
24 25
@@ -101,6 +102,8 @@ res_counter_member(struct res_counter *counter, int member)
101 return &counter->limit; 102 return &counter->limit;
102 case RES_FAILCNT: 103 case RES_FAILCNT:
103 return &counter->failcnt; 104 return &counter->failcnt;
105 case RES_SOFT_LIMIT:
106 return &counter->soft_limit;
104 }; 107 };
105 108
106 BUG(); 109 BUG();
diff --git a/kernel/resource.c b/kernel/resource.c
index 78b087221c15..fb11a58b9594 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -223,13 +223,13 @@ int release_resource(struct resource *old)
223 223
224EXPORT_SYMBOL(release_resource); 224EXPORT_SYMBOL(release_resource);
225 225
226#if defined(CONFIG_MEMORY_HOTPLUG) && !defined(CONFIG_ARCH_HAS_WALK_MEMORY) 226#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
227/* 227/*
228 * Finds the lowest memory reosurce exists within [res->start.res->end) 228 * Finds the lowest memory reosurce exists within [res->start.res->end)
229 * the caller must specify res->start, res->end, res->flags. 229 * the caller must specify res->start, res->end, res->flags and "name".
230 * If found, returns 0, res is overwritten, if not found, returns -1. 230 * If found, returns 0, res is overwritten, if not found, returns -1.
231 */ 231 */
232static int find_next_system_ram(struct resource *res) 232static int find_next_system_ram(struct resource *res, char *name)
233{ 233{
234 resource_size_t start, end; 234 resource_size_t start, end;
235 struct resource *p; 235 struct resource *p;
@@ -245,6 +245,8 @@ static int find_next_system_ram(struct resource *res)
245 /* system ram is just marked as IORESOURCE_MEM */ 245 /* system ram is just marked as IORESOURCE_MEM */
246 if (p->flags != res->flags) 246 if (p->flags != res->flags)
247 continue; 247 continue;
248 if (name && strcmp(p->name, name))
249 continue;
248 if (p->start > end) { 250 if (p->start > end) {
249 p = NULL; 251 p = NULL;
250 break; 252 break;
@@ -262,19 +264,26 @@ static int find_next_system_ram(struct resource *res)
262 res->end = p->end; 264 res->end = p->end;
263 return 0; 265 return 0;
264} 266}
265int 267
266walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, 268/*
267 int (*func)(unsigned long, unsigned long, void *)) 269 * This function calls the callback for each range of "System RAM"
270 * that is marked as IORESOURCE_MEM and IORESOURCE_BUSY.
271 * For now, it handles "System RAM" only.
272 */
273int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
274 void *arg, int (*func)(unsigned long, unsigned long, void *))
268{ 275{
269 struct resource res; 276 struct resource res;
270 unsigned long pfn, len; 277 unsigned long pfn, len;
271 u64 orig_end; 278 u64 orig_end;
272 int ret = -1; 279 int ret = -1;
280
273 res.start = (u64) start_pfn << PAGE_SHIFT; 281 res.start = (u64) start_pfn << PAGE_SHIFT;
274 res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; 282 res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
275 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; 283 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
276 orig_end = res.end; 284 orig_end = res.end;
277 while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) { 285 while ((res.start < res.end) &&
286 (find_next_system_ram(&res, "System RAM") >= 0)) {
278 pfn = (unsigned long)(res.start >> PAGE_SHIFT); 287 pfn = (unsigned long)(res.start >> PAGE_SHIFT);
279 len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT); 288 len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
280 ret = (*func)(pfn, len, arg); 289 ret = (*func)(pfn, len, arg);
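
A minimal kernel-side sketch of how the renamed walk_system_ram_range() interface is driven. count_ram_pages() is a hypothetical callback, and the headers used for max_pfn and the prototype are assumptions for this kernel era; the callback signature itself matches the hunk above.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>      /* max_pfn (assumed location) */

/* Hypothetical callback: tally the pages of each "System RAM" range. */
static int count_ram_pages(unsigned long start_pfn, unsigned long nr_pages,
                           void *arg)
{
        *(unsigned long *)arg += nr_pages;
        return 0;               /* a non-zero return stops the walk */
}

static unsigned long count_system_ram(void)
{
        unsigned long total = 0;

        /* Visits every "System RAM" range marked IORESOURCE_MEM | IORESOURCE_BUSY. */
        walk_system_ram_range(0, max_pfn, &total, count_ram_pages);
        return total;
}
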
diff --git a/kernel/sched.c b/kernel/sched.c
index 830967e18285..76c0e9691fc0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
39#include <linux/completion.h> 39#include <linux/completion.h>
40#include <linux/kernel_stat.h> 40#include <linux/kernel_stat.h>
41#include <linux/debug_locks.h> 41#include <linux/debug_locks.h>
42#include <linux/perf_counter.h> 42#include <linux/perf_event.h>
43#include <linux/security.h> 43#include <linux/security.h>
44#include <linux/notifier.h> 44#include <linux/notifier.h>
45#include <linux/profile.h> 45#include <linux/profile.h>
@@ -780,7 +780,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
780 return single_open(filp, sched_feat_show, NULL); 780 return single_open(filp, sched_feat_show, NULL);
781} 781}
782 782
783static struct file_operations sched_feat_fops = { 783static const struct file_operations sched_feat_fops = {
784 .open = sched_feat_open, 784 .open = sched_feat_open,
785 .write = sched_feat_write, 785 .write = sched_feat_write,
786 .read = seq_read, 786 .read = seq_read,
@@ -2053,7 +2053,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2053 if (task_hot(p, old_rq->clock, NULL)) 2053 if (task_hot(p, old_rq->clock, NULL))
2054 schedstat_inc(p, se.nr_forced2_migrations); 2054 schedstat_inc(p, se.nr_forced2_migrations);
2055#endif 2055#endif
2056 perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, 2056 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
2057 1, 1, NULL, 0); 2057 1, 1, NULL, 0);
2058 } 2058 }
2059 p->se.vruntime -= old_cfsrq->min_vruntime - 2059 p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2515,22 +2515,17 @@ void sched_fork(struct task_struct *p, int clone_flags)
2515 __sched_fork(p); 2515 __sched_fork(p);
2516 2516
2517 /* 2517 /*
2518 * Make sure we do not leak PI boosting priority to the child.
2519 */
2520 p->prio = current->normal_prio;
2521
2522 /*
2523 * Revert to default priority/policy on fork if requested. 2518 * Revert to default priority/policy on fork if requested.
2524 */ 2519 */
2525 if (unlikely(p->sched_reset_on_fork)) { 2520 if (unlikely(p->sched_reset_on_fork)) {
2526 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) 2521 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
2527 p->policy = SCHED_NORMAL; 2522 p->policy = SCHED_NORMAL;
2528 2523 p->normal_prio = p->static_prio;
2529 if (p->normal_prio < DEFAULT_PRIO) 2524 }
2530 p->prio = DEFAULT_PRIO;
2531 2525
2532 if (PRIO_TO_NICE(p->static_prio) < 0) { 2526 if (PRIO_TO_NICE(p->static_prio) < 0) {
2533 p->static_prio = NICE_TO_PRIO(0); 2527 p->static_prio = NICE_TO_PRIO(0);
2528 p->normal_prio = p->static_prio;
2534 set_load_weight(p); 2529 set_load_weight(p);
2535 } 2530 }
2536 2531
@@ -2541,6 +2536,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
2541 p->sched_reset_on_fork = 0; 2536 p->sched_reset_on_fork = 0;
2542 } 2537 }
2543 2538
2539 /*
2540 * Make sure we do not leak PI boosting priority to the child.
2541 */
2542 p->prio = current->normal_prio;
2543
2544 if (!rt_prio(p->prio)) 2544 if (!rt_prio(p->prio))
2545 p->sched_class = &fair_sched_class; 2545 p->sched_class = &fair_sched_class;
2546 2546
@@ -2581,8 +2581,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2581 BUG_ON(p->state != TASK_RUNNING); 2581 BUG_ON(p->state != TASK_RUNNING);
2582 update_rq_clock(rq); 2582 update_rq_clock(rq);
2583 2583
2584 p->prio = effective_prio(p);
2585
2586 if (!p->sched_class->task_new || !current->se.on_rq) { 2584 if (!p->sched_class->task_new || !current->se.on_rq) {
2587 activate_task(rq, p, 0); 2585 activate_task(rq, p, 0);
2588 } else { 2586 } else {
@@ -2718,7 +2716,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2718 */ 2716 */
2719 prev_state = prev->state; 2717 prev_state = prev->state;
2720 finish_arch_switch(prev); 2718 finish_arch_switch(prev);
2721 perf_counter_task_sched_in(current, cpu_of(rq)); 2719 perf_event_task_sched_in(current, cpu_of(rq));
2722 finish_lock_switch(rq, prev); 2720 finish_lock_switch(rq, prev);
2723 2721
2724 fire_sched_in_preempt_notifiers(current); 2722 fire_sched_in_preempt_notifiers(current);
@@ -2904,6 +2902,19 @@ unsigned long nr_iowait(void)
2904 return sum; 2902 return sum;
2905} 2903}
2906 2904
2905unsigned long nr_iowait_cpu(void)
2906{
2907 struct rq *this = this_rq();
2908 return atomic_read(&this->nr_iowait);
2909}
2910
2911unsigned long this_cpu_load(void)
2912{
2913 struct rq *this = this_rq();
2914 return this->cpu_load[0];
2915}
2916
2917
2907/* Variables and functions for calc_load */ 2918/* Variables and functions for calc_load */
2908static atomic_long_t calc_load_tasks; 2919static atomic_long_t calc_load_tasks;
2909static unsigned long calc_load_update; 2920static unsigned long calc_load_update;
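
A hedged sketch of how a consumer (tick/idle or cpufreq code, for instance) might read the two helpers exported just above. report_cpu_pressure() is hypothetical, the prototypes are assumed visible via linux/sched.h, and the caller is assumed to run with preemption disabled since both helpers dereference this_rq().

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>

/* Hypothetical consumer; call with preemption disabled. */
static void report_cpu_pressure(void)
{
        unsigned long iowaiters = nr_iowait_cpu();      /* tasks on this rq blocked on I/O */
        unsigned long load = this_cpu_load();           /* this rq's cpu_load[0] sample */

        printk(KERN_DEBUG "cpu%d: load=%lu iowait=%lu\n",
               smp_processor_id(), load, iowaiters);
}
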
@@ -5079,17 +5090,16 @@ void account_idle_time(cputime_t cputime)
5079 */ 5090 */
5080void account_process_tick(struct task_struct *p, int user_tick) 5091void account_process_tick(struct task_struct *p, int user_tick)
5081{ 5092{
5082 cputime_t one_jiffy = jiffies_to_cputime(1); 5093 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
5083 cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
5084 struct rq *rq = this_rq(); 5094 struct rq *rq = this_rq();
5085 5095
5086 if (user_tick) 5096 if (user_tick)
5087 account_user_time(p, one_jiffy, one_jiffy_scaled); 5097 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
5088 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) 5098 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
5089 account_system_time(p, HARDIRQ_OFFSET, one_jiffy, 5099 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
5090 one_jiffy_scaled); 5100 one_jiffy_scaled);
5091 else 5101 else
5092 account_idle_time(one_jiffy); 5102 account_idle_time(cputime_one_jiffy);
5093} 5103}
5094 5104
5095/* 5105/*
@@ -5193,7 +5203,7 @@ void scheduler_tick(void)
5193 curr->sched_class->task_tick(rq, curr, 0); 5203 curr->sched_class->task_tick(rq, curr, 0);
5194 spin_unlock(&rq->lock); 5204 spin_unlock(&rq->lock);
5195 5205
5196 perf_counter_task_tick(curr, cpu); 5206 perf_event_task_tick(curr, cpu);
5197 5207
5198#ifdef CONFIG_SMP 5208#ifdef CONFIG_SMP
5199 rq->idle_at_tick = idle_cpu(cpu); 5209 rq->idle_at_tick = idle_cpu(cpu);
@@ -5409,7 +5419,7 @@ need_resched_nonpreemptible:
5409 5419
5410 if (likely(prev != next)) { 5420 if (likely(prev != next)) {
5411 sched_info_switch(prev, next); 5421 sched_info_switch(prev, next);
5412 perf_counter_task_sched_out(prev, next, cpu); 5422 perf_event_task_sched_out(prev, next, cpu);
5413 5423
5414 rq->nr_switches++; 5424 rq->nr_switches++;
5415 rq->curr = next; 5425 rq->curr = next;
@@ -7671,7 +7681,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7671/* 7681/*
7672 * Register at high priority so that task migration (migrate_all_tasks) 7682 * Register at high priority so that task migration (migrate_all_tasks)
7673 * happens before everything else. This has to be lower priority than 7683 * happens before everything else. This has to be lower priority than
7674 * the notifier in the perf_counter subsystem, though. 7684 * the notifier in the perf_event subsystem, though.
7675 */ 7685 */
7676static struct notifier_block __cpuinitdata migration_notifier = { 7686static struct notifier_block __cpuinitdata migration_notifier = {
7677 .notifier_call = migration_call, 7687 .notifier_call = migration_call,
@@ -9528,7 +9538,7 @@ void __init sched_init(void)
9528 alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 9538 alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
9529#endif /* SMP */ 9539#endif /* SMP */
9530 9540
9531 perf_counter_init(); 9541 perf_event_init();
9532 9542
9533 scheduler_running = 1; 9543 scheduler_running = 1;
9534} 9544}
@@ -10300,7 +10310,7 @@ static int sched_rt_global_constraints(void)
10300#endif /* CONFIG_RT_GROUP_SCHED */ 10310#endif /* CONFIG_RT_GROUP_SCHED */
10301 10311
10302int sched_rt_handler(struct ctl_table *table, int write, 10312int sched_rt_handler(struct ctl_table *table, int write,
10303 struct file *filp, void __user *buffer, size_t *lenp, 10313 void __user *buffer, size_t *lenp,
10304 loff_t *ppos) 10314 loff_t *ppos)
10305{ 10315{
10306 int ret; 10316 int ret;
@@ -10311,7 +10321,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
10311 old_period = sysctl_sched_rt_period; 10321 old_period = sysctl_sched_rt_period;
10312 old_runtime = sysctl_sched_rt_runtime; 10322 old_runtime = sysctl_sched_rt_runtime;
10313 10323
10314 ret = proc_dointvec(table, write, filp, buffer, lenp, ppos); 10324 ret = proc_dointvec(table, write, buffer, lenp, ppos);
10315 10325
10316 if (!ret && write) { 10326 if (!ret && write) {
10317 ret = sched_rt_global_constraints(); 10327 ret = sched_rt_global_constraints();
@@ -10365,8 +10375,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
10365} 10375}
10366 10376
10367static int 10377static int
10368cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 10378cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
10369 struct task_struct *tsk)
10370{ 10379{
10371#ifdef CONFIG_RT_GROUP_SCHED 10380#ifdef CONFIG_RT_GROUP_SCHED
10372 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) 10381 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10376,15 +10385,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
10376 if (tsk->sched_class != &fair_sched_class) 10385 if (tsk->sched_class != &fair_sched_class)
10377 return -EINVAL; 10386 return -EINVAL;
10378#endif 10387#endif
10388 return 0;
10389}
10379 10390
10391static int
10392cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
10393 struct task_struct *tsk, bool threadgroup)
10394{
10395 int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
10396 if (retval)
10397 return retval;
10398 if (threadgroup) {
10399 struct task_struct *c;
10400 rcu_read_lock();
10401 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
10402 retval = cpu_cgroup_can_attach_task(cgrp, c);
10403 if (retval) {
10404 rcu_read_unlock();
10405 return retval;
10406 }
10407 }
10408 rcu_read_unlock();
10409 }
10380 return 0; 10410 return 0;
10381} 10411}
10382 10412
10383static void 10413static void
10384cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 10414cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
10385 struct cgroup *old_cont, struct task_struct *tsk) 10415 struct cgroup *old_cont, struct task_struct *tsk,
10416 bool threadgroup)
10386{ 10417{
10387 sched_move_task(tsk); 10418 sched_move_task(tsk);
10419 if (threadgroup) {
10420 struct task_struct *c;
10421 rcu_read_lock();
10422 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
10423 sched_move_task(c);
10424 }
10425 rcu_read_unlock();
10426 }
10388} 10427}
10389 10428
10390#ifdef CONFIG_FAIR_GROUP_SCHED 10429#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index ac2e1dc708bd..479ce5682d7c 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -127,7 +127,7 @@ again:
127 clock = wrap_max(clock, min_clock); 127 clock = wrap_max(clock, min_clock);
128 clock = wrap_min(clock, max_clock); 128 clock = wrap_min(clock, max_clock);
129 129
130 if (cmpxchg(&scd->clock, old_clock, clock) != old_clock) 130 if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
131 goto again; 131 goto again;
132 132
133 return clock; 133 return clock;
@@ -163,7 +163,7 @@ again:
163 val = remote_clock; 163 val = remote_clock;
164 } 164 }
165 165
166 if (cmpxchg(ptr, old_val, val) != old_val) 166 if (cmpxchg64(ptr, old_val, val) != old_val)
167 goto again; 167 goto again;
168 168
169 return val; 169 return val;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ecc637a0d591..4e777b47eeda 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -384,10 +384,10 @@ static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
384 384
385#ifdef CONFIG_SCHED_DEBUG 385#ifdef CONFIG_SCHED_DEBUG
386int sched_nr_latency_handler(struct ctl_table *table, int write, 386int sched_nr_latency_handler(struct ctl_table *table, int write,
387 struct file *filp, void __user *buffer, size_t *lenp, 387 void __user *buffer, size_t *lenp,
388 loff_t *ppos) 388 loff_t *ppos)
389{ 389{
390 int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); 390 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
391 391
392 if (ret || !write) 392 if (ret || !write)
393 return ret; 393 return ret;
diff --git a/kernel/signal.c b/kernel/signal.c
index 64c5deeaca5d..6705320784fd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -705,7 +705,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
705 705
706 if (why) { 706 if (why) {
707 /* 707 /*
708 * The first thread which returns from finish_stop() 708 * The first thread which returns from do_signal_stop()
709 * will take ->siglock, notice SIGNAL_CLD_MASK, and 709 * will take ->siglock, notice SIGNAL_CLD_MASK, and
710 * notify its parent. See get_signal_to_deliver(). 710 * notify its parent. See get_signal_to_deliver().
711 */ 711 */
@@ -971,6 +971,20 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
971 return send_signal(sig, info, t, 0); 971 return send_signal(sig, info, t, 0);
972} 972}
973 973
974int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
975 bool group)
976{
977 unsigned long flags;
978 int ret = -ESRCH;
979
980 if (lock_task_sighand(p, &flags)) {
981 ret = send_signal(sig, info, p, group);
982 unlock_task_sighand(p, &flags);
983 }
984
985 return ret;
986}
987
974/* 988/*
975 * Force a signal that the process can't ignore: if necessary 989 * Force a signal that the process can't ignore: if necessary
976 * we unblock the signal and change any SIG_IGN to SIG_DFL. 990 * we unblock the signal and change any SIG_IGN to SIG_DFL.
@@ -1036,12 +1050,6 @@ void zap_other_threads(struct task_struct *p)
1036 } 1050 }
1037} 1051}
1038 1052
1039int __fatal_signal_pending(struct task_struct *tsk)
1040{
1041 return sigismember(&tsk->pending.signal, SIGKILL);
1042}
1043EXPORT_SYMBOL(__fatal_signal_pending);
1044
1045struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) 1053struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1046{ 1054{
1047 struct sighand_struct *sighand; 1055 struct sighand_struct *sighand;
@@ -1068,18 +1076,10 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
1068 */ 1076 */
1069int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1077int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1070{ 1078{
1071 unsigned long flags; 1079 int ret = check_kill_permission(sig, info, p);
1072 int ret;
1073 1080
1074 ret = check_kill_permission(sig, info, p); 1081 if (!ret && sig)
1075 1082 ret = do_send_sig_info(sig, info, p, true);
1076 if (!ret && sig) {
1077 ret = -ESRCH;
1078 if (lock_task_sighand(p, &flags)) {
1079 ret = __group_send_sig_info(sig, info, p);
1080 unlock_task_sighand(p, &flags);
1081 }
1082 }
1083 1083
1084 return ret; 1084 return ret;
1085} 1085}
@@ -1224,15 +1224,9 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1224 * These are for backward compatibility with the rest of the kernel source. 1224 * These are for backward compatibility with the rest of the kernel source.
1225 */ 1225 */
1226 1226
1227/*
1228 * The caller must ensure the task can't exit.
1229 */
1230int 1227int
1231send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1228send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1232{ 1229{
1233 int ret;
1234 unsigned long flags;
1235
1236 /* 1230 /*
1237 * Make sure legacy kernel users don't send in bad values 1231 * Make sure legacy kernel users don't send in bad values
1238 * (normal paths check this in check_kill_permission). 1232 * (normal paths check this in check_kill_permission).
@@ -1240,10 +1234,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1240 if (!valid_signal(sig)) 1234 if (!valid_signal(sig))
1241 return -EINVAL; 1235 return -EINVAL;
1242 1236
1243 spin_lock_irqsave(&p->sighand->siglock, flags); 1237 return do_send_sig_info(sig, info, p, false);
1244 ret = specific_send_sig_info(sig, info, p);
1245 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1246 return ret;
1247} 1238}
1248 1239
1249#define __si_special(priv) \ 1240#define __si_special(priv) \
@@ -1383,15 +1374,6 @@ ret:
1383} 1374}
1384 1375
1385/* 1376/*
1386 * Wake up any threads in the parent blocked in wait* syscalls.
1387 */
1388static inline void __wake_up_parent(struct task_struct *p,
1389 struct task_struct *parent)
1390{
1391 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1392}
1393
1394/*
1395 * Let a parent know about the death of a child. 1377 * Let a parent know about the death of a child.
1396 * For a stopped/continued status change, use do_notify_parent_cldstop instead. 1378 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1397 * 1379 *
@@ -1673,29 +1655,6 @@ void ptrace_notify(int exit_code)
1673 spin_unlock_irq(&current->sighand->siglock); 1655 spin_unlock_irq(&current->sighand->siglock);
1674} 1656}
1675 1657
1676static void
1677finish_stop(int stop_count)
1678{
1679 /*
1680 * If there are no other threads in the group, or if there is
1681 * a group stop in progress and we are the last to stop,
1682 * report to the parent. When ptraced, every thread reports itself.
1683 */
1684 if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1685 read_lock(&tasklist_lock);
1686 do_notify_parent_cldstop(current, CLD_STOPPED);
1687 read_unlock(&tasklist_lock);
1688 }
1689
1690 do {
1691 schedule();
1692 } while (try_to_freeze());
1693 /*
1694 * Now we don't run again until continued.
1695 */
1696 current->exit_code = 0;
1697}
1698
1699/* 1658/*
1700 * This performs the stopping for SIGSTOP and other stop signals. 1659 * This performs the stopping for SIGSTOP and other stop signals.
1701 * We have to stop all threads in the thread group. 1660 * We have to stop all threads in the thread group.
@@ -1705,15 +1664,9 @@ finish_stop(int stop_count)
1705static int do_signal_stop(int signr) 1664static int do_signal_stop(int signr)
1706{ 1665{
1707 struct signal_struct *sig = current->signal; 1666 struct signal_struct *sig = current->signal;
1708 int stop_count; 1667 int notify;
1709 1668
1710 if (sig->group_stop_count > 0) { 1669 if (!sig->group_stop_count) {
1711 /*
1712 * There is a group stop in progress. We don't need to
1713 * start another one.
1714 */
1715 stop_count = --sig->group_stop_count;
1716 } else {
1717 struct task_struct *t; 1670 struct task_struct *t;
1718 1671
1719 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || 1672 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
@@ -1725,7 +1678,7 @@ static int do_signal_stop(int signr)
1725 */ 1678 */
1726 sig->group_exit_code = signr; 1679 sig->group_exit_code = signr;
1727 1680
1728 stop_count = 0; 1681 sig->group_stop_count = 1;
1729 for (t = next_thread(current); t != current; t = next_thread(t)) 1682 for (t = next_thread(current); t != current; t = next_thread(t))
1730 /* 1683 /*
1731 * Setting state to TASK_STOPPED for a group 1684 * Setting state to TASK_STOPPED for a group
@@ -1734,19 +1687,44 @@ static int do_signal_stop(int signr)
1734 */ 1687 */
1735 if (!(t->flags & PF_EXITING) && 1688 if (!(t->flags & PF_EXITING) &&
1736 !task_is_stopped_or_traced(t)) { 1689 !task_is_stopped_or_traced(t)) {
1737 stop_count++; 1690 sig->group_stop_count++;
1738 signal_wake_up(t, 0); 1691 signal_wake_up(t, 0);
1739 } 1692 }
1740 sig->group_stop_count = stop_count;
1741 } 1693 }
1694 /*
1695 * If there are no other threads in the group, or if there is
1696 * a group stop in progress and we are the last to stop, report
1697 * to the parent. When ptraced, every thread reports itself.
1698 */
1699 notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
1700 notify = tracehook_notify_jctl(notify, CLD_STOPPED);
1701 /*
1702 * tracehook_notify_jctl() can drop and reacquire siglock, so
1703 * we keep ->group_stop_count != 0 before the call. If SIGCONT
1704 * or SIGKILL comes in between, ->group_stop_count becomes 0.
1705 */
1706 if (sig->group_stop_count) {
1707 if (!--sig->group_stop_count)
1708 sig->flags = SIGNAL_STOP_STOPPED;
1709 current->exit_code = sig->group_exit_code;
1710 __set_current_state(TASK_STOPPED);
1711 }
1712 spin_unlock_irq(&current->sighand->siglock);
1742 1713
1743 if (stop_count == 0) 1714 if (notify) {
1744 sig->flags = SIGNAL_STOP_STOPPED; 1715 read_lock(&tasklist_lock);
1745 current->exit_code = sig->group_exit_code; 1716 do_notify_parent_cldstop(current, notify);
1746 __set_current_state(TASK_STOPPED); 1717 read_unlock(&tasklist_lock);
1718 }
1719
1720 /* Now we don't run again until woken by SIGCONT or SIGKILL */
1721 do {
1722 schedule();
1723 } while (try_to_freeze());
1724
1725 tracehook_finish_jctl();
1726 current->exit_code = 0;
1747 1727
1748 spin_unlock_irq(&current->sighand->siglock);
1749 finish_stop(stop_count);
1750 return 1; 1728 return 1;
1751} 1729}
1752 1730
@@ -1815,14 +1793,15 @@ relock:
1815 int why = (signal->flags & SIGNAL_STOP_CONTINUED) 1793 int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1816 ? CLD_CONTINUED : CLD_STOPPED; 1794 ? CLD_CONTINUED : CLD_STOPPED;
1817 signal->flags &= ~SIGNAL_CLD_MASK; 1795 signal->flags &= ~SIGNAL_CLD_MASK;
1818 spin_unlock_irq(&sighand->siglock);
1819 1796
1820 if (unlikely(!tracehook_notify_jctl(1, why))) 1797 why = tracehook_notify_jctl(why, CLD_CONTINUED);
1821 goto relock; 1798 spin_unlock_irq(&sighand->siglock);
1822 1799
1823 read_lock(&tasklist_lock); 1800 if (why) {
1824 do_notify_parent_cldstop(current->group_leader, why); 1801 read_lock(&tasklist_lock);
1825 read_unlock(&tasklist_lock); 1802 do_notify_parent_cldstop(current->group_leader, why);
1803 read_unlock(&tasklist_lock);
1804 }
1826 goto relock; 1805 goto relock;
1827 } 1806 }
1828 1807
@@ -1987,14 +1966,14 @@ void exit_signals(struct task_struct *tsk)
1987 if (unlikely(tsk->signal->group_stop_count) && 1966 if (unlikely(tsk->signal->group_stop_count) &&
1988 !--tsk->signal->group_stop_count) { 1967 !--tsk->signal->group_stop_count) {
1989 tsk->signal->flags = SIGNAL_STOP_STOPPED; 1968 tsk->signal->flags = SIGNAL_STOP_STOPPED;
1990 group_stop = 1; 1969 group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
1991 } 1970 }
1992out: 1971out:
1993 spin_unlock_irq(&tsk->sighand->siglock); 1972 spin_unlock_irq(&tsk->sighand->siglock);
1994 1973
1995 if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) { 1974 if (unlikely(group_stop)) {
1996 read_lock(&tasklist_lock); 1975 read_lock(&tasklist_lock);
1997 do_notify_parent_cldstop(tsk, CLD_STOPPED); 1976 do_notify_parent_cldstop(tsk, group_stop);
1998 read_unlock(&tasklist_lock); 1977 read_unlock(&tasklist_lock);
1999 } 1978 }
2000} 1979}
@@ -2290,7 +2269,6 @@ static int
2290do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) 2269do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2291{ 2270{
2292 struct task_struct *p; 2271 struct task_struct *p;
2293 unsigned long flags;
2294 int error = -ESRCH; 2272 int error = -ESRCH;
2295 2273
2296 rcu_read_lock(); 2274 rcu_read_lock();
@@ -2300,14 +2278,16 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2300 /* 2278 /*
2301 * The null signal is a permissions and process existence 2279 * The null signal is a permissions and process existence
2302 * probe. No signal is actually delivered. 2280 * probe. No signal is actually delivered.
2303 *
2304 * If lock_task_sighand() fails we pretend the task dies
2305 * after receiving the signal. The window is tiny, and the
2306 * signal is private anyway.
2307 */ 2281 */
2308 if (!error && sig && lock_task_sighand(p, &flags)) { 2282 if (!error && sig) {
2309 error = specific_send_sig_info(sig, info, p); 2283 error = do_send_sig_info(sig, info, p, false);
2310 unlock_task_sighand(p, &flags); 2284 /*
2285 * If lock_task_sighand() failed we pretend the task
2286 * dies after receiving the signal. The window is tiny,
2287 * and the signal is private anyway.
2288 */
2289 if (unlikely(error == -ESRCH))
2290 error = 0;
2311 } 2291 }
2312 } 2292 }
2313 rcu_read_unlock(); 2293 rcu_read_unlock();
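
For callers, the new do_send_sig_info() collapses the old lock_task_sighand()/send/unlock dance into one call. A minimal sketch; kick_task() is hypothetical and the prototype is assumed visible via linux/sched.h.

#include <linux/sched.h>
#include <linux/signal.h>

/* Hypothetical helper: deliver a private (non-group) SIGUSR1 to one task.
 * do_send_sig_info() takes and drops ->sighand->siglock itself and returns
 * -ESRCH if the target has already released its signal state. */
static int kick_task(struct task_struct *p)
{
        return do_send_sig_info(SIGUSR1, SEND_SIG_PRIV, p, false);
}

/* Passing group == true queues the signal group-wide instead, which is what
 * group_send_sig_info() above now does after its permission check. */
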
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 09d7519557d3..0d31135efbf4 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -26,10 +26,10 @@ static void slow_work_cull_timeout(unsigned long);
26static void slow_work_oom_timeout(unsigned long); 26static void slow_work_oom_timeout(unsigned long);
27 27
28#ifdef CONFIG_SYSCTL 28#ifdef CONFIG_SYSCTL
29static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *, 29static int slow_work_min_threads_sysctl(struct ctl_table *, int,
30 void __user *, size_t *, loff_t *); 30 void __user *, size_t *, loff_t *);
31 31
32static int slow_work_max_threads_sysctl(struct ctl_table *, int , struct file *, 32static int slow_work_max_threads_sysctl(struct ctl_table *, int ,
33 void __user *, size_t *, loff_t *); 33 void __user *, size_t *, loff_t *);
34#endif 34#endif
35 35
@@ -493,10 +493,10 @@ static void slow_work_oom_timeout(unsigned long data)
493 * Handle adjustment of the minimum number of threads 493 * Handle adjustment of the minimum number of threads
494 */ 494 */
495static int slow_work_min_threads_sysctl(struct ctl_table *table, int write, 495static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
496 struct file *filp, void __user *buffer, 496 void __user *buffer,
497 size_t *lenp, loff_t *ppos) 497 size_t *lenp, loff_t *ppos)
498{ 498{
499 int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); 499 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
500 int n; 500 int n;
501 501
502 if (ret == 0) { 502 if (ret == 0) {
@@ -521,10 +521,10 @@ static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
521 * Handle adjustment of the maximum number of threads 521 * Handle adjustment of the maximum number of threads
522 */ 522 */
523static int slow_work_max_threads_sysctl(struct ctl_table *table, int write, 523static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
524 struct file *filp, void __user *buffer, 524 void __user *buffer,
525 size_t *lenp, loff_t *ppos) 525 size_t *lenp, loff_t *ppos)
526{ 526{
527 int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); 527 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
528 int n; 528 int n;
529 529
530 if (ret == 0) { 530 if (ret == 0) {
diff --git a/kernel/smp.c b/kernel/smp.c
index 8e218500ab14..c9d1c7835c2f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -29,8 +29,7 @@ enum {
29 29
30struct call_function_data { 30struct call_function_data {
31 struct call_single_data csd; 31 struct call_single_data csd;
32 spinlock_t lock; 32 atomic_t refs;
33 unsigned int refs;
34 cpumask_var_t cpumask; 33 cpumask_var_t cpumask;
35}; 34};
36 35
@@ -39,9 +38,7 @@ struct call_single_queue {
39 spinlock_t lock; 38 spinlock_t lock;
40}; 39};
41 40
42static DEFINE_PER_CPU(struct call_function_data, cfd_data) = { 41static DEFINE_PER_CPU(struct call_function_data, cfd_data);
43 .lock = __SPIN_LOCK_UNLOCKED(cfd_data.lock),
44};
45 42
46static int 43static int
47hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) 44hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -196,25 +193,18 @@ void generic_smp_call_function_interrupt(void)
196 list_for_each_entry_rcu(data, &call_function.queue, csd.list) { 193 list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
197 int refs; 194 int refs;
198 195
199 spin_lock(&data->lock); 196 if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
200 if (!cpumask_test_cpu(cpu, data->cpumask)) {
201 spin_unlock(&data->lock);
202 continue; 197 continue;
203 }
204 cpumask_clear_cpu(cpu, data->cpumask);
205 spin_unlock(&data->lock);
206 198
207 data->csd.func(data->csd.info); 199 data->csd.func(data->csd.info);
208 200
209 spin_lock(&data->lock); 201 refs = atomic_dec_return(&data->refs);
210 WARN_ON(data->refs == 0); 202 WARN_ON(refs < 0);
211 refs = --data->refs;
212 if (!refs) { 203 if (!refs) {
213 spin_lock(&call_function.lock); 204 spin_lock(&call_function.lock);
214 list_del_rcu(&data->csd.list); 205 list_del_rcu(&data->csd.list);
215 spin_unlock(&call_function.lock); 206 spin_unlock(&call_function.lock);
216 } 207 }
217 spin_unlock(&data->lock);
218 208
219 if (refs) 209 if (refs)
220 continue; 210 continue;
@@ -357,13 +347,6 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
357 generic_exec_single(cpu, data, wait); 347 generic_exec_single(cpu, data, wait);
358} 348}
359 349
360/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
361
362#ifndef arch_send_call_function_ipi_mask
363# define arch_send_call_function_ipi_mask(maskp) \
364 arch_send_call_function_ipi(*(maskp))
365#endif
366
367/** 350/**
368 * smp_call_function_many(): Run a function on a set of other CPUs. 351 * smp_call_function_many(): Run a function on a set of other CPUs.
369 * @mask: The set of cpus to run on (only runs on online subset). 352 * @mask: The set of cpus to run on (only runs on online subset).
@@ -419,23 +402,20 @@ void smp_call_function_many(const struct cpumask *mask,
419 data = &__get_cpu_var(cfd_data); 402 data = &__get_cpu_var(cfd_data);
420 csd_lock(&data->csd); 403 csd_lock(&data->csd);
421 404
422 spin_lock_irqsave(&data->lock, flags);
423 data->csd.func = func; 405 data->csd.func = func;
424 data->csd.info = info; 406 data->csd.info = info;
425 cpumask_and(data->cpumask, mask, cpu_online_mask); 407 cpumask_and(data->cpumask, mask, cpu_online_mask);
426 cpumask_clear_cpu(this_cpu, data->cpumask); 408 cpumask_clear_cpu(this_cpu, data->cpumask);
427 data->refs = cpumask_weight(data->cpumask); 409 atomic_set(&data->refs, cpumask_weight(data->cpumask));
428 410
429 spin_lock(&call_function.lock); 411 spin_lock_irqsave(&call_function.lock, flags);
430 /* 412 /*
431 * Place entry at the _HEAD_ of the list, so that any cpu still 413 * Place entry at the _HEAD_ of the list, so that any cpu still
432 * observing the entry in generic_smp_call_function_interrupt() 414 * observing the entry in generic_smp_call_function_interrupt()
433 * will not miss any other list entries: 415 * will not miss any other list entries:
434 */ 416 */
435 list_add_rcu(&data->csd.list, &call_function.queue); 417 list_add_rcu(&data->csd.list, &call_function.queue);
436 spin_unlock(&call_function.lock); 418 spin_unlock_irqrestore(&call_function.lock, flags);
437
438 spin_unlock_irqrestore(&data->lock, flags);
439 419
440 /* 420 /*
441 * Make the list addition visible before sending the ipi. 421 * Make the list addition visible before sending the ipi.
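
The locking rework above is internal; the caller-facing contract of smp_call_function_many() is unchanged. A sketch of that contract under the usual prototype; bump_counter() and poke_other_cpus() are hypothetical.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <asm/atomic.h>

/* Runs on every targeted CPU, in interrupt context. */
static void bump_counter(void *info)
{
        atomic_inc((atomic_t *)info);
}

static void poke_other_cpus(atomic_t *counter)
{
        /* Preemption must be off; the calling CPU is excluded from the mask,
         * and wait=true spins until all targeted CPUs ran bump_counter(). */
        preempt_disable();
        smp_call_function_many(cpu_online_mask, bump_counter, counter, true);
        preempt_enable();
}
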
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 88796c330838..81324d12eb35 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -90,11 +90,11 @@ void touch_all_softlockup_watchdogs(void)
90EXPORT_SYMBOL(touch_all_softlockup_watchdogs); 90EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
91 91
92int proc_dosoftlockup_thresh(struct ctl_table *table, int write, 92int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
93 struct file *filp, void __user *buffer, 93 void __user *buffer,
94 size_t *lenp, loff_t *ppos) 94 size_t *lenp, loff_t *ppos)
95{ 95{
96 touch_all_softlockup_watchdogs(); 96 touch_all_softlockup_watchdogs();
97 return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); 97 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
98} 98}
99 99
100/* 100/*
diff --git a/kernel/sys.c b/kernel/sys.c
index b3f1097c76fa..255475d163e0 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -14,7 +14,7 @@
14#include <linux/prctl.h> 14#include <linux/prctl.h>
15#include <linux/highuid.h> 15#include <linux/highuid.h>
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/perf_counter.h> 17#include <linux/perf_event.h>
18#include <linux/resource.h> 18#include <linux/resource.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/kexec.h> 20#include <linux/kexec.h>
@@ -1338,6 +1338,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1338 unsigned long flags; 1338 unsigned long flags;
1339 cputime_t utime, stime; 1339 cputime_t utime, stime;
1340 struct task_cputime cputime; 1340 struct task_cputime cputime;
1341 unsigned long maxrss = 0;
1341 1342
1342 memset((char *) r, 0, sizeof *r); 1343 memset((char *) r, 0, sizeof *r);
1343 utime = stime = cputime_zero; 1344 utime = stime = cputime_zero;
@@ -1346,6 +1347,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1346 utime = task_utime(current); 1347 utime = task_utime(current);
1347 stime = task_stime(current); 1348 stime = task_stime(current);
1348 accumulate_thread_rusage(p, r); 1349 accumulate_thread_rusage(p, r);
1350 maxrss = p->signal->maxrss;
1349 goto out; 1351 goto out;
1350 } 1352 }
1351 1353
@@ -1363,6 +1365,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1363 r->ru_majflt = p->signal->cmaj_flt; 1365 r->ru_majflt = p->signal->cmaj_flt;
1364 r->ru_inblock = p->signal->cinblock; 1366 r->ru_inblock = p->signal->cinblock;
1365 r->ru_oublock = p->signal->coublock; 1367 r->ru_oublock = p->signal->coublock;
1368 maxrss = p->signal->cmaxrss;
1366 1369
1367 if (who == RUSAGE_CHILDREN) 1370 if (who == RUSAGE_CHILDREN)
1368 break; 1371 break;
@@ -1377,6 +1380,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1377 r->ru_majflt += p->signal->maj_flt; 1380 r->ru_majflt += p->signal->maj_flt;
1378 r->ru_inblock += p->signal->inblock; 1381 r->ru_inblock += p->signal->inblock;
1379 r->ru_oublock += p->signal->oublock; 1382 r->ru_oublock += p->signal->oublock;
1383 if (maxrss < p->signal->maxrss)
1384 maxrss = p->signal->maxrss;
1380 t = p; 1385 t = p;
1381 do { 1386 do {
1382 accumulate_thread_rusage(t, r); 1387 accumulate_thread_rusage(t, r);
@@ -1392,6 +1397,15 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1392out: 1397out:
1393 cputime_to_timeval(utime, &r->ru_utime); 1398 cputime_to_timeval(utime, &r->ru_utime);
1394 cputime_to_timeval(stime, &r->ru_stime); 1399 cputime_to_timeval(stime, &r->ru_stime);
1400
1401 if (who != RUSAGE_CHILDREN) {
1402 struct mm_struct *mm = get_task_mm(p);
1403 if (mm) {
1404 setmax_mm_hiwater_rss(&maxrss, mm);
1405 mmput(mm);
1406 }
1407 }
1408 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1395} 1409}
1396 1410
1397int getrusage(struct task_struct *p, int who, struct rusage __user *ru) 1411int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
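
From user space, the effect of the hunk above is that getrusage() now reports a peak RSS in ru_maxrss (in kB) instead of leaving it zero. A small stand-alone test; the 8 MiB figure is arbitrary.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

int main(void)
{
        struct rusage ru;
        char *p = malloc(8 << 20);              /* 8 MiB */

        if (!p)
                return 1;
        memset(p, 1, 8 << 20);                  /* actually touch the pages */

        if (getrusage(RUSAGE_SELF, &ru) != 0) {
                perror("getrusage");
                return 1;
        }
        printf("peak RSS: %ld kB\n", ru.ru_maxrss);     /* filled in by this patch */
        free(p);
        return 0;
}
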
@@ -1511,11 +1525,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
1511 case PR_SET_TSC: 1525 case PR_SET_TSC:
1512 error = SET_TSC_CTL(arg2); 1526 error = SET_TSC_CTL(arg2);
1513 break; 1527 break;
1514 case PR_TASK_PERF_COUNTERS_DISABLE: 1528 case PR_TASK_PERF_EVENTS_DISABLE:
1515 error = perf_counter_task_disable(); 1529 error = perf_event_task_disable();
1516 break; 1530 break;
1517 case PR_TASK_PERF_COUNTERS_ENABLE: 1531 case PR_TASK_PERF_EVENTS_ENABLE:
1518 error = perf_counter_task_enable(); 1532 error = perf_event_task_enable();
1519 break; 1533 break;
1520 case PR_GET_TIMERSLACK: 1534 case PR_GET_TIMERSLACK:
1521 error = current->timer_slack_ns; 1535 error = current->timer_slack_ns;
@@ -1528,6 +1542,28 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
1528 current->timer_slack_ns = arg2; 1542 current->timer_slack_ns = arg2;
1529 error = 0; 1543 error = 0;
1530 break; 1544 break;
1545 case PR_MCE_KILL:
1546 if (arg4 | arg5)
1547 return -EINVAL;
1548 switch (arg2) {
1549 case 0:
1550 if (arg3 != 0)
1551 return -EINVAL;
1552 current->flags &= ~PF_MCE_PROCESS;
1553 break;
1554 case 1:
1555 current->flags |= PF_MCE_PROCESS;
1556 if (arg3 != 0)
1557 current->flags |= PF_MCE_EARLY;
1558 else
1559 current->flags &= ~PF_MCE_EARLY;
1560 break;
1561 default:
1562 return -EINVAL;
1563 }
1564 error = 0;
1565 break;
1566
1531 default: 1567 default:
1532 error = -EINVAL; 1568 error = -EINVAL;
1533 break; 1569 break;
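
The new PR_MCE_KILL prctl maps onto user space roughly as follows. The constant's numeric value (33) is an assumption taken from linux/prctl.h of this series; the sub-arguments follow the switch in the hunk above.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33          /* assumed value, from linux/prctl.h */
#endif

int main(void)
{
        /* arg2 == 1 sets PF_MCE_PROCESS; arg3 != 0 additionally requests
         * early kill (PF_MCE_EARLY).  arg4 and arg5 must be zero. */
        if (prctl(PR_MCE_KILL, 1, 1, 0, 0) != 0) {
                perror("prctl(PR_MCE_KILL)");
                return 1;
        }

        /* ... run work that should be killed early on memory failure ... */

        prctl(PR_MCE_KILL, 0, 0, 0, 0); /* arg2 == 0: back to the default policy */
        return 0;
}
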
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 68320f6b07b5..e06d0b8d1951 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -49,6 +49,7 @@ cond_syscall(sys_sendmsg);
49cond_syscall(compat_sys_sendmsg); 49cond_syscall(compat_sys_sendmsg);
50cond_syscall(sys_recvmsg); 50cond_syscall(sys_recvmsg);
51cond_syscall(compat_sys_recvmsg); 51cond_syscall(compat_sys_recvmsg);
52cond_syscall(compat_sys_recvfrom);
52cond_syscall(sys_socketcall); 53cond_syscall(sys_socketcall);
53cond_syscall(sys_futex); 54cond_syscall(sys_futex);
54cond_syscall(compat_sys_futex); 55cond_syscall(compat_sys_futex);
@@ -177,4 +178,4 @@ cond_syscall(sys_eventfd);
177cond_syscall(sys_eventfd2); 178cond_syscall(sys_eventfd2);
178 179
179/* performance counters: */ 180/* performance counters: */
180cond_syscall(sys_perf_counter_open); 181cond_syscall(sys_perf_event_open);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1a631ba684a4..0d949c517412 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -26,7 +26,6 @@
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/security.h> 27#include <linux/security.h>
28#include <linux/ctype.h> 28#include <linux/ctype.h>
29#include <linux/utsname.h>
30#include <linux/kmemcheck.h> 29#include <linux/kmemcheck.h>
31#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
32#include <linux/fs.h> 31#include <linux/fs.h>
@@ -50,7 +49,7 @@
50#include <linux/reboot.h> 49#include <linux/reboot.h>
51#include <linux/ftrace.h> 50#include <linux/ftrace.h>
52#include <linux/slow-work.h> 51#include <linux/slow-work.h>
53#include <linux/perf_counter.h> 52#include <linux/perf_event.h>
54 53
55#include <asm/uaccess.h> 54#include <asm/uaccess.h>
56#include <asm/processor.h> 55#include <asm/processor.h>
@@ -77,6 +76,7 @@ extern int max_threads;
77extern int core_uses_pid; 76extern int core_uses_pid;
78extern int suid_dumpable; 77extern int suid_dumpable;
79extern char core_pattern[]; 78extern char core_pattern[];
79extern unsigned int core_pipe_limit;
80extern int pid_max; 80extern int pid_max;
81extern int min_free_kbytes; 81extern int min_free_kbytes;
82extern int pid_max_min, pid_max_max; 82extern int pid_max_min, pid_max_max;
@@ -106,6 +106,9 @@ static int __maybe_unused one = 1;
106static int __maybe_unused two = 2; 106static int __maybe_unused two = 2;
107static unsigned long one_ul = 1; 107static unsigned long one_ul = 1;
108static int one_hundred = 100; 108static int one_hundred = 100;
109#ifdef CONFIG_PRINTK
110static int ten_thousand = 10000;
111#endif
109 112
110/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ 113/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
111static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; 114static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
@@ -160,9 +163,9 @@ extern int max_lock_depth;
160#endif 163#endif
161 164
162#ifdef CONFIG_PROC_SYSCTL 165#ifdef CONFIG_PROC_SYSCTL
163static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp, 166static int proc_do_cad_pid(struct ctl_table *table, int write,
164 void __user *buffer, size_t *lenp, loff_t *ppos); 167 void __user *buffer, size_t *lenp, loff_t *ppos);
165static int proc_taint(struct ctl_table *table, int write, struct file *filp, 168static int proc_taint(struct ctl_table *table, int write,
166 void __user *buffer, size_t *lenp, loff_t *ppos); 169 void __user *buffer, size_t *lenp, loff_t *ppos);
167#endif 170#endif
168 171
@@ -421,6 +424,14 @@ static struct ctl_table kern_table[] = {
421 .proc_handler = &proc_dostring, 424 .proc_handler = &proc_dostring,
422 .strategy = &sysctl_string, 425 .strategy = &sysctl_string,
423 }, 426 },
427 {
428 .ctl_name = CTL_UNNUMBERED,
429 .procname = "core_pipe_limit",
430 .data = &core_pipe_limit,
431 .maxlen = sizeof(unsigned int),
432 .mode = 0644,
433 .proc_handler = &proc_dointvec,
434 },
424#ifdef CONFIG_PROC_SYSCTL 435#ifdef CONFIG_PROC_SYSCTL
425 { 436 {
426 .procname = "tainted", 437 .procname = "tainted",
@@ -722,6 +733,17 @@ static struct ctl_table kern_table[] = {
722 .mode = 0644, 733 .mode = 0644,
723 .proc_handler = &proc_dointvec, 734 .proc_handler = &proc_dointvec,
724 }, 735 },
736 {
737 .ctl_name = CTL_UNNUMBERED,
738 .procname = "printk_delay",
739 .data = &printk_delay_msec,
740 .maxlen = sizeof(int),
741 .mode = 0644,
742 .proc_handler = &proc_dointvec_minmax,
743 .strategy = &sysctl_intvec,
744 .extra1 = &zero,
745 .extra2 = &ten_thousand,
746 },
725#endif 747#endif
726 { 748 {
727 .ctl_name = KERN_NGROUPS_MAX, 749 .ctl_name = KERN_NGROUPS_MAX,
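
The kernel.printk_delay knob added above takes a per-message delay in milliseconds, clamped to 0..10000 by the min/max handler. A small root-only sketch setting it through procfs.

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/printk_delay", "w");

        if (!f) {
                perror("/proc/sys/kernel/printk_delay");
                return 1;
        }
        fprintf(f, "50\n");             /* delay each printk by 50 ms */
        return fclose(f) ? 1 : 0;
}
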
@@ -964,28 +986,28 @@ static struct ctl_table kern_table[] = {
964 .child = slow_work_sysctls, 986 .child = slow_work_sysctls,
965 }, 987 },
966#endif 988#endif
967#ifdef CONFIG_PERF_COUNTERS 989#ifdef CONFIG_PERF_EVENTS
968 { 990 {
969 .ctl_name = CTL_UNNUMBERED, 991 .ctl_name = CTL_UNNUMBERED,
970 .procname = "perf_counter_paranoid", 992 .procname = "perf_event_paranoid",
971 .data = &sysctl_perf_counter_paranoid, 993 .data = &sysctl_perf_event_paranoid,
972 .maxlen = sizeof(sysctl_perf_counter_paranoid), 994 .maxlen = sizeof(sysctl_perf_event_paranoid),
973 .mode = 0644, 995 .mode = 0644,
974 .proc_handler = &proc_dointvec, 996 .proc_handler = &proc_dointvec,
975 }, 997 },
976 { 998 {
977 .ctl_name = CTL_UNNUMBERED, 999 .ctl_name = CTL_UNNUMBERED,
978 .procname = "perf_counter_mlock_kb", 1000 .procname = "perf_event_mlock_kb",
979 .data = &sysctl_perf_counter_mlock, 1001 .data = &sysctl_perf_event_mlock,
980 .maxlen = sizeof(sysctl_perf_counter_mlock), 1002 .maxlen = sizeof(sysctl_perf_event_mlock),
981 .mode = 0644, 1003 .mode = 0644,
982 .proc_handler = &proc_dointvec, 1004 .proc_handler = &proc_dointvec,
983 }, 1005 },
984 { 1006 {
985 .ctl_name = CTL_UNNUMBERED, 1007 .ctl_name = CTL_UNNUMBERED,
986 .procname = "perf_counter_max_sample_rate", 1008 .procname = "perf_event_max_sample_rate",
987 .data = &sysctl_perf_counter_sample_rate, 1009 .data = &sysctl_perf_event_sample_rate,
988 .maxlen = sizeof(sysctl_perf_counter_sample_rate), 1010 .maxlen = sizeof(sysctl_perf_event_sample_rate),
989 .mode = 0644, 1011 .mode = 0644,
990 .proc_handler = &proc_dointvec, 1012 .proc_handler = &proc_dointvec,
991 }, 1013 },
@@ -1376,6 +1398,31 @@ static struct ctl_table vm_table[] = {
1376 .mode = 0644, 1398 .mode = 0644,
1377 .proc_handler = &scan_unevictable_handler, 1399 .proc_handler = &scan_unevictable_handler,
1378 }, 1400 },
1401#ifdef CONFIG_MEMORY_FAILURE
1402 {
1403 .ctl_name = CTL_UNNUMBERED,
1404 .procname = "memory_failure_early_kill",
1405 .data = &sysctl_memory_failure_early_kill,
1406 .maxlen = sizeof(sysctl_memory_failure_early_kill),
1407 .mode = 0644,
1408 .proc_handler = &proc_dointvec_minmax,
1409 .strategy = &sysctl_intvec,
1410 .extra1 = &zero,
1411 .extra2 = &one,
1412 },
1413 {
1414 .ctl_name = CTL_UNNUMBERED,
1415 .procname = "memory_failure_recovery",
1416 .data = &sysctl_memory_failure_recovery,
1417 .maxlen = sizeof(sysctl_memory_failure_recovery),
1418 .mode = 0644,
1419 .proc_handler = &proc_dointvec_minmax,
1420 .strategy = &sysctl_intvec,
1421 .extra1 = &zero,
1422 .extra2 = &one,
1423 },
1424#endif
1425
1379/* 1426/*
1380 * NOTE: do not add new entries to this table unless you have read 1427 * NOTE: do not add new entries to this table unless you have read
1381 * Documentation/sysctl/ctl_unnumbered.txt 1428 * Documentation/sysctl/ctl_unnumbered.txt
@@ -2204,7 +2251,7 @@ void sysctl_head_put(struct ctl_table_header *head)
2204#ifdef CONFIG_PROC_SYSCTL 2251#ifdef CONFIG_PROC_SYSCTL
2205 2252
2206static int _proc_do_string(void* data, int maxlen, int write, 2253static int _proc_do_string(void* data, int maxlen, int write,
2207 struct file *filp, void __user *buffer, 2254 void __user *buffer,
2208 size_t *lenp, loff_t *ppos) 2255 size_t *lenp, loff_t *ppos)
2209{ 2256{
2210 size_t len; 2257 size_t len;
@@ -2265,7 +2312,6 @@ static int _proc_do_string(void* data, int maxlen, int write,
2265 * proc_dostring - read a string sysctl 2312 * proc_dostring - read a string sysctl
2266 * @table: the sysctl table 2313 * @table: the sysctl table
2267 * @write: %TRUE if this is a write to the sysctl file 2314 * @write: %TRUE if this is a write to the sysctl file
2268 * @filp: the file structure
2269 * @buffer: the user buffer 2315 * @buffer: the user buffer
2270 * @lenp: the size of the user buffer 2316 * @lenp: the size of the user buffer
2271 * @ppos: file position 2317 * @ppos: file position
@@ -2279,10 +2325,10 @@ static int _proc_do_string(void* data, int maxlen, int write,
2279 * 2325 *
2280 * Returns 0 on success. 2326 * Returns 0 on success.
2281 */ 2327 */
2282int proc_dostring(struct ctl_table *table, int write, struct file *filp, 2328int proc_dostring(struct ctl_table *table, int write,
2283 void __user *buffer, size_t *lenp, loff_t *ppos) 2329 void __user *buffer, size_t *lenp, loff_t *ppos)
2284{ 2330{
2285 return _proc_do_string(table->data, table->maxlen, write, filp, 2331 return _proc_do_string(table->data, table->maxlen, write,
2286 buffer, lenp, ppos); 2332 buffer, lenp, ppos);
2287} 2333}
2288 2334
@@ -2307,7 +2353,7 @@ static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
2307} 2353}
2308 2354
2309static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, 2355static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
2310 int write, struct file *filp, void __user *buffer, 2356 int write, void __user *buffer,
2311 size_t *lenp, loff_t *ppos, 2357 size_t *lenp, loff_t *ppos,
2312 int (*conv)(int *negp, unsigned long *lvalp, int *valp, 2358 int (*conv)(int *negp, unsigned long *lvalp, int *valp,
2313 int write, void *data), 2359 int write, void *data),
@@ -2414,13 +2460,13 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
2414#undef TMPBUFLEN 2460#undef TMPBUFLEN
2415} 2461}
2416 2462
2417static int do_proc_dointvec(struct ctl_table *table, int write, struct file *filp, 2463static int do_proc_dointvec(struct ctl_table *table, int write,
2418 void __user *buffer, size_t *lenp, loff_t *ppos, 2464 void __user *buffer, size_t *lenp, loff_t *ppos,
2419 int (*conv)(int *negp, unsigned long *lvalp, int *valp, 2465 int (*conv)(int *negp, unsigned long *lvalp, int *valp,
2420 int write, void *data), 2466 int write, void *data),
2421 void *data) 2467 void *data)
2422{ 2468{
2423 return __do_proc_dointvec(table->data, table, write, filp, 2469 return __do_proc_dointvec(table->data, table, write,
2424 buffer, lenp, ppos, conv, data); 2470 buffer, lenp, ppos, conv, data);
2425} 2471}
2426 2472
@@ -2428,7 +2474,6 @@ static int do_proc_dointvec(struct ctl_table *table, int write, struct file *fil
2428 * proc_dointvec - read a vector of integers 2474 * proc_dointvec - read a vector of integers
2429 * @table: the sysctl table 2475 * @table: the sysctl table
2430 * @write: %TRUE if this is a write to the sysctl file 2476 * @write: %TRUE if this is a write to the sysctl file
2431 * @filp: the file structure
2432 * @buffer: the user buffer 2477 * @buffer: the user buffer
2433 * @lenp: the size of the user buffer 2478 * @lenp: the size of the user buffer
2434 * @ppos: file position 2479 * @ppos: file position
@@ -2438,10 +2483,10 @@ static int do_proc_dointvec(struct ctl_table *table, int write, struct file *fil
2438 * 2483 *
2439 * Returns 0 on success. 2484 * Returns 0 on success.
2440 */ 2485 */
2441int proc_dointvec(struct ctl_table *table, int write, struct file *filp, 2486int proc_dointvec(struct ctl_table *table, int write,
2442 void __user *buffer, size_t *lenp, loff_t *ppos) 2487 void __user *buffer, size_t *lenp, loff_t *ppos)
2443{ 2488{
2444 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, 2489 return do_proc_dointvec(table,write,buffer,lenp,ppos,
2445 NULL,NULL); 2490 NULL,NULL);
2446} 2491}
2447 2492
@@ -2449,7 +2494,7 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
2449 * Taint values can only be increased 2494 * Taint values can only be increased
2450 * This means we can safely use a temporary. 2495 * This means we can safely use a temporary.
2451 */ 2496 */
2452static int proc_taint(struct ctl_table *table, int write, struct file *filp, 2497static int proc_taint(struct ctl_table *table, int write,
2453 void __user *buffer, size_t *lenp, loff_t *ppos) 2498 void __user *buffer, size_t *lenp, loff_t *ppos)
2454{ 2499{
2455 struct ctl_table t; 2500 struct ctl_table t;
@@ -2461,7 +2506,7 @@ static int proc_taint(struct ctl_table *table, int write, struct file *filp,
2461 2506
2462 t = *table; 2507 t = *table;
2463 t.data = &tmptaint; 2508 t.data = &tmptaint;
2464 err = proc_doulongvec_minmax(&t, write, filp, buffer, lenp, ppos); 2509 err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
2465 if (err < 0) 2510 if (err < 0)
2466 return err; 2511 return err;
2467 2512
@@ -2513,7 +2558,6 @@ static int do_proc_dointvec_minmax_conv(int *negp, unsigned long *lvalp,
2513 * proc_dointvec_minmax - read a vector of integers with min/max values 2558 * proc_dointvec_minmax - read a vector of integers with min/max values
2514 * @table: the sysctl table 2559 * @table: the sysctl table
2515 * @write: %TRUE if this is a write to the sysctl file 2560 * @write: %TRUE if this is a write to the sysctl file
2516 * @filp: the file structure
2517 * @buffer: the user buffer 2561 * @buffer: the user buffer
2518 * @lenp: the size of the user buffer 2562 * @lenp: the size of the user buffer
2519 * @ppos: file position 2563 * @ppos: file position
@@ -2526,19 +2570,18 @@ static int do_proc_dointvec_minmax_conv(int *negp, unsigned long *lvalp,
2526 * 2570 *
2527 * Returns 0 on success. 2571 * Returns 0 on success.
2528 */ 2572 */
2529int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp, 2573int proc_dointvec_minmax(struct ctl_table *table, int write,
2530 void __user *buffer, size_t *lenp, loff_t *ppos) 2574 void __user *buffer, size_t *lenp, loff_t *ppos)
2531{ 2575{
2532 struct do_proc_dointvec_minmax_conv_param param = { 2576 struct do_proc_dointvec_minmax_conv_param param = {
2533 .min = (int *) table->extra1, 2577 .min = (int *) table->extra1,
2534 .max = (int *) table->extra2, 2578 .max = (int *) table->extra2,
2535 }; 2579 };
2536 return do_proc_dointvec(table, write, filp, buffer, lenp, ppos, 2580 return do_proc_dointvec(table, write, buffer, lenp, ppos,
2537 do_proc_dointvec_minmax_conv, &param); 2581 do_proc_dointvec_minmax_conv, &param);
2538} 2582}
2539 2583
2540static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write, 2584static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
2541 struct file *filp,
2542 void __user *buffer, 2585 void __user *buffer,
2543 size_t *lenp, loff_t *ppos, 2586 size_t *lenp, loff_t *ppos,
2544 unsigned long convmul, 2587 unsigned long convmul,
@@ -2643,21 +2686,19 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
2643} 2686}
2644 2687
2645static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, 2688static int do_proc_doulongvec_minmax(struct ctl_table *table, int write,
2646 struct file *filp,
2647 void __user *buffer, 2689 void __user *buffer,
2648 size_t *lenp, loff_t *ppos, 2690 size_t *lenp, loff_t *ppos,
2649 unsigned long convmul, 2691 unsigned long convmul,
2650 unsigned long convdiv) 2692 unsigned long convdiv)
2651{ 2693{
2652 return __do_proc_doulongvec_minmax(table->data, table, write, 2694 return __do_proc_doulongvec_minmax(table->data, table, write,
2653 filp, buffer, lenp, ppos, convmul, convdiv); 2695 buffer, lenp, ppos, convmul, convdiv);
2654} 2696}
2655 2697
2656/** 2698/**
2657 * proc_doulongvec_minmax - read a vector of long integers with min/max values 2699 * proc_doulongvec_minmax - read a vector of long integers with min/max values
2658 * @table: the sysctl table 2700 * @table: the sysctl table
2659 * @write: %TRUE if this is a write to the sysctl file 2701 * @write: %TRUE if this is a write to the sysctl file
2660 * @filp: the file structure
2661 * @buffer: the user buffer 2702 * @buffer: the user buffer
2662 * @lenp: the size of the user buffer 2703 * @lenp: the size of the user buffer
2663 * @ppos: file position 2704 * @ppos: file position
@@ -2670,17 +2711,16 @@ static int do_proc_doulongvec_minmax(struct ctl_table *table, int write,
2670 * 2711 *
2671 * Returns 0 on success. 2712 * Returns 0 on success.
2672 */ 2713 */
2673int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp, 2714int proc_doulongvec_minmax(struct ctl_table *table, int write,
2674 void __user *buffer, size_t *lenp, loff_t *ppos) 2715 void __user *buffer, size_t *lenp, loff_t *ppos)
2675{ 2716{
2676 return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l); 2717 return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l);
2677} 2718}
2678 2719
2679/** 2720/**
2680 * proc_doulongvec_ms_jiffies_minmax - read a vector of millisecond values with min/max values 2721 * proc_doulongvec_ms_jiffies_minmax - read a vector of millisecond values with min/max values
2681 * @table: the sysctl table 2722 * @table: the sysctl table
2682 * @write: %TRUE if this is a write to the sysctl file 2723 * @write: %TRUE if this is a write to the sysctl file
2683 * @filp: the file structure
2684 * @buffer: the user buffer 2724 * @buffer: the user buffer
2685 * @lenp: the size of the user buffer 2725 * @lenp: the size of the user buffer
2686 * @ppos: file position 2726 * @ppos: file position
@@ -2695,11 +2735,10 @@ int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp
2695 * Returns 0 on success. 2735 * Returns 0 on success.
2696 */ 2736 */
2697int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, 2737int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
2698 struct file *filp,
2699 void __user *buffer, 2738 void __user *buffer,
2700 size_t *lenp, loff_t *ppos) 2739 size_t *lenp, loff_t *ppos)
2701{ 2740{
2702 return do_proc_doulongvec_minmax(table, write, filp, buffer, 2741 return do_proc_doulongvec_minmax(table, write, buffer,
2703 lenp, ppos, HZ, 1000l); 2742 lenp, ppos, HZ, 1000l);
2704} 2743}
2705 2744
@@ -2775,7 +2814,6 @@ static int do_proc_dointvec_ms_jiffies_conv(int *negp, unsigned long *lvalp,
2775 * proc_dointvec_jiffies - read a vector of integers as seconds 2814 * proc_dointvec_jiffies - read a vector of integers as seconds
2776 * @table: the sysctl table 2815 * @table: the sysctl table
2777 * @write: %TRUE if this is a write to the sysctl file 2816 * @write: %TRUE if this is a write to the sysctl file
2778 * @filp: the file structure
2779 * @buffer: the user buffer 2817 * @buffer: the user buffer
2780 * @lenp: the size of the user buffer 2818 * @lenp: the size of the user buffer
2781 * @ppos: file position 2819 * @ppos: file position
@@ -2787,10 +2825,10 @@ static int do_proc_dointvec_ms_jiffies_conv(int *negp, unsigned long *lvalp,
2787 * 2825 *
2788 * Returns 0 on success. 2826 * Returns 0 on success.
2789 */ 2827 */
2790int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp, 2828int proc_dointvec_jiffies(struct ctl_table *table, int write,
2791 void __user *buffer, size_t *lenp, loff_t *ppos) 2829 void __user *buffer, size_t *lenp, loff_t *ppos)
2792{ 2830{
2793 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, 2831 return do_proc_dointvec(table,write,buffer,lenp,ppos,
2794 do_proc_dointvec_jiffies_conv,NULL); 2832 do_proc_dointvec_jiffies_conv,NULL);
2795} 2833}
2796 2834
@@ -2798,7 +2836,6 @@ int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
2798 * proc_dointvec_userhz_jiffies - read a vector of integers as 1/USER_HZ seconds 2836 * proc_dointvec_userhz_jiffies - read a vector of integers as 1/USER_HZ seconds
2799 * @table: the sysctl table 2837 * @table: the sysctl table
2800 * @write: %TRUE if this is a write to the sysctl file 2838 * @write: %TRUE if this is a write to the sysctl file
2801 * @filp: the file structure
2802 * @buffer: the user buffer 2839 * @buffer: the user buffer
2803 * @lenp: the size of the user buffer 2840 * @lenp: the size of the user buffer
2804 * @ppos: pointer to the file position 2841 * @ppos: pointer to the file position
@@ -2810,10 +2847,10 @@ int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
2810 * 2847 *
2811 * Returns 0 on success. 2848 * Returns 0 on success.
2812 */ 2849 */
2813int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp, 2850int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
2814 void __user *buffer, size_t *lenp, loff_t *ppos) 2851 void __user *buffer, size_t *lenp, loff_t *ppos)
2815{ 2852{
2816 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, 2853 return do_proc_dointvec(table,write,buffer,lenp,ppos,
2817 do_proc_dointvec_userhz_jiffies_conv,NULL); 2854 do_proc_dointvec_userhz_jiffies_conv,NULL);
2818} 2855}
2819 2856
@@ -2821,7 +2858,6 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file
2821 * proc_dointvec_ms_jiffies - read a vector of integers as 1 milliseconds 2858 * proc_dointvec_ms_jiffies - read a vector of integers as 1 milliseconds
2822 * @table: the sysctl table 2859 * @table: the sysctl table
2823 * @write: %TRUE if this is a write to the sysctl file 2860 * @write: %TRUE if this is a write to the sysctl file
2824 * @filp: the file structure
2825 * @buffer: the user buffer 2861 * @buffer: the user buffer
2826 * @lenp: the size of the user buffer 2862 * @lenp: the size of the user buffer
2827 * @ppos: file position 2863 * @ppos: file position
@@ -2834,14 +2870,14 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file
2834 * 2870 *
2835 * Returns 0 on success. 2871 * Returns 0 on success.
2836 */ 2872 */
2837int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp, 2873int proc_dointvec_ms_jiffies(struct ctl_table *table, int write,
2838 void __user *buffer, size_t *lenp, loff_t *ppos) 2874 void __user *buffer, size_t *lenp, loff_t *ppos)
2839{ 2875{
2840 return do_proc_dointvec(table, write, filp, buffer, lenp, ppos, 2876 return do_proc_dointvec(table, write, buffer, lenp, ppos,
2841 do_proc_dointvec_ms_jiffies_conv, NULL); 2877 do_proc_dointvec_ms_jiffies_conv, NULL);
2842} 2878}
2843 2879
2844static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp, 2880static int proc_do_cad_pid(struct ctl_table *table, int write,
2845 void __user *buffer, size_t *lenp, loff_t *ppos) 2881 void __user *buffer, size_t *lenp, loff_t *ppos)
2846{ 2882{
2847 struct pid *new_pid; 2883 struct pid *new_pid;
@@ -2850,7 +2886,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp
2850 2886
2851 tmp = pid_vnr(cad_pid); 2887 tmp = pid_vnr(cad_pid);
2852 2888
2853 r = __do_proc_dointvec(&tmp, table, write, filp, buffer, 2889 r = __do_proc_dointvec(&tmp, table, write, buffer,
2854 lenp, ppos, NULL, NULL); 2890 lenp, ppos, NULL, NULL);
2855 if (r || !write) 2891 if (r || !write)
2856 return r; 2892 return r;
@@ -2865,50 +2901,49 @@ static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp
2865 2901
2866#else /* CONFIG_PROC_FS */ 2902#else /* CONFIG_PROC_FS */
2867 2903
2868int proc_dostring(struct ctl_table *table, int write, struct file *filp, 2904int proc_dostring(struct ctl_table *table, int write,
2869 void __user *buffer, size_t *lenp, loff_t *ppos) 2905 void __user *buffer, size_t *lenp, loff_t *ppos)
2870{ 2906{
2871 return -ENOSYS; 2907 return -ENOSYS;
2872} 2908}
2873 2909
2874int proc_dointvec(struct ctl_table *table, int write, struct file *filp, 2910int proc_dointvec(struct ctl_table *table, int write,
2875 void __user *buffer, size_t *lenp, loff_t *ppos) 2911 void __user *buffer, size_t *lenp, loff_t *ppos)
2876{ 2912{
2877 return -ENOSYS; 2913 return -ENOSYS;
2878} 2914}
2879 2915
2880int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp, 2916int proc_dointvec_minmax(struct ctl_table *table, int write,
2881 void __user *buffer, size_t *lenp, loff_t *ppos) 2917 void __user *buffer, size_t *lenp, loff_t *ppos)
2882{ 2918{
2883 return -ENOSYS; 2919 return -ENOSYS;
2884} 2920}
2885 2921
2886int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp, 2922int proc_dointvec_jiffies(struct ctl_table *table, int write,
2887 void __user *buffer, size_t *lenp, loff_t *ppos) 2923 void __user *buffer, size_t *lenp, loff_t *ppos)
2888{ 2924{
2889 return -ENOSYS; 2925 return -ENOSYS;
2890} 2926}
2891 2927
2892int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp, 2928int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
2893 void __user *buffer, size_t *lenp, loff_t *ppos) 2929 void __user *buffer, size_t *lenp, loff_t *ppos)
2894{ 2930{
2895 return -ENOSYS; 2931 return -ENOSYS;
2896} 2932}
2897 2933
2898int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp, 2934int proc_dointvec_ms_jiffies(struct ctl_table *table, int write,
2899 void __user *buffer, size_t *lenp, loff_t *ppos) 2935 void __user *buffer, size_t *lenp, loff_t *ppos)
2900{ 2936{
2901 return -ENOSYS; 2937 return -ENOSYS;
2902} 2938}
2903 2939
2904int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp, 2940int proc_doulongvec_minmax(struct ctl_table *table, int write,
2905 void __user *buffer, size_t *lenp, loff_t *ppos) 2941 void __user *buffer, size_t *lenp, loff_t *ppos)
2906{ 2942{
2907 return -ENOSYS; 2943 return -ENOSYS;
2908} 2944}
2909 2945
2910int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, 2946int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
2911 struct file *filp,
2912 void __user *buffer, 2947 void __user *buffer,
2913 size_t *lenp, loff_t *ppos) 2948 size_t *lenp, loff_t *ppos)
2914{ 2949{
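
For context, a minimal sketch of what a caller-side proc handler looks like once the unused struct file *filp parameter is dropped, which is what this hunk does throughout kernel/sysctl.c. The sketch is not part of the patch: the names my_value, my_min, my_max, my_handler and my_table are hypothetical, and registration of the table (for example via register_sysctl_table()) is omitted.

#include <linux/sysctl.h>
#include <linux/kernel.h>

static int my_value;
static int my_min;		/* hypothetical bounds for the example */
static int my_max = 100;

/*
 * Custom handler with the new five-argument prototype: it delegates
 * parsing and formatting to proc_dointvec_minmax() and only adds a hook
 * that runs after a successful write.
 */
static int my_handler(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!ret && write)
		pr_info("my_value is now %d\n", my_value);
	return ret;
}

static struct ctl_table my_table[] = {
	{
		.procname	= "my_value",
		.data		= &my_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= my_handler,
		.extra1		= &my_min,
		.extra2		= &my_max,
	},
	{ }
};

The only difference from the old interface is the missing struct file * argument, both in the handler prototype and in the call into proc_dointvec_minmax().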
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 0b0a6366c9d4..ee266620b06c 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,4 +1,4 @@
1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o 1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o timeconv.o
2 2
3obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o 3obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
4obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o 4obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 09113347d328..5e18c6ab2c6a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -394,15 +394,11 @@ void clocksource_resume(void)
394{ 394{
395 struct clocksource *cs; 395 struct clocksource *cs;
396 396
397 mutex_lock(&clocksource_mutex);
398
399 list_for_each_entry(cs, &clocksource_list, list) 397 list_for_each_entry(cs, &clocksource_list, list)
400 if (cs->resume) 398 if (cs->resume)
401 cs->resume(); 399 cs->resume();
402 400
403 clocksource_resume_watchdog(); 401 clocksource_resume_watchdog();
404
405 mutex_unlock(&clocksource_mutex);
406} 402}
407 403
408/** 404/**
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e0f59a21c061..89aed5933ed4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -231,6 +231,13 @@ void tick_nohz_stop_sched_tick(int inidle)
231 if (!inidle && !ts->inidle) 231 if (!inidle && !ts->inidle)
232 goto end; 232 goto end;
233 233
234 /*
235 * Set ts->inidle unconditionally. Even if the system did not
236 * switch to NOHZ mode the cpu frequency governors rely on the
237 * update of the idle time accounting in tick_nohz_start_idle().
238 */
239 ts->inidle = 1;
240
234 now = tick_nohz_start_idle(ts); 241 now = tick_nohz_start_idle(ts);
235 242
236 /* 243 /*
@@ -248,8 +255,6 @@ void tick_nohz_stop_sched_tick(int inidle)
248 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) 255 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
249 goto end; 256 goto end;
250 257
251 ts->inidle = 1;
252
253 if (need_resched()) 258 if (need_resched())
254 goto end; 259 goto end;
255 260
diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c
new file mode 100644
index 000000000000..86628e755f38
--- /dev/null
+++ b/kernel/time/timeconv.c
@@ -0,0 +1,127 @@
1/*
2 * Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
3 * This file is part of the GNU C Library.
4 * Contributed by Paul Eggert (eggert@twinsun.com).
5 *
6 * The GNU C Library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 *
11 * The GNU C Library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
15 *
16 * You should have received a copy of the GNU Library General Public
17 * License along with the GNU C Library; see the file COPYING.LIB. If not,
18 * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
20 */
21
22/*
23 * Converts the calendar time to broken-down time representation
24 * Based on code from glibc-2.6
25 *
26 * 2009-7-14:
27 * Moved from glibc-2.6 to kernel by Zhaolei<zhaolei@cn.fujitsu.com>
28 */
29
30#include <linux/time.h>
31#include <linux/module.h>
32
33/*
34 * Nonzero if YEAR is a leap year (every 4 years,
35 * except every 100th isn't, and every 400th is).
36 */
37static int __isleap(long year)
38{
39 return (year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0);
40}
41
42/* do a mathdiv for long type */
43static long math_div(long a, long b)
44{
45 return a / b - (a % b < 0);
46}
47
48/* How many leap years between y1 and y2; y1 must be less than or equal to y2 */
49static long leaps_between(long y1, long y2)
50{
51 long leaps1 = math_div(y1 - 1, 4) - math_div(y1 - 1, 100)
52 + math_div(y1 - 1, 400);
53 long leaps2 = math_div(y2 - 1, 4) - math_div(y2 - 1, 100)
54 + math_div(y2 - 1, 400);
55 return leaps2 - leaps1;
56}
57
58/* How many days come before each month (0-12). */
59static const unsigned short __mon_yday[2][13] = {
60 /* Normal years. */
61 {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
62 /* Leap years. */
63 {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}
64};
65
66#define SECS_PER_HOUR (60 * 60)
67#define SECS_PER_DAY (SECS_PER_HOUR * 24)
68
69/**
70 * time_to_tm - converts the calendar time to local broken-down time
71 *
72 * @totalsecs: the number of seconds elapsed since 00:00:00 on January 1, 1970,
73 * Coordinated Universal Time (UTC).
74 * @offset: offset seconds to add to totalsecs.
75 * @result: pointer to the struct tm variable that receives the broken-down time
76 */
77void time_to_tm(time_t totalsecs, int offset, struct tm *result)
78{
79 long days, rem, y;
80 const unsigned short *ip;
81
82 days = totalsecs / SECS_PER_DAY;
83 rem = totalsecs % SECS_PER_DAY;
84 rem += offset;
85 while (rem < 0) {
86 rem += SECS_PER_DAY;
87 --days;
88 }
89 while (rem >= SECS_PER_DAY) {
90 rem -= SECS_PER_DAY;
91 ++days;
92 }
93
94 result->tm_hour = rem / SECS_PER_HOUR;
95 rem %= SECS_PER_HOUR;
96 result->tm_min = rem / 60;
97 result->tm_sec = rem % 60;
98
99 /* January 1, 1970 was a Thursday. */
100 result->tm_wday = (4 + days) % 7;
101 if (result->tm_wday < 0)
102 result->tm_wday += 7;
103
104 y = 1970;
105
106 while (days < 0 || days >= (__isleap(y) ? 366 : 365)) {
107 /* Guess a corrected year, assuming 365 days per year. */
108 long yg = y + math_div(days, 365);
109
110 /* Adjust DAYS and Y to match the guessed year. */
111 days -= (yg - y) * 365 + leaps_between(y, yg);
112 y = yg;
113 }
114
115 result->tm_year = y - 1900;
116
117 result->tm_yday = days;
118
119 ip = __mon_yday[__isleap(y)];
120 for (y = 11; days < ip[y]; y--)
121 continue;
122 days -= ip[y];
123
124 result->tm_mon = y;
125 result->tm_mday = days + 1;
126}
127EXPORT_SYMBOL(time_to_tm);
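
A minimal, hypothetical usage sketch (not part of this patch), assuming the struct tm definition and time_to_tm() declaration that this file pairs with in <linux/time.h>: convert the current wall-clock seconds to broken-down UTC and print them. The function name example_print_utc_date is made up.

#include <linux/time.h>
#include <linux/kernel.h>

static void example_print_utc_date(void)
{
	struct tm tm;

	/* get_seconds() returns seconds since the Unix epoch; an offset
	 * of 0 keeps the result in UTC. */
	time_to_tm(get_seconds(), 0, &tm);

	/* tm_year counts from 1900 and tm_mon from 0, as in userspace. */
	pr_info("UTC: %04ld-%02d-%02d %02d:%02d:%02d\n",
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec);
}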
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index fddd69d16e03..1b5b7aa2fdfd 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -275,7 +275,7 @@ static int timer_list_open(struct inode *inode, struct file *filp)
275 return single_open(filp, timer_list_show, NULL); 275 return single_open(filp, timer_list_show, NULL);
276} 276}
277 277
278static struct file_operations timer_list_fops = { 278static const struct file_operations timer_list_fops = {
279 .open = timer_list_open, 279 .open = timer_list_open,
280 .read = seq_read, 280 .read = seq_read,
281 .llseek = seq_lseek, 281 .llseek = seq_lseek,
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 4cde8b9c716f..ee5681f8d7ec 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -395,7 +395,7 @@ static int tstats_open(struct inode *inode, struct file *filp)
395 return single_open(filp, tstats_show, NULL); 395 return single_open(filp, tstats_show, NULL);
396} 396}
397 397
398static struct file_operations tstats_fops = { 398static const struct file_operations tstats_fops = {
399 .open = tstats_open, 399 .open = tstats_open,
400 .read = seq_read, 400 .read = seq_read,
401 .write = tstats_write, 401 .write = tstats_write,
diff --git a/kernel/timer.c b/kernel/timer.c
index bbb51074680e..5db5a8d26811 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -37,7 +37,7 @@
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/tick.h> 38#include <linux/tick.h>
39#include <linux/kallsyms.h> 39#include <linux/kallsyms.h>
40#include <linux/perf_counter.h> 40#include <linux/perf_event.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42 42
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
@@ -46,6 +46,9 @@
46#include <asm/timex.h> 46#include <asm/timex.h>
47#include <asm/io.h> 47#include <asm/io.h>
48 48
49#define CREATE_TRACE_POINTS
50#include <trace/events/timer.h>
51
49u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 52u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
50 53
51EXPORT_SYMBOL(jiffies_64); 54EXPORT_SYMBOL(jiffies_64);
@@ -521,6 +524,25 @@ static inline void debug_timer_activate(struct timer_list *timer) { }
521static inline void debug_timer_deactivate(struct timer_list *timer) { } 524static inline void debug_timer_deactivate(struct timer_list *timer) { }
522#endif 525#endif
523 526
527static inline void debug_init(struct timer_list *timer)
528{
529 debug_timer_init(timer);
530 trace_timer_init(timer);
531}
532
533static inline void
534debug_activate(struct timer_list *timer, unsigned long expires)
535{
536 debug_timer_activate(timer);
537 trace_timer_start(timer, expires);
538}
539
540static inline void debug_deactivate(struct timer_list *timer)
541{
542 debug_timer_deactivate(timer);
543 trace_timer_cancel(timer);
544}
545
524static void __init_timer(struct timer_list *timer, 546static void __init_timer(struct timer_list *timer,
525 const char *name, 547 const char *name,
526 struct lock_class_key *key) 548 struct lock_class_key *key)
@@ -549,7 +571,7 @@ void init_timer_key(struct timer_list *timer,
549 const char *name, 571 const char *name,
550 struct lock_class_key *key) 572 struct lock_class_key *key)
551{ 573{
552 debug_timer_init(timer); 574 debug_init(timer);
553 __init_timer(timer, name, key); 575 __init_timer(timer, name, key);
554} 576}
555EXPORT_SYMBOL(init_timer_key); 577EXPORT_SYMBOL(init_timer_key);
@@ -568,7 +590,7 @@ static inline void detach_timer(struct timer_list *timer,
568{ 590{
569 struct list_head *entry = &timer->entry; 591 struct list_head *entry = &timer->entry;
570 592
571 debug_timer_deactivate(timer); 593 debug_deactivate(timer);
572 594
573 __list_del(entry->prev, entry->next); 595 __list_del(entry->prev, entry->next);
574 if (clear_pending) 596 if (clear_pending)
@@ -632,7 +654,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
632 goto out_unlock; 654 goto out_unlock;
633 } 655 }
634 656
635 debug_timer_activate(timer); 657 debug_activate(timer, expires);
636 658
637 new_base = __get_cpu_var(tvec_bases); 659 new_base = __get_cpu_var(tvec_bases);
638 660
@@ -787,7 +809,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
787 BUG_ON(timer_pending(timer) || !timer->function); 809 BUG_ON(timer_pending(timer) || !timer->function);
788 spin_lock_irqsave(&base->lock, flags); 810 spin_lock_irqsave(&base->lock, flags);
789 timer_set_base(timer, base); 811 timer_set_base(timer, base);
790 debug_timer_activate(timer); 812 debug_activate(timer, timer->expires);
791 if (time_before(timer->expires, base->next_timer) && 813 if (time_before(timer->expires, base->next_timer) &&
792 !tbase_get_deferrable(timer->base)) 814 !tbase_get_deferrable(timer->base))
793 base->next_timer = timer->expires; 815 base->next_timer = timer->expires;
@@ -1000,7 +1022,9 @@ static inline void __run_timers(struct tvec_base *base)
1000 */ 1022 */
1001 lock_map_acquire(&lockdep_map); 1023 lock_map_acquire(&lockdep_map);
1002 1024
1025 trace_timer_expire_entry(timer);
1003 fn(data); 1026 fn(data);
1027 trace_timer_expire_exit(timer);
1004 1028
1005 lock_map_release(&lockdep_map); 1029 lock_map_release(&lockdep_map);
1006 1030
@@ -1187,7 +1211,7 @@ static void run_timer_softirq(struct softirq_action *h)
1187{ 1211{
1188 struct tvec_base *base = __get_cpu_var(tvec_bases); 1212 struct tvec_base *base = __get_cpu_var(tvec_bases);
1189 1213
1190 perf_counter_do_pending(); 1214 perf_event_do_pending();
1191 1215
1192 hrtimer_run_pending(); 1216 hrtimer_run_pending();
1193 1217
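
The debug_init()/debug_activate()/debug_deactivate() wrappers added above fold the existing timer debug-object hooks together with the new timer tracepoints. As a hypothetical illustration (not part of the patch), a plain timer user like the sketch below would now emit timer_init when the timer is set up, timer_start when it is armed, timer_expire_entry/timer_expire_exit around its callback, and timer_cancel when a pending timer is removed; the demo_* names are made up.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

static struct timer_list demo_timer;

/* Runs from the timer softirq, bracketed by trace_timer_expire_entry()
 * and trace_timer_expire_exit() in __run_timers() above. */
static void demo_timer_fn(unsigned long data)
{
	pr_info("demo timer fired, data=%lu\n", data);
}

static void demo_timer_arm(void)
{
	setup_timer(&demo_timer, demo_timer_fn, 0);	/* -> timer_init */
	mod_timer(&demo_timer, jiffies + HZ);		/* -> timer_start */
}

static void demo_timer_disarm(void)
{
	del_timer_sync(&demo_timer);	/* -> timer_cancel if still pending */
}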
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e71634604400..b416512ad17f 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -83,7 +83,7 @@ config RING_BUFFER_ALLOW_SWAP
83# This allows those options to appear when no other tracer is selected. But the 83# This allows those options to appear when no other tracer is selected. But the
84# options do not appear when something else selects it. We need the two options 84# options do not appear when something else selects it. We need the two options
85# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the 85# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
86# hidding of the automatic options options. 86# hidding of the automatic options.
87 87
88config TRACING 88config TRACING
89 bool 89 bool
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 3eb159c277c8..d9d6206e0b14 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
856} 856}
857 857
858/** 858/**
859 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
860 * @q: queue the io is for
861 * @rq: the source request
862 * @dev: target device
863 * @from: source sector
864 *
865 * Description:
866 * Device mapper remaps request to other devices.
867 * Add a trace for that action.
868 *
869 **/
870static void blk_add_trace_rq_remap(struct request_queue *q,
871 struct request *rq, dev_t dev,
872 sector_t from)
873{
874 struct blk_trace *bt = q->blk_trace;
875 struct blk_io_trace_remap r;
876
877 if (likely(!bt))
878 return;
879
880 r.device_from = cpu_to_be32(dev);
881 r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
882 r.sector_from = cpu_to_be64(from);
883
884 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
885 rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
886 sizeof(r), &r);
887}
888
889/**
859 * blk_add_driver_data - Add binary message with driver-specific data 890 * blk_add_driver_data - Add binary message with driver-specific data
860 * @q: queue the io is for 891 * @q: queue the io is for
861 * @rq: io request 892 * @rq: io request
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void)
922 WARN_ON(ret); 953 WARN_ON(ret);
923 ret = register_trace_block_remap(blk_add_trace_remap); 954 ret = register_trace_block_remap(blk_add_trace_remap);
924 WARN_ON(ret); 955 WARN_ON(ret);
956 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
957 WARN_ON(ret);
925} 958}
926 959
927static void blk_unregister_tracepoints(void) 960static void blk_unregister_tracepoints(void)
928{ 961{
962 unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
929 unregister_trace_block_remap(blk_add_trace_remap); 963 unregister_trace_block_remap(blk_add_trace_remap);
930 unregister_trace_block_split(blk_add_trace_split); 964 unregister_trace_block_split(blk_add_trace_split);
931 unregister_trace_block_unplug_io(blk_add_trace_unplug_io); 965 unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev)
1657 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); 1691 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1658} 1692}
1659 1693
1694void blk_trace_remove_sysfs(struct device *dev)
1695{
1696 sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1697}
1698
1660#endif /* CONFIG_BLK_DEV_IO_TRACE */ 1699#endif /* CONFIG_BLK_DEV_IO_TRACE */
1661 1700
1662#ifdef CONFIG_EVENT_TRACING 1701#ifdef CONFIG_EVENT_TRACING
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index c71e91bf7372..37ba67e33265 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
225 if (ftrace_trace_function == ftrace_stub) 225 if (ftrace_trace_function == ftrace_stub)
226 return; 226 return;
227 227
228#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
228 func = ftrace_trace_function; 229 func = ftrace_trace_function;
230#else
231 func = __ftrace_trace_function;
232#endif
229 233
230 if (ftrace_pid_trace) { 234 if (ftrace_pid_trace) {
231 set_ftrace_pid_function(func); 235 set_ftrace_pid_function(func);
@@ -1074,14 +1078,9 @@ static void ftrace_replace_code(int enable)
1074 failed = __ftrace_replace_code(rec, enable); 1078 failed = __ftrace_replace_code(rec, enable);
1075 if (failed) { 1079 if (failed) {
1076 rec->flags |= FTRACE_FL_FAILED; 1080 rec->flags |= FTRACE_FL_FAILED;
1077 if ((system_state == SYSTEM_BOOTING) || 1081 ftrace_bug(failed, rec->ip);
1078 !core_kernel_text(rec->ip)) { 1082 /* Stop processing */
1079 ftrace_free_rec(rec); 1083 return;
1080 } else {
1081 ftrace_bug(failed, rec->ip);
1082 /* Stop processing */
1083 return;
1084 }
1085 } 1084 }
1086 } while_for_each_ftrace_rec(); 1085 } while_for_each_ftrace_rec();
1087} 1086}
@@ -1520,7 +1519,7 @@ static int t_show(struct seq_file *m, void *v)
1520 return 0; 1519 return 0;
1521} 1520}
1522 1521
1523static struct seq_operations show_ftrace_seq_ops = { 1522static const struct seq_operations show_ftrace_seq_ops = {
1524 .start = t_start, 1523 .start = t_start,
1525 .next = t_next, 1524 .next = t_next,
1526 .stop = t_stop, 1525 .stop = t_stop,
@@ -1621,8 +1620,10 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1621 if (!ret) { 1620 if (!ret) {
1622 struct seq_file *m = file->private_data; 1621 struct seq_file *m = file->private_data;
1623 m->private = iter; 1622 m->private = iter;
1624 } else 1623 } else {
1624 trace_parser_put(&iter->parser);
1625 kfree(iter); 1625 kfree(iter);
1626 }
1626 } else 1627 } else
1627 file->private_data = iter; 1628 file->private_data = iter;
1628 mutex_unlock(&ftrace_regex_lock); 1629 mutex_unlock(&ftrace_regex_lock);
@@ -2202,7 +2203,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2202 struct trace_parser *parser; 2203 struct trace_parser *parser;
2203 ssize_t ret, read; 2204 ssize_t ret, read;
2204 2205
2205 if (!cnt || cnt < 0) 2206 if (!cnt)
2206 return 0; 2207 return 0;
2207 2208
2208 mutex_lock(&ftrace_regex_lock); 2209 mutex_lock(&ftrace_regex_lock);
@@ -2216,7 +2217,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2216 parser = &iter->parser; 2217 parser = &iter->parser;
2217 read = trace_get_user(parser, ubuf, cnt, ppos); 2218 read = trace_get_user(parser, ubuf, cnt, ppos);
2218 2219
2219 if (trace_parser_loaded(parser) && 2220 if (read >= 0 && trace_parser_loaded(parser) &&
2220 !trace_parser_cont(parser)) { 2221 !trace_parser_cont(parser)) {
2221 ret = ftrace_process_regex(parser->buffer, 2222 ret = ftrace_process_regex(parser->buffer,
2222 parser->idx, enable); 2223 parser->idx, enable);
@@ -2459,7 +2460,7 @@ static int g_show(struct seq_file *m, void *v)
2459 return 0; 2460 return 0;
2460} 2461}
2461 2462
2462static struct seq_operations ftrace_graph_seq_ops = { 2463static const struct seq_operations ftrace_graph_seq_ops = {
2463 .start = g_start, 2464 .start = g_start,
2464 .next = g_next, 2465 .next = g_next,
2465 .stop = g_stop, 2466 .stop = g_stop,
@@ -2552,8 +2553,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2552 size_t cnt, loff_t *ppos) 2553 size_t cnt, loff_t *ppos)
2553{ 2554{
2554 struct trace_parser parser; 2555 struct trace_parser parser;
2555 size_t read = 0; 2556 ssize_t read, ret;
2556 ssize_t ret;
2557 2557
2558 if (!cnt || cnt < 0) 2558 if (!cnt || cnt < 0)
2559 return 0; 2559 return 0;
@@ -2562,29 +2562,31 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2562 2562
2563 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) { 2563 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2564 ret = -EBUSY; 2564 ret = -EBUSY;
2565 goto out; 2565 goto out_unlock;
2566 } 2566 }
2567 2567
2568 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) { 2568 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2569 ret = -ENOMEM; 2569 ret = -ENOMEM;
2570 goto out; 2570 goto out_unlock;
2571 } 2571 }
2572 2572
2573 read = trace_get_user(&parser, ubuf, cnt, ppos); 2573 read = trace_get_user(&parser, ubuf, cnt, ppos);
2574 2574
2575 if (trace_parser_loaded((&parser))) { 2575 if (read >= 0 && trace_parser_loaded((&parser))) {
2576 parser.buffer[parser.idx] = 0; 2576 parser.buffer[parser.idx] = 0;
2577 2577
2578 /* we allow only one expression at a time */ 2578 /* we allow only one expression at a time */
2579 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, 2579 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2580 parser.buffer); 2580 parser.buffer);
2581 if (ret) 2581 if (ret)
2582 goto out; 2582 goto out_free;
2583 } 2583 }
2584 2584
2585 ret = read; 2585 ret = read;
2586 out: 2586
2587out_free:
2587 trace_parser_put(&parser); 2588 trace_parser_put(&parser);
2589out_unlock:
2588 mutex_unlock(&graph_lock); 2590 mutex_unlock(&graph_lock);
2589 2591
2590 return ret; 2592 return ret;
@@ -2655,19 +2657,17 @@ static int ftrace_convert_nops(struct module *mod,
2655} 2657}
2656 2658
2657#ifdef CONFIG_MODULES 2659#ifdef CONFIG_MODULES
2658void ftrace_release(void *start, void *end) 2660void ftrace_release_mod(struct module *mod)
2659{ 2661{
2660 struct dyn_ftrace *rec; 2662 struct dyn_ftrace *rec;
2661 struct ftrace_page *pg; 2663 struct ftrace_page *pg;
2662 unsigned long s = (unsigned long)start;
2663 unsigned long e = (unsigned long)end;
2664 2664
2665 if (ftrace_disabled || !start || start == end) 2665 if (ftrace_disabled)
2666 return; 2666 return;
2667 2667
2668 mutex_lock(&ftrace_lock); 2668 mutex_lock(&ftrace_lock);
2669 do_for_each_ftrace_rec(pg, rec) { 2669 do_for_each_ftrace_rec(pg, rec) {
2670 if ((rec->ip >= s) && (rec->ip < e)) { 2670 if (within_module_core(rec->ip, mod)) {
2671 /* 2671 /*
2672 * rec->ip is changed in ftrace_free_rec() 2672 * rec->ip is changed in ftrace_free_rec()
2673 * It should not between s and e if record was freed. 2673 * It should not between s and e if record was freed.
@@ -2699,9 +2699,7 @@ static int ftrace_module_notify(struct notifier_block *self,
2699 mod->num_ftrace_callsites); 2699 mod->num_ftrace_callsites);
2700 break; 2700 break;
2701 case MODULE_STATE_GOING: 2701 case MODULE_STATE_GOING:
2702 ftrace_release(mod->ftrace_callsites, 2702 ftrace_release_mod(mod);
2703 mod->ftrace_callsites +
2704 mod->num_ftrace_callsites);
2705 break; 2703 break;
2706 } 2704 }
2707 2705
@@ -3015,7 +3013,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
3015 3013
3016int 3014int
3017ftrace_enable_sysctl(struct ctl_table *table, int write, 3015ftrace_enable_sysctl(struct ctl_table *table, int write,
3018 struct file *file, void __user *buffer, size_t *lenp, 3016 void __user *buffer, size_t *lenp,
3019 loff_t *ppos) 3017 loff_t *ppos)
3020{ 3018{
3021 int ret; 3019 int ret;
@@ -3025,7 +3023,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
3025 3023
3026 mutex_lock(&ftrace_lock); 3024 mutex_lock(&ftrace_lock);
3027 3025
3028 ret = proc_dointvec(table, write, file, buffer, lenp, ppos); 3026 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3029 3027
3030 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) 3028 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3031 goto out; 3029 goto out;
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c8549..a91da69f153a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
501 return 1; 501 return 1;
502 } 502 }
503 503
504 if (!register_tracer(&kmem_tracer)) { 504 if (register_tracer(&kmem_tracer) != 0) {
505 pr_warning("Warning: could not register the kmem tracer\n"); 505 pr_warning("Warning: could not register the kmem tracer\n");
506 return 1; 506 return 1;
507 } 507 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a35925d222ba..45068269ebb1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -415,7 +415,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
415 415
416 /* read the non-space input */ 416 /* read the non-space input */
417 while (cnt && !isspace(ch)) { 417 while (cnt && !isspace(ch)) {
418 if (parser->idx < parser->size) 418 if (parser->idx < parser->size - 1)
419 parser->buffer[parser->idx++] = ch; 419 parser->buffer[parser->idx++] = ch;
420 else { 420 else {
421 ret = -EINVAL; 421 ret = -EINVAL;
@@ -1949,7 +1949,7 @@ static int s_show(struct seq_file *m, void *v)
1949 return 0; 1949 return 0;
1950} 1950}
1951 1951
1952static struct seq_operations tracer_seq_ops = { 1952static const struct seq_operations tracer_seq_ops = {
1953 .start = s_start, 1953 .start = s_start,
1954 .next = s_next, 1954 .next = s_next,
1955 .stop = s_stop, 1955 .stop = s_stop,
@@ -1984,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
1984 if (current_trace) 1984 if (current_trace)
1985 *iter->trace = *current_trace; 1985 *iter->trace = *current_trace;
1986 1986
1987 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) 1987 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
1988 goto fail; 1988 goto fail;
1989 1989
1990 cpumask_clear(iter->started);
1991
1992 if (current_trace && current_trace->print_max) 1990 if (current_trace && current_trace->print_max)
1993 iter->tr = &max_tr; 1991 iter->tr = &max_tr;
1994 else 1992 else
@@ -2163,7 +2161,7 @@ static int t_show(struct seq_file *m, void *v)
2163 return 0; 2161 return 0;
2164} 2162}
2165 2163
2166static struct seq_operations show_traces_seq_ops = { 2164static const struct seq_operations show_traces_seq_ops = {
2167 .start = t_start, 2165 .start = t_start,
2168 .next = t_next, 2166 .next = t_next,
2169 .stop = t_stop, 2167 .stop = t_stop,
@@ -4389,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
4389 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) 4387 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
4390 goto out_free_buffer_mask; 4388 goto out_free_buffer_mask;
4391 4389
4392 if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) 4390 if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
4393 goto out_free_tracing_cpumask; 4391 goto out_free_tracing_cpumask;
4394 4392
4395 /* To save memory, keep the ring buffer size to its minimum */ 4393 /* To save memory, keep the ring buffer size to its minimum */
@@ -4400,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
4400 4398
4401 cpumask_copy(tracing_buffer_mask, cpu_possible_mask); 4399 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
4402 cpumask_copy(tracing_cpumask, cpu_all_mask); 4400 cpumask_copy(tracing_cpumask, cpu_all_mask);
4403 cpumask_clear(tracing_reader_cpumask);
4404 4401
4405 /* TODO: make the number of buffers hot pluggable with CPUS */ 4402 /* TODO: make the number of buffers hot pluggable with CPUS */
4406 global_trace.buffer = ring_buffer_alloc(ring_buf_size, 4403 global_trace.buffer = ring_buffer_alloc(ring_buf_size,
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 7a7a9fd249a9..4a194f08f88c 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
34 struct trace_array *tr = branch_tracer; 34 struct trace_array *tr = branch_tracer;
35 struct ring_buffer_event *event; 35 struct ring_buffer_event *event;
36 struct trace_branch *entry; 36 struct trace_branch *entry;
37 struct ring_buffer *buffer;
37 unsigned long flags; 38 unsigned long flags;
38 int cpu, pc; 39 int cpu, pc;
39 const char *p; 40 const char *p;
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
54 goto out; 55 goto out;
55 56
56 pc = preempt_count(); 57 pc = preempt_count();
57 event = trace_buffer_lock_reserve(tr, TRACE_BRANCH, 58 buffer = tr->buffer;
59 event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
58 sizeof(*entry), flags, pc); 60 sizeof(*entry), flags, pc);
59 if (!event) 61 if (!event)
60 goto out; 62 goto out;
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
74 entry->line = f->line; 76 entry->line = f->line;
75 entry->correct = val == expect; 77 entry->correct = val == expect;
76 78
77 if (!filter_check_discard(call, entry, tr->buffer, event)) 79 if (!filter_check_discard(call, entry, buffer, event))
78 ring_buffer_unlock_commit(tr->buffer, event); 80 ring_buffer_unlock_commit(buffer, event);
79 81
80 out: 82 out:
81 atomic_dec(&tr->data[cpu]->disabled); 83 atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index dd44b8768867..8d5c171cc998 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -31,7 +31,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
31 if (atomic_inc_return(&event->profile_count)) 31 if (atomic_inc_return(&event->profile_count))
32 return 0; 32 return 0;
33 33
34 if (!total_profile_count++) { 34 if (!total_profile_count) {
35 buf = (char *)alloc_percpu(profile_buf_t); 35 buf = (char *)alloc_percpu(profile_buf_t);
36 if (!buf) 36 if (!buf)
37 goto fail_buf; 37 goto fail_buf;
@@ -46,14 +46,19 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
46 } 46 }
47 47
48 ret = event->profile_enable(); 48 ret = event->profile_enable();
49 if (!ret) 49 if (!ret) {
50 total_profile_count++;
50 return 0; 51 return 0;
52 }
51 53
52 kfree(trace_profile_buf_nmi);
53fail_buf_nmi: 54fail_buf_nmi:
54 kfree(trace_profile_buf); 55 if (!total_profile_count) {
56 free_percpu(trace_profile_buf_nmi);
57 free_percpu(trace_profile_buf);
58 trace_profile_buf_nmi = NULL;
59 trace_profile_buf = NULL;
60 }
55fail_buf: 61fail_buf:
56 total_profile_count--;
57 atomic_dec(&event->profile_count); 62 atomic_dec(&event->profile_count);
58 63
59 return ret; 64 return ret;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 6f03c8a1105e..d128f65778e6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -232,10 +232,9 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
232 size_t cnt, loff_t *ppos) 232 size_t cnt, loff_t *ppos)
233{ 233{
234 struct trace_parser parser; 234 struct trace_parser parser;
235 size_t read = 0; 235 ssize_t read, ret;
236 ssize_t ret;
237 236
238 if (!cnt || cnt < 0) 237 if (!cnt)
239 return 0; 238 return 0;
240 239
241 ret = tracing_update_buffers(); 240 ret = tracing_update_buffers();
@@ -247,7 +246,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
247 246
248 read = trace_get_user(&parser, ubuf, cnt, ppos); 247 read = trace_get_user(&parser, ubuf, cnt, ppos);
249 248
250 if (trace_parser_loaded((&parser))) { 249 if (read >= 0 && trace_parser_loaded((&parser))) {
251 int set = 1; 250 int set = 1;
252 251
253 if (*parser.buffer == '!') 252 if (*parser.buffer == '!')
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index ca7d7c4d0c2a..69543a905cd5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -155,7 +155,7 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
155 seq_print_ip_sym(seq, it->from, symflags) && 155 seq_print_ip_sym(seq, it->from, symflags) &&
156 trace_seq_printf(seq, "\n")) 156 trace_seq_printf(seq, "\n"))
157 return TRACE_TYPE_HANDLED; 157 return TRACE_TYPE_HANDLED;
158 return TRACE_TYPE_PARTIAL_LINE;; 158 return TRACE_TYPE_PARTIAL_LINE;
159 } 159 }
160 return TRACE_TYPE_UNHANDLED; 160 return TRACE_TYPE_UNHANDLED;
161} 161}
@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to)
165 struct ftrace_event_call *call = &event_hw_branch; 165 struct ftrace_event_call *call = &event_hw_branch;
166 struct trace_array *tr = hw_branch_trace; 166 struct trace_array *tr = hw_branch_trace;
167 struct ring_buffer_event *event; 167 struct ring_buffer_event *event;
168 struct ring_buffer *buf;
168 struct hw_branch_entry *entry; 169 struct hw_branch_entry *entry;
169 unsigned long irq1; 170 unsigned long irq1;
170 int cpu; 171 int cpu;
@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to)
180 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) 181 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
181 goto out; 182 goto out;
182 183
183 event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES, 184 buf = tr->buffer;
185 event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
184 sizeof(*entry), 0, 0); 186 sizeof(*entry), 0, 0);
185 if (!event) 187 if (!event)
186 goto out; 188 goto out;
@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to)
189 entry->ent.type = TRACE_HW_BRANCHES; 191 entry->ent.type = TRACE_HW_BRANCHES;
190 entry->from = from; 192 entry->from = from;
191 entry->to = to; 193 entry->to = to;
192 if (!filter_check_discard(call, entry, tr->buffer, event)) 194 if (!filter_check_discard(call, entry, buf, event))
193 trace_buffer_unlock_commit(tr, event, 0, 0); 195 trace_buffer_unlock_commit(buf, event, 0, 0);
194 196
195 out: 197 out:
196 atomic_dec(&tr->data[cpu]->disabled); 198 atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index f572f44c6e1e..ed17565826b0 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -486,16 +486,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
486 hardirq ? 'h' : softirq ? 's' : '.')) 486 hardirq ? 'h' : softirq ? 's' : '.'))
487 return 0; 487 return 0;
488 488
489 if (entry->lock_depth < 0) 489 if (entry->preempt_count)
490 ret = trace_seq_putc(s, '.'); 490 ret = trace_seq_printf(s, "%x", entry->preempt_count);
491 else 491 else
492 ret = trace_seq_printf(s, "%d", entry->lock_depth); 492 ret = trace_seq_putc(s, '.');
493
493 if (!ret) 494 if (!ret)
494 return 0; 495 return 0;
495 496
496 if (entry->preempt_count) 497 if (entry->lock_depth < 0)
497 return trace_seq_printf(s, "%x", entry->preempt_count); 498 return trace_seq_putc(s, '.');
498 return trace_seq_putc(s, '.'); 499
500 return trace_seq_printf(s, "%d", entry->lock_depth);
499} 501}
500 502
501static int 503static int
@@ -883,7 +885,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
883 trace_assign_type(field, iter->ent); 885 trace_assign_type(field, iter->ent);
884 886
885 if (!S) 887 if (!S)
886 task_state_char(field->prev_state); 888 S = task_state_char(field->prev_state);
887 T = task_state_char(field->next_state); 889 T = task_state_char(field->next_state);
888 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", 890 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
889 field->prev_pid, 891 field->prev_pid,
@@ -918,7 +920,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
918 trace_assign_type(field, iter->ent); 920 trace_assign_type(field, iter->ent);
919 921
920 if (!S) 922 if (!S)
921 task_state_char(field->prev_state); 923 S = task_state_char(field->prev_state);
922 T = task_state_char(field->next_state); 924 T = task_state_char(field->next_state);
923 925
924 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); 926 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0f6facb050a1..8504ac71e4e8 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -296,14 +296,14 @@ static const struct file_operations stack_trace_fops = {
296 296
297int 297int
298stack_trace_sysctl(struct ctl_table *table, int write, 298stack_trace_sysctl(struct ctl_table *table, int write,
299 struct file *file, void __user *buffer, size_t *lenp, 299 void __user *buffer, size_t *lenp,
300 loff_t *ppos) 300 loff_t *ppos)
301{ 301{
302 int ret; 302 int ret;
303 303
304 mutex_lock(&stack_sysctl_mutex); 304 mutex_lock(&stack_sysctl_mutex);
305 305
306 ret = proc_dointvec(table, write, file, buffer, lenp, ppos); 306 ret = proc_dointvec(table, write, buffer, lenp, ppos);
307 307
308 if (ret || !write || 308 if (ret || !write ||
309 (last_stack_tracer_enabled == !!stack_tracer_enabled)) 309 (last_stack_tracer_enabled == !!stack_tracer_enabled))
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7a3550cf2597..527e17eae575 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -2,7 +2,7 @@
2#include <trace/events/syscalls.h> 2#include <trace/events/syscalls.h>
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/ftrace.h> 4#include <linux/ftrace.h>
5#include <linux/perf_counter.h> 5#include <linux/perf_event.h>
6#include <asm/syscall.h> 6#include <asm/syscall.h>
7 7
8#include "trace_output.h" 8#include "trace_output.h"
@@ -166,7 +166,7 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
166 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" 166 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
167 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", 167 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
168 SYSCALL_FIELD(int, nr), 168 SYSCALL_FIELD(int, nr),
169 SYSCALL_FIELD(unsigned long, ret)); 169 SYSCALL_FIELD(long, ret));
170 if (!ret) 170 if (!ret)
171 return 0; 171 return 0;
172 172
@@ -212,7 +212,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
212 if (ret) 212 if (ret)
213 return ret; 213 return ret;
214 214
215 ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0, 215 ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
216 FILTER_OTHER); 216 FILTER_OTHER);
217 217
218 return ret; 218 return ret;
@@ -433,7 +433,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
433 rec->nr = syscall_nr; 433 rec->nr = syscall_nr;
434 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 434 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
435 (unsigned long *)&rec->args); 435 (unsigned long *)&rec->args);
436 perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size); 436 perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
437 437
438end: 438end:
439 local_irq_restore(flags); 439 local_irq_restore(flags);
@@ -532,7 +532,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
532 rec->nr = syscall_nr; 532 rec->nr = syscall_nr;
533 rec->ret = syscall_get_return_value(current, regs); 533 rec->ret = syscall_get_return_value(current, regs);
534 534
535 perf_tpcounter_event(sys_data->exit_id, 0, 1, rec, size); 535 perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
536 536
537end: 537end:
538 local_irq_restore(flags); 538 local_irq_restore(flags);
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 9489a0a9b1be..cc89be5bc0f8 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -48,7 +48,7 @@ static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
48 48
49/* 49/*
50 * Note about RCU : 50 * Note about RCU :
51 * It is used to to delay the free of multiple probes array until a quiescent 51 * It is used to delay the free of multiple probes array until a quiescent
52 * state is reached. 52 * state is reached.
53 * Tracepoint entries modifications are protected by the tracepoints_mutex. 53 * Tracepoint entries modifications are protected by the tracepoints_mutex.
54 */ 54 */
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 0314501688b9..419209893d87 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -4,7 +4,6 @@
4 */ 4 */
5 5
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/utsname.h>
8#include <linux/mman.h> 7#include <linux/mman.h>
9#include <linux/notifier.h> 8#include <linux/notifier.h>
10#include <linux/reboot.h> 9#include <linux/reboot.h>
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 92359cc747a7..69eae358a726 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -42,14 +42,14 @@ static void put_uts(ctl_table *table, int write, void *which)
42 * Special case of dostring for the UTS structure. This has locks 42 * Special case of dostring for the UTS structure. This has locks
43 * to observe. Should this be in kernel/sys.c ???? 43 * to observe. Should this be in kernel/sys.c ????
44 */ 44 */
45static int proc_do_uts_string(ctl_table *table, int write, struct file *filp, 45static int proc_do_uts_string(ctl_table *table, int write,
46 void __user *buffer, size_t *lenp, loff_t *ppos) 46 void __user *buffer, size_t *lenp, loff_t *ppos)
47{ 47{
48 struct ctl_table uts_table; 48 struct ctl_table uts_table;
49 int r; 49 int r;
50 memcpy(&uts_table, table, sizeof(uts_table)); 50 memcpy(&uts_table, table, sizeof(uts_table));
51 uts_table.data = get_uts(table, write); 51 uts_table.data = get_uts(table, write);
52 r = proc_dostring(&uts_table,write,filp,buffer,lenp, ppos); 52 r = proc_dostring(&uts_table,write,buffer,lenp, ppos);
53 put_uts(table, write, uts_table.data); 53 put_uts(table, write, uts_table.data);
54 return r; 54 return r;
55} 55}