Diffstat (limited to 'kernel')
101 files changed, 9352 insertions, 8215 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 3d9c7e27e3f9..776ffed1556d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -58,7 +58,6 @@ obj-$(CONFIG_KEXEC) += kexec.o
 obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
 obj-$(CONFIG_COMPAT) += compat.o
 obj-$(CONFIG_CGROUPS) += cgroup.o
-obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
 obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o
 obj-$(CONFIG_CPUSETS) += cpuset.o
 obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
@@ -87,7 +86,6 @@ obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
-obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
@@ -96,7 +94,8 @@ obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
+obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/audit.c b/kernel/audit.c
index defc2e6f1e3b..5feed232be9d 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -855,18 +855,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		break;
 	}
 	case AUDIT_SIGNAL_INFO:
-		err = security_secid_to_secctx(audit_sig_sid, &ctx, &len);
-		if (err)
-			return err;
+		len = 0;
+		if (audit_sig_sid) {
+			err = security_secid_to_secctx(audit_sig_sid, &ctx, &len);
+			if (err)
+				return err;
+		}
 		sig_data = kmalloc(sizeof(*sig_data) + len, GFP_KERNEL);
 		if (!sig_data) {
-			security_release_secctx(ctx, len);
+			if (audit_sig_sid)
+				security_release_secctx(ctx, len);
 			return -ENOMEM;
 		}
 		sig_data->uid = audit_sig_uid;
 		sig_data->pid = audit_sig_pid;
-		memcpy(sig_data->ctx, ctx, len);
-		security_release_secctx(ctx, len);
+		if (audit_sig_sid) {
+			memcpy(sig_data->ctx, ctx, len);
+			security_release_secctx(ctx, len);
+		}
 		audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
 				 0, 0, sig_data, sizeof(*sig_data) + len);
 		kfree(sig_data);
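Editor's note: the AUDIT_SIGNAL_INFO change above only fetches (and later releases) an LSM security context when audit_sig_sid is non-zero, and sizes the netlink reply as a fixed header plus a variable-length context. The same "fixed header plus optional variable-length tail" allocation pattern is shown below as a standalone userspace sketch; struct sig_reply and build_reply are hypothetical names for illustration, not kernel APIs.

/*
 * Minimal sketch: a fixed header plus an optional, variable-length
 * payload carried in a C99 flexible array member.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sig_reply {
	unsigned int uid;
	unsigned int pid;
	char ctx[];			/* flexible array member: 0..len bytes */
};

static struct sig_reply *build_reply(unsigned int uid, unsigned int pid,
				     const char *ctx /* may be NULL */)
{
	size_t len = ctx ? strlen(ctx) : 0;	/* len stays 0 when there is no context */
	struct sig_reply *r = malloc(sizeof(*r) + len);

	if (!r)
		return NULL;
	r->uid = uid;
	r->pid = pid;
	if (ctx)			/* only copy when a context actually exists */
		memcpy(r->ctx, ctx, len);
	return r;
}

int main(void)
{
	struct sig_reply *with = build_reply(1000, 4242, "system_u:system_r:audit_t");
	struct sig_reply *without = build_reply(1000, 4242, NULL);

	if (with && without)
		printf("with ctx: pid=%u, without ctx: pid=%u\n", with->pid, without->pid);
	free(with);
	free(without);
	return 0;
}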
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 0e96dbc60ea9..cc7e87936cbc 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -45,8 +45,8 @@
 
 struct audit_watch {
 	atomic_t count;		/* reference count */
-	char *path;		/* insertion path */
 	dev_t dev;		/* associated superblock device */
+	char *path;		/* insertion path */
 	unsigned long ino;	/* associated inode number */
 	struct audit_parent *parent; /* associated parent */
 	struct list_head wlist;	/* entry in parent->watches list */
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 68d3c6a0ecd6..267e484f0198 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -168,12 +168,12 @@ struct audit_context {
 	int in_syscall;		/* 1 if task is in a syscall */
 	enum audit_state state, current_state;
 	unsigned int serial;	/* serial number for record */
-	struct timespec ctime;	/* time of syscall entry */
 	int major;		/* syscall number */
+	struct timespec ctime;	/* time of syscall entry */
 	unsigned long argv[4];	/* syscall arguments */
-	int return_valid;	/* return code is valid */
 	long return_code;	/* syscall return code */
 	u64 prio;
+	int return_valid;	/* return code is valid */
 	int name_count;
 	struct audit_names names[AUDIT_NAMES];
 	char * filterkey;	/* key for rule that triggered record */
@@ -198,8 +198,8 @@ struct audit_context {
 	char target_comm[TASK_COMM_LEN];
 
 	struct audit_tree_refs *trees, *first_trees;
-	int tree_count;
 	struct list_head killed_trees;
+	int tree_count;
 
 	int type;
 	union {
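Editor's note: the audit_watch and audit_context hunks above only reorder structure members; reorderings like this are typically about structure layout (padding and field placement) rather than behaviour. Below is a minimal, self-contained illustration of why member order can change a structure's size; the structs are hypothetical, not the audit ones.

/* The compiler inserts padding to satisfy alignment, so grouping members
 * by size can shrink a struct. Sizes assume a typical 64-bit ABI. */
#include <stdio.h>

struct badly_ordered {
	char flag;	/* 1 byte, then padding before the pointer */
	void *ptr;	/* usually 8-byte aligned */
	char flag2;	/* 1 byte, then tail padding */
};

struct well_ordered {
	void *ptr;
	char flag;
	char flag2;	/* the two chars share one padded tail region */
};

int main(void)
{
	printf("badly_ordered: %zu bytes\n", sizeof(struct badly_ordered));
	printf("well_ordered:  %zu bytes\n", sizeof(struct well_ordered));
	return 0;
}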
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c7ece8f027f2..0249f4be9b5c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/cgroup.h>
+#include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/kernel.h>
@@ -48,6 +49,8 @@
 #include <linux/namei.h>
 #include <linux/smp_lock.h>
 #include <linux/pid_namespace.h>
+#include <linux/idr.h>
+#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
 
 #include <asm/atomic.h>
 
@@ -60,6 +63,8 @@ static struct cgroup_subsys *subsys[] = {
 #include <linux/cgroup_subsys.h>
 };
 
+#define MAX_CGROUP_ROOT_NAMELEN 64
+
 /*
  * A cgroupfs_root represents the root of a cgroup hierarchy,
  * and may be associated with a superblock to form an active
@@ -74,6 +79,9 @@ struct cgroupfs_root {
 	 */
 	unsigned long subsys_bits;
 
+	/* Unique id for this hierarchy. */
+	int hierarchy_id;
+
 	/* The bitmask of subsystems currently attached to this hierarchy */
 	unsigned long actual_subsys_bits;
 
@@ -94,6 +102,9 @@ struct cgroupfs_root {
 
 	/* The path to use for release notifications. */
 	char release_agent_path[PATH_MAX];
+
+	/* The name for this hierarchy - may be empty */
+	char name[MAX_CGROUP_ROOT_NAMELEN];
 };
 
 /*
@@ -141,6 +152,10 @@ struct css_id {
 static LIST_HEAD(roots);
 static int root_count;
 
+static DEFINE_IDA(hierarchy_ida);
+static int next_hierarchy_id;
+static DEFINE_SPINLOCK(hierarchy_id_lock);
+
 /* dummytop is a shorthand for the dummy hierarchy's top cgroup */
 #define dummytop (&rootnode.top_cgroup)
 
@@ -201,6 +216,7 @@ struct cg_cgroup_link {
 	 * cgroup, anchored on cgroup->css_sets
 	 */
 	struct list_head cgrp_link_list;
+	struct cgroup *cgrp;
 	/*
 	 * List running through cg_cgroup_links pointing at a
 	 * single css_set object, anchored on css_set->cg_links
@@ -227,8 +243,11 @@ static int cgroup_subsys_init_idr(struct cgroup_subsys *ss);
 static DEFINE_RWLOCK(css_set_lock);
 static int css_set_count;
 
-/* hash table for cgroup groups. This improves the performance to
- * find an existing css_set */
+/*
+ * hash table for cgroup groups. This improves the performance to find
+ * an existing css_set. This hash doesn't (currently) take into
+ * account cgroups in empty hierarchies.
+ */
 #define CSS_SET_HASH_BITS	7
 #define CSS_SET_TABLE_SIZE	(1 << CSS_SET_HASH_BITS)
 static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];
@@ -248,48 +267,22 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
 	return &css_set_table[index];
 }
 
+static void free_css_set_rcu(struct rcu_head *obj)
+{
+	struct css_set *cg = container_of(obj, struct css_set, rcu_head);
+	kfree(cg);
+}
+
 /* We don't maintain the lists running through each css_set to its
  * task until after the first call to cgroup_iter_start(). This
  * reduces the fork()/exit() overhead for people who have cgroups
  * compiled into their kernel but not actually in use */
 static int use_task_css_set_links __read_mostly;
 
-/* When we create or destroy a css_set, the operation simply
- * takes/releases a reference count on all the cgroups referenced
- * by subsystems in this css_set. This can end up multiple-counting
- * some cgroups, but that's OK - the ref-count is just a
- * busy/not-busy indicator; ensuring that we only count each cgroup
- * once would require taking a global lock to ensure that no
- * subsystems moved between hierarchies while we were doing so.
- *
- * Possible TODO: decide at boot time based on the number of
- * registered subsystems and the number of CPUs or NUMA nodes whether
- * it's better for performance to ref-count every subsystem, or to
- * take a global lock and only add one ref count to each hierarchy.
- */
-
-/*
- * unlink a css_set from the list and free it
- */
-static void unlink_css_set(struct css_set *cg)
+static void __put_css_set(struct css_set *cg, int taskexit)
 {
 	struct cg_cgroup_link *link;
 	struct cg_cgroup_link *saved_link;
-
-	hlist_del(&cg->hlist);
-	css_set_count--;
-
-	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
-				 cg_link_list) {
-		list_del(&link->cg_link_list);
-		list_del(&link->cgrp_link_list);
-		kfree(link);
-	}
-}
-
-static void __put_css_set(struct css_set *cg, int taskexit)
-{
-	int i;
 	/*
 	 * Ensure that the refcount doesn't hit zero while any readers
 	 * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -302,21 +295,28 @@ static void __put_css_set(struct css_set *cg, int taskexit)
 		write_unlock(&css_set_lock);
 		return;
 	}
-	unlink_css_set(cg);
-	write_unlock(&css_set_lock);
 
-	rcu_read_lock();
-	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-		struct cgroup *cgrp = rcu_dereference(cg->subsys[i]->cgroup);
+	/* This css_set is dead. unlink it and release cgroup refcounts */
+	hlist_del(&cg->hlist);
+	css_set_count--;
+
+	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
+				 cg_link_list) {
+		struct cgroup *cgrp = link->cgrp;
+		list_del(&link->cg_link_list);
+		list_del(&link->cgrp_link_list);
 		if (atomic_dec_and_test(&cgrp->count) &&
 		    notify_on_release(cgrp)) {
 			if (taskexit)
 				set_bit(CGRP_RELEASABLE, &cgrp->flags);
 			check_for_release(cgrp);
 		}
+
+		kfree(link);
 	}
-	rcu_read_unlock();
-	kfree(cg);
+
+	write_unlock(&css_set_lock);
+	call_rcu(&cg->rcu_head, free_css_set_rcu);
 }
 
 /*
@@ -338,6 +338,78 @@ static inline void put_css_set_taskexit(struct css_set *cg) | |||
338 | } | 338 | } |
339 | 339 | ||
340 | /* | 340 | /* |
341 | * compare_css_sets - helper function for find_existing_css_set(). | ||
342 | * @cg: candidate css_set being tested | ||
343 | * @old_cg: existing css_set for a task | ||
344 | * @new_cgrp: cgroup that's being entered by the task | ||
345 | * @template: desired set of css pointers in css_set (pre-calculated) | ||
346 | * | ||
347 | * Returns true if "cg" matches "old_cg" except for the hierarchy | ||
348 | * which "new_cgrp" belongs to, for which it should match "new_cgrp". | ||
349 | */ | ||
350 | static bool compare_css_sets(struct css_set *cg, | ||
351 | struct css_set *old_cg, | ||
352 | struct cgroup *new_cgrp, | ||
353 | struct cgroup_subsys_state *template[]) | ||
354 | { | ||
355 | struct list_head *l1, *l2; | ||
356 | |||
357 | if (memcmp(template, cg->subsys, sizeof(cg->subsys))) { | ||
358 | /* Not all subsystems matched */ | ||
359 | return false; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * Compare cgroup pointers in order to distinguish between | ||
364 | * different cgroups in heirarchies with no subsystems. We | ||
365 | * could get by with just this check alone (and skip the | ||
366 | * memcmp above) but on most setups the memcmp check will | ||
367 | * avoid the need for this more expensive check on almost all | ||
368 | * candidates. | ||
369 | */ | ||
370 | |||
371 | l1 = &cg->cg_links; | ||
372 | l2 = &old_cg->cg_links; | ||
373 | while (1) { | ||
374 | struct cg_cgroup_link *cgl1, *cgl2; | ||
375 | struct cgroup *cg1, *cg2; | ||
376 | |||
377 | l1 = l1->next; | ||
378 | l2 = l2->next; | ||
379 | /* See if we reached the end - both lists are equal length. */ | ||
380 | if (l1 == &cg->cg_links) { | ||
381 | BUG_ON(l2 != &old_cg->cg_links); | ||
382 | break; | ||
383 | } else { | ||
384 | BUG_ON(l2 == &old_cg->cg_links); | ||
385 | } | ||
386 | /* Locate the cgroups associated with these links. */ | ||
387 | cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list); | ||
388 | cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list); | ||
389 | cg1 = cgl1->cgrp; | ||
390 | cg2 = cgl2->cgrp; | ||
391 | /* Hierarchies should be linked in the same order. */ | ||
392 | BUG_ON(cg1->root != cg2->root); | ||
393 | |||
394 | /* | ||
395 | * If this hierarchy is the hierarchy of the cgroup | ||
396 | * that's changing, then we need to check that this | ||
397 | * css_set points to the new cgroup; if it's any other | ||
398 | * hierarchy, then this css_set should point to the | ||
399 | * same cgroup as the old css_set. | ||
400 | */ | ||
401 | if (cg1->root == new_cgrp->root) { | ||
402 | if (cg1 != new_cgrp) | ||
403 | return false; | ||
404 | } else { | ||
405 | if (cg1 != cg2) | ||
406 | return false; | ||
407 | } | ||
408 | } | ||
409 | return true; | ||
410 | } | ||
411 | |||
412 | /* | ||
341 | * find_existing_css_set() is a helper for | 413 | * find_existing_css_set() is a helper for |
342 | * find_css_set(), and checks to see whether an existing | 414 | * find_css_set(), and checks to see whether an existing |
343 | * css_set is suitable. | 415 | * css_set is suitable. |
@@ -378,10 +450,11 @@ static struct css_set *find_existing_css_set( | |||
378 | 450 | ||
379 | hhead = css_set_hash(template); | 451 | hhead = css_set_hash(template); |
380 | hlist_for_each_entry(cg, node, hhead, hlist) { | 452 | hlist_for_each_entry(cg, node, hhead, hlist) { |
381 | if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) { | 453 | if (!compare_css_sets(cg, oldcg, cgrp, template)) |
382 | /* All subsystems matched */ | 454 | continue; |
383 | return cg; | 455 | |
384 | } | 456 | /* This css_set matches what we need */ |
457 | return cg; | ||
385 | } | 458 | } |
386 | 459 | ||
387 | /* No existing cgroup group matched */ | 460 | /* No existing cgroup group matched */ |
@@ -435,8 +508,14 @@ static void link_css_set(struct list_head *tmp_cg_links, | |||
435 | link = list_first_entry(tmp_cg_links, struct cg_cgroup_link, | 508 | link = list_first_entry(tmp_cg_links, struct cg_cgroup_link, |
436 | cgrp_link_list); | 509 | cgrp_link_list); |
437 | link->cg = cg; | 510 | link->cg = cg; |
511 | link->cgrp = cgrp; | ||
512 | atomic_inc(&cgrp->count); | ||
438 | list_move(&link->cgrp_link_list, &cgrp->css_sets); | 513 | list_move(&link->cgrp_link_list, &cgrp->css_sets); |
439 | list_add(&link->cg_link_list, &cg->cg_links); | 514 | /* |
515 | * Always add links to the tail of the list so that the list | ||
516 | * is sorted by order of hierarchy creation | ||
517 | */ | ||
518 | list_add_tail(&link->cg_link_list, &cg->cg_links); | ||
440 | } | 519 | } |
441 | 520 | ||
442 | /* | 521 | /* |
@@ -451,11 +530,11 @@ static struct css_set *find_css_set( | |||
451 | { | 530 | { |
452 | struct css_set *res; | 531 | struct css_set *res; |
453 | struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; | 532 | struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; |
454 | int i; | ||
455 | 533 | ||
456 | struct list_head tmp_cg_links; | 534 | struct list_head tmp_cg_links; |
457 | 535 | ||
458 | struct hlist_head *hhead; | 536 | struct hlist_head *hhead; |
537 | struct cg_cgroup_link *link; | ||
459 | 538 | ||
460 | /* First see if we already have a cgroup group that matches | 539 | /* First see if we already have a cgroup group that matches |
461 | * the desired set */ | 540 | * the desired set */ |
@@ -489,20 +568,12 @@ static struct css_set *find_css_set( | |||
489 | 568 | ||
490 | write_lock(&css_set_lock); | 569 | write_lock(&css_set_lock); |
491 | /* Add reference counts and links from the new css_set. */ | 570 | /* Add reference counts and links from the new css_set. */ |
492 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | 571 | list_for_each_entry(link, &oldcg->cg_links, cg_link_list) { |
493 | struct cgroup *cgrp = res->subsys[i]->cgroup; | 572 | struct cgroup *c = link->cgrp; |
494 | struct cgroup_subsys *ss = subsys[i]; | 573 | if (c->root == cgrp->root) |
495 | atomic_inc(&cgrp->count); | 574 | c = cgrp; |
496 | /* | 575 | link_css_set(&tmp_cg_links, res, c); |
497 | * We want to add a link once per cgroup, so we | ||
498 | * only do it for the first subsystem in each | ||
499 | * hierarchy | ||
500 | */ | ||
501 | if (ss->root->subsys_list.next == &ss->sibling) | ||
502 | link_css_set(&tmp_cg_links, res, cgrp); | ||
503 | } | 576 | } |
504 | if (list_empty(&rootnode.subsys_list)) | ||
505 | link_css_set(&tmp_cg_links, res, dummytop); | ||
506 | 577 | ||
507 | BUG_ON(!list_empty(&tmp_cg_links)); | 578 | BUG_ON(!list_empty(&tmp_cg_links)); |
508 | 579 | ||
@@ -518,6 +589,41 @@ static struct css_set *find_css_set( | |||
518 | } | 589 | } |
519 | 590 | ||
520 | /* | 591 | /* |
592 | * Return the cgroup for "task" from the given hierarchy. Must be | ||
593 | * called with cgroup_mutex held. | ||
594 | */ | ||
595 | static struct cgroup *task_cgroup_from_root(struct task_struct *task, | ||
596 | struct cgroupfs_root *root) | ||
597 | { | ||
598 | struct css_set *css; | ||
599 | struct cgroup *res = NULL; | ||
600 | |||
601 | BUG_ON(!mutex_is_locked(&cgroup_mutex)); | ||
602 | read_lock(&css_set_lock); | ||
603 | /* | ||
604 | * No need to lock the task - since we hold cgroup_mutex the | ||
605 | * task can't change groups, so the only thing that can happen | ||
606 | * is that it exits and its css is set back to init_css_set. | ||
607 | */ | ||
608 | css = task->cgroups; | ||
609 | if (css == &init_css_set) { | ||
610 | res = &root->top_cgroup; | ||
611 | } else { | ||
612 | struct cg_cgroup_link *link; | ||
613 | list_for_each_entry(link, &css->cg_links, cg_link_list) { | ||
614 | struct cgroup *c = link->cgrp; | ||
615 | if (c->root == root) { | ||
616 | res = c; | ||
617 | break; | ||
618 | } | ||
619 | } | ||
620 | } | ||
621 | read_unlock(&css_set_lock); | ||
622 | BUG_ON(!res); | ||
623 | return res; | ||
624 | } | ||
625 | |||
626 | /* | ||
521 | * There is one global cgroup mutex. We also require taking | 627 | * There is one global cgroup mutex. We also require taking |
522 | * task_lock() when dereferencing a task's cgroup subsys pointers. | 628 | * task_lock() when dereferencing a task's cgroup subsys pointers. |
523 | * See "The task_lock() exception", at the end of this comment. | 629 | * See "The task_lock() exception", at the end of this comment. |
@@ -596,8 +702,8 @@ void cgroup_unlock(void) | |||
596 | static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode); | 702 | static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode); |
597 | static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); | 703 | static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); |
598 | static int cgroup_populate_dir(struct cgroup *cgrp); | 704 | static int cgroup_populate_dir(struct cgroup *cgrp); |
599 | static struct inode_operations cgroup_dir_inode_operations; | 705 | static const struct inode_operations cgroup_dir_inode_operations; |
600 | static struct file_operations proc_cgroupstats_operations; | 706 | static const struct file_operations proc_cgroupstats_operations; |
601 | 707 | ||
602 | static struct backing_dev_info cgroup_backing_dev_info = { | 708 | static struct backing_dev_info cgroup_backing_dev_info = { |
603 | .name = "cgroup", | 709 | .name = "cgroup", |
@@ -677,6 +783,12 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) | |||
677 | */ | 783 | */ |
678 | deactivate_super(cgrp->root->sb); | 784 | deactivate_super(cgrp->root->sb); |
679 | 785 | ||
786 | /* | ||
787 | * if we're getting rid of the cgroup, refcount should ensure | ||
788 | * that there are no pidlists left. | ||
789 | */ | ||
790 | BUG_ON(!list_empty(&cgrp->pidlists)); | ||
791 | |||
680 | call_rcu(&cgrp->rcu_head, free_cgroup_rcu); | 792 | call_rcu(&cgrp->rcu_head, free_cgroup_rcu); |
681 | } | 793 | } |
682 | iput(inode); | 794 | iput(inode); |
@@ -841,6 +953,8 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
841 | seq_puts(seq, ",noprefix"); | 953 | seq_puts(seq, ",noprefix"); |
842 | if (strlen(root->release_agent_path)) | 954 | if (strlen(root->release_agent_path)) |
843 | seq_printf(seq, ",release_agent=%s", root->release_agent_path); | 955 | seq_printf(seq, ",release_agent=%s", root->release_agent_path); |
956 | if (strlen(root->name)) | ||
957 | seq_printf(seq, ",name=%s", root->name); | ||
844 | mutex_unlock(&cgroup_mutex); | 958 | mutex_unlock(&cgroup_mutex); |
845 | return 0; | 959 | return 0; |
846 | } | 960 | } |
@@ -849,6 +963,12 @@ struct cgroup_sb_opts { | |||
849 | unsigned long subsys_bits; | 963 | unsigned long subsys_bits; |
850 | unsigned long flags; | 964 | unsigned long flags; |
851 | char *release_agent; | 965 | char *release_agent; |
966 | char *name; | ||
967 | /* User explicitly requested empty subsystem */ | ||
968 | bool none; | ||
969 | |||
970 | struct cgroupfs_root *new_root; | ||
971 | |||
852 | }; | 972 | }; |
853 | 973 | ||
854 | /* Convert a hierarchy specifier into a bitmask of subsystems and | 974 | /* Convert a hierarchy specifier into a bitmask of subsystems and |
@@ -863,9 +983,7 @@ static int parse_cgroupfs_options(char *data, | |||
863 | mask = ~(1UL << cpuset_subsys_id); | 983 | mask = ~(1UL << cpuset_subsys_id); |
864 | #endif | 984 | #endif |
865 | 985 | ||
866 | opts->subsys_bits = 0; | 986 | memset(opts, 0, sizeof(*opts)); |
867 | opts->flags = 0; | ||
868 | opts->release_agent = NULL; | ||
869 | 987 | ||
870 | while ((token = strsep(&o, ",")) != NULL) { | 988 | while ((token = strsep(&o, ",")) != NULL) { |
871 | if (!*token) | 989 | if (!*token) |
@@ -879,17 +997,42 @@ static int parse_cgroupfs_options(char *data, | |||
879 | if (!ss->disabled) | 997 | if (!ss->disabled) |
880 | opts->subsys_bits |= 1ul << i; | 998 | opts->subsys_bits |= 1ul << i; |
881 | } | 999 | } |
1000 | } else if (!strcmp(token, "none")) { | ||
1001 | /* Explicitly have no subsystems */ | ||
1002 | opts->none = true; | ||
882 | } else if (!strcmp(token, "noprefix")) { | 1003 | } else if (!strcmp(token, "noprefix")) { |
883 | set_bit(ROOT_NOPREFIX, &opts->flags); | 1004 | set_bit(ROOT_NOPREFIX, &opts->flags); |
884 | } else if (!strncmp(token, "release_agent=", 14)) { | 1005 | } else if (!strncmp(token, "release_agent=", 14)) { |
885 | /* Specifying two release agents is forbidden */ | 1006 | /* Specifying two release agents is forbidden */ |
886 | if (opts->release_agent) | 1007 | if (opts->release_agent) |
887 | return -EINVAL; | 1008 | return -EINVAL; |
888 | opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL); | 1009 | opts->release_agent = |
1010 | kstrndup(token + 14, PATH_MAX, GFP_KERNEL); | ||
889 | if (!opts->release_agent) | 1011 | if (!opts->release_agent) |
890 | return -ENOMEM; | 1012 | return -ENOMEM; |
891 | strncpy(opts->release_agent, token + 14, PATH_MAX - 1); | 1013 | } else if (!strncmp(token, "name=", 5)) { |
892 | opts->release_agent[PATH_MAX - 1] = 0; | 1014 | int i; |
1015 | const char *name = token + 5; | ||
1016 | /* Can't specify an empty name */ | ||
1017 | if (!strlen(name)) | ||
1018 | return -EINVAL; | ||
1019 | /* Must match [\w.-]+ */ | ||
1020 | for (i = 0; i < strlen(name); i++) { | ||
1021 | char c = name[i]; | ||
1022 | if (isalnum(c)) | ||
1023 | continue; | ||
1024 | if ((c == '.') || (c == '-') || (c == '_')) | ||
1025 | continue; | ||
1026 | return -EINVAL; | ||
1027 | } | ||
1028 | /* Specifying two names is forbidden */ | ||
1029 | if (opts->name) | ||
1030 | return -EINVAL; | ||
1031 | opts->name = kstrndup(name, | ||
1032 | MAX_CGROUP_ROOT_NAMELEN, | ||
1033 | GFP_KERNEL); | ||
1034 | if (!opts->name) | ||
1035 | return -ENOMEM; | ||
893 | } else { | 1036 | } else { |
894 | struct cgroup_subsys *ss; | 1037 | struct cgroup_subsys *ss; |
895 | int i; | 1038 | int i; |
@@ -906,6 +1049,8 @@ static int parse_cgroupfs_options(char *data, | |||
906 | } | 1049 | } |
907 | } | 1050 | } |
908 | 1051 | ||
1052 | /* Consistency checks */ | ||
1053 | |||
909 | /* | 1054 | /* |
910 | * Option noprefix was introduced just for backward compatibility | 1055 | * Option noprefix was introduced just for backward compatibility |
911 | * with the old cpuset, so we allow noprefix only if mounting just | 1056 | * with the old cpuset, so we allow noprefix only if mounting just |
@@ -915,8 +1060,16 @@ static int parse_cgroupfs_options(char *data, | |||
915 | (opts->subsys_bits & mask)) | 1060 | (opts->subsys_bits & mask)) |
916 | return -EINVAL; | 1061 | return -EINVAL; |
917 | 1062 | ||
918 | /* We can't have an empty hierarchy */ | 1063 | |
919 | if (!opts->subsys_bits) | 1064 | /* Can't specify "none" and some subsystems */ |
1065 | if (opts->subsys_bits && opts->none) | ||
1066 | return -EINVAL; | ||
1067 | |||
1068 | /* | ||
1069 | * We either have to specify by name or by subsystems. (So all | ||
1070 | * empty hierarchies must have a name). | ||
1071 | */ | ||
1072 | if (!opts->subsys_bits && !opts->name) | ||
920 | return -EINVAL; | 1073 | return -EINVAL; |
921 | 1074 | ||
922 | return 0; | 1075 | return 0; |
@@ -944,6 +1097,12 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) | |||
944 | goto out_unlock; | 1097 | goto out_unlock; |
945 | } | 1098 | } |
946 | 1099 | ||
1100 | /* Don't allow name to change at remount */ | ||
1101 | if (opts.name && strcmp(opts.name, root->name)) { | ||
1102 | ret = -EINVAL; | ||
1103 | goto out_unlock; | ||
1104 | } | ||
1105 | |||
947 | ret = rebind_subsystems(root, opts.subsys_bits); | 1106 | ret = rebind_subsystems(root, opts.subsys_bits); |
948 | if (ret) | 1107 | if (ret) |
949 | goto out_unlock; | 1108 | goto out_unlock; |
@@ -955,13 +1114,14 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) | |||
955 | strcpy(root->release_agent_path, opts.release_agent); | 1114 | strcpy(root->release_agent_path, opts.release_agent); |
956 | out_unlock: | 1115 | out_unlock: |
957 | kfree(opts.release_agent); | 1116 | kfree(opts.release_agent); |
1117 | kfree(opts.name); | ||
958 | mutex_unlock(&cgroup_mutex); | 1118 | mutex_unlock(&cgroup_mutex); |
959 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | 1119 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); |
960 | unlock_kernel(); | 1120 | unlock_kernel(); |
961 | return ret; | 1121 | return ret; |
962 | } | 1122 | } |
963 | 1123 | ||
964 | static struct super_operations cgroup_ops = { | 1124 | static const struct super_operations cgroup_ops = { |
965 | .statfs = simple_statfs, | 1125 | .statfs = simple_statfs, |
966 | .drop_inode = generic_delete_inode, | 1126 | .drop_inode = generic_delete_inode, |
967 | .show_options = cgroup_show_options, | 1127 | .show_options = cgroup_show_options, |
@@ -974,9 +1134,10 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) | |||
974 | INIT_LIST_HEAD(&cgrp->children); | 1134 | INIT_LIST_HEAD(&cgrp->children); |
975 | INIT_LIST_HEAD(&cgrp->css_sets); | 1135 | INIT_LIST_HEAD(&cgrp->css_sets); |
976 | INIT_LIST_HEAD(&cgrp->release_list); | 1136 | INIT_LIST_HEAD(&cgrp->release_list); |
977 | INIT_LIST_HEAD(&cgrp->pids_list); | 1137 | INIT_LIST_HEAD(&cgrp->pidlists); |
978 | init_rwsem(&cgrp->pids_mutex); | 1138 | mutex_init(&cgrp->pidlist_mutex); |
979 | } | 1139 | } |
1140 | |||
980 | static void init_cgroup_root(struct cgroupfs_root *root) | 1141 | static void init_cgroup_root(struct cgroupfs_root *root) |
981 | { | 1142 | { |
982 | struct cgroup *cgrp = &root->top_cgroup; | 1143 | struct cgroup *cgrp = &root->top_cgroup; |
@@ -988,33 +1149,106 @@ static void init_cgroup_root(struct cgroupfs_root *root) | |||
988 | init_cgroup_housekeeping(cgrp); | 1149 | init_cgroup_housekeeping(cgrp); |
989 | } | 1150 | } |
990 | 1151 | ||
1152 | static bool init_root_id(struct cgroupfs_root *root) | ||
1153 | { | ||
1154 | int ret = 0; | ||
1155 | |||
1156 | do { | ||
1157 | if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL)) | ||
1158 | return false; | ||
1159 | spin_lock(&hierarchy_id_lock); | ||
1160 | /* Try to allocate the next unused ID */ | ||
1161 | ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id, | ||
1162 | &root->hierarchy_id); | ||
1163 | if (ret == -ENOSPC) | ||
1164 | /* Try again starting from 0 */ | ||
1165 | ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id); | ||
1166 | if (!ret) { | ||
1167 | next_hierarchy_id = root->hierarchy_id + 1; | ||
1168 | } else if (ret != -EAGAIN) { | ||
1169 | /* Can only get here if the 31-bit IDR is full ... */ | ||
1170 | BUG_ON(ret); | ||
1171 | } | ||
1172 | spin_unlock(&hierarchy_id_lock); | ||
1173 | } while (ret); | ||
1174 | return true; | ||
1175 | } | ||
1176 | |||
991 | static int cgroup_test_super(struct super_block *sb, void *data) | 1177 | static int cgroup_test_super(struct super_block *sb, void *data) |
992 | { | 1178 | { |
993 | struct cgroupfs_root *new = data; | 1179 | struct cgroup_sb_opts *opts = data; |
994 | struct cgroupfs_root *root = sb->s_fs_info; | 1180 | struct cgroupfs_root *root = sb->s_fs_info; |
995 | 1181 | ||
996 | /* First check subsystems */ | 1182 | /* If we asked for a name then it must match */ |
997 | if (new->subsys_bits != root->subsys_bits) | 1183 | if (opts->name && strcmp(opts->name, root->name)) |
998 | return 0; | 1184 | return 0; |
999 | 1185 | ||
1000 | /* Next check flags */ | 1186 | /* |
1001 | if (new->flags != root->flags) | 1187 | * If we asked for subsystems (or explicitly for no |
1188 | * subsystems) then they must match | ||
1189 | */ | ||
1190 | if ((opts->subsys_bits || opts->none) | ||
1191 | && (opts->subsys_bits != root->subsys_bits)) | ||
1002 | return 0; | 1192 | return 0; |
1003 | 1193 | ||
1004 | return 1; | 1194 | return 1; |
1005 | } | 1195 | } |
1006 | 1196 | ||
1197 | static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) | ||
1198 | { | ||
1199 | struct cgroupfs_root *root; | ||
1200 | |||
1201 | if (!opts->subsys_bits && !opts->none) | ||
1202 | return NULL; | ||
1203 | |||
1204 | root = kzalloc(sizeof(*root), GFP_KERNEL); | ||
1205 | if (!root) | ||
1206 | return ERR_PTR(-ENOMEM); | ||
1207 | |||
1208 | if (!init_root_id(root)) { | ||
1209 | kfree(root); | ||
1210 | return ERR_PTR(-ENOMEM); | ||
1211 | } | ||
1212 | init_cgroup_root(root); | ||
1213 | |||
1214 | root->subsys_bits = opts->subsys_bits; | ||
1215 | root->flags = opts->flags; | ||
1216 | if (opts->release_agent) | ||
1217 | strcpy(root->release_agent_path, opts->release_agent); | ||
1218 | if (opts->name) | ||
1219 | strcpy(root->name, opts->name); | ||
1220 | return root; | ||
1221 | } | ||
1222 | |||
1223 | static void cgroup_drop_root(struct cgroupfs_root *root) | ||
1224 | { | ||
1225 | if (!root) | ||
1226 | return; | ||
1227 | |||
1228 | BUG_ON(!root->hierarchy_id); | ||
1229 | spin_lock(&hierarchy_id_lock); | ||
1230 | ida_remove(&hierarchy_ida, root->hierarchy_id); | ||
1231 | spin_unlock(&hierarchy_id_lock); | ||
1232 | kfree(root); | ||
1233 | } | ||
1234 | |||
1007 | static int cgroup_set_super(struct super_block *sb, void *data) | 1235 | static int cgroup_set_super(struct super_block *sb, void *data) |
1008 | { | 1236 | { |
1009 | int ret; | 1237 | int ret; |
1010 | struct cgroupfs_root *root = data; | 1238 | struct cgroup_sb_opts *opts = data; |
1239 | |||
1240 | /* If we don't have a new root, we can't set up a new sb */ | ||
1241 | if (!opts->new_root) | ||
1242 | return -EINVAL; | ||
1243 | |||
1244 | BUG_ON(!opts->subsys_bits && !opts->none); | ||
1011 | 1245 | ||
1012 | ret = set_anon_super(sb, NULL); | 1246 | ret = set_anon_super(sb, NULL); |
1013 | if (ret) | 1247 | if (ret) |
1014 | return ret; | 1248 | return ret; |
1015 | 1249 | ||
1016 | sb->s_fs_info = root; | 1250 | sb->s_fs_info = opts->new_root; |
1017 | root->sb = sb; | 1251 | opts->new_root->sb = sb; |
1018 | 1252 | ||
1019 | sb->s_blocksize = PAGE_CACHE_SIZE; | 1253 | sb->s_blocksize = PAGE_CACHE_SIZE; |
1020 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | 1254 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; |
@@ -1051,48 +1285,43 @@ static int cgroup_get_sb(struct file_system_type *fs_type, | |||
1051 | void *data, struct vfsmount *mnt) | 1285 | void *data, struct vfsmount *mnt) |
1052 | { | 1286 | { |
1053 | struct cgroup_sb_opts opts; | 1287 | struct cgroup_sb_opts opts; |
1288 | struct cgroupfs_root *root; | ||
1054 | int ret = 0; | 1289 | int ret = 0; |
1055 | struct super_block *sb; | 1290 | struct super_block *sb; |
1056 | struct cgroupfs_root *root; | 1291 | struct cgroupfs_root *new_root; |
1057 | struct list_head tmp_cg_links; | ||
1058 | 1292 | ||
1059 | /* First find the desired set of subsystems */ | 1293 | /* First find the desired set of subsystems */ |
1060 | ret = parse_cgroupfs_options(data, &opts); | 1294 | ret = parse_cgroupfs_options(data, &opts); |
1061 | if (ret) { | 1295 | if (ret) |
1062 | kfree(opts.release_agent); | 1296 | goto out_err; |
1063 | return ret; | ||
1064 | } | ||
1065 | |||
1066 | root = kzalloc(sizeof(*root), GFP_KERNEL); | ||
1067 | if (!root) { | ||
1068 | kfree(opts.release_agent); | ||
1069 | return -ENOMEM; | ||
1070 | } | ||
1071 | 1297 | ||
1072 | init_cgroup_root(root); | 1298 | /* |
1073 | root->subsys_bits = opts.subsys_bits; | 1299 | * Allocate a new cgroup root. We may not need it if we're |
1074 | root->flags = opts.flags; | 1300 | * reusing an existing hierarchy. |
1075 | if (opts.release_agent) { | 1301 | */ |
1076 | strcpy(root->release_agent_path, opts.release_agent); | 1302 | new_root = cgroup_root_from_opts(&opts); |
1077 | kfree(opts.release_agent); | 1303 | if (IS_ERR(new_root)) { |
1304 | ret = PTR_ERR(new_root); | ||
1305 | goto out_err; | ||
1078 | } | 1306 | } |
1307 | opts.new_root = new_root; | ||
1079 | 1308 | ||
1080 | sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root); | 1309 | /* Locate an existing or new sb for this hierarchy */ |
1081 | 1310 | sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts); | |
1082 | if (IS_ERR(sb)) { | 1311 | if (IS_ERR(sb)) { |
1083 | kfree(root); | 1312 | ret = PTR_ERR(sb); |
1084 | return PTR_ERR(sb); | 1313 | cgroup_drop_root(opts.new_root); |
1314 | goto out_err; | ||
1085 | } | 1315 | } |
1086 | 1316 | ||
1087 | if (sb->s_fs_info != root) { | 1317 | root = sb->s_fs_info; |
1088 | /* Reusing an existing superblock */ | 1318 | BUG_ON(!root); |
1089 | BUG_ON(sb->s_root == NULL); | 1319 | if (root == opts.new_root) { |
1090 | kfree(root); | 1320 | /* We used the new root structure, so this is a new hierarchy */ |
1091 | root = NULL; | 1321 | struct list_head tmp_cg_links; |
1092 | } else { | ||
1093 | /* New superblock */ | ||
1094 | struct cgroup *root_cgrp = &root->top_cgroup; | 1322 | struct cgroup *root_cgrp = &root->top_cgroup; |
1095 | struct inode *inode; | 1323 | struct inode *inode; |
1324 | struct cgroupfs_root *existing_root; | ||
1096 | int i; | 1325 | int i; |
1097 | 1326 | ||
1098 | BUG_ON(sb->s_root != NULL); | 1327 | BUG_ON(sb->s_root != NULL); |
@@ -1105,6 +1334,18 @@ static int cgroup_get_sb(struct file_system_type *fs_type, | |||
1105 | mutex_lock(&inode->i_mutex); | 1334 | mutex_lock(&inode->i_mutex); |
1106 | mutex_lock(&cgroup_mutex); | 1335 | mutex_lock(&cgroup_mutex); |
1107 | 1336 | ||
1337 | if (strlen(root->name)) { | ||
1338 | /* Check for name clashes with existing mounts */ | ||
1339 | for_each_active_root(existing_root) { | ||
1340 | if (!strcmp(existing_root->name, root->name)) { | ||
1341 | ret = -EBUSY; | ||
1342 | mutex_unlock(&cgroup_mutex); | ||
1343 | mutex_unlock(&inode->i_mutex); | ||
1344 | goto drop_new_super; | ||
1345 | } | ||
1346 | } | ||
1347 | } | ||
1348 | |||
1108 | /* | 1349 | /* |
1109 | * We're accessing css_set_count without locking | 1350 | * We're accessing css_set_count without locking |
1110 | * css_set_lock here, but that's OK - it can only be | 1351 | * css_set_lock here, but that's OK - it can only be |
@@ -1123,7 +1364,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type, | |||
1123 | if (ret == -EBUSY) { | 1364 | if (ret == -EBUSY) { |
1124 | mutex_unlock(&cgroup_mutex); | 1365 | mutex_unlock(&cgroup_mutex); |
1125 | mutex_unlock(&inode->i_mutex); | 1366 | mutex_unlock(&inode->i_mutex); |
1126 | goto free_cg_links; | 1367 | free_cg_links(&tmp_cg_links); |
1368 | goto drop_new_super; | ||
1127 | } | 1369 | } |
1128 | 1370 | ||
1129 | /* EBUSY should be the only error here */ | 1371 | /* EBUSY should be the only error here */ |
@@ -1155,17 +1397,27 @@ static int cgroup_get_sb(struct file_system_type *fs_type, | |||
1155 | BUG_ON(root->number_of_cgroups != 1); | 1397 | BUG_ON(root->number_of_cgroups != 1); |
1156 | 1398 | ||
1157 | cgroup_populate_dir(root_cgrp); | 1399 | cgroup_populate_dir(root_cgrp); |
1158 | mutex_unlock(&inode->i_mutex); | ||
1159 | mutex_unlock(&cgroup_mutex); | 1400 | mutex_unlock(&cgroup_mutex); |
1401 | mutex_unlock(&inode->i_mutex); | ||
1402 | } else { | ||
1403 | /* | ||
1404 | * We re-used an existing hierarchy - the new root (if | ||
1405 | * any) is not needed | ||
1406 | */ | ||
1407 | cgroup_drop_root(opts.new_root); | ||
1160 | } | 1408 | } |
1161 | 1409 | ||
1162 | simple_set_mnt(mnt, sb); | 1410 | simple_set_mnt(mnt, sb); |
1411 | kfree(opts.release_agent); | ||
1412 | kfree(opts.name); | ||
1163 | return 0; | 1413 | return 0; |
1164 | 1414 | ||
1165 | free_cg_links: | ||
1166 | free_cg_links(&tmp_cg_links); | ||
1167 | drop_new_super: | 1415 | drop_new_super: |
1168 | deactivate_locked_super(sb); | 1416 | deactivate_locked_super(sb); |
1417 | out_err: | ||
1418 | kfree(opts.release_agent); | ||
1419 | kfree(opts.name); | ||
1420 | |||
1169 | return ret; | 1421 | return ret; |
1170 | } | 1422 | } |
1171 | 1423 | ||
@@ -1211,7 +1463,7 @@ static void cgroup_kill_sb(struct super_block *sb) { | |||
1211 | mutex_unlock(&cgroup_mutex); | 1463 | mutex_unlock(&cgroup_mutex); |
1212 | 1464 | ||
1213 | kill_litter_super(sb); | 1465 | kill_litter_super(sb); |
1214 | kfree(root); | 1466 | cgroup_drop_root(root); |
1215 | } | 1467 | } |
1216 | 1468 | ||
1217 | static struct file_system_type cgroup_fs_type = { | 1469 | static struct file_system_type cgroup_fs_type = { |
@@ -1276,27 +1528,6 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) | |||
1276 | return 0; | 1528 | return 0; |
1277 | } | 1529 | } |
1278 | 1530 | ||
1279 | /* | ||
1280 | * Return the first subsystem attached to a cgroup's hierarchy, and | ||
1281 | * its subsystem id. | ||
1282 | */ | ||
1283 | |||
1284 | static void get_first_subsys(const struct cgroup *cgrp, | ||
1285 | struct cgroup_subsys_state **css, int *subsys_id) | ||
1286 | { | ||
1287 | const struct cgroupfs_root *root = cgrp->root; | ||
1288 | const struct cgroup_subsys *test_ss; | ||
1289 | BUG_ON(list_empty(&root->subsys_list)); | ||
1290 | test_ss = list_entry(root->subsys_list.next, | ||
1291 | struct cgroup_subsys, sibling); | ||
1292 | if (css) { | ||
1293 | *css = cgrp->subsys[test_ss->subsys_id]; | ||
1294 | BUG_ON(!*css); | ||
1295 | } | ||
1296 | if (subsys_id) | ||
1297 | *subsys_id = test_ss->subsys_id; | ||
1298 | } | ||
1299 | |||
1300 | /** | 1531 | /** |
1301 | * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp' | 1532 | * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp' |
1302 | * @cgrp: the cgroup the task is attaching to | 1533 | * @cgrp: the cgroup the task is attaching to |
@@ -1313,18 +1544,15 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | |||
1313 | struct css_set *cg; | 1544 | struct css_set *cg; |
1314 | struct css_set *newcg; | 1545 | struct css_set *newcg; |
1315 | struct cgroupfs_root *root = cgrp->root; | 1546 | struct cgroupfs_root *root = cgrp->root; |
1316 | int subsys_id; | ||
1317 | |||
1318 | get_first_subsys(cgrp, NULL, &subsys_id); | ||
1319 | 1547 | ||
1320 | /* Nothing to do if the task is already in that cgroup */ | 1548 | /* Nothing to do if the task is already in that cgroup */ |
1321 | oldcgrp = task_cgroup(tsk, subsys_id); | 1549 | oldcgrp = task_cgroup_from_root(tsk, root); |
1322 | if (cgrp == oldcgrp) | 1550 | if (cgrp == oldcgrp) |
1323 | return 0; | 1551 | return 0; |
1324 | 1552 | ||
1325 | for_each_subsys(root, ss) { | 1553 | for_each_subsys(root, ss) { |
1326 | if (ss->can_attach) { | 1554 | if (ss->can_attach) { |
1327 | retval = ss->can_attach(ss, cgrp, tsk); | 1555 | retval = ss->can_attach(ss, cgrp, tsk, false); |
1328 | if (retval) | 1556 | if (retval) |
1329 | return retval; | 1557 | return retval; |
1330 | } | 1558 | } |
@@ -1362,7 +1590,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | |||
1362 | 1590 | ||
1363 | for_each_subsys(root, ss) { | 1591 | for_each_subsys(root, ss) { |
1364 | if (ss->attach) | 1592 | if (ss->attach) |
1365 | ss->attach(ss, cgrp, oldcgrp, tsk); | 1593 | ss->attach(ss, cgrp, oldcgrp, tsk, false); |
1366 | } | 1594 | } |
1367 | set_bit(CGRP_RELEASABLE, &oldcgrp->flags); | 1595 | set_bit(CGRP_RELEASABLE, &oldcgrp->flags); |
1368 | synchronize_rcu(); | 1596 | synchronize_rcu(); |
@@ -1423,15 +1651,6 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid) | |||
1423 | return ret; | 1651 | return ret; |
1424 | } | 1652 | } |
1425 | 1653 | ||
1426 | /* The various types of files and directories in a cgroup file system */ | ||
1427 | enum cgroup_filetype { | ||
1428 | FILE_ROOT, | ||
1429 | FILE_DIR, | ||
1430 | FILE_TASKLIST, | ||
1431 | FILE_NOTIFY_ON_RELEASE, | ||
1432 | FILE_RELEASE_AGENT, | ||
1433 | }; | ||
1434 | |||
1435 | /** | 1654 | /** |
1436 | * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive. | 1655 | * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive. |
1437 | * @cgrp: the cgroup to be checked for liveness | 1656 | * @cgrp: the cgroup to be checked for liveness |
@@ -1491,14 +1710,13 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft, | |||
1491 | return -EFAULT; | 1710 | return -EFAULT; |
1492 | 1711 | ||
1493 | buffer[nbytes] = 0; /* nul-terminate */ | 1712 | buffer[nbytes] = 0; /* nul-terminate */ |
1494 | strstrip(buffer); | ||
1495 | if (cft->write_u64) { | 1713 | if (cft->write_u64) { |
1496 | u64 val = simple_strtoull(buffer, &end, 0); | 1714 | u64 val = simple_strtoull(strstrip(buffer), &end, 0); |
1497 | if (*end) | 1715 | if (*end) |
1498 | return -EINVAL; | 1716 | return -EINVAL; |
1499 | retval = cft->write_u64(cgrp, cft, val); | 1717 | retval = cft->write_u64(cgrp, cft, val); |
1500 | } else { | 1718 | } else { |
1501 | s64 val = simple_strtoll(buffer, &end, 0); | 1719 | s64 val = simple_strtoll(strstrip(buffer), &end, 0); |
1502 | if (*end) | 1720 | if (*end) |
1503 | return -EINVAL; | 1721 | return -EINVAL; |
1504 | retval = cft->write_s64(cgrp, cft, val); | 1722 | retval = cft->write_s64(cgrp, cft, val); |
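Editor's note: the cgroup_write_X64() change above parses the whitespace-stripped buffer with simple_strtoull()/simple_strtoll() and rejects input that has trailing characters (the "if (*end)" check). The same "parse, then verify the end pointer" idiom is sketched below in plain userspace C; parse_u64() is a hypothetical helper, not a kernel function.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_u64(const char *s, unsigned long long *out)
{
	char *end;

	errno = 0;
	*out = strtoull(s, &end, 0);
	if (errno || end == s || *end != '\0')
		return -EINVAL;	/* empty input, overflow, or trailing garbage */
	return 0;
}

int main(void)
{
	unsigned long long v;

	printf("\"42\"  -> %d\n", parse_u64("42", &v));	/* 0, v == 42 */
	printf("\"42x\" -> %d\n", parse_u64("42x", &v));	/* -EINVAL */
	return 0;
}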
@@ -1534,8 +1752,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft, | |||
1534 | } | 1752 | } |
1535 | 1753 | ||
1536 | buffer[nbytes] = 0; /* nul-terminate */ | 1754 | buffer[nbytes] = 0; /* nul-terminate */ |
1537 | strstrip(buffer); | 1755 | retval = cft->write_string(cgrp, cft, strstrip(buffer)); |
1538 | retval = cft->write_string(cgrp, cft, buffer); | ||
1539 | if (!retval) | 1756 | if (!retval) |
1540 | retval = nbytes; | 1757 | retval = nbytes; |
1541 | out: | 1758 | out: |
@@ -1644,7 +1861,7 @@ static int cgroup_seqfile_release(struct inode *inode, struct file *file) | |||
1644 | return single_release(inode, file); | 1861 | return single_release(inode, file); |
1645 | } | 1862 | } |
1646 | 1863 | ||
1647 | static struct file_operations cgroup_seqfile_operations = { | 1864 | static const struct file_operations cgroup_seqfile_operations = { |
1648 | .read = seq_read, | 1865 | .read = seq_read, |
1649 | .write = cgroup_file_write, | 1866 | .write = cgroup_file_write, |
1650 | .llseek = seq_lseek, | 1867 | .llseek = seq_lseek, |
@@ -1703,7 +1920,7 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1703 | return simple_rename(old_dir, old_dentry, new_dir, new_dentry); | 1920 | return simple_rename(old_dir, old_dentry, new_dir, new_dentry); |
1704 | } | 1921 | } |
1705 | 1922 | ||
1706 | static struct file_operations cgroup_file_operations = { | 1923 | static const struct file_operations cgroup_file_operations = { |
1707 | .read = cgroup_file_read, | 1924 | .read = cgroup_file_read, |
1708 | .write = cgroup_file_write, | 1925 | .write = cgroup_file_write, |
1709 | .llseek = generic_file_llseek, | 1926 | .llseek = generic_file_llseek, |
@@ -1711,7 +1928,7 @@ static struct file_operations cgroup_file_operations = { | |||
1711 | .release = cgroup_file_release, | 1928 | .release = cgroup_file_release, |
1712 | }; | 1929 | }; |
1713 | 1930 | ||
1714 | static struct inode_operations cgroup_dir_inode_operations = { | 1931 | static const struct inode_operations cgroup_dir_inode_operations = { |
1715 | .lookup = simple_lookup, | 1932 | .lookup = simple_lookup, |
1716 | .mkdir = cgroup_mkdir, | 1933 | .mkdir = cgroup_mkdir, |
1717 | .rmdir = cgroup_rmdir, | 1934 | .rmdir = cgroup_rmdir, |
@@ -1876,7 +2093,7 @@ int cgroup_task_count(const struct cgroup *cgrp) | |||
1876 | * the start of a css_set | 2093 | * the start of a css_set |
1877 | */ | 2094 | */ |
1878 | static void cgroup_advance_iter(struct cgroup *cgrp, | 2095 | static void cgroup_advance_iter(struct cgroup *cgrp, |
1879 | struct cgroup_iter *it) | 2096 | struct cgroup_iter *it) |
1880 | { | 2097 | { |
1881 | struct list_head *l = it->cg_link; | 2098 | struct list_head *l = it->cg_link; |
1882 | struct cg_cgroup_link *link; | 2099 | struct cg_cgroup_link *link; |
@@ -2129,7 +2346,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan) | |||
2129 | } | 2346 | } |
2130 | 2347 | ||
2131 | /* | 2348 | /* |
2132 | * Stuff for reading the 'tasks' file. | 2349 | * Stuff for reading the 'tasks'/'procs' files. |
2133 | * | 2350 | * |
2134 | * Reading this file can return large amounts of data if a cgroup has | 2351 | * Reading this file can return large amounts of data if a cgroup has |
2135 | * *lots* of attached tasks. So it may need several calls to read(), | 2352 | * *lots* of attached tasks. So it may need several calls to read(), |
@@ -2139,27 +2356,196 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan) | |||
2139 | */ | 2356 | */ |
2140 | 2357 | ||
2141 | /* | 2358 | /* |
2142 | * Load into 'pidarray' up to 'npids' of the tasks using cgroup | 2359 | * The following two functions "fix" the issue where there are more pids |
2143 | * 'cgrp'. Return actual number of pids loaded. No need to | 2360 | * than kmalloc will give memory for; in such cases, we use vmalloc/vfree. |
2144 | * task_lock(p) when reading out p->cgroup, since we're in an RCU | 2361 | * TODO: replace with a kernel-wide solution to this problem |
2145 | * read section, so the css_set can't go away, and is | 2362 | */ |
2146 | * immutable after creation. | 2363 | #define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2)) |
2364 | static void *pidlist_allocate(int count) | ||
2365 | { | ||
2366 | if (PIDLIST_TOO_LARGE(count)) | ||
2367 | return vmalloc(count * sizeof(pid_t)); | ||
2368 | else | ||
2369 | return kmalloc(count * sizeof(pid_t), GFP_KERNEL); | ||
2370 | } | ||
2371 | static void pidlist_free(void *p) | ||
2372 | { | ||
2373 | if (is_vmalloc_addr(p)) | ||
2374 | vfree(p); | ||
2375 | else | ||
2376 | kfree(p); | ||
2377 | } | ||
2378 | static void *pidlist_resize(void *p, int newcount) | ||
2379 | { | ||
2380 | void *newlist; | ||
2381 | /* note: if new alloc fails, old p will still be valid either way */ | ||
2382 | if (is_vmalloc_addr(p)) { | ||
2383 | newlist = vmalloc(newcount * sizeof(pid_t)); | ||
2384 | if (!newlist) | ||
2385 | return NULL; | ||
2386 | memcpy(newlist, p, newcount * sizeof(pid_t)); | ||
2387 | vfree(p); | ||
2388 | } else { | ||
2389 | newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL); | ||
2390 | } | ||
2391 | return newlist; | ||
2392 | } | ||
2393 | |||
2394 | /* | ||
2395 | * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries | ||
2396 | * If the new stripped list is sufficiently smaller and there's enough memory | ||
2397 | * to allocate a new buffer, will let go of the unneeded memory. Returns the | ||
2398 | * number of unique elements. | ||
2399 | */ | ||
2400 | /* is the size difference enough that we should re-allocate the array? */ | ||
2401 | #define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new)) | ||
2402 | static int pidlist_uniq(pid_t **p, int length) | ||
2403 | { | ||
2404 | int src, dest = 1; | ||
2405 | pid_t *list = *p; | ||
2406 | pid_t *newlist; | ||
2407 | |||
2408 | /* | ||
2409 | * we presume the 0th element is unique, so i starts at 1. trivial | ||
2410 | * edge cases first; no work needs to be done for either | ||
2411 | */ | ||
2412 | if (length == 0 || length == 1) | ||
2413 | return length; | ||
2414 | /* src and dest walk down the list; dest counts unique elements */ | ||
2415 | for (src = 1; src < length; src++) { | ||
2416 | /* find next unique element */ | ||
2417 | while (list[src] == list[src-1]) { | ||
2418 | src++; | ||
2419 | if (src == length) | ||
2420 | goto after; | ||
2421 | } | ||
2422 | /* dest always points to where the next unique element goes */ | ||
2423 | list[dest] = list[src]; | ||
2424 | dest++; | ||
2425 | } | ||
2426 | after: | ||
2427 | /* | ||
2428 | * if the length difference is large enough, we want to allocate a | ||
2429 | * smaller buffer to save memory. if this fails due to out of memory, | ||
2430 | * we'll just stay with what we've got. | ||
2431 | */ | ||
2432 | if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) { | ||
2433 | newlist = pidlist_resize(list, dest); | ||
2434 | if (newlist) | ||
2435 | *p = newlist; | ||
2436 | } | ||
2437 | return dest; | ||
2438 | } | ||
2439 | |||
2440 | static int cmppid(const void *a, const void *b) | ||
2441 | { | ||
2442 | return *(pid_t *)a - *(pid_t *)b; | ||
2443 | } | ||
2444 | |||
2445 | /* | ||
2446 | * find the appropriate pidlist for our purpose (given procs vs tasks) | ||
2447 | * returns with the lock on that pidlist already held, and takes care | ||
2448 | * of the use count, or returns NULL with no locks held if we're out of | ||
2449 | * memory. | ||
2147 | */ | 2450 | */ |
2148 | static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp) | 2451 | static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp, |
2452 | enum cgroup_filetype type) | ||
2149 | { | 2453 | { |
2150 | int n = 0, pid; | 2454 | struct cgroup_pidlist *l; |
2455 | /* don't need task_nsproxy() if we're looking at ourself */ | ||
2456 | struct pid_namespace *ns = get_pid_ns(current->nsproxy->pid_ns); | ||
2457 | /* | ||
2458 | * We can't drop the pidlist_mutex before taking the l->mutex in case | ||
2459 | * the last ref-holder is trying to remove l from the list at the same | ||
2460 | * time. Holding the pidlist_mutex precludes somebody taking whichever | ||
2461 | * list we find out from under us - compare release_pid_array(). | ||
2462 | */ | ||
2463 | mutex_lock(&cgrp->pidlist_mutex); | ||
2464 | list_for_each_entry(l, &cgrp->pidlists, links) { | ||
2465 | if (l->key.type == type && l->key.ns == ns) { | ||
2466 | /* found a matching list - drop the extra refcount */ | ||
2467 | put_pid_ns(ns); | ||
2468 | /* make sure l doesn't vanish out from under us */ | ||
2469 | down_write(&l->mutex); | ||
2470 | mutex_unlock(&cgrp->pidlist_mutex); | ||
2471 | l->use_count++; | ||
2472 | return l; | ||
2473 | } | ||
2474 | } | ||
2475 | /* entry not found; create a new one */ | ||
2476 | l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL); | ||
2477 | if (!l) { | ||
2478 | mutex_unlock(&cgrp->pidlist_mutex); | ||
2479 | put_pid_ns(ns); | ||
2480 | return l; | ||
2481 | } | ||
2482 | init_rwsem(&l->mutex); | ||
2483 | down_write(&l->mutex); | ||
2484 | l->key.type = type; | ||
2485 | l->key.ns = ns; | ||
2486 | l->use_count = 0; /* don't increment here */ | ||
2487 | l->list = NULL; | ||
2488 | l->owner = cgrp; | ||
2489 | list_add(&l->links, &cgrp->pidlists); | ||
2490 | mutex_unlock(&cgrp->pidlist_mutex); | ||
2491 | return l; | ||
2492 | } | ||
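The lock ordering spelled out in the comment above (the cgroup-wide pidlist_mutex first, then the per-list l->mutex, with the per-list lock taken before the list mutex is dropped) is what keeps a concurrent releaser from freeing the entry that was just found. A pthread-based sketch of the same find-or-create/put pattern; this is only an analogy with invented names, not the kernel API, and the caller is expected to unlock the returned entry once it has filled it in:

#include <pthread.h>
#include <stdlib.h>

struct entry {                                  /* stands in for struct cgroup_pidlist */
        int key;
        int use_count;
        pthread_mutex_t lock;                   /* per-entry lock, like l->mutex */
        struct entry *next;
};

static struct entry *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;  /* like pidlist_mutex */

/* find or create the entry for 'key'; the returned entry is locked */
static struct entry *find_entry(int key)
{
        struct entry *e;

        pthread_mutex_lock(&list_lock);
        for (e = head; e; e = e->next) {
                if (e->key == key) {
                        pthread_mutex_lock(&e->lock);   /* take it before dropping list_lock */
                        pthread_mutex_unlock(&list_lock);
                        e->use_count++;
                        return e;
                }
        }
        e = calloc(1, sizeof(*e));
        if (e) {
                e->key = key;
                pthread_mutex_init(&e->lock, NULL);
                pthread_mutex_lock(&e->lock);
                e->next = head;
                head = e;       /* use_count stays 0; the caller bumps it after loading data */
        }
        pthread_mutex_unlock(&list_lock);
        return e;
}

/* drop a reference (entry must be unlocked); the last user unlinks and frees it */
static void put_entry(struct entry *e)
{
        struct entry **pp;

        pthread_mutex_lock(&list_lock);         /* same order: list lock before entry lock */
        pthread_mutex_lock(&e->lock);
        if (--e->use_count == 0) {
                for (pp = &head; *pp != e; pp = &(*pp)->next)
                        ;
                *pp = e->next;
                pthread_mutex_unlock(&list_lock);
                pthread_mutex_unlock(&e->lock);
                pthread_mutex_destroy(&e->lock);
                free(e);
                return;
        }
        pthread_mutex_unlock(&e->lock);
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        struct entry *e = find_entry(42);

        if (e) {
                e->use_count++;                 /* as pidlist_array_load() does once populated */
                pthread_mutex_unlock(&e->lock); /* done setting it up */
                put_entry(e);                   /* last user: entry is unlinked and freed */
        }
        return 0;
}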
2493 | |||
2494 | /* | ||
2495 | * Load a cgroup's pidarray with either procs' tgids or tasks' pids | ||
2496 | */ | ||
2497 | static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, | ||
2498 | struct cgroup_pidlist **lp) | ||
2499 | { | ||
2500 | pid_t *array; | ||
2501 | int length; | ||
2502 | int pid, n = 0; /* used for populating the array */ | ||
2151 | struct cgroup_iter it; | 2503 | struct cgroup_iter it; |
2152 | struct task_struct *tsk; | 2504 | struct task_struct *tsk; |
2505 | struct cgroup_pidlist *l; | ||
2506 | |||
2507 | /* | ||
2508 | * If cgroup gets more users after we read count, we won't have | ||
2509 | * enough space - tough. This race is indistinguishable to the | ||
2510 | * caller from the case that the additional cgroup users didn't | ||
2511 | * show up until sometime later on. | ||
2512 | */ | ||
2513 | length = cgroup_task_count(cgrp); | ||
2514 | array = pidlist_allocate(length); | ||
2515 | if (!array) | ||
2516 | return -ENOMEM; | ||
2517 | /* now, populate the array */ | ||
2153 | cgroup_iter_start(cgrp, &it); | 2518 | cgroup_iter_start(cgrp, &it); |
2154 | while ((tsk = cgroup_iter_next(cgrp, &it))) { | 2519 | while ((tsk = cgroup_iter_next(cgrp, &it))) { |
2155 | if (unlikely(n == npids)) | 2520 | if (unlikely(n == length)) |
2156 | break; | 2521 | break; |
2157 | pid = task_pid_vnr(tsk); | 2522 | /* get tgid or pid for procs or tasks file respectively */ |
2158 | if (pid > 0) | 2523 | if (type == CGROUP_FILE_PROCS) |
2159 | pidarray[n++] = pid; | 2524 | pid = task_tgid_vnr(tsk); |
2525 | else | ||
2526 | pid = task_pid_vnr(tsk); | ||
2527 | if (pid > 0) /* make sure to only use valid results */ | ||
2528 | array[n++] = pid; | ||
2160 | } | 2529 | } |
2161 | cgroup_iter_end(cgrp, &it); | 2530 | cgroup_iter_end(cgrp, &it); |
2162 | return n; | 2531 | length = n; |
2532 | /* now sort & (if procs) strip out duplicates */ | ||
2533 | sort(array, length, sizeof(pid_t), cmppid, NULL); | ||
2534 | if (type == CGROUP_FILE_PROCS) | ||
2535 | length = pidlist_uniq(&array, length); | ||
2536 | l = cgroup_pidlist_find(cgrp, type); | ||
2537 | if (!l) { | ||
2538 | pidlist_free(array); | ||
2539 | return -ENOMEM; | ||
2540 | } | ||
2541 | /* store array, freeing old if necessary - lock already held */ | ||
2542 | pidlist_free(l->list); | ||
2543 | l->list = array; | ||
2544 | l->length = length; | ||
2545 | l->use_count++; | ||
2546 | up_write(&l->mutex); | ||
2547 | *lp = l; | ||
2548 | return 0; | ||
2163 | } | 2549 | } |
2164 | 2550 | ||
2165 | /** | 2551 | /** |
@@ -2216,37 +2602,14 @@ err: | |||
2216 | return ret; | 2602 | return ret; |
2217 | } | 2603 | } |
2218 | 2604 | ||
2219 | /* | ||
2220 | * Cache pids for all threads in the same pid namespace that are | ||
2221 | * opening the same "tasks" file. | ||
2222 | */ | ||
2223 | struct cgroup_pids { | ||
2224 | /* The node in cgrp->pids_list */ | ||
2225 | struct list_head list; | ||
2226 | /* The cgroup those pids belong to */ | ||
2227 | struct cgroup *cgrp; | ||
2228 | /* The namespace those pids belong to */ | ||
2229 | struct pid_namespace *ns; | ||
2230 | /* Array of process ids in the cgroup */ | ||
2231 | pid_t *tasks_pids; | ||
2232 | /* How many files are using this tasks_pids array */ | ||
2233 | int use_count; | ||
2234 | /* Length of the current tasks_pids array */ | ||
2235 | int length; | ||
2236 | }; | ||
2237 | |||
2238 | static int cmppid(const void *a, const void *b) | ||
2239 | { | ||
2240 | return *(pid_t *)a - *(pid_t *)b; | ||
2241 | } | ||
2242 | 2605 | ||
2243 | /* | 2606 | /* |
2244 | * seq_file methods for the "tasks" file. The seq_file position is the | 2607 | * seq_file methods for the tasks/procs files. The seq_file position is the |
2245 | * next pid to display; the seq_file iterator is a pointer to the pid | 2608 | * next pid to display; the seq_file iterator is a pointer to the pid |
2246 | * in the cgroup->tasks_pids array. | 2609 | * in the cgroup->l->list array. |
2247 | */ | 2610 | */ |
2248 | 2611 | ||
2249 | static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos) | 2612 | static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) |
2250 | { | 2613 | { |
2251 | /* | 2614 | /* |
2252 | * Initially we receive a position value that corresponds to | 2615 | * Initially we receive a position value that corresponds to |
@@ -2254,48 +2617,45 @@ static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos) | |||
2254 | * after a seek to the start). Use a binary-search to find the | 2617 | * after a seek to the start). Use a binary-search to find the |
2255 | * next pid to display, if any | 2618 | * next pid to display, if any |
2256 | */ | 2619 | */ |
2257 | struct cgroup_pids *cp = s->private; | 2620 | struct cgroup_pidlist *l = s->private; |
2258 | struct cgroup *cgrp = cp->cgrp; | ||
2259 | int index = 0, pid = *pos; | 2621 | int index = 0, pid = *pos; |
2260 | int *iter; | 2622 | int *iter; |
2261 | 2623 | ||
2262 | down_read(&cgrp->pids_mutex); | 2624 | down_read(&l->mutex); |
2263 | if (pid) { | 2625 | if (pid) { |
2264 | int end = cp->length; | 2626 | int end = l->length; |
2265 | 2627 | ||
2266 | while (index < end) { | 2628 | while (index < end) { |
2267 | int mid = (index + end) / 2; | 2629 | int mid = (index + end) / 2; |
2268 | if (cp->tasks_pids[mid] == pid) { | 2630 | if (l->list[mid] == pid) { |
2269 | index = mid; | 2631 | index = mid; |
2270 | break; | 2632 | break; |
2271 | } else if (cp->tasks_pids[mid] <= pid) | 2633 | } else if (l->list[mid] <= pid) |
2272 | index = mid + 1; | 2634 | index = mid + 1; |
2273 | else | 2635 | else |
2274 | end = mid; | 2636 | end = mid; |
2275 | } | 2637 | } |
2276 | } | 2638 | } |
2277 | /* If we're off the end of the array, we're done */ | 2639 | /* If we're off the end of the array, we're done */ |
2278 | if (index >= cp->length) | 2640 | if (index >= l->length) |
2279 | return NULL; | 2641 | return NULL; |
2280 | /* Update the abstract position to be the actual pid that we found */ | 2642 | /* Update the abstract position to be the actual pid that we found */ |
2281 | iter = cp->tasks_pids + index; | 2643 | iter = l->list + index; |
2282 | *pos = *iter; | 2644 | *pos = *iter; |
2283 | return iter; | 2645 | return iter; |
2284 | } | 2646 | } |
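The search above is an ordinary binary search biased for resumption: it lands on pid itself when that pid is still present, and otherwise on the first entry greater than pid (possibly one past the end, in which case there is nothing left to show). A standalone restatement, illustrative names only:

#include <stdio.h>

/* index of 'pid' if present, else index of the first element greater than it */
static int resume_index(const int *list, int length, int pid)
{
        int index = 0, end = length;

        while (index < end) {
                int mid = (index + end) / 2;

                if (list[mid] == pid)
                        return mid;
                else if (list[mid] < pid)
                        index = mid + 1;
                else
                        end = mid;
        }
        return index;   /* equals length when every entry is smaller than pid */
}

int main(void)
{
        int list[] = { 3, 7, 12, 19 };

        printf("%d\n", resume_index(list, 4, 12));      /* 2: pid 12 is still there */
        printf("%d\n", resume_index(list, 4, 8));       /* 2: resume at the next pid, 12 */
        printf("%d\n", resume_index(list, 4, 25));      /* 4: past the end, nothing left */
        return 0;
}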
2285 | 2647 | ||
2286 | static void cgroup_tasks_stop(struct seq_file *s, void *v) | 2648 | static void cgroup_pidlist_stop(struct seq_file *s, void *v) |
2287 | { | 2649 | { |
2288 | struct cgroup_pids *cp = s->private; | 2650 | struct cgroup_pidlist *l = s->private; |
2289 | struct cgroup *cgrp = cp->cgrp; | 2651 | up_read(&l->mutex); |
2290 | up_read(&cgrp->pids_mutex); | ||
2291 | } | 2652 | } |
2292 | 2653 | ||
2293 | static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos) | 2654 | static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) |
2294 | { | 2655 | { |
2295 | struct cgroup_pids *cp = s->private; | 2656 | struct cgroup_pidlist *l = s->private; |
2296 | int *p = v; | 2657 | pid_t *p = v; |
2297 | int *end = cp->tasks_pids + cp->length; | 2658 | pid_t *end = l->list + l->length; |
2298 | |||
2299 | /* | 2659 | /* |
2300 | * Advance to the next pid in the array. If this goes off the | 2660 | * Advance to the next pid in the array. If this goes off the |
2301 | * end, we're done | 2661 | * end, we're done |
@@ -2309,124 +2669,107 @@ static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos) | |||
2309 | } | 2669 | } |
2310 | } | 2670 | } |
2311 | 2671 | ||
2312 | static int cgroup_tasks_show(struct seq_file *s, void *v) | 2672 | static int cgroup_pidlist_show(struct seq_file *s, void *v) |
2313 | { | 2673 | { |
2314 | return seq_printf(s, "%d\n", *(int *)v); | 2674 | return seq_printf(s, "%d\n", *(int *)v); |
2315 | } | 2675 | } |
2316 | 2676 | ||
2317 | static struct seq_operations cgroup_tasks_seq_operations = { | 2677 | /* |
2318 | .start = cgroup_tasks_start, | 2678 | * seq_operations functions for iterating on pidlists through seq_file - |
2319 | .stop = cgroup_tasks_stop, | 2679 | * independent of whether it's tasks or procs |
2320 | .next = cgroup_tasks_next, | 2680 | */ |
2321 | .show = cgroup_tasks_show, | 2681 | static const struct seq_operations cgroup_pidlist_seq_operations = { |
2682 | .start = cgroup_pidlist_start, | ||
2683 | .stop = cgroup_pidlist_stop, | ||
2684 | .next = cgroup_pidlist_next, | ||
2685 | .show = cgroup_pidlist_show, | ||
2322 | }; | 2686 | }; |
2323 | 2687 | ||
2324 | static void release_cgroup_pid_array(struct cgroup_pids *cp) | 2688 | static void cgroup_release_pid_array(struct cgroup_pidlist *l) |
2325 | { | 2689 | { |
2326 | struct cgroup *cgrp = cp->cgrp; | 2690 | /* |
2327 | 2691 | * the case where we're the last user of this particular pidlist will | |
2328 | down_write(&cgrp->pids_mutex); | 2692 | * have us remove it from the cgroup's list, which entails taking the |
2329 | BUG_ON(!cp->use_count); | 2693 | * mutex. since in pidlist_find the pidlist->lock depends on cgroup-> |
2330 | if (!--cp->use_count) { | 2694 | * pidlist_mutex, we have to take pidlist_mutex first. |
2331 | list_del(&cp->list); | 2695 | */ |
2332 | put_pid_ns(cp->ns); | 2696 | mutex_lock(&l->owner->pidlist_mutex); |
2333 | kfree(cp->tasks_pids); | 2697 | down_write(&l->mutex); |
2334 | kfree(cp); | 2698 | BUG_ON(!l->use_count); |
2699 | if (!--l->use_count) { | ||
2700 | /* we're the last user if refcount is 0; remove and free */ | ||
2701 | list_del(&l->links); | ||
2702 | mutex_unlock(&l->owner->pidlist_mutex); | ||
2703 | pidlist_free(l->list); | ||
2704 | put_pid_ns(l->key.ns); | ||
2705 | up_write(&l->mutex); | ||
2706 | kfree(l); | ||
2707 | return; | ||
2335 | } | 2708 | } |
2336 | up_write(&cgrp->pids_mutex); | 2709 | mutex_unlock(&l->owner->pidlist_mutex); |
2710 | up_write(&l->mutex); | ||
2337 | } | 2711 | } |
2338 | 2712 | ||
2339 | static int cgroup_tasks_release(struct inode *inode, struct file *file) | 2713 | static int cgroup_pidlist_release(struct inode *inode, struct file *file) |
2340 | { | 2714 | { |
2341 | struct seq_file *seq; | 2715 | struct cgroup_pidlist *l; |
2342 | struct cgroup_pids *cp; | ||
2343 | |||
2344 | if (!(file->f_mode & FMODE_READ)) | 2716 | if (!(file->f_mode & FMODE_READ)) |
2345 | return 0; | 2717 | return 0; |
2346 | 2718 | /* | |
2347 | seq = file->private_data; | 2719 | * the seq_file will only be initialized if the file was opened for |
2348 | cp = seq->private; | 2720 | * reading; hence we check if it's not null only in that case. |
2349 | 2721 | */ | |
2350 | release_cgroup_pid_array(cp); | 2722 | l = ((struct seq_file *)file->private_data)->private; |
2723 | cgroup_release_pid_array(l); | ||
2351 | return seq_release(inode, file); | 2724 | return seq_release(inode, file); |
2352 | } | 2725 | } |
2353 | 2726 | ||
2354 | static struct file_operations cgroup_tasks_operations = { | 2727 | static const struct file_operations cgroup_pidlist_operations = { |
2355 | .read = seq_read, | 2728 | .read = seq_read, |
2356 | .llseek = seq_lseek, | 2729 | .llseek = seq_lseek, |
2357 | .write = cgroup_file_write, | 2730 | .write = cgroup_file_write, |
2358 | .release = cgroup_tasks_release, | 2731 | .release = cgroup_pidlist_release, |
2359 | }; | 2732 | }; |
2360 | 2733 | ||
2361 | /* | 2734 | /* |
2362 | * Handle an open on 'tasks' file. Prepare an array containing the | 2735 | * The following functions handle opens on a file that displays a pidlist |
2363 | * process id's of tasks currently attached to the cgroup being opened. | 2736 | * (tasks or procs). Prepare an array of the process/thread IDs of whoever's |
2737 | * in the cgroup. | ||
2364 | */ | 2738 | */ |
2365 | 2739 | /* helper function for the two below it */ | |
2366 | static int cgroup_tasks_open(struct inode *unused, struct file *file) | 2740 | static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type) |
2367 | { | 2741 | { |
2368 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); | 2742 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); |
2369 | struct pid_namespace *ns = current->nsproxy->pid_ns; | 2743 | struct cgroup_pidlist *l; |
2370 | struct cgroup_pids *cp; | ||
2371 | pid_t *pidarray; | ||
2372 | int npids; | ||
2373 | int retval; | 2744 | int retval; |
2374 | 2745 | ||
2375 | /* Nothing to do for write-only files */ | 2746 | /* Nothing to do for write-only files */ |
2376 | if (!(file->f_mode & FMODE_READ)) | 2747 | if (!(file->f_mode & FMODE_READ)) |
2377 | return 0; | 2748 | return 0; |
2378 | 2749 | ||
2379 | /* | 2750 | /* have the array populated */ |
2380 | * If cgroup gets more users after we read count, we won't have | 2751 | retval = pidlist_array_load(cgrp, type, &l); |
2381 | * enough space - tough. This race is indistinguishable to the | 2752 | if (retval) |
2382 | * caller from the case that the additional cgroup users didn't | 2753 | return retval; |
2383 | * show up until sometime later on. | 2754 | /* configure file information */ |
2384 | */ | 2755 | file->f_op = &cgroup_pidlist_operations; |
2385 | npids = cgroup_task_count(cgrp); | ||
2386 | pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); | ||
2387 | if (!pidarray) | ||
2388 | return -ENOMEM; | ||
2389 | npids = pid_array_load(pidarray, npids, cgrp); | ||
2390 | sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); | ||
2391 | |||
2392 | /* | ||
2393 | * Store the array in the cgroup, freeing the old | ||
2394 | * array if necessary | ||
2395 | */ | ||
2396 | down_write(&cgrp->pids_mutex); | ||
2397 | |||
2398 | list_for_each_entry(cp, &cgrp->pids_list, list) { | ||
2399 | if (ns == cp->ns) | ||
2400 | goto found; | ||
2401 | } | ||
2402 | |||
2403 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | ||
2404 | if (!cp) { | ||
2405 | up_write(&cgrp->pids_mutex); | ||
2406 | kfree(pidarray); | ||
2407 | return -ENOMEM; | ||
2408 | } | ||
2409 | cp->cgrp = cgrp; | ||
2410 | cp->ns = ns; | ||
2411 | get_pid_ns(ns); | ||
2412 | list_add(&cp->list, &cgrp->pids_list); | ||
2413 | found: | ||
2414 | kfree(cp->tasks_pids); | ||
2415 | cp->tasks_pids = pidarray; | ||
2416 | cp->length = npids; | ||
2417 | cp->use_count++; | ||
2418 | up_write(&cgrp->pids_mutex); | ||
2419 | |||
2420 | file->f_op = &cgroup_tasks_operations; | ||
2421 | 2756 | ||
2422 | retval = seq_open(file, &cgroup_tasks_seq_operations); | 2757 | retval = seq_open(file, &cgroup_pidlist_seq_operations); |
2423 | if (retval) { | 2758 | if (retval) { |
2424 | release_cgroup_pid_array(cp); | 2759 | cgroup_release_pid_array(l); |
2425 | return retval; | 2760 | return retval; |
2426 | } | 2761 | } |
2427 | ((struct seq_file *)file->private_data)->private = cp; | 2762 | ((struct seq_file *)file->private_data)->private = l; |
2428 | return 0; | 2763 | return 0; |
2429 | } | 2764 | } |
2765 | static int cgroup_tasks_open(struct inode *unused, struct file *file) | ||
2766 | { | ||
2767 | return cgroup_pidlist_open(file, CGROUP_FILE_TASKS); | ||
2768 | } | ||
2769 | static int cgroup_procs_open(struct inode *unused, struct file *file) | ||
2770 | { | ||
2771 | return cgroup_pidlist_open(file, CGROUP_FILE_PROCS); | ||
2772 | } | ||
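Both open handlers end up serving a sorted, pid-per-line listing through the same seq_file operations, so from userspace "tasks" and the new "cgroup.procs" read like ordinary text files. A minimal reader; the mount point and group name are assumptions here, since a cgroup hierarchy can be mounted anywhere:

#include <stdio.h>

int main(void)
{
        /* illustrative path only; substitute wherever the hierarchy is mounted */
        FILE *f = fopen("/cgroup/mygroup/cgroup.procs", "r");
        int pid;

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fscanf(f, "%d", &pid) == 1)
                printf("member tgid %d\n", pid);
        fclose(f);
        return 0;
}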
2430 | 2773 | ||
2431 | static u64 cgroup_read_notify_on_release(struct cgroup *cgrp, | 2774 | static u64 cgroup_read_notify_on_release(struct cgroup *cgrp, |
2432 | struct cftype *cft) | 2775 | struct cftype *cft) |
@@ -2449,21 +2792,27 @@ static int cgroup_write_notify_on_release(struct cgroup *cgrp, | |||
2449 | /* | 2792 | /* |
2450 | * for the common functions, 'private' gives the type of file | 2793 | * for the common functions, 'private' gives the type of file |
2451 | */ | 2794 | */ |
2795 | /* for hysterical raisins, we can't put this on the older files */ | ||
2796 | #define CGROUP_FILE_GENERIC_PREFIX "cgroup." | ||
2452 | static struct cftype files[] = { | 2797 | static struct cftype files[] = { |
2453 | { | 2798 | { |
2454 | .name = "tasks", | 2799 | .name = "tasks", |
2455 | .open = cgroup_tasks_open, | 2800 | .open = cgroup_tasks_open, |
2456 | .write_u64 = cgroup_tasks_write, | 2801 | .write_u64 = cgroup_tasks_write, |
2457 | .release = cgroup_tasks_release, | 2802 | .release = cgroup_pidlist_release, |
2458 | .private = FILE_TASKLIST, | ||
2459 | .mode = S_IRUGO | S_IWUSR, | 2803 | .mode = S_IRUGO | S_IWUSR, |
2460 | }, | 2804 | }, |
2461 | 2805 | { | |
2806 | .name = CGROUP_FILE_GENERIC_PREFIX "procs", | ||
2807 | .open = cgroup_procs_open, | ||
2808 | /* .write_u64 = cgroup_procs_write, TODO */ | ||
2809 | .release = cgroup_pidlist_release, | ||
2810 | .mode = S_IRUGO, | ||
2811 | }, | ||
2462 | { | 2812 | { |
2463 | .name = "notify_on_release", | 2813 | .name = "notify_on_release", |
2464 | .read_u64 = cgroup_read_notify_on_release, | 2814 | .read_u64 = cgroup_read_notify_on_release, |
2465 | .write_u64 = cgroup_write_notify_on_release, | 2815 | .write_u64 = cgroup_write_notify_on_release, |
2466 | .private = FILE_NOTIFY_ON_RELEASE, | ||
2467 | }, | 2816 | }, |
2468 | }; | 2817 | }; |
2469 | 2818 | ||
@@ -2472,7 +2821,6 @@ static struct cftype cft_release_agent = { | |||
2472 | .read_seq_string = cgroup_release_agent_show, | 2821 | .read_seq_string = cgroup_release_agent_show, |
2473 | .write_string = cgroup_release_agent_write, | 2822 | .write_string = cgroup_release_agent_write, |
2474 | .max_write_len = PATH_MAX, | 2823 | .max_write_len = PATH_MAX, |
2475 | .private = FILE_RELEASE_AGENT, | ||
2476 | }; | 2824 | }; |
2477 | 2825 | ||
2478 | static int cgroup_populate_dir(struct cgroup *cgrp) | 2826 | static int cgroup_populate_dir(struct cgroup *cgrp) |
@@ -2879,6 +3227,7 @@ int __init cgroup_init_early(void) | |||
2879 | init_task.cgroups = &init_css_set; | 3227 | init_task.cgroups = &init_css_set; |
2880 | 3228 | ||
2881 | init_css_set_link.cg = &init_css_set; | 3229 | init_css_set_link.cg = &init_css_set; |
3230 | init_css_set_link.cgrp = dummytop; | ||
2882 | list_add(&init_css_set_link.cgrp_link_list, | 3231 | list_add(&init_css_set_link.cgrp_link_list, |
2883 | &rootnode.top_cgroup.css_sets); | 3232 | &rootnode.top_cgroup.css_sets); |
2884 | list_add(&init_css_set_link.cg_link_list, | 3233 | list_add(&init_css_set_link.cg_link_list, |
@@ -2933,7 +3282,7 @@ int __init cgroup_init(void) | |||
2933 | /* Add init_css_set to the hash table */ | 3282 | /* Add init_css_set to the hash table */ |
2934 | hhead = css_set_hash(init_css_set.subsys); | 3283 | hhead = css_set_hash(init_css_set.subsys); |
2935 | hlist_add_head(&init_css_set.hlist, hhead); | 3284 | hlist_add_head(&init_css_set.hlist, hhead); |
2936 | 3285 | BUG_ON(!init_root_id(&rootnode)); | |
2937 | err = register_filesystem(&cgroup_fs_type); | 3286 | err = register_filesystem(&cgroup_fs_type); |
2938 | if (err < 0) | 3287 | if (err < 0) |
2939 | goto out; | 3288 | goto out; |
@@ -2986,15 +3335,16 @@ static int proc_cgroup_show(struct seq_file *m, void *v) | |||
2986 | for_each_active_root(root) { | 3335 | for_each_active_root(root) { |
2987 | struct cgroup_subsys *ss; | 3336 | struct cgroup_subsys *ss; |
2988 | struct cgroup *cgrp; | 3337 | struct cgroup *cgrp; |
2989 | int subsys_id; | ||
2990 | int count = 0; | 3338 | int count = 0; |
2991 | 3339 | ||
2992 | seq_printf(m, "%lu:", root->subsys_bits); | 3340 | seq_printf(m, "%d:", root->hierarchy_id); |
2993 | for_each_subsys(root, ss) | 3341 | for_each_subsys(root, ss) |
2994 | seq_printf(m, "%s%s", count++ ? "," : "", ss->name); | 3342 | seq_printf(m, "%s%s", count++ ? "," : "", ss->name); |
3343 | if (strlen(root->name)) | ||
3344 | seq_printf(m, "%sname=%s", count ? "," : "", | ||
3345 | root->name); | ||
2995 | seq_putc(m, ':'); | 3346 | seq_putc(m, ':'); |
2996 | get_first_subsys(&root->top_cgroup, NULL, &subsys_id); | 3347 | cgrp = task_cgroup_from_root(tsk, root); |
2997 | cgrp = task_cgroup(tsk, subsys_id); | ||
2998 | retval = cgroup_path(cgrp, buf, PAGE_SIZE); | 3348 | retval = cgroup_path(cgrp, buf, PAGE_SIZE); |
2999 | if (retval < 0) | 3349 | if (retval < 0) |
3000 | goto out_unlock; | 3350 | goto out_unlock; |
@@ -3017,7 +3367,7 @@ static int cgroup_open(struct inode *inode, struct file *file) | |||
3017 | return single_open(file, proc_cgroup_show, pid); | 3367 | return single_open(file, proc_cgroup_show, pid); |
3018 | } | 3368 | } |
3019 | 3369 | ||
3020 | struct file_operations proc_cgroup_operations = { | 3370 | const struct file_operations proc_cgroup_operations = { |
3021 | .open = cgroup_open, | 3371 | .open = cgroup_open, |
3022 | .read = seq_read, | 3372 | .read = seq_read, |
3023 | .llseek = seq_lseek, | 3373 | .llseek = seq_lseek, |
@@ -3033,8 +3383,8 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v) | |||
3033 | mutex_lock(&cgroup_mutex); | 3383 | mutex_lock(&cgroup_mutex); |
3034 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | 3384 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { |
3035 | struct cgroup_subsys *ss = subsys[i]; | 3385 | struct cgroup_subsys *ss = subsys[i]; |
3036 | seq_printf(m, "%s\t%lu\t%d\t%d\n", | 3386 | seq_printf(m, "%s\t%d\t%d\t%d\n", |
3037 | ss->name, ss->root->subsys_bits, | 3387 | ss->name, ss->root->hierarchy_id, |
3038 | ss->root->number_of_cgroups, !ss->disabled); | 3388 | ss->root->number_of_cgroups, !ss->disabled); |
3039 | } | 3389 | } |
3040 | mutex_unlock(&cgroup_mutex); | 3390 | mutex_unlock(&cgroup_mutex); |
@@ -3046,7 +3396,7 @@ static int cgroupstats_open(struct inode *inode, struct file *file) | |||
3046 | return single_open(file, proc_cgroupstats_show, NULL); | 3396 | return single_open(file, proc_cgroupstats_show, NULL); |
3047 | } | 3397 | } |
3048 | 3398 | ||
3049 | static struct file_operations proc_cgroupstats_operations = { | 3399 | static const struct file_operations proc_cgroupstats_operations = { |
3050 | .open = cgroupstats_open, | 3400 | .open = cgroupstats_open, |
3051 | .read = seq_read, | 3401 | .read = seq_read, |
3052 | .llseek = seq_lseek, | 3402 | .llseek = seq_lseek, |
@@ -3320,13 +3670,11 @@ int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task) | |||
3320 | { | 3670 | { |
3321 | int ret; | 3671 | int ret; |
3322 | struct cgroup *target; | 3672 | struct cgroup *target; |
3323 | int subsys_id; | ||
3324 | 3673 | ||
3325 | if (cgrp == dummytop) | 3674 | if (cgrp == dummytop) |
3326 | return 1; | 3675 | return 1; |
3327 | 3676 | ||
3328 | get_first_subsys(cgrp, NULL, &subsys_id); | 3677 | target = task_cgroup_from_root(task, cgrp->root); |
3329 | target = task_cgroup(task, subsys_id); | ||
3330 | while (cgrp != target && cgrp!= cgrp->top_cgroup) | 3678 | while (cgrp != target && cgrp!= cgrp->top_cgroup) |
3331 | cgrp = cgrp->parent; | 3679 | cgrp = cgrp->parent; |
3332 | ret = (cgrp == target); | 3680 | ret = (cgrp == target); |
@@ -3358,8 +3706,10 @@ static void check_for_release(struct cgroup *cgrp) | |||
3358 | void __css_put(struct cgroup_subsys_state *css) | 3706 | void __css_put(struct cgroup_subsys_state *css) |
3359 | { | 3707 | { |
3360 | struct cgroup *cgrp = css->cgroup; | 3708 | struct cgroup *cgrp = css->cgroup; |
3709 | int val; | ||
3361 | rcu_read_lock(); | 3710 | rcu_read_lock(); |
3362 | if (atomic_dec_return(&css->refcnt) == 1) { | 3711 | val = atomic_dec_return(&css->refcnt); |
3712 | if (val == 1) { | ||
3363 | if (notify_on_release(cgrp)) { | 3713 | if (notify_on_release(cgrp)) { |
3364 | set_bit(CGRP_RELEASABLE, &cgrp->flags); | 3714 | set_bit(CGRP_RELEASABLE, &cgrp->flags); |
3365 | check_for_release(cgrp); | 3715 | check_for_release(cgrp); |
@@ -3367,6 +3717,7 @@ void __css_put(struct cgroup_subsys_state *css) | |||
3367 | cgroup_wakeup_rmdir_waiter(cgrp); | 3717 | cgroup_wakeup_rmdir_waiter(cgrp); |
3368 | } | 3718 | } |
3369 | rcu_read_unlock(); | 3719 | rcu_read_unlock(); |
3720 | WARN_ON_ONCE(val < 1); | ||
3370 | } | 3721 | } |
3371 | 3722 | ||
3372 | /* | 3723 | /* |
@@ -3693,3 +4044,154 @@ css_get_next(struct cgroup_subsys *ss, int id, | |||
3693 | return ret; | 4044 | return ret; |
3694 | } | 4045 | } |
3695 | 4046 | ||
4047 | #ifdef CONFIG_CGROUP_DEBUG | ||
4048 | static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss, | ||
4049 | struct cgroup *cont) | ||
4050 | { | ||
4051 | struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL); | ||
4052 | |||
4053 | if (!css) | ||
4054 | return ERR_PTR(-ENOMEM); | ||
4055 | |||
4056 | return css; | ||
4057 | } | ||
4058 | |||
4059 | static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont) | ||
4060 | { | ||
4061 | kfree(cont->subsys[debug_subsys_id]); | ||
4062 | } | ||
4063 | |||
4064 | static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft) | ||
4065 | { | ||
4066 | return atomic_read(&cont->count); | ||
4067 | } | ||
4068 | |||
4069 | static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft) | ||
4070 | { | ||
4071 | return cgroup_task_count(cont); | ||
4072 | } | ||
4073 | |||
4074 | static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft) | ||
4075 | { | ||
4076 | return (u64)(unsigned long)current->cgroups; | ||
4077 | } | ||
4078 | |||
4079 | static u64 current_css_set_refcount_read(struct cgroup *cont, | ||
4080 | struct cftype *cft) | ||
4081 | { | ||
4082 | u64 count; | ||
4083 | |||
4084 | rcu_read_lock(); | ||
4085 | count = atomic_read(¤t->cgroups->refcount); | ||
4086 | rcu_read_unlock(); | ||
4087 | return count; | ||
4088 | } | ||
4089 | |||
4090 | static int current_css_set_cg_links_read(struct cgroup *cont, | ||
4091 | struct cftype *cft, | ||
4092 | struct seq_file *seq) | ||
4093 | { | ||
4094 | struct cg_cgroup_link *link; | ||
4095 | struct css_set *cg; | ||
4096 | |||
4097 | read_lock(&css_set_lock); | ||
4098 | rcu_read_lock(); | ||
4099 | cg = rcu_dereference(current->cgroups); | ||
4100 | list_for_each_entry(link, &cg->cg_links, cg_link_list) { | ||
4101 | struct cgroup *c = link->cgrp; | ||
4102 | const char *name; | ||
4103 | |||
4104 | if (c->dentry) | ||
4105 | name = c->dentry->d_name.name; | ||
4106 | else | ||
4107 | name = "?"; | ||
4108 | seq_printf(seq, "Root %d group %s\n", | ||
4109 | c->root->hierarchy_id, name); | ||
4110 | } | ||
4111 | rcu_read_unlock(); | ||
4112 | read_unlock(&css_set_lock); | ||
4113 | return 0; | ||
4114 | } | ||
4115 | |||
4116 | #define MAX_TASKS_SHOWN_PER_CSS 25 | ||
4117 | static int cgroup_css_links_read(struct cgroup *cont, | ||
4118 | struct cftype *cft, | ||
4119 | struct seq_file *seq) | ||
4120 | { | ||
4121 | struct cg_cgroup_link *link; | ||
4122 | |||
4123 | read_lock(&css_set_lock); | ||
4124 | list_for_each_entry(link, &cont->css_sets, cgrp_link_list) { | ||
4125 | struct css_set *cg = link->cg; | ||
4126 | struct task_struct *task; | ||
4127 | int count = 0; | ||
4128 | seq_printf(seq, "css_set %p\n", cg); | ||
4129 | list_for_each_entry(task, &cg->tasks, cg_list) { | ||
4130 | if (count++ > MAX_TASKS_SHOWN_PER_CSS) { | ||
4131 | seq_puts(seq, " ...\n"); | ||
4132 | break; | ||
4133 | } else { | ||
4134 | seq_printf(seq, " task %d\n", | ||
4135 | task_pid_vnr(task)); | ||
4136 | } | ||
4137 | } | ||
4138 | } | ||
4139 | read_unlock(&css_set_lock); | ||
4140 | return 0; | ||
4141 | } | ||
4142 | |||
4143 | static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft) | ||
4144 | { | ||
4145 | return test_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
4146 | } | ||
4147 | |||
4148 | static struct cftype debug_files[] = { | ||
4149 | { | ||
4150 | .name = "cgroup_refcount", | ||
4151 | .read_u64 = cgroup_refcount_read, | ||
4152 | }, | ||
4153 | { | ||
4154 | .name = "taskcount", | ||
4155 | .read_u64 = debug_taskcount_read, | ||
4156 | }, | ||
4157 | |||
4158 | { | ||
4159 | .name = "current_css_set", | ||
4160 | .read_u64 = current_css_set_read, | ||
4161 | }, | ||
4162 | |||
4163 | { | ||
4164 | .name = "current_css_set_refcount", | ||
4165 | .read_u64 = current_css_set_refcount_read, | ||
4166 | }, | ||
4167 | |||
4168 | { | ||
4169 | .name = "current_css_set_cg_links", | ||
4170 | .read_seq_string = current_css_set_cg_links_read, | ||
4171 | }, | ||
4172 | |||
4173 | { | ||
4174 | .name = "cgroup_css_links", | ||
4175 | .read_seq_string = cgroup_css_links_read, | ||
4176 | }, | ||
4177 | |||
4178 | { | ||
4179 | .name = "releasable", | ||
4180 | .read_u64 = releasable_read, | ||
4181 | }, | ||
4182 | }; | ||
4183 | |||
4184 | static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont) | ||
4185 | { | ||
4186 | return cgroup_add_files(cont, ss, debug_files, | ||
4187 | ARRAY_SIZE(debug_files)); | ||
4188 | } | ||
4189 | |||
4190 | struct cgroup_subsys debug_subsys = { | ||
4191 | .name = "debug", | ||
4192 | .create = debug_create, | ||
4193 | .destroy = debug_destroy, | ||
4194 | .populate = debug_populate, | ||
4195 | .subsys_id = debug_subsys_id, | ||
4196 | }; | ||
4197 | #endif /* CONFIG_CGROUP_DEBUG */ | ||
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c deleted file mode 100644 index 0c92d797baa6..000000000000 --- a/kernel/cgroup_debug.c +++ /dev/null | |||
@@ -1,105 +0,0 @@ | |||
1 | /* | ||
2 | * kernel/cgroup_debug.c - Example cgroup subsystem that | ||
3 | * exposes debug info | ||
4 | * | ||
5 | * Copyright (C) Google Inc, 2007 | ||
6 | * | ||
7 | * Developed by Paul Menage (menage@google.com) | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/cgroup.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/rcupdate.h> | ||
15 | |||
16 | #include <asm/atomic.h> | ||
17 | |||
18 | static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss, | ||
19 | struct cgroup *cont) | ||
20 | { | ||
21 | struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL); | ||
22 | |||
23 | if (!css) | ||
24 | return ERR_PTR(-ENOMEM); | ||
25 | |||
26 | return css; | ||
27 | } | ||
28 | |||
29 | static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont) | ||
30 | { | ||
31 | kfree(cont->subsys[debug_subsys_id]); | ||
32 | } | ||
33 | |||
34 | static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft) | ||
35 | { | ||
36 | return atomic_read(&cont->count); | ||
37 | } | ||
38 | |||
39 | static u64 taskcount_read(struct cgroup *cont, struct cftype *cft) | ||
40 | { | ||
41 | u64 count; | ||
42 | |||
43 | count = cgroup_task_count(cont); | ||
44 | return count; | ||
45 | } | ||
46 | |||
47 | static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft) | ||
48 | { | ||
49 | return (u64)(long)current->cgroups; | ||
50 | } | ||
51 | |||
52 | static u64 current_css_set_refcount_read(struct cgroup *cont, | ||
53 | struct cftype *cft) | ||
54 | { | ||
55 | u64 count; | ||
56 | |||
57 | rcu_read_lock(); | ||
58 | count = atomic_read(¤t->cgroups->refcount); | ||
59 | rcu_read_unlock(); | ||
60 | return count; | ||
61 | } | ||
62 | |||
63 | static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft) | ||
64 | { | ||
65 | return test_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
66 | } | ||
67 | |||
68 | static struct cftype files[] = { | ||
69 | { | ||
70 | .name = "cgroup_refcount", | ||
71 | .read_u64 = cgroup_refcount_read, | ||
72 | }, | ||
73 | { | ||
74 | .name = "taskcount", | ||
75 | .read_u64 = taskcount_read, | ||
76 | }, | ||
77 | |||
78 | { | ||
79 | .name = "current_css_set", | ||
80 | .read_u64 = current_css_set_read, | ||
81 | }, | ||
82 | |||
83 | { | ||
84 | .name = "current_css_set_refcount", | ||
85 | .read_u64 = current_css_set_refcount_read, | ||
86 | }, | ||
87 | |||
88 | { | ||
89 | .name = "releasable", | ||
90 | .read_u64 = releasable_read, | ||
91 | }, | ||
92 | }; | ||
93 | |||
94 | static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont) | ||
95 | { | ||
96 | return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files)); | ||
97 | } | ||
98 | |||
99 | struct cgroup_subsys debug_subsys = { | ||
100 | .name = "debug", | ||
101 | .create = debug_create, | ||
102 | .destroy = debug_destroy, | ||
103 | .populate = debug_populate, | ||
104 | .subsys_id = debug_subsys_id, | ||
105 | }; | ||
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index fb249e2bcada..59e9ef6aab40 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c | |||
@@ -159,7 +159,7 @@ static bool is_task_frozen_enough(struct task_struct *task) | |||
159 | */ | 159 | */ |
160 | static int freezer_can_attach(struct cgroup_subsys *ss, | 160 | static int freezer_can_attach(struct cgroup_subsys *ss, |
161 | struct cgroup *new_cgroup, | 161 | struct cgroup *new_cgroup, |
162 | struct task_struct *task) | 162 | struct task_struct *task, bool threadgroup) |
163 | { | 163 | { |
164 | struct freezer *freezer; | 164 | struct freezer *freezer; |
165 | 165 | ||
@@ -177,6 +177,19 @@ static int freezer_can_attach(struct cgroup_subsys *ss, | |||
177 | if (freezer->state == CGROUP_FROZEN) | 177 | if (freezer->state == CGROUP_FROZEN) |
178 | return -EBUSY; | 178 | return -EBUSY; |
179 | 179 | ||
180 | if (threadgroup) { | ||
181 | struct task_struct *c; | ||
182 | |||
183 | rcu_read_lock(); | ||
184 | list_for_each_entry_rcu(c, &task->thread_group, thread_group) { | ||
185 | if (is_task_frozen_enough(c)) { | ||
186 | rcu_read_unlock(); | ||
187 | return -EBUSY; | ||
188 | } | ||
189 | } | ||
190 | rcu_read_unlock(); | ||
191 | } | ||
192 | |||
180 | return 0; | 193 | return 0; |
181 | } | 194 | } |
182 | 195 | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 7e75a41bd508..b5cb469d2545 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -1324,9 +1324,10 @@ static int fmeter_getrate(struct fmeter *fmp) | |||
1324 | static cpumask_var_t cpus_attach; | 1324 | static cpumask_var_t cpus_attach; |
1325 | 1325 | ||
1326 | /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */ | 1326 | /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */ |
1327 | static int cpuset_can_attach(struct cgroup_subsys *ss, | 1327 | static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, |
1328 | struct cgroup *cont, struct task_struct *tsk) | 1328 | struct task_struct *tsk, bool threadgroup) |
1329 | { | 1329 | { |
1330 | int ret; | ||
1330 | struct cpuset *cs = cgroup_cs(cont); | 1331 | struct cpuset *cs = cgroup_cs(cont); |
1331 | 1332 | ||
1332 | if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) | 1333 | if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) |
@@ -1343,18 +1344,51 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, | |||
1343 | if (tsk->flags & PF_THREAD_BOUND) | 1344 | if (tsk->flags & PF_THREAD_BOUND) |
1344 | return -EINVAL; | 1345 | return -EINVAL; |
1345 | 1346 | ||
1346 | return security_task_setscheduler(tsk, 0, NULL); | 1347 | ret = security_task_setscheduler(tsk, 0, NULL); |
1348 | if (ret) | ||
1349 | return ret; | ||
1350 | if (threadgroup) { | ||
1351 | struct task_struct *c; | ||
1352 | |||
1353 | rcu_read_lock(); | ||
1354 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | ||
1355 | ret = security_task_setscheduler(c, 0, NULL); | ||
1356 | if (ret) { | ||
1357 | rcu_read_unlock(); | ||
1358 | return ret; | ||
1359 | } | ||
1360 | } | ||
1361 | rcu_read_unlock(); | ||
1362 | } | ||
1363 | return 0; | ||
1364 | } | ||
1365 | |||
1366 | static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to, | ||
1367 | struct cpuset *cs) | ||
1368 | { | ||
1369 | int err; | ||
1370 | /* | ||
1371 | * can_attach beforehand should guarantee that this doesn't fail. | ||
1372 | * TODO: have a better way to handle failure here | ||
1373 | */ | ||
1374 | err = set_cpus_allowed_ptr(tsk, cpus_attach); | ||
1375 | WARN_ON_ONCE(err); | ||
1376 | |||
1377 | task_lock(tsk); | ||
1378 | cpuset_change_task_nodemask(tsk, to); | ||
1379 | task_unlock(tsk); | ||
1380 | cpuset_update_task_spread_flag(cs, tsk); | ||
1381 | |||
1347 | } | 1382 | } |
1348 | 1383 | ||
1349 | static void cpuset_attach(struct cgroup_subsys *ss, | 1384 | static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont, |
1350 | struct cgroup *cont, struct cgroup *oldcont, | 1385 | struct cgroup *oldcont, struct task_struct *tsk, |
1351 | struct task_struct *tsk) | 1386 | bool threadgroup) |
1352 | { | 1387 | { |
1353 | nodemask_t from, to; | 1388 | nodemask_t from, to; |
1354 | struct mm_struct *mm; | 1389 | struct mm_struct *mm; |
1355 | struct cpuset *cs = cgroup_cs(cont); | 1390 | struct cpuset *cs = cgroup_cs(cont); |
1356 | struct cpuset *oldcs = cgroup_cs(oldcont); | 1391 | struct cpuset *oldcs = cgroup_cs(oldcont); |
1357 | int err; | ||
1358 | 1392 | ||
1359 | if (cs == &top_cpuset) { | 1393 | if (cs == &top_cpuset) { |
1360 | cpumask_copy(cpus_attach, cpu_possible_mask); | 1394 | cpumask_copy(cpus_attach, cpu_possible_mask); |
@@ -1363,15 +1397,19 @@ static void cpuset_attach(struct cgroup_subsys *ss, | |||
1363 | guarantee_online_cpus(cs, cpus_attach); | 1397 | guarantee_online_cpus(cs, cpus_attach); |
1364 | guarantee_online_mems(cs, &to); | 1398 | guarantee_online_mems(cs, &to); |
1365 | } | 1399 | } |
1366 | err = set_cpus_allowed_ptr(tsk, cpus_attach); | ||
1367 | if (err) | ||
1368 | return; | ||
1369 | 1400 | ||
1370 | task_lock(tsk); | 1401 | /* do per-task migration stuff possibly for each in the threadgroup */ |
1371 | cpuset_change_task_nodemask(tsk, &to); | 1402 | cpuset_attach_task(tsk, &to, cs); |
1372 | task_unlock(tsk); | 1403 | if (threadgroup) { |
1373 | cpuset_update_task_spread_flag(cs, tsk); | 1404 | struct task_struct *c; |
1405 | rcu_read_lock(); | ||
1406 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | ||
1407 | cpuset_attach_task(c, &to, cs); | ||
1408 | } | ||
1409 | rcu_read_unlock(); | ||
1410 | } | ||
1374 | 1411 | ||
1412 | /* change mm; only needs to be done once even if threadgroup */ | ||
1375 | from = oldcs->mems_allowed; | 1413 | from = oldcs->mems_allowed; |
1376 | to = cs->mems_allowed; | 1414 | to = cs->mems_allowed; |
1377 | mm = get_task_mm(tsk); | 1415 | mm = get_task_mm(tsk); |
diff --git a/kernel/cred.c b/kernel/cred.c index d7f7a01082eb..dd76cfe5f5b0 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -782,6 +782,25 @@ EXPORT_SYMBOL(set_create_files_as); | |||
782 | 782 | ||
783 | #ifdef CONFIG_DEBUG_CREDENTIALS | 783 | #ifdef CONFIG_DEBUG_CREDENTIALS |
784 | 784 | ||
785 | bool creds_are_invalid(const struct cred *cred) | ||
786 | { | ||
787 | if (cred->magic != CRED_MAGIC) | ||
788 | return true; | ||
789 | if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers)) | ||
790 | return true; | ||
791 | #ifdef CONFIG_SECURITY_SELINUX | ||
792 | if (selinux_is_enabled()) { | ||
793 | if ((unsigned long) cred->security < PAGE_SIZE) | ||
794 | return true; | ||
795 | if ((*(u32 *)cred->security & 0xffffff00) == | ||
796 | (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)) | ||
797 | return true; | ||
798 | } | ||
799 | #endif | ||
800 | return false; | ||
801 | } | ||
802 | EXPORT_SYMBOL(creds_are_invalid); | ||
803 | |||
785 | /* | 804 | /* |
786 | * dump invalid credentials | 805 | * dump invalid credentials |
787 | */ | 806 | */ |
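In the creds_are_invalid() hunk above, POISON_FREE is the byte value SLAB poisoning writes over freed objects (0x6b per include/linux/poison.h, treated as an assumption here), and the & 0xffffff00 masks off the low byte of the word before comparing, so any word whose upper three bytes are all poison is flagged as a freed security blob. A quick standalone check of the constant being compared against:

#include <stdio.h>

#define POISON_FREE 0x6b        /* assumed freed-object poison byte */

int main(void)
{
        unsigned int pattern = POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8;
        unsigned int word = 0x6b6b6b6b;         /* a word read from a poisoned, freed object */

        printf("pattern = %#010x\n", pattern);                          /* 0x6b6b6b00 */
        printf("match   = %d\n", (word & 0xffffff00) == pattern);       /* 1 */
        return 0;
}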
diff --git a/kernel/exit.c b/kernel/exit.c index ae5d8660ddff..f7864ac2ecc1 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #include <linux/tracehook.h> | 47 | #include <linux/tracehook.h> |
48 | #include <linux/fs_struct.h> | 48 | #include <linux/fs_struct.h> |
49 | #include <linux/init_task.h> | 49 | #include <linux/init_task.h> |
50 | #include <linux/perf_counter.h> | 50 | #include <linux/perf_event.h> |
51 | #include <trace/events/sched.h> | 51 | #include <trace/events/sched.h> |
52 | 52 | ||
53 | #include <asm/uaccess.h> | 53 | #include <asm/uaccess.h> |
@@ -154,8 +154,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp) | |||
154 | { | 154 | { |
155 | struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); | 155 | struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); |
156 | 156 | ||
157 | #ifdef CONFIG_PERF_COUNTERS | 157 | #ifdef CONFIG_PERF_EVENTS |
158 | WARN_ON_ONCE(tsk->perf_counter_ctxp); | 158 | WARN_ON_ONCE(tsk->perf_event_ctxp); |
159 | #endif | 159 | #endif |
160 | trace_sched_process_free(tsk); | 160 | trace_sched_process_free(tsk); |
161 | put_task_struct(tsk); | 161 | put_task_struct(tsk); |
@@ -945,6 +945,8 @@ NORET_TYPE void do_exit(long code) | |||
945 | if (group_dead) { | 945 | if (group_dead) { |
946 | hrtimer_cancel(&tsk->signal->real_timer); | 946 | hrtimer_cancel(&tsk->signal->real_timer); |
947 | exit_itimers(tsk->signal); | 947 | exit_itimers(tsk->signal); |
948 | if (tsk->mm) | ||
949 | setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); | ||
948 | } | 950 | } |
949 | acct_collect(code, group_dead); | 951 | acct_collect(code, group_dead); |
950 | if (group_dead) | 952 | if (group_dead) |
@@ -972,8 +974,6 @@ NORET_TYPE void do_exit(long code) | |||
972 | disassociate_ctty(1); | 974 | disassociate_ctty(1); |
973 | 975 | ||
974 | module_put(task_thread_info(tsk)->exec_domain->module); | 976 | module_put(task_thread_info(tsk)->exec_domain->module); |
975 | if (tsk->binfmt) | ||
976 | module_put(tsk->binfmt->module); | ||
977 | 977 | ||
978 | proc_exit_connector(tsk); | 978 | proc_exit_connector(tsk); |
979 | 979 | ||
@@ -981,7 +981,7 @@ NORET_TYPE void do_exit(long code) | |||
981 | * Flush inherited counters to the parent - before the parent | 981 | * Flush inherited counters to the parent - before the parent |
982 | * gets woken up by child-exit notifications. | 982 | * gets woken up by child-exit notifications. |
983 | */ | 983 | */ |
984 | perf_counter_exit_task(tsk); | 984 | perf_event_exit_task(tsk); |
985 | 985 | ||
986 | exit_notify(tsk, group_dead); | 986 | exit_notify(tsk, group_dead); |
987 | #ifdef CONFIG_NUMA | 987 | #ifdef CONFIG_NUMA |
@@ -989,8 +989,6 @@ NORET_TYPE void do_exit(long code) | |||
989 | tsk->mempolicy = NULL; | 989 | tsk->mempolicy = NULL; |
990 | #endif | 990 | #endif |
991 | #ifdef CONFIG_FUTEX | 991 | #ifdef CONFIG_FUTEX |
992 | if (unlikely(!list_empty(&tsk->pi_state_list))) | ||
993 | exit_pi_state_list(tsk); | ||
994 | if (unlikely(current->pi_state_cache)) | 992 | if (unlikely(current->pi_state_cache)) |
995 | kfree(current->pi_state_cache); | 993 | kfree(current->pi_state_cache); |
996 | #endif | 994 | #endif |
@@ -1093,28 +1091,28 @@ struct wait_opts { | |||
1093 | int __user *wo_stat; | 1091 | int __user *wo_stat; |
1094 | struct rusage __user *wo_rusage; | 1092 | struct rusage __user *wo_rusage; |
1095 | 1093 | ||
1094 | wait_queue_t child_wait; | ||
1096 | int notask_error; | 1095 | int notask_error; |
1097 | }; | 1096 | }; |
1098 | 1097 | ||
1099 | static struct pid *task_pid_type(struct task_struct *task, enum pid_type type) | 1098 | static inline |
1099 | struct pid *task_pid_type(struct task_struct *task, enum pid_type type) | ||
1100 | { | 1100 | { |
1101 | struct pid *pid = NULL; | 1101 | if (type != PIDTYPE_PID) |
1102 | if (type == PIDTYPE_PID) | 1102 | task = task->group_leader; |
1103 | pid = task->pids[type].pid; | 1103 | return task->pids[type].pid; |
1104 | else if (type < PIDTYPE_MAX) | ||
1105 | pid = task->group_leader->pids[type].pid; | ||
1106 | return pid; | ||
1107 | } | 1104 | } |
1108 | 1105 | ||
1109 | static int eligible_child(struct wait_opts *wo, struct task_struct *p) | 1106 | static int eligible_pid(struct wait_opts *wo, struct task_struct *p) |
1110 | { | 1107 | { |
1111 | int err; | 1108 | return wo->wo_type == PIDTYPE_MAX || |
1112 | 1109 | task_pid_type(p, wo->wo_type) == wo->wo_pid; | |
1113 | if (wo->wo_type < PIDTYPE_MAX) { | 1110 | } |
1114 | if (task_pid_type(p, wo->wo_type) != wo->wo_pid) | ||
1115 | return 0; | ||
1116 | } | ||
1117 | 1111 | ||
1112 | static int eligible_child(struct wait_opts *wo, struct task_struct *p) | ||
1113 | { | ||
1114 | if (!eligible_pid(wo, p)) | ||
1115 | return 0; | ||
1118 | /* Wait for all children (clone and not) if __WALL is set; | 1116 | /* Wait for all children (clone and not) if __WALL is set; |
1119 | * otherwise, wait for clone children *only* if __WCLONE is | 1117 | * otherwise, wait for clone children *only* if __WCLONE is |
1120 | * set; otherwise, wait for non-clone children *only*. (Note: | 1118 | * set; otherwise, wait for non-clone children *only*. (Note: |
@@ -1124,10 +1122,6 @@ static int eligible_child(struct wait_opts *wo, struct task_struct *p) | |||
1124 | && !(wo->wo_flags & __WALL)) | 1122 | && !(wo->wo_flags & __WALL)) |
1125 | return 0; | 1123 | return 0; |
1126 | 1124 | ||
1127 | err = security_task_wait(p); | ||
1128 | if (err) | ||
1129 | return err; | ||
1130 | |||
1131 | return 1; | 1125 | return 1; |
1132 | } | 1126 | } |
1133 | 1127 | ||
@@ -1140,18 +1134,20 @@ static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p, | |||
1140 | 1134 | ||
1141 | put_task_struct(p); | 1135 | put_task_struct(p); |
1142 | infop = wo->wo_info; | 1136 | infop = wo->wo_info; |
1143 | if (!retval) | 1137 | if (infop) { |
1144 | retval = put_user(SIGCHLD, &infop->si_signo); | 1138 | if (!retval) |
1145 | if (!retval) | 1139 | retval = put_user(SIGCHLD, &infop->si_signo); |
1146 | retval = put_user(0, &infop->si_errno); | 1140 | if (!retval) |
1147 | if (!retval) | 1141 | retval = put_user(0, &infop->si_errno); |
1148 | retval = put_user((short)why, &infop->si_code); | 1142 | if (!retval) |
1149 | if (!retval) | 1143 | retval = put_user((short)why, &infop->si_code); |
1150 | retval = put_user(pid, &infop->si_pid); | 1144 | if (!retval) |
1151 | if (!retval) | 1145 | retval = put_user(pid, &infop->si_pid); |
1152 | retval = put_user(uid, &infop->si_uid); | 1146 | if (!retval) |
1153 | if (!retval) | 1147 | retval = put_user(uid, &infop->si_uid); |
1154 | retval = put_user(status, &infop->si_status); | 1148 | if (!retval) |
1149 | retval = put_user(status, &infop->si_status); | ||
1150 | } | ||
1155 | if (!retval) | 1151 | if (!retval) |
1156 | retval = pid; | 1152 | retval = pid; |
1157 | return retval; | 1153 | return retval; |
@@ -1208,6 +1204,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1208 | if (likely(!traced) && likely(!task_detached(p))) { | 1204 | if (likely(!traced) && likely(!task_detached(p))) { |
1209 | struct signal_struct *psig; | 1205 | struct signal_struct *psig; |
1210 | struct signal_struct *sig; | 1206 | struct signal_struct *sig; |
1207 | unsigned long maxrss; | ||
1211 | 1208 | ||
1212 | /* | 1209 | /* |
1213 | * The resource counters for the group leader are in its | 1210 | * The resource counters for the group leader are in its |
@@ -1256,6 +1253,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
1256 | psig->coublock += | 1253 | psig->coublock += |
1257 | task_io_get_oublock(p) + | 1254 | task_io_get_oublock(p) + |
1258 | sig->oublock + sig->coublock; | 1255 | sig->oublock + sig->coublock; |
1256 | maxrss = max(sig->maxrss, sig->cmaxrss); | ||
1257 | if (psig->cmaxrss < maxrss) | ||
1258 | psig->cmaxrss = maxrss; | ||
1259 | task_io_accounting_add(&psig->ioac, &p->ioac); | 1259 | task_io_accounting_add(&psig->ioac, &p->ioac); |
1260 | task_io_accounting_add(&psig->ioac, &sig->ioac); | 1260 | task_io_accounting_add(&psig->ioac, &sig->ioac); |
1261 | spin_unlock_irq(&p->real_parent->sighand->siglock); | 1261 | spin_unlock_irq(&p->real_parent->sighand->siglock); |
@@ -1477,13 +1477,14 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) | |||
1477 | * then ->notask_error is 0 if @p is an eligible child, | 1477 | * then ->notask_error is 0 if @p is an eligible child, |
1478 | * or another error from security_task_wait(), or still -ECHILD. | 1478 | * or another error from security_task_wait(), or still -ECHILD. |
1479 | */ | 1479 | */ |
1480 | static int wait_consider_task(struct wait_opts *wo, struct task_struct *parent, | 1480 | static int wait_consider_task(struct wait_opts *wo, int ptrace, |
1481 | int ptrace, struct task_struct *p) | 1481 | struct task_struct *p) |
1482 | { | 1482 | { |
1483 | int ret = eligible_child(wo, p); | 1483 | int ret = eligible_child(wo, p); |
1484 | if (!ret) | 1484 | if (!ret) |
1485 | return ret; | 1485 | return ret; |
1486 | 1486 | ||
1487 | ret = security_task_wait(p); | ||
1487 | if (unlikely(ret < 0)) { | 1488 | if (unlikely(ret < 0)) { |
1488 | /* | 1489 | /* |
1489 | * If we have not yet seen any eligible child, | 1490 | * If we have not yet seen any eligible child, |
@@ -1545,7 +1546,7 @@ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) | |||
1545 | * Do not consider detached threads. | 1546 | * Do not consider detached threads. |
1546 | */ | 1547 | */ |
1547 | if (!task_detached(p)) { | 1548 | if (!task_detached(p)) { |
1548 | int ret = wait_consider_task(wo, tsk, 0, p); | 1549 | int ret = wait_consider_task(wo, 0, p); |
1549 | if (ret) | 1550 | if (ret) |
1550 | return ret; | 1551 | return ret; |
1551 | } | 1552 | } |
@@ -1559,7 +1560,7 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) | |||
1559 | struct task_struct *p; | 1560 | struct task_struct *p; |
1560 | 1561 | ||
1561 | list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { | 1562 | list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { |
1562 | int ret = wait_consider_task(wo, tsk, 1, p); | 1563 | int ret = wait_consider_task(wo, 1, p); |
1563 | if (ret) | 1564 | if (ret) |
1564 | return ret; | 1565 | return ret; |
1565 | } | 1566 | } |
@@ -1567,15 +1568,38 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) | |||
1567 | return 0; | 1568 | return 0; |
1568 | } | 1569 | } |
1569 | 1570 | ||
1571 | static int child_wait_callback(wait_queue_t *wait, unsigned mode, | ||
1572 | int sync, void *key) | ||
1573 | { | ||
1574 | struct wait_opts *wo = container_of(wait, struct wait_opts, | ||
1575 | child_wait); | ||
1576 | struct task_struct *p = key; | ||
1577 | |||
1578 | if (!eligible_pid(wo, p)) | ||
1579 | return 0; | ||
1580 | |||
1581 | if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent) | ||
1582 | return 0; | ||
1583 | |||
1584 | return default_wake_function(wait, mode, sync, key); | ||
1585 | } | ||
1586 | |||
1587 | void __wake_up_parent(struct task_struct *p, struct task_struct *parent) | ||
1588 | { | ||
1589 | __wake_up_sync_key(&parent->signal->wait_chldexit, | ||
1590 | TASK_INTERRUPTIBLE, 1, p); | ||
1591 | } | ||
1592 | |||
1570 | static long do_wait(struct wait_opts *wo) | 1593 | static long do_wait(struct wait_opts *wo) |
1571 | { | 1594 | { |
1572 | DECLARE_WAITQUEUE(wait, current); | ||
1573 | struct task_struct *tsk; | 1595 | struct task_struct *tsk; |
1574 | int retval; | 1596 | int retval; |
1575 | 1597 | ||
1576 | trace_sched_process_wait(wo->wo_pid); | 1598 | trace_sched_process_wait(wo->wo_pid); |
1577 | 1599 | ||
1578 | add_wait_queue(¤t->signal->wait_chldexit,&wait); | 1600 | init_waitqueue_func_entry(&wo->child_wait, child_wait_callback); |
1601 | wo->child_wait.private = current; | ||
1602 | add_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); | ||
1579 | repeat: | 1603 | repeat: |
1580 | /* | 1604 | /* |
1581 | * If there is nothing that can match our criteria just get out. | 1605 | * If there is nothing that can match our criteria just get out. |
@@ -1616,32 +1640,7 @@ notask: | |||
1616 | } | 1640 | } |
1617 | end: | 1641 | end: |
1618 | __set_current_state(TASK_RUNNING); | 1642 | __set_current_state(TASK_RUNNING); |
1619 | remove_wait_queue(¤t->signal->wait_chldexit,&wait); | 1643 | remove_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); |
1620 | if (wo->wo_info) { | ||
1621 | struct siginfo __user *infop = wo->wo_info; | ||
1622 | |||
1623 | if (retval > 0) | ||
1624 | retval = 0; | ||
1625 | else { | ||
1626 | /* | ||
1627 | * For a WNOHANG return, clear out all the fields | ||
1628 | * we would set so the user can easily tell the | ||
1629 | * difference. | ||
1630 | */ | ||
1631 | if (!retval) | ||
1632 | retval = put_user(0, &infop->si_signo); | ||
1633 | if (!retval) | ||
1634 | retval = put_user(0, &infop->si_errno); | ||
1635 | if (!retval) | ||
1636 | retval = put_user(0, &infop->si_code); | ||
1637 | if (!retval) | ||
1638 | retval = put_user(0, &infop->si_pid); | ||
1639 | if (!retval) | ||
1640 | retval = put_user(0, &infop->si_uid); | ||
1641 | if (!retval) | ||
1642 | retval = put_user(0, &infop->si_status); | ||
1643 | } | ||
1644 | } | ||
1645 | return retval; | 1644 | return retval; |
1646 | } | 1645 | } |
1647 | 1646 | ||
@@ -1686,6 +1685,29 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, | |||
1686 | wo.wo_stat = NULL; | 1685 | wo.wo_stat = NULL; |
1687 | wo.wo_rusage = ru; | 1686 | wo.wo_rusage = ru; |
1688 | ret = do_wait(&wo); | 1687 | ret = do_wait(&wo); |
1688 | |||
1689 | if (ret > 0) { | ||
1690 | ret = 0; | ||
1691 | } else if (infop) { | ||
1692 | /* | ||
1693 | * For a WNOHANG return, clear out all the fields | ||
1694 | * we would set so the user can easily tell the | ||
1695 | * difference. | ||
1696 | */ | ||
1697 | if (!ret) | ||
1698 | ret = put_user(0, &infop->si_signo); | ||
1699 | if (!ret) | ||
1700 | ret = put_user(0, &infop->si_errno); | ||
1701 | if (!ret) | ||
1702 | ret = put_user(0, &infop->si_code); | ||
1703 | if (!ret) | ||
1704 | ret = put_user(0, &infop->si_pid); | ||
1705 | if (!ret) | ||
1706 | ret = put_user(0, &infop->si_uid); | ||
1707 | if (!ret) | ||
1708 | ret = put_user(0, &infop->si_status); | ||
1709 | } | ||
1710 | |||
1689 | put_pid(pid); | 1711 | put_pid(pid); |
1690 | 1712 | ||
1691 | /* avoid REGPARM breakage on x86: */ | 1713 | /* avoid REGPARM breakage on x86: */ |
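The siginfo clearing that moves into the waitid() syscall path above is what lets a caller distinguish "WNOHANG and nothing ready" (return 0 with a zeroed si_pid) from a real child notification. A small usage sketch, relying on the behaviour documented for Linux waitid(2):

#include <stdio.h>
#include <sys/wait.h>

int main(void)
{
        siginfo_t info = { 0 };

        if (waitid(P_ALL, 0, &info, WEXITED | WNOHANG) == 0) {
                if (info.si_pid == 0)
                        printf("no child was ready\n");
                else
                        printf("child %d exited, status %d\n",
                               (int)info.si_pid, info.si_status);
        } else {
                perror("waitid");       /* e.g. ECHILD: no children at all */
        }
        return 0;
}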
diff --git a/kernel/fork.c b/kernel/fork.c index bfee931ee3fb..166b8c49257c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/ftrace.h> | 49 | #include <linux/ftrace.h> |
50 | #include <linux/profile.h> | 50 | #include <linux/profile.h> |
51 | #include <linux/rmap.h> | 51 | #include <linux/rmap.h> |
52 | #include <linux/ksm.h> | ||
52 | #include <linux/acct.h> | 53 | #include <linux/acct.h> |
53 | #include <linux/tsacct_kern.h> | 54 | #include <linux/tsacct_kern.h> |
54 | #include <linux/cn_proc.h> | 55 | #include <linux/cn_proc.h> |
@@ -61,7 +62,8 @@ | |||
61 | #include <linux/blkdev.h> | 62 | #include <linux/blkdev.h> |
62 | #include <linux/fs_struct.h> | 63 | #include <linux/fs_struct.h> |
63 | #include <linux/magic.h> | 64 | #include <linux/magic.h> |
64 | #include <linux/perf_counter.h> | 65 | #include <linux/perf_event.h> |
66 | #include <linux/posix-timers.h> | ||
65 | 67 | ||
66 | #include <asm/pgtable.h> | 68 | #include <asm/pgtable.h> |
67 | #include <asm/pgalloc.h> | 69 | #include <asm/pgalloc.h> |
@@ -89,7 +91,7 @@ int nr_processes(void) | |||
89 | int cpu; | 91 | int cpu; |
90 | int total = 0; | 92 | int total = 0; |
91 | 93 | ||
92 | for_each_online_cpu(cpu) | 94 | for_each_possible_cpu(cpu) |
93 | total += per_cpu(process_counts, cpu); | 95 | total += per_cpu(process_counts, cpu); |
94 | 96 | ||
95 | return total; | 97 | return total; |
@@ -136,9 +138,17 @@ struct kmem_cache *vm_area_cachep; | |||
136 | /* SLAB cache for mm_struct structures (tsk->mm) */ | 138 | /* SLAB cache for mm_struct structures (tsk->mm) */ |
137 | static struct kmem_cache *mm_cachep; | 139 | static struct kmem_cache *mm_cachep; |
138 | 140 | ||
141 | static void account_kernel_stack(struct thread_info *ti, int account) | ||
142 | { | ||
143 | struct zone *zone = page_zone(virt_to_page(ti)); | ||
144 | |||
145 | mod_zone_page_state(zone, NR_KERNEL_STACK, account); | ||
146 | } | ||
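account_kernel_stack() keeps the new per-zone NR_KERNEL_STACK counter in step with thread creation and teardown; the same series surfaces it as a "KernelStack:" line in /proc/meminfo (the exact field name is an assumption here). A trivial reader:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/meminfo", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                if (strncmp(line, "KernelStack:", 12) == 0) {
                        fputs(line, stdout);    /* e.g. "KernelStack:      1234 kB" */
                        break;
                }
        }
        fclose(f);
        return 0;
}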
147 | |||
139 | void free_task(struct task_struct *tsk) | 148 | void free_task(struct task_struct *tsk) |
140 | { | 149 | { |
141 | prop_local_destroy_single(&tsk->dirties); | 150 | prop_local_destroy_single(&tsk->dirties); |
151 | account_kernel_stack(tsk->stack, -1); | ||
142 | free_thread_info(tsk->stack); | 152 | free_thread_info(tsk->stack); |
143 | rt_mutex_debug_task_free(tsk); | 153 | rt_mutex_debug_task_free(tsk); |
144 | ftrace_graph_exit_task(tsk); | 154 | ftrace_graph_exit_task(tsk); |
@@ -253,6 +263,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | |||
253 | tsk->btrace_seq = 0; | 263 | tsk->btrace_seq = 0; |
254 | #endif | 264 | #endif |
255 | tsk->splice_pipe = NULL; | 265 | tsk->splice_pipe = NULL; |
266 | |||
267 | account_kernel_stack(ti, 1); | ||
268 | |||
256 | return tsk; | 269 | return tsk; |
257 | 270 | ||
258 | out: | 271 | out: |
@@ -288,6 +301,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
288 | rb_link = &mm->mm_rb.rb_node; | 301 | rb_link = &mm->mm_rb.rb_node; |
289 | rb_parent = NULL; | 302 | rb_parent = NULL; |
290 | pprev = &mm->mmap; | 303 | pprev = &mm->mmap; |
304 | retval = ksm_fork(mm, oldmm); | ||
305 | if (retval) | ||
306 | goto out; | ||
291 | 307 | ||
292 | for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { | 308 | for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { |
293 | struct file *file; | 309 | struct file *file; |
@@ -418,22 +434,30 @@ __setup("coredump_filter=", coredump_filter_setup); | |||
418 | 434 | ||
419 | #include <linux/init_task.h> | 435 | #include <linux/init_task.h> |
420 | 436 | ||
437 | static void mm_init_aio(struct mm_struct *mm) | ||
438 | { | ||
439 | #ifdef CONFIG_AIO | ||
440 | spin_lock_init(&mm->ioctx_lock); | ||
441 | INIT_HLIST_HEAD(&mm->ioctx_list); | ||
442 | #endif | ||
443 | } | ||
444 | |||
421 | static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | 445 | static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) |
422 | { | 446 | { |
423 | atomic_set(&mm->mm_users, 1); | 447 | atomic_set(&mm->mm_users, 1); |
424 | atomic_set(&mm->mm_count, 1); | 448 | atomic_set(&mm->mm_count, 1); |
425 | init_rwsem(&mm->mmap_sem); | 449 | init_rwsem(&mm->mmap_sem); |
426 | INIT_LIST_HEAD(&mm->mmlist); | 450 | INIT_LIST_HEAD(&mm->mmlist); |
427 | mm->flags = (current->mm) ? current->mm->flags : default_dump_filter; | 451 | mm->flags = (current->mm) ? |
452 | (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; | ||
428 | mm->core_state = NULL; | 453 | mm->core_state = NULL; |
429 | mm->nr_ptes = 0; | 454 | mm->nr_ptes = 0; |
430 | set_mm_counter(mm, file_rss, 0); | 455 | set_mm_counter(mm, file_rss, 0); |
431 | set_mm_counter(mm, anon_rss, 0); | 456 | set_mm_counter(mm, anon_rss, 0); |
432 | spin_lock_init(&mm->page_table_lock); | 457 | spin_lock_init(&mm->page_table_lock); |
433 | spin_lock_init(&mm->ioctx_lock); | ||
434 | INIT_HLIST_HEAD(&mm->ioctx_list); | ||
435 | mm->free_area_cache = TASK_UNMAPPED_BASE; | 458 | mm->free_area_cache = TASK_UNMAPPED_BASE; |
436 | mm->cached_hole_size = ~0UL; | 459 | mm->cached_hole_size = ~0UL; |
460 | mm_init_aio(mm); | ||
437 | mm_init_owner(mm, p); | 461 | mm_init_owner(mm, p); |
438 | 462 | ||
439 | if (likely(!mm_alloc_pgd(mm))) { | 463 | if (likely(!mm_alloc_pgd(mm))) { |
@@ -485,6 +509,7 @@ void mmput(struct mm_struct *mm) | |||
485 | 509 | ||
486 | if (atomic_dec_and_test(&mm->mm_users)) { | 510 | if (atomic_dec_and_test(&mm->mm_users)) { |
487 | exit_aio(mm); | 511 | exit_aio(mm); |
512 | ksm_exit(mm); | ||
488 | exit_mmap(mm); | 513 | exit_mmap(mm); |
489 | set_mm_exe_file(mm, NULL); | 514 | set_mm_exe_file(mm, NULL); |
490 | if (!list_empty(&mm->mmlist)) { | 515 | if (!list_empty(&mm->mmlist)) { |
@@ -493,6 +518,8 @@ void mmput(struct mm_struct *mm) | |||
493 | spin_unlock(&mmlist_lock); | 518 | spin_unlock(&mmlist_lock); |
494 | } | 519 | } |
495 | put_swap_token(mm); | 520 | put_swap_token(mm); |
521 | if (mm->binfmt) | ||
522 | module_put(mm->binfmt->module); | ||
496 | mmdrop(mm); | 523 | mmdrop(mm); |
497 | } | 524 | } |
498 | } | 525 | } |
@@ -543,12 +570,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) | |||
543 | 570 | ||
544 | /* Get rid of any futexes when releasing the mm */ | 571 | /* Get rid of any futexes when releasing the mm */ |
545 | #ifdef CONFIG_FUTEX | 572 | #ifdef CONFIG_FUTEX |
546 | if (unlikely(tsk->robust_list)) | 573 | if (unlikely(tsk->robust_list)) { |
547 | exit_robust_list(tsk); | 574 | exit_robust_list(tsk); |
575 | tsk->robust_list = NULL; | ||
576 | } | ||
548 | #ifdef CONFIG_COMPAT | 577 | #ifdef CONFIG_COMPAT |
549 | if (unlikely(tsk->compat_robust_list)) | 578 | if (unlikely(tsk->compat_robust_list)) { |
550 | compat_exit_robust_list(tsk); | 579 | compat_exit_robust_list(tsk); |
580 | tsk->compat_robust_list = NULL; | ||
581 | } | ||
551 | #endif | 582 | #endif |
583 | if (unlikely(!list_empty(&tsk->pi_state_list))) | ||
584 | exit_pi_state_list(tsk); | ||
552 | #endif | 585 | #endif |
553 | 586 | ||
554 | /* Get rid of any cached register state */ | 587 | /* Get rid of any cached register state */ |
@@ -618,9 +651,14 @@ struct mm_struct *dup_mm(struct task_struct *tsk) | |||
618 | mm->hiwater_rss = get_mm_rss(mm); | 651 | mm->hiwater_rss = get_mm_rss(mm); |
619 | mm->hiwater_vm = mm->total_vm; | 652 | mm->hiwater_vm = mm->total_vm; |
620 | 653 | ||
654 | if (mm->binfmt && !try_module_get(mm->binfmt->module)) | ||
655 | goto free_pt; | ||
656 | |||
621 | return mm; | 657 | return mm; |
622 | 658 | ||
623 | free_pt: | 659 | free_pt: |
660 | /* don't put binfmt in mmput, we haven't got module yet */ | ||
661 | mm->binfmt = NULL; | ||
624 | mmput(mm); | 662 | mmput(mm); |
625 | 663 | ||
626 | fail_nomem: | 664 | fail_nomem: |
@@ -788,10 +826,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) | |||
788 | thread_group_cputime_init(sig); | 826 | thread_group_cputime_init(sig); |
789 | 827 | ||
790 | /* Expiration times and increments. */ | 828 | /* Expiration times and increments. */ |
791 | sig->it_virt_expires = cputime_zero; | 829 | sig->it[CPUCLOCK_PROF].expires = cputime_zero; |
792 | sig->it_virt_incr = cputime_zero; | 830 | sig->it[CPUCLOCK_PROF].incr = cputime_zero; |
793 | sig->it_prof_expires = cputime_zero; | 831 | sig->it[CPUCLOCK_VIRT].expires = cputime_zero; |
794 | sig->it_prof_incr = cputime_zero; | 832 | sig->it[CPUCLOCK_VIRT].incr = cputime_zero; |
795 | 833 | ||
796 | /* Cached expiration times. */ | 834 | /* Cached expiration times. */ |
797 | sig->cputime_expires.prof_exp = cputime_zero; | 835 | sig->cputime_expires.prof_exp = cputime_zero; |
@@ -849,6 +887,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
849 | sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; | 887 | sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; |
850 | sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; | 888 | sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; |
851 | sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; | 889 | sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; |
890 | sig->maxrss = sig->cmaxrss = 0; | ||
852 | task_io_accounting_init(&sig->ioac); | 891 | task_io_accounting_init(&sig->ioac); |
853 | sig->sum_sched_runtime = 0; | 892 | sig->sum_sched_runtime = 0; |
854 | taskstats_tgid_init(sig); | 893 | taskstats_tgid_init(sig); |
@@ -863,6 +902,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
863 | 902 | ||
864 | tty_audit_fork(sig); | 903 | tty_audit_fork(sig); |
865 | 904 | ||
905 | sig->oom_adj = current->signal->oom_adj; | ||
906 | |||
866 | return 0; | 907 | return 0; |
867 | } | 908 | } |
868 | 909 | ||
@@ -958,6 +999,16 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
958 | if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) | 999 | if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) |
959 | return ERR_PTR(-EINVAL); | 1000 | return ERR_PTR(-EINVAL); |
960 | 1001 | ||
1002 | /* | ||
1003 | * Siblings of global init remain as zombies on exit since they are | ||
1004 | * not reaped by their parent (swapper). To solve this and to avoid | ||
1005 | * multi-rooted process trees, prevent global and container-inits | ||
1006 | * from creating siblings. | ||
1007 | */ | ||
1008 | if ((clone_flags & CLONE_PARENT) && | ||
1009 | current->signal->flags & SIGNAL_UNKILLABLE) | ||
1010 | return ERR_PTR(-EINVAL); | ||
1011 | |||
961 | retval = security_task_create(clone_flags); | 1012 | retval = security_task_create(clone_flags); |
962 | if (retval) | 1013 | if (retval) |
963 | goto fork_out; | 1014 | goto fork_out; |
@@ -999,9 +1050,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
999 | if (!try_module_get(task_thread_info(p)->exec_domain->module)) | 1050 | if (!try_module_get(task_thread_info(p)->exec_domain->module)) |
1000 | goto bad_fork_cleanup_count; | 1051 | goto bad_fork_cleanup_count; |
1001 | 1052 | ||
1002 | if (p->binfmt && !try_module_get(p->binfmt->module)) | ||
1003 | goto bad_fork_cleanup_put_domain; | ||
1004 | |||
1005 | p->did_exec = 0; | 1053 | p->did_exec = 0; |
1006 | delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ | 1054 | delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ |
1007 | copy_flags(clone_flags, p); | 1055 | copy_flags(clone_flags, p); |
@@ -1075,10 +1123,12 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1075 | 1123 | ||
1076 | p->bts = NULL; | 1124 | p->bts = NULL; |
1077 | 1125 | ||
1126 | p->stack_start = stack_start; | ||
1127 | |||
1078 | /* Perform scheduler related setup. Assign this task to a CPU. */ | 1128 | /* Perform scheduler related setup. Assign this task to a CPU. */ |
1079 | sched_fork(p, clone_flags); | 1129 | sched_fork(p, clone_flags); |
1080 | 1130 | ||
1081 | retval = perf_counter_init_task(p); | 1131 | retval = perf_event_init_task(p); |
1082 | if (retval) | 1132 | if (retval) |
1083 | goto bad_fork_cleanup_policy; | 1133 | goto bad_fork_cleanup_policy; |
1084 | 1134 | ||
@@ -1253,7 +1303,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1253 | write_unlock_irq(&tasklist_lock); | 1303 | write_unlock_irq(&tasklist_lock); |
1254 | proc_fork_connector(p); | 1304 | proc_fork_connector(p); |
1255 | cgroup_post_fork(p); | 1305 | cgroup_post_fork(p); |
1256 | perf_counter_fork(p); | 1306 | perf_event_fork(p); |
1257 | return p; | 1307 | return p; |
1258 | 1308 | ||
1259 | bad_fork_free_pid: | 1309 | bad_fork_free_pid: |
@@ -1280,16 +1330,13 @@ bad_fork_cleanup_semundo: | |||
1280 | bad_fork_cleanup_audit: | 1330 | bad_fork_cleanup_audit: |
1281 | audit_free(p); | 1331 | audit_free(p); |
1282 | bad_fork_cleanup_policy: | 1332 | bad_fork_cleanup_policy: |
1283 | perf_counter_free_task(p); | 1333 | perf_event_free_task(p); |
1284 | #ifdef CONFIG_NUMA | 1334 | #ifdef CONFIG_NUMA |
1285 | mpol_put(p->mempolicy); | 1335 | mpol_put(p->mempolicy); |
1286 | bad_fork_cleanup_cgroup: | 1336 | bad_fork_cleanup_cgroup: |
1287 | #endif | 1337 | #endif |
1288 | cgroup_exit(p, cgroup_callbacks_done); | 1338 | cgroup_exit(p, cgroup_callbacks_done); |
1289 | delayacct_tsk_free(p); | 1339 | delayacct_tsk_free(p); |
1290 | if (p->binfmt) | ||
1291 | module_put(p->binfmt->module); | ||
1292 | bad_fork_cleanup_put_domain: | ||
1293 | module_put(task_thread_info(p)->exec_domain->module); | 1340 | module_put(task_thread_info(p)->exec_domain->module); |
1294 | bad_fork_cleanup_count: | 1341 | bad_fork_cleanup_count: |
1295 | atomic_dec(&p->cred->user->processes); | 1342 | atomic_dec(&p->cred->user->processes); |
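The copy_process() hunk above rejects CLONE_PARENT when the caller has SIGNAL_UNKILLABLE set, i.e. a global or container init, since the resulting sibling of init would never be reaped. A hedged userspace illustration of the visible effect; the EINVAL path only triggers when this is run as such an init, and the stack handling is simplified:

	#define _GNU_SOURCE
	#include <errno.h>
	#include <sched.h>
	#include <signal.h>
	#include <stdio.h>

	static char child_stack[64 * 1024];

	static int child_fn(void *arg)
	{
		return 0;
	}

	int main(void)
	{
		int pid;

		/* From PID 1 (global or container init) this now fails with EINVAL;
		 * from an ordinary process it creates a sibling as before. */
		pid = clone(child_fn, child_stack + sizeof(child_stack),
			    CLONE_PARENT | SIGCHLD, NULL);
		if (pid < 0)
			perror("clone(CLONE_PARENT)");
		else
			printf("created sibling %d\n", pid);
		return 0;
	}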
diff --git a/kernel/futex.c b/kernel/futex.c index 248dd119a86e..fb65e822fc41 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -89,36 +89,36 @@ struct futex_pi_state { | |||
89 | union futex_key key; | 89 | union futex_key key; |
90 | }; | 90 | }; |
91 | 91 | ||
92 | /* | 92 | /** |
93 | * We use this hashed waitqueue instead of a normal wait_queue_t, so | 93 | * struct futex_q - The hashed futex queue entry, one per waiting task |
94 | * @task: the task waiting on the futex | ||
95 | * @lock_ptr: the hash bucket lock | ||
96 | * @key: the key the futex is hashed on | ||
97 | * @pi_state: optional priority inheritance state | ||
98 | * @rt_waiter: rt_waiter storage for use with requeue_pi | ||
99 | * @requeue_pi_key: the requeue_pi target futex key | ||
100 | * @bitset: bitset for the optional bitmasked wakeup | ||
101 | * | ||
102 | * We use this hashed waitqueue, instead of a normal wait_queue_t, so | ||
94 | * we can wake only the relevant ones (hashed queues may be shared). | 103 | * we can wake only the relevant ones (hashed queues may be shared). |
95 | * | 104 | * |
96 | * A futex_q has a woken state, just like tasks have TASK_RUNNING. | 105 | * A futex_q has a woken state, just like tasks have TASK_RUNNING. |
97 | * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. | 106 | * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. |
98 | * The order of wakeup is always to make the first condition true, then | 107 | * The order of wakeup is always to make the first condition true, then |
99 | * wake up q->waiter, then make the second condition true. | 108 | * the second. |
109 | * | ||
110 | * PI futexes are typically woken before they are removed from the hash list via | ||
111 | * the rt_mutex code. See unqueue_me_pi(). | ||
100 | */ | 112 | */ |
101 | struct futex_q { | 113 | struct futex_q { |
102 | struct plist_node list; | 114 | struct plist_node list; |
103 | /* Waiter reference */ | ||
104 | struct task_struct *task; | ||
105 | 115 | ||
106 | /* Which hash list lock to use: */ | 116 | struct task_struct *task; |
107 | spinlock_t *lock_ptr; | 117 | spinlock_t *lock_ptr; |
108 | |||
109 | /* Key which the futex is hashed on: */ | ||
110 | union futex_key key; | 118 | union futex_key key; |
111 | |||
112 | /* Optional priority inheritance state: */ | ||
113 | struct futex_pi_state *pi_state; | 119 | struct futex_pi_state *pi_state; |
114 | |||
115 | /* rt_waiter storage for requeue_pi: */ | ||
116 | struct rt_mutex_waiter *rt_waiter; | 120 | struct rt_mutex_waiter *rt_waiter; |
117 | |||
118 | /* The expected requeue pi target futex key: */ | ||
119 | union futex_key *requeue_pi_key; | 121 | union futex_key *requeue_pi_key; |
120 | |||
121 | /* Bitset for the optional bitmasked wakeup */ | ||
122 | u32 bitset; | 122 | u32 bitset; |
123 | }; | 123 | }; |
124 | 124 | ||
@@ -150,7 +150,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key) | |||
150 | */ | 150 | */ |
151 | static inline int match_futex(union futex_key *key1, union futex_key *key2) | 151 | static inline int match_futex(union futex_key *key1, union futex_key *key2) |
152 | { | 152 | { |
153 | return (key1->both.word == key2->both.word | 153 | return (key1 && key2 |
154 | && key1->both.word == key2->both.word | ||
154 | && key1->both.ptr == key2->both.ptr | 155 | && key1->both.ptr == key2->both.ptr |
155 | && key1->both.offset == key2->both.offset); | 156 | && key1->both.offset == key2->both.offset); |
156 | } | 157 | } |
@@ -198,11 +199,12 @@ static void drop_futex_key_refs(union futex_key *key) | |||
198 | } | 199 | } |
199 | 200 | ||
200 | /** | 201 | /** |
201 | * get_futex_key - Get parameters which are the keys for a futex. | 202 | * get_futex_key() - Get parameters which are the keys for a futex |
202 | * @uaddr: virtual address of the futex | 203 | * @uaddr: virtual address of the futex |
203 | * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED | 204 | * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED |
204 | * @key: address where result is stored. | 205 | * @key: address where result is stored. |
205 | * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE) | 206 | * @rw: mapping needs to be read/write (values: VERIFY_READ, |
207 | * VERIFY_WRITE) | ||
206 | * | 208 | * |
207 | * Returns a negative error code or 0 | 209 | * Returns a negative error code or 0 |
208 | * The key words are stored in *key on success. | 210 | * The key words are stored in *key on success. |
@@ -288,8 +290,8 @@ void put_futex_key(int fshared, union futex_key *key) | |||
288 | drop_futex_key_refs(key); | 290 | drop_futex_key_refs(key); |
289 | } | 291 | } |
290 | 292 | ||
291 | /* | 293 | /** |
292 | * fault_in_user_writeable - fault in user address and verify RW access | 294 | * fault_in_user_writeable() - Fault in user address and verify RW access |
293 | * @uaddr: pointer to faulting user space address | 295 | * @uaddr: pointer to faulting user space address |
294 | * | 296 | * |
295 | * Slow path to fixup the fault we just took in the atomic write | 297 | * Slow path to fixup the fault we just took in the atomic write |
@@ -309,8 +311,8 @@ static int fault_in_user_writeable(u32 __user *uaddr) | |||
309 | 311 | ||
310 | /** | 312 | /** |
311 | * futex_top_waiter() - Return the highest priority waiter on a futex | 313 | * futex_top_waiter() - Return the highest priority waiter on a futex |
312 | * @hb: the hash bucket the futex_q's reside in | 314 | * @hb: the hash bucket the futex_q's reside in |
313 | * @key: the futex key (to distinguish it from other futex futex_q's) | 315 | * @key: the futex key (to distinguish it from other futex futex_q's) |
314 | * | 316 | * |
315 | * Must be called with the hb lock held. | 317 | * Must be called with the hb lock held. |
316 | */ | 318 | */ |
@@ -588,7 +590,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, | |||
588 | } | 590 | } |
589 | 591 | ||
590 | /** | 592 | /** |
591 | * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex | 593 | * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex |
592 | * @uaddr: the pi futex user address | 594 | * @uaddr: the pi futex user address |
593 | * @hb: the pi futex hash bucket | 595 | * @hb: the pi futex hash bucket |
594 | * @key: the futex key associated with uaddr and hb | 596 | * @key: the futex key associated with uaddr and hb |
@@ -915,8 +917,8 @@ retry: | |||
915 | hb1 = hash_futex(&key1); | 917 | hb1 = hash_futex(&key1); |
916 | hb2 = hash_futex(&key2); | 918 | hb2 = hash_futex(&key2); |
917 | 919 | ||
918 | double_lock_hb(hb1, hb2); | ||
919 | retry_private: | 920 | retry_private: |
921 | double_lock_hb(hb1, hb2); | ||
920 | op_ret = futex_atomic_op_inuser(op, uaddr2); | 922 | op_ret = futex_atomic_op_inuser(op, uaddr2); |
921 | if (unlikely(op_ret < 0)) { | 923 | if (unlikely(op_ret < 0)) { |
922 | 924 | ||
@@ -1011,9 +1013,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, | |||
1011 | 1013 | ||
1012 | /** | 1014 | /** |
1013 | * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue | 1015 | * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue |
1014 | * q: the futex_q | 1016 | * @q: the futex_q |
1015 | * key: the key of the requeue target futex | 1017 | * @key: the key of the requeue target futex |
1016 | * hb: the hash_bucket of the requeue target futex | 1018 | * @hb: the hash_bucket of the requeue target futex |
1017 | * | 1019 | * |
1018 | * During futex_requeue, with requeue_pi=1, it is possible to acquire the | 1020 | * During futex_requeue, with requeue_pi=1, it is possible to acquire the |
1019 | * target futex if it is uncontended or via a lock steal. Set the futex_q key | 1021 | * target futex if it is uncontended or via a lock steal. Set the futex_q key |
@@ -1027,7 +1029,6 @@ static inline | |||
1027 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, | 1029 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, |
1028 | struct futex_hash_bucket *hb) | 1030 | struct futex_hash_bucket *hb) |
1029 | { | 1031 | { |
1030 | drop_futex_key_refs(&q->key); | ||
1031 | get_futex_key_refs(key); | 1032 | get_futex_key_refs(key); |
1032 | q->key = *key; | 1033 | q->key = *key; |
1033 | 1034 | ||
@@ -1225,6 +1226,7 @@ retry_private: | |||
1225 | */ | 1226 | */ |
1226 | if (ret == 1) { | 1227 | if (ret == 1) { |
1227 | WARN_ON(pi_state); | 1228 | WARN_ON(pi_state); |
1229 | drop_count++; | ||
1228 | task_count++; | 1230 | task_count++; |
1229 | ret = get_futex_value_locked(&curval2, uaddr2); | 1231 | ret = get_futex_value_locked(&curval2, uaddr2); |
1230 | if (!ret) | 1232 | if (!ret) |
@@ -1303,6 +1305,7 @@ retry_private: | |||
1303 | if (ret == 1) { | 1305 | if (ret == 1) { |
1304 | /* We got the lock. */ | 1306 | /* We got the lock. */ |
1305 | requeue_pi_wake_futex(this, &key2, hb2); | 1307 | requeue_pi_wake_futex(this, &key2, hb2); |
1308 | drop_count++; | ||
1306 | continue; | 1309 | continue; |
1307 | } else if (ret) { | 1310 | } else if (ret) { |
1308 | /* -EDEADLK */ | 1311 | /* -EDEADLK */ |
@@ -1350,6 +1353,25 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) | |||
1350 | return hb; | 1353 | return hb; |
1351 | } | 1354 | } |
1352 | 1355 | ||
1356 | static inline void | ||
1357 | queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) | ||
1358 | { | ||
1359 | spin_unlock(&hb->lock); | ||
1360 | drop_futex_key_refs(&q->key); | ||
1361 | } | ||
1362 | |||
1363 | /** | ||
1364 | * queue_me() - Enqueue the futex_q on the futex_hash_bucket | ||
1365 | * @q: The futex_q to enqueue | ||
1366 | * @hb: The destination hash bucket | ||
1367 | * | ||
1368 | * The hb->lock must be held by the caller, and is released here. A call to | ||
1369 | * queue_me() is typically paired with exactly one call to unqueue_me(). The | ||
1370 | * exceptions involve the PI related operations, which may use unqueue_me_pi() | ||
1371 | * or nothing if the unqueue is done as part of the wake process and the unqueue | ||
1372 | * state is implicit in the state of woken task (see futex_wait_requeue_pi() for | ||
1373 | * an example). | ||
1374 | */ | ||
1353 | static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) | 1375 | static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) |
1354 | { | 1376 | { |
1355 | int prio; | 1377 | int prio; |
@@ -1373,19 +1395,17 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) | |||
1373 | spin_unlock(&hb->lock); | 1395 | spin_unlock(&hb->lock); |
1374 | } | 1396 | } |
1375 | 1397 | ||
1376 | static inline void | 1398 | /** |
1377 | queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) | 1399 | * unqueue_me() - Remove the futex_q from its futex_hash_bucket |
1378 | { | 1400 | * @q: The futex_q to unqueue |
1379 | spin_unlock(&hb->lock); | 1401 | * |
1380 | drop_futex_key_refs(&q->key); | 1402 | * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must |
1381 | } | 1403 | * be paired with exactly one earlier call to queue_me(). |
1382 | 1404 | * | |
1383 | /* | 1405 | * Returns: |
1384 | * queue_me and unqueue_me must be called as a pair, each | 1406 | * 1 - if the futex_q was still queued (and we removed unqueued it) |
1385 | * exactly once. They are called with the hashed spinlock held. | 1407 | * 0 - if the futex_q was already removed by the waking thread |
1386 | */ | 1408 | */ |
1387 | |||
1388 | /* Return 1 if we were still queued (ie. 0 means we were woken) */ | ||
1389 | static int unqueue_me(struct futex_q *q) | 1409 | static int unqueue_me(struct futex_q *q) |
1390 | { | 1410 | { |
1391 | spinlock_t *lock_ptr; | 1411 | spinlock_t *lock_ptr; |
@@ -1638,17 +1658,14 @@ out: | |||
1638 | static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, | 1658 | static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, |
1639 | struct hrtimer_sleeper *timeout) | 1659 | struct hrtimer_sleeper *timeout) |
1640 | { | 1660 | { |
1641 | queue_me(q, hb); | ||
1642 | |||
1643 | /* | 1661 | /* |
1644 | * There might have been scheduling since the queue_me(), as we | 1662 | * The task state is guaranteed to be set before another task can |
1645 | * cannot hold a spinlock across the get_user() in case it | 1663 | * wake it. set_current_state() is implemented using set_mb() and |
1646 | * faults, and we cannot just set TASK_INTERRUPTIBLE state when | 1664 | * queue_me() calls spin_unlock() upon completion, both serializing |
1647 | * queueing ourselves into the futex hash. This code thus has to | 1665 | * access to the hash list and forcing another memory barrier. |
1648 | * rely on the futex_wake() code removing us from hash when it | ||
1649 | * wakes us up. | ||
1650 | */ | 1666 | */ |
1651 | set_current_state(TASK_INTERRUPTIBLE); | 1667 | set_current_state(TASK_INTERRUPTIBLE); |
1668 | queue_me(q, hb); | ||
1652 | 1669 | ||
1653 | /* Arm the timer */ | 1670 | /* Arm the timer */ |
1654 | if (timeout) { | 1671 | if (timeout) { |
@@ -1658,8 +1675,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, | |||
1658 | } | 1675 | } |
1659 | 1676 | ||
1660 | /* | 1677 | /* |
1661 | * !plist_node_empty() is safe here without any lock. | 1678 | * If we have been removed from the hash list, then another task |
1662 | * q.lock_ptr != 0 is not safe, because of ordering against wakeup. | 1679 | * has tried to wake us, and we can skip the call to schedule(). |
1663 | */ | 1680 | */ |
1664 | if (likely(!plist_node_empty(&q->list))) { | 1681 | if (likely(!plist_node_empty(&q->list))) { |
1665 | /* | 1682 | /* |
@@ -1776,6 +1793,7 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
1776 | current->timer_slack_ns); | 1793 | current->timer_slack_ns); |
1777 | } | 1794 | } |
1778 | 1795 | ||
1796 | retry: | ||
1779 | /* Prepare to wait on uaddr. */ | 1797 | /* Prepare to wait on uaddr. */ |
1780 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); | 1798 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); |
1781 | if (ret) | 1799 | if (ret) |
@@ -1793,9 +1811,14 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
1793 | goto out_put_key; | 1811 | goto out_put_key; |
1794 | 1812 | ||
1795 | /* | 1813 | /* |
1796 | * We expect signal_pending(current), but another thread may | 1814 | * We expect signal_pending(current), but we might be the |
1797 | * have handled it for us already. | 1815 | * victim of a spurious wakeup as well. |
1798 | */ | 1816 | */ |
1817 | if (!signal_pending(current)) { | ||
1818 | put_futex_key(fshared, &q.key); | ||
1819 | goto retry; | ||
1820 | } | ||
1821 | |||
1799 | ret = -ERESTARTSYS; | 1822 | ret = -ERESTARTSYS; |
1800 | if (!abs_time) | 1823 | if (!abs_time) |
1801 | goto out_put_key; | 1824 | goto out_put_key; |
@@ -2102,11 +2125,12 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
2102 | * Unqueue the futex_q and determine which it was. | 2125 | * Unqueue the futex_q and determine which it was. |
2103 | */ | 2126 | */ |
2104 | plist_del(&q->list, &q->list.plist); | 2127 | plist_del(&q->list, &q->list.plist); |
2105 | drop_futex_key_refs(&q->key); | ||
2106 | 2128 | ||
2129 | /* Handle spurious wakeups gracefully */ | ||
2130 | ret = -EWOULDBLOCK; | ||
2107 | if (timeout && !timeout->task) | 2131 | if (timeout && !timeout->task) |
2108 | ret = -ETIMEDOUT; | 2132 | ret = -ETIMEDOUT; |
2109 | else | 2133 | else if (signal_pending(current)) |
2110 | ret = -ERESTARTNOINTR; | 2134 | ret = -ERESTARTNOINTR; |
2111 | } | 2135 | } |
2112 | return ret; | 2136 | return ret; |
@@ -2114,12 +2138,12 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
2114 | 2138 | ||
2115 | /** | 2139 | /** |
2116 | * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 | 2140 | * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 |
2117 | * @uaddr: the futex we initialyl wait on (non-pi) | 2141 | * @uaddr: the futex we initially wait on (non-pi) |
2118 | * @fshared: whether the futexes are shared (1) or not (0). They must be | 2142 | * @fshared: whether the futexes are shared (1) or not (0). They must be |
2119 | * the same type, no requeueing from private to shared, etc. | 2143 | * the same type, no requeueing from private to shared, etc. |
2120 | * @val: the expected value of uaddr | 2144 | * @val: the expected value of uaddr |
2121 | * @abs_time: absolute timeout | 2145 | * @abs_time: absolute timeout |
2122 | * @bitset: 32 bit wakeup bitset set by userspace, defaults to all. | 2146 | * @bitset: 32 bit wakeup bitset set by userspace, defaults to all |
2123 | * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) | 2147 | * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) |
2124 | * @uaddr2: the pi futex we will take prior to returning to user-space | 2148 | * @uaddr2: the pi futex we will take prior to returning to user-space |
2125 | * | 2149 | * |
@@ -2246,7 +2270,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, | |||
2246 | res = fixup_owner(uaddr2, fshared, &q, !ret); | 2270 | res = fixup_owner(uaddr2, fshared, &q, !ret); |
2247 | /* | 2271 | /* |
2248 | * If fixup_owner() returned an error, propagate that. If it | 2272 | * If fixup_owner() returned an error, propagate that. If it |
2249 | * acquired the lock, clear our -ETIMEDOUT or -EINTR. | 2273 | * acquired the lock, clear -ETIMEDOUT or -EINTR. |
2250 | */ | 2274 | */ |
2251 | if (res) | 2275 | if (res) |
2252 | ret = (res < 0) ? res : 0; | 2276 | ret = (res < 0) ? res : 0; |
@@ -2302,9 +2326,9 @@ out: | |||
2302 | */ | 2326 | */ |
2303 | 2327 | ||
2304 | /** | 2328 | /** |
2305 | * sys_set_robust_list - set the robust-futex list head of a task | 2329 | * sys_set_robust_list() - Set the robust-futex list head of a task |
2306 | * @head: pointer to the list-head | 2330 | * @head: pointer to the list-head |
2307 | * @len: length of the list-head, as userspace expects | 2331 | * @len: length of the list-head, as userspace expects |
2308 | */ | 2332 | */ |
2309 | SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, | 2333 | SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, |
2310 | size_t, len) | 2334 | size_t, len) |
@@ -2323,10 +2347,10 @@ SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, | |||
2323 | } | 2347 | } |
2324 | 2348 | ||
2325 | /** | 2349 | /** |
2326 | * sys_get_robust_list - get the robust-futex list head of a task | 2350 | * sys_get_robust_list() - Get the robust-futex list head of a task |
2327 | * @pid: pid of the process [zero for current task] | 2351 | * @pid: pid of the process [zero for current task] |
2328 | * @head_ptr: pointer to a list-head pointer, the kernel fills it in | 2352 | * @head_ptr: pointer to a list-head pointer, the kernel fills it in |
2329 | * @len_ptr: pointer to a length field, the kernel fills in the header size | 2353 | * @len_ptr: pointer to a length field, the kernel fills in the header size |
2330 | */ | 2354 | */ |
2331 | SYSCALL_DEFINE3(get_robust_list, int, pid, | 2355 | SYSCALL_DEFINE3(get_robust_list, int, pid, |
2332 | struct robust_list_head __user * __user *, head_ptr, | 2356 | struct robust_list_head __user * __user *, head_ptr, |
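Several futex.c hunks above deal with spurious wakeups: futex_wait() now retries the wait when it wakes without a signal pending, and handle_early_requeue_pi_wakeup() defaults to -EWOULDBLOCK. Userspace is still expected to treat a plain wakeup as advisory and re-check the futex word; a sketch of that canonical loop using the raw syscall (the helper names are ours, not from the patch):

	#include <errno.h>
	#include <linux/futex.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int futex_wait(int *uaddr, int val)
	{
		return syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
	}

	/* Block until *addr no longer holds 'val'; tolerate stray wakeups and EINTR. */
	static void wait_while_equal(int *addr, int val)
	{
		while (*addr == val) {
			if (futex_wait(addr, val) < 0 &&
			    errno != EAGAIN && errno != EINTR)
				break;		/* unexpected error, give up */
		}
	}

	int main(void)
	{
		int word = 0;

		/* word != 1, so this returns at once; a real waker would flip the
		 * value and then call FUTEX_WAKE on the same address. */
		wait_while_equal(&word, 1);
		printf("done\n");
		return 0;
	}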
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index 654efd09f6a9..70a298d6da71 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig | |||
@@ -34,7 +34,7 @@ config GCOV_KERNEL | |||
34 | config GCOV_PROFILE_ALL | 34 | config GCOV_PROFILE_ALL |
35 | bool "Profile entire Kernel" | 35 | bool "Profile entire Kernel" |
36 | depends on GCOV_KERNEL | 36 | depends on GCOV_KERNEL |
37 | depends on S390 || X86 || (PPC && EXPERIMENTAL) | 37 | depends on S390 || X86 || (PPC && EXPERIMENTAL) || MICROBLAZE |
38 | default n | 38 | default n |
39 | ---help--- | 39 | ---help--- |
40 | This options activates profiling for the entire kernel. | 40 | This options activates profiling for the entire kernel. |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index c03f221fee44..3e1c36e7998f 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -48,6 +48,8 @@ | |||
48 | 48 | ||
49 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
50 | 50 | ||
51 | #include <trace/events/timer.h> | ||
52 | |||
51 | /* | 53 | /* |
52 | * The timer bases: | 54 | * The timer bases: |
53 | * | 55 | * |
@@ -442,6 +444,26 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { } | |||
442 | static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } | 444 | static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } |
443 | #endif | 445 | #endif |
444 | 446 | ||
447 | static inline void | ||
448 | debug_init(struct hrtimer *timer, clockid_t clockid, | ||
449 | enum hrtimer_mode mode) | ||
450 | { | ||
451 | debug_hrtimer_init(timer); | ||
452 | trace_hrtimer_init(timer, clockid, mode); | ||
453 | } | ||
454 | |||
455 | static inline void debug_activate(struct hrtimer *timer) | ||
456 | { | ||
457 | debug_hrtimer_activate(timer); | ||
458 | trace_hrtimer_start(timer); | ||
459 | } | ||
460 | |||
461 | static inline void debug_deactivate(struct hrtimer *timer) | ||
462 | { | ||
463 | debug_hrtimer_deactivate(timer); | ||
464 | trace_hrtimer_cancel(timer); | ||
465 | } | ||
466 | |||
445 | /* High resolution timer related functions */ | 467 | /* High resolution timer related functions */ |
446 | #ifdef CONFIG_HIGH_RES_TIMERS | 468 | #ifdef CONFIG_HIGH_RES_TIMERS |
447 | 469 | ||
@@ -487,13 +509,14 @@ static inline int hrtimer_hres_active(void) | |||
487 | * next event | 509 | * next event |
488 | * Called with interrupts disabled and base->lock held | 510 | * Called with interrupts disabled and base->lock held |
489 | */ | 511 | */ |
490 | static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base) | 512 | static void |
513 | hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) | ||
491 | { | 514 | { |
492 | int i; | 515 | int i; |
493 | struct hrtimer_clock_base *base = cpu_base->clock_base; | 516 | struct hrtimer_clock_base *base = cpu_base->clock_base; |
494 | ktime_t expires; | 517 | ktime_t expires, expires_next; |
495 | 518 | ||
496 | cpu_base->expires_next.tv64 = KTIME_MAX; | 519 | expires_next.tv64 = KTIME_MAX; |
497 | 520 | ||
498 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | 521 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { |
499 | struct hrtimer *timer; | 522 | struct hrtimer *timer; |
@@ -509,10 +532,15 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base) | |||
509 | */ | 532 | */ |
510 | if (expires.tv64 < 0) | 533 | if (expires.tv64 < 0) |
511 | expires.tv64 = 0; | 534 | expires.tv64 = 0; |
512 | if (expires.tv64 < cpu_base->expires_next.tv64) | 535 | if (expires.tv64 < expires_next.tv64) |
513 | cpu_base->expires_next = expires; | 536 | expires_next = expires; |
514 | } | 537 | } |
515 | 538 | ||
539 | if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64) | ||
540 | return; | ||
541 | |||
542 | cpu_base->expires_next.tv64 = expires_next.tv64; | ||
543 | |||
516 | if (cpu_base->expires_next.tv64 != KTIME_MAX) | 544 | if (cpu_base->expires_next.tv64 != KTIME_MAX) |
517 | tick_program_event(cpu_base->expires_next, 1); | 545 | tick_program_event(cpu_base->expires_next, 1); |
518 | } | 546 | } |
@@ -595,7 +623,7 @@ static void retrigger_next_event(void *arg) | |||
595 | base->clock_base[CLOCK_REALTIME].offset = | 623 | base->clock_base[CLOCK_REALTIME].offset = |
596 | timespec_to_ktime(realtime_offset); | 624 | timespec_to_ktime(realtime_offset); |
597 | 625 | ||
598 | hrtimer_force_reprogram(base); | 626 | hrtimer_force_reprogram(base, 0); |
599 | spin_unlock(&base->lock); | 627 | spin_unlock(&base->lock); |
600 | } | 628 | } |
601 | 629 | ||
@@ -698,8 +726,6 @@ static int hrtimer_switch_to_hres(void) | |||
698 | /* "Retrigger" the interrupt to get things going */ | 726 | /* "Retrigger" the interrupt to get things going */ |
699 | retrigger_next_event(NULL); | 727 | retrigger_next_event(NULL); |
700 | local_irq_restore(flags); | 728 | local_irq_restore(flags); |
701 | printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n", | ||
702 | smp_processor_id()); | ||
703 | return 1; | 729 | return 1; |
704 | } | 730 | } |
705 | 731 | ||
@@ -708,7 +734,8 @@ static int hrtimer_switch_to_hres(void) | |||
708 | static inline int hrtimer_hres_active(void) { return 0; } | 734 | static inline int hrtimer_hres_active(void) { return 0; } |
709 | static inline int hrtimer_is_hres_enabled(void) { return 0; } | 735 | static inline int hrtimer_is_hres_enabled(void) { return 0; } |
710 | static inline int hrtimer_switch_to_hres(void) { return 0; } | 736 | static inline int hrtimer_switch_to_hres(void) { return 0; } |
711 | static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } | 737 | static inline void |
738 | hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } | ||
712 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | 739 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
713 | struct hrtimer_clock_base *base, | 740 | struct hrtimer_clock_base *base, |
714 | int wakeup) | 741 | int wakeup) |
@@ -798,7 +825,7 @@ static int enqueue_hrtimer(struct hrtimer *timer, | |||
798 | struct hrtimer *entry; | 825 | struct hrtimer *entry; |
799 | int leftmost = 1; | 826 | int leftmost = 1; |
800 | 827 | ||
801 | debug_hrtimer_activate(timer); | 828 | debug_activate(timer); |
802 | 829 | ||
803 | /* | 830 | /* |
804 | * Find the right place in the rbtree: | 831 | * Find the right place in the rbtree: |
@@ -851,19 +878,29 @@ static void __remove_hrtimer(struct hrtimer *timer, | |||
851 | struct hrtimer_clock_base *base, | 878 | struct hrtimer_clock_base *base, |
852 | unsigned long newstate, int reprogram) | 879 | unsigned long newstate, int reprogram) |
853 | { | 880 | { |
854 | if (timer->state & HRTIMER_STATE_ENQUEUED) { | 881 | if (!(timer->state & HRTIMER_STATE_ENQUEUED)) |
855 | /* | 882 | goto out; |
856 | * Remove the timer from the rbtree and replace the | 883 | |
857 | * first entry pointer if necessary. | 884 | /* |
858 | */ | 885 | * Remove the timer from the rbtree and replace the first |
859 | if (base->first == &timer->node) { | 886 | * entry pointer if necessary. |
860 | base->first = rb_next(&timer->node); | 887 | */ |
861 | /* Reprogram the clock event device. if enabled */ | 888 | if (base->first == &timer->node) { |
862 | if (reprogram && hrtimer_hres_active()) | 889 | base->first = rb_next(&timer->node); |
863 | hrtimer_force_reprogram(base->cpu_base); | 890 | #ifdef CONFIG_HIGH_RES_TIMERS |
891 | /* Reprogram the clock event device. if enabled */ | ||
892 | if (reprogram && hrtimer_hres_active()) { | ||
893 | ktime_t expires; | ||
894 | |||
895 | expires = ktime_sub(hrtimer_get_expires(timer), | ||
896 | base->offset); | ||
897 | if (base->cpu_base->expires_next.tv64 == expires.tv64) | ||
898 | hrtimer_force_reprogram(base->cpu_base, 1); | ||
864 | } | 899 | } |
865 | rb_erase(&timer->node, &base->active); | 900 | #endif |
866 | } | 901 | } |
902 | rb_erase(&timer->node, &base->active); | ||
903 | out: | ||
867 | timer->state = newstate; | 904 | timer->state = newstate; |
868 | } | 905 | } |
869 | 906 | ||
@@ -884,7 +921,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | |||
884 | * reprogramming happens in the interrupt handler. This is a | 921 | * reprogramming happens in the interrupt handler. This is a |
885 | * rare case and less expensive than a smp call. | 922 | * rare case and less expensive than a smp call. |
886 | */ | 923 | */ |
887 | debug_hrtimer_deactivate(timer); | 924 | debug_deactivate(timer); |
888 | timer_stats_hrtimer_clear_start_info(timer); | 925 | timer_stats_hrtimer_clear_start_info(timer); |
889 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); | 926 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); |
890 | __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, | 927 | __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, |
@@ -1117,7 +1154,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | |||
1117 | void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | 1154 | void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, |
1118 | enum hrtimer_mode mode) | 1155 | enum hrtimer_mode mode) |
1119 | { | 1156 | { |
1120 | debug_hrtimer_init(timer); | 1157 | debug_init(timer, clock_id, mode); |
1121 | __hrtimer_init(timer, clock_id, mode); | 1158 | __hrtimer_init(timer, clock_id, mode); |
1122 | } | 1159 | } |
1123 | EXPORT_SYMBOL_GPL(hrtimer_init); | 1160 | EXPORT_SYMBOL_GPL(hrtimer_init); |
@@ -1141,7 +1178,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) | |||
1141 | } | 1178 | } |
1142 | EXPORT_SYMBOL_GPL(hrtimer_get_res); | 1179 | EXPORT_SYMBOL_GPL(hrtimer_get_res); |
1143 | 1180 | ||
1144 | static void __run_hrtimer(struct hrtimer *timer) | 1181 | static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) |
1145 | { | 1182 | { |
1146 | struct hrtimer_clock_base *base = timer->base; | 1183 | struct hrtimer_clock_base *base = timer->base; |
1147 | struct hrtimer_cpu_base *cpu_base = base->cpu_base; | 1184 | struct hrtimer_cpu_base *cpu_base = base->cpu_base; |
@@ -1150,7 +1187,7 @@ static void __run_hrtimer(struct hrtimer *timer) | |||
1150 | 1187 | ||
1151 | WARN_ON(!irqs_disabled()); | 1188 | WARN_ON(!irqs_disabled()); |
1152 | 1189 | ||
1153 | debug_hrtimer_deactivate(timer); | 1190 | debug_deactivate(timer); |
1154 | __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); | 1191 | __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); |
1155 | timer_stats_account_hrtimer(timer); | 1192 | timer_stats_account_hrtimer(timer); |
1156 | fn = timer->function; | 1193 | fn = timer->function; |
@@ -1161,7 +1198,9 @@ static void __run_hrtimer(struct hrtimer *timer) | |||
1161 | * the timer base. | 1198 | * the timer base. |
1162 | */ | 1199 | */ |
1163 | spin_unlock(&cpu_base->lock); | 1200 | spin_unlock(&cpu_base->lock); |
1201 | trace_hrtimer_expire_entry(timer, now); | ||
1164 | restart = fn(timer); | 1202 | restart = fn(timer); |
1203 | trace_hrtimer_expire_exit(timer); | ||
1165 | spin_lock(&cpu_base->lock); | 1204 | spin_lock(&cpu_base->lock); |
1166 | 1205 | ||
1167 | /* | 1206 | /* |
@@ -1272,7 +1311,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |||
1272 | break; | 1311 | break; |
1273 | } | 1312 | } |
1274 | 1313 | ||
1275 | __run_hrtimer(timer); | 1314 | __run_hrtimer(timer, &basenow); |
1276 | } | 1315 | } |
1277 | base++; | 1316 | base++; |
1278 | } | 1317 | } |
@@ -1394,7 +1433,7 @@ void hrtimer_run_queues(void) | |||
1394 | hrtimer_get_expires_tv64(timer)) | 1433 | hrtimer_get_expires_tv64(timer)) |
1395 | break; | 1434 | break; |
1396 | 1435 | ||
1397 | __run_hrtimer(timer); | 1436 | __run_hrtimer(timer, &base->softirq_time); |
1398 | } | 1437 | } |
1399 | spin_unlock(&cpu_base->lock); | 1438 | spin_unlock(&cpu_base->lock); |
1400 | } | 1439 | } |
@@ -1571,7 +1610,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, | |||
1571 | while ((node = rb_first(&old_base->active))) { | 1610 | while ((node = rb_first(&old_base->active))) { |
1572 | timer = rb_entry(node, struct hrtimer, node); | 1611 | timer = rb_entry(node, struct hrtimer, node); |
1573 | BUG_ON(hrtimer_callback_running(timer)); | 1612 | BUG_ON(hrtimer_callback_running(timer)); |
1574 | debug_hrtimer_deactivate(timer); | 1613 | debug_deactivate(timer); |
1575 | 1614 | ||
1576 | /* | 1615 | /* |
1577 | * Mark it as STATE_MIGRATE not INACTIVE otherwise the | 1616 | * Mark it as STATE_MIGRATE not INACTIVE otherwise the |
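The hrtimer.c changes route every timer through the new debug_init()/debug_activate()/debug_deactivate() wrappers, so the trace events (init, start, cancel, expire entry/exit) cover ordinary hrtimer users without any change on their side. For reference, a minimal sketch of such a user, assuming a one-shot callback (names are illustrative):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer demo_timer;

	static enum hrtimer_restart demo_expire(struct hrtimer *t)
	{
		/* Bracketed by trace_hrtimer_expire_entry/exit after this patch. */
		return HRTIMER_NORESTART;	/* one-shot: don't rearm */
	}

	static void demo_arm(void)
	{
		hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		demo_timer.function = demo_expire;
		/* Fire once, 100ms from now; hrtimer_start() hits debug_activate(). */
		hrtimer_start(&demo_timer, ktime_set(0, 100 * 1000 * 1000),
			      HRTIMER_MODE_REL);
	}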
diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 022a4927b785..d4e841747400 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c | |||
@@ -171,12 +171,12 @@ static unsigned long timeout_jiffies(unsigned long timeout) | |||
171 | * Process updating of timeout sysctl | 171 | * Process updating of timeout sysctl |
172 | */ | 172 | */ |
173 | int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | 173 | int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, |
174 | struct file *filp, void __user *buffer, | 174 | void __user *buffer, |
175 | size_t *lenp, loff_t *ppos) | 175 | size_t *lenp, loff_t *ppos) |
176 | { | 176 | { |
177 | int ret; | 177 | int ret; |
178 | 178 | ||
179 | ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos); | 179 | ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); |
180 | 180 | ||
181 | if (ret || !write) | 181 | if (ret || !write) |
182 | goto out; | 182 | goto out; |
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index a81cf80554db..17c71bb565c6 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
14 | #include <linux/sched.h> | ||
14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/random.h> | 17 | #include <linux/random.h> |
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 114e704760fe..bd7273e6282e 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -121,7 +121,9 @@ static void poll_all_shared_irqs(void) | |||
121 | if (!(status & IRQ_SPURIOUS_DISABLED)) | 121 | if (!(status & IRQ_SPURIOUS_DISABLED)) |
122 | continue; | 122 | continue; |
123 | 123 | ||
124 | local_irq_disable(); | ||
124 | try_one_irq(i, desc); | 125 | try_one_irq(i, desc); |
126 | local_irq_enable(); | ||
125 | } | 127 | } |
126 | } | 128 | } |
127 | 129 | ||
diff --git a/kernel/itimer.c b/kernel/itimer.c index 58762f7077ec..b03451ede528 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/time.h> | 12 | #include <linux/time.h> |
13 | #include <linux/posix-timers.h> | 13 | #include <linux/posix-timers.h> |
14 | #include <linux/hrtimer.h> | 14 | #include <linux/hrtimer.h> |
15 | #include <trace/events/timer.h> | ||
15 | 16 | ||
16 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
17 | 18 | ||
@@ -41,10 +42,43 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer) | |||
41 | return ktime_to_timeval(rem); | 42 | return ktime_to_timeval(rem); |
42 | } | 43 | } |
43 | 44 | ||
45 | static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, | ||
46 | struct itimerval *const value) | ||
47 | { | ||
48 | cputime_t cval, cinterval; | ||
49 | struct cpu_itimer *it = &tsk->signal->it[clock_id]; | ||
50 | |||
51 | spin_lock_irq(&tsk->sighand->siglock); | ||
52 | |||
53 | cval = it->expires; | ||
54 | cinterval = it->incr; | ||
55 | if (!cputime_eq(cval, cputime_zero)) { | ||
56 | struct task_cputime cputime; | ||
57 | cputime_t t; | ||
58 | |||
59 | thread_group_cputimer(tsk, &cputime); | ||
60 | if (clock_id == CPUCLOCK_PROF) | ||
61 | t = cputime_add(cputime.utime, cputime.stime); | ||
62 | else | ||
63 | /* CPUCLOCK_VIRT */ | ||
64 | t = cputime.utime; | ||
65 | |||
66 | if (cputime_le(cval, t)) | ||
67 | /* about to fire */ | ||
68 | cval = cputime_one_jiffy; | ||
69 | else | ||
70 | cval = cputime_sub(cval, t); | ||
71 | } | ||
72 | |||
73 | spin_unlock_irq(&tsk->sighand->siglock); | ||
74 | |||
75 | cputime_to_timeval(cval, &value->it_value); | ||
76 | cputime_to_timeval(cinterval, &value->it_interval); | ||
77 | } | ||
78 | |||
44 | int do_getitimer(int which, struct itimerval *value) | 79 | int do_getitimer(int which, struct itimerval *value) |
45 | { | 80 | { |
46 | struct task_struct *tsk = current; | 81 | struct task_struct *tsk = current; |
47 | cputime_t cinterval, cval; | ||
48 | 82 | ||
49 | switch (which) { | 83 | switch (which) { |
50 | case ITIMER_REAL: | 84 | case ITIMER_REAL: |
@@ -55,44 +89,10 @@ int do_getitimer(int which, struct itimerval *value) | |||
55 | spin_unlock_irq(&tsk->sighand->siglock); | 89 | spin_unlock_irq(&tsk->sighand->siglock); |
56 | break; | 90 | break; |
57 | case ITIMER_VIRTUAL: | 91 | case ITIMER_VIRTUAL: |
58 | spin_lock_irq(&tsk->sighand->siglock); | 92 | get_cpu_itimer(tsk, CPUCLOCK_VIRT, value); |
59 | cval = tsk->signal->it_virt_expires; | ||
60 | cinterval = tsk->signal->it_virt_incr; | ||
61 | if (!cputime_eq(cval, cputime_zero)) { | ||
62 | struct task_cputime cputime; | ||
63 | cputime_t utime; | ||
64 | |||
65 | thread_group_cputimer(tsk, &cputime); | ||
66 | utime = cputime.utime; | ||
67 | if (cputime_le(cval, utime)) { /* about to fire */ | ||
68 | cval = jiffies_to_cputime(1); | ||
69 | } else { | ||
70 | cval = cputime_sub(cval, utime); | ||
71 | } | ||
72 | } | ||
73 | spin_unlock_irq(&tsk->sighand->siglock); | ||
74 | cputime_to_timeval(cval, &value->it_value); | ||
75 | cputime_to_timeval(cinterval, &value->it_interval); | ||
76 | break; | 93 | break; |
77 | case ITIMER_PROF: | 94 | case ITIMER_PROF: |
78 | spin_lock_irq(&tsk->sighand->siglock); | 95 | get_cpu_itimer(tsk, CPUCLOCK_PROF, value); |
79 | cval = tsk->signal->it_prof_expires; | ||
80 | cinterval = tsk->signal->it_prof_incr; | ||
81 | if (!cputime_eq(cval, cputime_zero)) { | ||
82 | struct task_cputime times; | ||
83 | cputime_t ptime; | ||
84 | |||
85 | thread_group_cputimer(tsk, ×); | ||
86 | ptime = cputime_add(times.utime, times.stime); | ||
87 | if (cputime_le(cval, ptime)) { /* about to fire */ | ||
88 | cval = jiffies_to_cputime(1); | ||
89 | } else { | ||
90 | cval = cputime_sub(cval, ptime); | ||
91 | } | ||
92 | } | ||
93 | spin_unlock_irq(&tsk->sighand->siglock); | ||
94 | cputime_to_timeval(cval, &value->it_value); | ||
95 | cputime_to_timeval(cinterval, &value->it_interval); | ||
96 | break; | 96 | break; |
97 | default: | 97 | default: |
98 | return(-EINVAL); | 98 | return(-EINVAL); |
@@ -123,11 +123,62 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer) | |||
123 | struct signal_struct *sig = | 123 | struct signal_struct *sig = |
124 | container_of(timer, struct signal_struct, real_timer); | 124 | container_of(timer, struct signal_struct, real_timer); |
125 | 125 | ||
126 | trace_itimer_expire(ITIMER_REAL, sig->leader_pid, 0); | ||
126 | kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid); | 127 | kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid); |
127 | 128 | ||
128 | return HRTIMER_NORESTART; | 129 | return HRTIMER_NORESTART; |
129 | } | 130 | } |
130 | 131 | ||
132 | static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns) | ||
133 | { | ||
134 | struct timespec ts; | ||
135 | s64 cpu_ns; | ||
136 | |||
137 | cputime_to_timespec(ct, &ts); | ||
138 | cpu_ns = timespec_to_ns(&ts); | ||
139 | |||
140 | return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns; | ||
141 | } | ||
142 | |||
143 | static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, | ||
144 | const struct itimerval *const value, | ||
145 | struct itimerval *const ovalue) | ||
146 | { | ||
147 | cputime_t cval, nval, cinterval, ninterval; | ||
148 | s64 ns_ninterval, ns_nval; | ||
149 | struct cpu_itimer *it = &tsk->signal->it[clock_id]; | ||
150 | |||
151 | nval = timeval_to_cputime(&value->it_value); | ||
152 | ns_nval = timeval_to_ns(&value->it_value); | ||
153 | ninterval = timeval_to_cputime(&value->it_interval); | ||
154 | ns_ninterval = timeval_to_ns(&value->it_interval); | ||
155 | |||
156 | it->incr_error = cputime_sub_ns(ninterval, ns_ninterval); | ||
157 | it->error = cputime_sub_ns(nval, ns_nval); | ||
158 | |||
159 | spin_lock_irq(&tsk->sighand->siglock); | ||
160 | |||
161 | cval = it->expires; | ||
162 | cinterval = it->incr; | ||
163 | if (!cputime_eq(cval, cputime_zero) || | ||
164 | !cputime_eq(nval, cputime_zero)) { | ||
165 | if (cputime_gt(nval, cputime_zero)) | ||
166 | nval = cputime_add(nval, cputime_one_jiffy); | ||
167 | set_process_cpu_timer(tsk, clock_id, &nval, &cval); | ||
168 | } | ||
169 | it->expires = nval; | ||
170 | it->incr = ninterval; | ||
171 | trace_itimer_state(clock_id == CPUCLOCK_VIRT ? | ||
172 | ITIMER_VIRTUAL : ITIMER_PROF, value, nval); | ||
173 | |||
174 | spin_unlock_irq(&tsk->sighand->siglock); | ||
175 | |||
176 | if (ovalue) { | ||
177 | cputime_to_timeval(cval, &ovalue->it_value); | ||
178 | cputime_to_timeval(cinterval, &ovalue->it_interval); | ||
179 | } | ||
180 | } | ||
181 | |||
131 | /* | 182 | /* |
132 | * Returns true if the timeval is in canonical form | 183 | * Returns true if the timeval is in canonical form |
133 | */ | 184 | */ |
@@ -139,7 +190,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) | |||
139 | struct task_struct *tsk = current; | 190 | struct task_struct *tsk = current; |
140 | struct hrtimer *timer; | 191 | struct hrtimer *timer; |
141 | ktime_t expires; | 192 | ktime_t expires; |
142 | cputime_t cval, cinterval, nval, ninterval; | ||
143 | 193 | ||
144 | /* | 194 | /* |
145 | * Validate the timevals in value. | 195 | * Validate the timevals in value. |
@@ -171,51 +221,14 @@ again: | |||
171 | } else | 221 | } else |
172 | tsk->signal->it_real_incr.tv64 = 0; | 222 | tsk->signal->it_real_incr.tv64 = 0; |
173 | 223 | ||
224 | trace_itimer_state(ITIMER_REAL, value, 0); | ||
174 | spin_unlock_irq(&tsk->sighand->siglock); | 225 | spin_unlock_irq(&tsk->sighand->siglock); |
175 | break; | 226 | break; |
176 | case ITIMER_VIRTUAL: | 227 | case ITIMER_VIRTUAL: |
177 | nval = timeval_to_cputime(&value->it_value); | 228 | set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue); |
178 | ninterval = timeval_to_cputime(&value->it_interval); | ||
179 | spin_lock_irq(&tsk->sighand->siglock); | ||
180 | cval = tsk->signal->it_virt_expires; | ||
181 | cinterval = tsk->signal->it_virt_incr; | ||
182 | if (!cputime_eq(cval, cputime_zero) || | ||
183 | !cputime_eq(nval, cputime_zero)) { | ||
184 | if (cputime_gt(nval, cputime_zero)) | ||
185 | nval = cputime_add(nval, | ||
186 | jiffies_to_cputime(1)); | ||
187 | set_process_cpu_timer(tsk, CPUCLOCK_VIRT, | ||
188 | &nval, &cval); | ||
189 | } | ||
190 | tsk->signal->it_virt_expires = nval; | ||
191 | tsk->signal->it_virt_incr = ninterval; | ||
192 | spin_unlock_irq(&tsk->sighand->siglock); | ||
193 | if (ovalue) { | ||
194 | cputime_to_timeval(cval, &ovalue->it_value); | ||
195 | cputime_to_timeval(cinterval, &ovalue->it_interval); | ||
196 | } | ||
197 | break; | 229 | break; |
198 | case ITIMER_PROF: | 230 | case ITIMER_PROF: |
199 | nval = timeval_to_cputime(&value->it_value); | 231 | set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue); |
200 | ninterval = timeval_to_cputime(&value->it_interval); | ||
201 | spin_lock_irq(&tsk->sighand->siglock); | ||
202 | cval = tsk->signal->it_prof_expires; | ||
203 | cinterval = tsk->signal->it_prof_incr; | ||
204 | if (!cputime_eq(cval, cputime_zero) || | ||
205 | !cputime_eq(nval, cputime_zero)) { | ||
206 | if (cputime_gt(nval, cputime_zero)) | ||
207 | nval = cputime_add(nval, | ||
208 | jiffies_to_cputime(1)); | ||
209 | set_process_cpu_timer(tsk, CPUCLOCK_PROF, | ||
210 | &nval, &cval); | ||
211 | } | ||
212 | tsk->signal->it_prof_expires = nval; | ||
213 | tsk->signal->it_prof_incr = ninterval; | ||
214 | spin_unlock_irq(&tsk->sighand->siglock); | ||
215 | if (ovalue) { | ||
216 | cputime_to_timeval(cval, &ovalue->it_value); | ||
217 | cputime_to_timeval(cinterval, &ovalue->it_interval); | ||
218 | } | ||
219 | break; | 232 | break; |
220 | default: | 233 | default: |
221 | return -EINVAL; | 234 | return -EINVAL; |
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 3a29dbe7898e..8b6b8b697c68 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c | |||
@@ -59,7 +59,8 @@ static inline int is_kernel_inittext(unsigned long addr) | |||
59 | 59 | ||
60 | static inline int is_kernel_text(unsigned long addr) | 60 | static inline int is_kernel_text(unsigned long addr) |
61 | { | 61 | { |
62 | if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) | 62 | if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || |
63 | arch_is_kernel_text(addr)) | ||
63 | return 1; | 64 | return 1; |
64 | return in_gate_area_no_task(addr); | 65 | return in_gate_area_no_task(addr); |
65 | } | 66 | } |
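is_kernel_text() now also consults arch_is_kernel_text(), letting architectures claim text that lives outside the _stext.._etext range. The generic fallback presumably looks like the sketch below (assumed to live in asm-generic/sections.h; its exact form is not shown in this diff):

	/* Generic fallback; architectures that keep kernel text outside
	 * _stext.._etext (e.g. specially mapped sections) override this
	 * to return 1 for those ranges. */
	#ifndef arch_is_kernel_text
	static inline int arch_is_kernel_text(unsigned long addr)
	{
		return 0;
	}
	#endif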
diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 26539e3228e5..3765ff3c1bbe 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c | |||
@@ -117,7 +117,7 @@ EXPORT_SYMBOL(kfifo_free); | |||
117 | * writer, you don't need extra locking to use these functions. | 117 | * writer, you don't need extra locking to use these functions. |
118 | */ | 118 | */ |
119 | unsigned int __kfifo_put(struct kfifo *fifo, | 119 | unsigned int __kfifo_put(struct kfifo *fifo, |
120 | unsigned char *buffer, unsigned int len) | 120 | const unsigned char *buffer, unsigned int len) |
121 | { | 121 | { |
122 | unsigned int l; | 122 | unsigned int l; |
123 | 123 | ||
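Making the __kfifo_put() buffer const lets producers push read-only data without casting. A hedged sketch of the single-writer/single-reader pattern the surrounding comment describes; the kfifo_alloc() arguments follow the API of this kernel generation and are an assumption here:

	#include <linux/err.h>
	#include <linux/kfifo.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);
	static struct kfifo *demo_fifo;

	static int demo_init(void)
	{
		/* Size must be a power of two; the lock is used by the locked API only. */
		demo_fifo = kfifo_alloc(128, GFP_KERNEL, &demo_lock);
		if (IS_ERR(demo_fifo))
			return PTR_ERR(demo_fifo);

		/* Read-only data is now accepted without casting away const. */
		__kfifo_put(demo_fifo, (const unsigned char *)"hello", 5);
		return 0;
	}

	static void demo_drain(void)
	{
		unsigned char buf[8];
		unsigned int n;

		/* With one writer and one reader, no extra locking is required. */
		n = __kfifo_get(demo_fifo, buf, sizeof(buf));
		(void)n;
	}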
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ef177d653b2c..5240d75f4c60 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -1321,7 +1321,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) | |||
1321 | return 0; | 1321 | return 0; |
1322 | } | 1322 | } |
1323 | 1323 | ||
1324 | static struct seq_operations kprobes_seq_ops = { | 1324 | static const struct seq_operations kprobes_seq_ops = { |
1325 | .start = kprobe_seq_start, | 1325 | .start = kprobe_seq_start, |
1326 | .next = kprobe_seq_next, | 1326 | .next = kprobe_seq_next, |
1327 | .stop = kprobe_seq_stop, | 1327 | .stop = kprobe_seq_stop, |
@@ -1333,7 +1333,7 @@ static int __kprobes kprobes_open(struct inode *inode, struct file *filp) | |||
1333 | return seq_open(filp, &kprobes_seq_ops); | 1333 | return seq_open(filp, &kprobes_seq_ops); |
1334 | } | 1334 | } |
1335 | 1335 | ||
1336 | static struct file_operations debugfs_kprobes_operations = { | 1336 | static const struct file_operations debugfs_kprobes_operations = { |
1337 | .open = kprobes_open, | 1337 | .open = kprobes_open, |
1338 | .read = seq_read, | 1338 | .read = seq_read, |
1339 | .llseek = seq_lseek, | 1339 | .llseek = seq_lseek, |
@@ -1515,7 +1515,7 @@ static ssize_t write_enabled_file_bool(struct file *file, | |||
1515 | return count; | 1515 | return count; |
1516 | } | 1516 | } |
1517 | 1517 | ||
1518 | static struct file_operations fops_kp = { | 1518 | static const struct file_operations fops_kp = { |
1519 | .read = read_enabled_file_bool, | 1519 | .read = read_enabled_file_bool, |
1520 | .write = write_enabled_file_bool, | 1520 | .write = write_enabled_file_bool, |
1521 | }; | 1521 | }; |
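
The kprobes hunks (like the lockdep_proc one below) only constify operation tables so they can live in read-only data; none of the callbacks change. A small self-contained sketch of the same pattern for a hypothetical seq_file-backed file, assuming the usual single_open() helpers:

#include <linux/fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
        seq_printf(m, "hello\n");
        return 0;
}

static int demo_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, demo_show, NULL);
}

/* the table never changes at run time, so let it end up in .rodata */
static const struct file_operations demo_fops = {
        .open           = demo_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
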
diff --git a/kernel/kthread.c b/kernel/kthread.c index 5fe709982caa..ab7ae57773e1 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -150,29 +150,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), | |||
150 | EXPORT_SYMBOL(kthread_create); | 150 | EXPORT_SYMBOL(kthread_create); |
151 | 151 | ||
152 | /** | 152 | /** |
153 | * kthread_bind - bind a just-created kthread to a cpu. | ||
154 | * @k: thread created by kthread_create(). | ||
155 | * @cpu: cpu (might not be online, must be possible) for @k to run on. | ||
156 | * | ||
157 | * Description: This function is equivalent to set_cpus_allowed(), | ||
158 | * except that @cpu doesn't need to be online, and the thread must be | ||
159 | * stopped (i.e., just returned from kthread_create()). | ||
160 | */ | ||
161 | void kthread_bind(struct task_struct *k, unsigned int cpu) | ||
162 | { | ||
163 | /* Must have done schedule() in kthread() before we set_task_cpu */ | ||
164 | if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) { | ||
165 | WARN_ON(1); | ||
166 | return; | ||
167 | } | ||
168 | set_task_cpu(k, cpu); | ||
169 | k->cpus_allowed = cpumask_of_cpu(cpu); | ||
170 | k->rt.nr_cpus_allowed = 1; | ||
171 | k->flags |= PF_THREAD_BOUND; | ||
172 | } | ||
173 | EXPORT_SYMBOL(kthread_bind); | ||
174 | |||
175 | /** | ||
176 | * kthread_stop - stop a thread created by kthread_create(). | 153 | * kthread_stop - stop a thread created by kthread_create(). |
177 | * @k: thread created by kthread_create(). | 154 | * @k: thread created by kthread_create(). |
178 | * | 155 | * |
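
kthread_bind() is only being removed from this file; per the deleted kerneldoc its contract is unchanged: it is called on a just-created, not-yet-run kthread, and the CPU must be possible but need not be online. A minimal usage sketch under that assumption (thread function and naming are illustrative):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int demo_threadfn(void *data)
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static struct task_struct *start_pinned_worker(unsigned int cpu)
{
        struct task_struct *tsk;

        tsk = kthread_create(demo_threadfn, NULL, "demo/%u", cpu);
        if (IS_ERR(tsk))
                return tsk;
        kthread_bind(tsk, cpu);         /* must happen before the thread first runs */
        wake_up_process(tsk);
        return tsk;
}
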
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index f74d2d7aa605..9af56723c096 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock) | |||
142 | #ifdef CONFIG_LOCK_STAT | 142 | #ifdef CONFIG_LOCK_STAT |
143 | static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); | 143 | static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); |
144 | 144 | ||
145 | static inline u64 lockstat_clock(void) | ||
146 | { | ||
147 | return cpu_clock(smp_processor_id()); | ||
148 | } | ||
149 | |||
145 | static int lock_point(unsigned long points[], unsigned long ip) | 150 | static int lock_point(unsigned long points[], unsigned long ip) |
146 | { | 151 | { |
147 | int i; | 152 | int i; |
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip) | |||
158 | return i; | 163 | return i; |
159 | } | 164 | } |
160 | 165 | ||
161 | static void lock_time_inc(struct lock_time *lt, s64 time) | 166 | static void lock_time_inc(struct lock_time *lt, u64 time) |
162 | { | 167 | { |
163 | if (time > lt->max) | 168 | if (time > lt->max) |
164 | lt->max = time; | 169 | lt->max = time; |
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats) | |||
234 | static void lock_release_holdtime(struct held_lock *hlock) | 239 | static void lock_release_holdtime(struct held_lock *hlock) |
235 | { | 240 | { |
236 | struct lock_class_stats *stats; | 241 | struct lock_class_stats *stats; |
237 | s64 holdtime; | 242 | u64 holdtime; |
238 | 243 | ||
239 | if (!lock_stat) | 244 | if (!lock_stat) |
240 | return; | 245 | return; |
241 | 246 | ||
242 | holdtime = sched_clock() - hlock->holdtime_stamp; | 247 | holdtime = lockstat_clock() - hlock->holdtime_stamp; |
243 | 248 | ||
244 | stats = get_lock_stats(hlock_class(hlock)); | 249 | stats = get_lock_stats(hlock_class(hlock)); |
245 | if (hlock->read) | 250 | if (hlock->read) |
@@ -578,6 +583,9 @@ static int static_obj(void *obj) | |||
578 | if ((addr >= start) && (addr < end)) | 583 | if ((addr >= start) && (addr < end)) |
579 | return 1; | 584 | return 1; |
580 | 585 | ||
586 | if (arch_is_kernel_data(addr)) | ||
587 | return 1; | ||
588 | |||
581 | #ifdef CONFIG_SMP | 589 | #ifdef CONFIG_SMP |
582 | /* | 590 | /* |
583 | * percpu var? | 591 | * percpu var? |
@@ -2789,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2789 | hlock->references = references; | 2797 | hlock->references = references; |
2790 | #ifdef CONFIG_LOCK_STAT | 2798 | #ifdef CONFIG_LOCK_STAT |
2791 | hlock->waittime_stamp = 0; | 2799 | hlock->waittime_stamp = 0; |
2792 | hlock->holdtime_stamp = sched_clock(); | 2800 | hlock->holdtime_stamp = lockstat_clock(); |
2793 | #endif | 2801 | #endif |
2794 | 2802 | ||
2795 | if (check == 2 && !mark_irqflags(curr, hlock)) | 2803 | if (check == 2 && !mark_irqflags(curr, hlock)) |
@@ -3319,7 +3327,7 @@ found_it: | |||
3319 | if (hlock->instance != lock) | 3327 | if (hlock->instance != lock) |
3320 | return; | 3328 | return; |
3321 | 3329 | ||
3322 | hlock->waittime_stamp = sched_clock(); | 3330 | hlock->waittime_stamp = lockstat_clock(); |
3323 | 3331 | ||
3324 | contention_point = lock_point(hlock_class(hlock)->contention_point, ip); | 3332 | contention_point = lock_point(hlock_class(hlock)->contention_point, ip); |
3325 | contending_point = lock_point(hlock_class(hlock)->contending_point, | 3333 | contending_point = lock_point(hlock_class(hlock)->contending_point, |
@@ -3342,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) | |||
3342 | struct held_lock *hlock, *prev_hlock; | 3350 | struct held_lock *hlock, *prev_hlock; |
3343 | struct lock_class_stats *stats; | 3351 | struct lock_class_stats *stats; |
3344 | unsigned int depth; | 3352 | unsigned int depth; |
3345 | u64 now; | 3353 | u64 now, waittime = 0; |
3346 | s64 waittime = 0; | ||
3347 | int i, cpu; | 3354 | int i, cpu; |
3348 | 3355 | ||
3349 | depth = curr->lockdep_depth; | 3356 | depth = curr->lockdep_depth; |
@@ -3371,7 +3378,7 @@ found_it: | |||
3371 | 3378 | ||
3372 | cpu = smp_processor_id(); | 3379 | cpu = smp_processor_id(); |
3373 | if (hlock->waittime_stamp) { | 3380 | if (hlock->waittime_stamp) { |
3374 | now = sched_clock(); | 3381 | now = lockstat_clock(); |
3375 | waittime = now - hlock->waittime_stamp; | 3382 | waittime = now - hlock->waittime_stamp; |
3376 | hlock->holdtime_stamp = now; | 3383 | hlock->holdtime_stamp = now; |
3377 | } | 3384 | } |
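
All the lock-stat timestamps above now go through the single lockstat_clock() helper and are kept as u64, since an interval measured on one CPU with cpu_clock() cannot meaningfully be negative. A hedged sketch of the same measure-an-interval pattern in isolation (names are illustrative):

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/types.h>

static inline u64 demo_clock(void)
{
        /* same choice as lockstat_clock(): the local CPU's clock */
        return cpu_clock(smp_processor_id());
}

static u64 demo_time_section(void (*fn)(void))
{
        u64 start = demo_clock();

        fn();
        return demo_clock() - start;    /* elapsed time in nanoseconds */
}
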
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index d4b3dbc79fdb..d4aba4f3584c 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c | |||
@@ -594,7 +594,7 @@ static int ls_show(struct seq_file *m, void *v) | |||
594 | return 0; | 594 | return 0; |
595 | } | 595 | } |
596 | 596 | ||
597 | static struct seq_operations lockstat_ops = { | 597 | static const struct seq_operations lockstat_ops = { |
598 | .start = ls_start, | 598 | .start = ls_start, |
599 | .next = ls_next, | 599 | .next = ls_next, |
600 | .stop = ls_stop, | 600 | .stop = ls_stop, |
diff --git a/kernel/marker.c b/kernel/marker.c deleted file mode 100644 index ea54f2647868..000000000000 --- a/kernel/marker.c +++ /dev/null | |||
@@ -1,930 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Mathieu Desnoyers | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | */ | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/mutex.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/jhash.h> | ||
22 | #include <linux/list.h> | ||
23 | #include <linux/rcupdate.h> | ||
24 | #include <linux/marker.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/slab.h> | ||
27 | |||
28 | extern struct marker __start___markers[]; | ||
29 | extern struct marker __stop___markers[]; | ||
30 | |||
31 | /* Set to 1 to enable marker debug output */ | ||
32 | static const int marker_debug; | ||
33 | |||
34 | /* | ||
35 | * markers_mutex nests inside module_mutex. Markers mutex protects the builtin | ||
36 | * and module markers and the hash table. | ||
37 | */ | ||
38 | static DEFINE_MUTEX(markers_mutex); | ||
39 | |||
40 | /* | ||
41 | * Marker hash table, containing the active markers. | ||
42 | * Protected by module_mutex. | ||
43 | */ | ||
44 | #define MARKER_HASH_BITS 6 | ||
45 | #define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS) | ||
46 | static struct hlist_head marker_table[MARKER_TABLE_SIZE]; | ||
47 | |||
48 | /* | ||
49 | * Note about RCU : | ||
50 | * It is used to make sure every handler has finished using its private data | ||
51 | * between two consecutive operations (add or remove) on a given marker. It is | ||
52 | * also used to delay the free of multiple probes array until a quiescent state | ||
53 | * is reached. | ||
54 | * marker entries modifications are protected by the markers_mutex. | ||
55 | */ | ||
56 | struct marker_entry { | ||
57 | struct hlist_node hlist; | ||
58 | char *format; | ||
59 | /* Probe wrapper */ | ||
60 | void (*call)(const struct marker *mdata, void *call_private, ...); | ||
61 | struct marker_probe_closure single; | ||
62 | struct marker_probe_closure *multi; | ||
63 | int refcount; /* Number of times armed. 0 if disarmed. */ | ||
64 | struct rcu_head rcu; | ||
65 | void *oldptr; | ||
66 | int rcu_pending; | ||
67 | unsigned char ptype:1; | ||
68 | unsigned char format_allocated:1; | ||
69 | char name[0]; /* Contains name'\0'format'\0' */ | ||
70 | }; | ||
71 | |||
72 | /** | ||
73 | * __mark_empty_function - Empty probe callback | ||
74 | * @probe_private: probe private data | ||
75 | * @call_private: call site private data | ||
76 | * @fmt: format string | ||
77 | * @...: variable argument list | ||
78 | * | ||
79 | * Empty callback provided as a probe to the markers. By providing this to a | ||
80 | * disabled marker, we make sure the execution flow is always valid even | ||
81 | * though the function pointer change and the marker enabling are two distinct | ||
82 | * operations that modify the execution flow of preemptible code. | ||
83 | */ | ||
84 | notrace void __mark_empty_function(void *probe_private, void *call_private, | ||
85 | const char *fmt, va_list *args) | ||
86 | { | ||
87 | } | ||
88 | EXPORT_SYMBOL_GPL(__mark_empty_function); | ||
89 | |||
90 | /* | ||
91 | * marker_probe_cb Callback that prepares the variable argument list for probes. | ||
92 | * @mdata: pointer of type struct marker | ||
93 | * @call_private: caller site private data | ||
94 | * @...: Variable argument list. | ||
95 | * | ||
96 | * Since we do not use "typical" pointer based RCU in the 1 argument case, we | ||
97 | * need to put a full smp_rmb() in this branch. This is why we do not use | ||
98 | * rcu_dereference() for the pointer read. | ||
99 | */ | ||
100 | notrace void marker_probe_cb(const struct marker *mdata, | ||
101 | void *call_private, ...) | ||
102 | { | ||
103 | va_list args; | ||
104 | char ptype; | ||
105 | |||
106 | /* | ||
107 | * rcu_read_lock_sched does two things : disabling preemption to make | ||
108 | * sure the teardown of the callbacks can be done correctly when they | ||
109 | * are in modules and they ensure RCU read coherency. | ||
110 | */ | ||
111 | rcu_read_lock_sched_notrace(); | ||
112 | ptype = mdata->ptype; | ||
113 | if (likely(!ptype)) { | ||
114 | marker_probe_func *func; | ||
115 | /* Must read the ptype before ptr. They are not data dependent, | ||
116 | * so we put an explicit smp_rmb() here. */ | ||
117 | smp_rmb(); | ||
118 | func = mdata->single.func; | ||
119 | /* Must read the ptr before private data. They are not data | ||
120 | * dependent, so we put an explicit smp_rmb() here. */ | ||
121 | smp_rmb(); | ||
122 | va_start(args, call_private); | ||
123 | func(mdata->single.probe_private, call_private, mdata->format, | ||
124 | &args); | ||
125 | va_end(args); | ||
126 | } else { | ||
127 | struct marker_probe_closure *multi; | ||
128 | int i; | ||
129 | /* | ||
130 | * Read mdata->ptype before mdata->multi. | ||
131 | */ | ||
132 | smp_rmb(); | ||
133 | multi = mdata->multi; | ||
134 | /* | ||
135 | * multi points to an array, therefore accessing the array | ||
136 | * depends on reading multi. However, even in this case, | ||
137 | * we must ensure that the pointer is read _before_ the array | ||
138 | * data. Same as rcu_dereference, but we need a full smp_rmb() | ||
139 | * in the fast path, so put the explicit barrier here. | ||
140 | */ | ||
141 | smp_read_barrier_depends(); | ||
142 | for (i = 0; multi[i].func; i++) { | ||
143 | va_start(args, call_private); | ||
144 | multi[i].func(multi[i].probe_private, call_private, | ||
145 | mdata->format, &args); | ||
146 | va_end(args); | ||
147 | } | ||
148 | } | ||
149 | rcu_read_unlock_sched_notrace(); | ||
150 | } | ||
151 | EXPORT_SYMBOL_GPL(marker_probe_cb); | ||
152 | |||
153 | /* | ||
154 | * marker_probe_cb_noarg Callback that does not prepare the variable argument list. | ||
155 | * @mdata: pointer of type struct marker | ||
156 | * @call_private: caller site private data | ||
157 | * @...: Variable argument list. | ||
158 | * | ||
159 | * Should be connected to markers "MARK_NOARGS". | ||
160 | */ | ||
161 | static notrace void marker_probe_cb_noarg(const struct marker *mdata, | ||
162 | void *call_private, ...) | ||
163 | { | ||
164 | va_list args; /* not initialized */ | ||
165 | char ptype; | ||
166 | |||
167 | rcu_read_lock_sched_notrace(); | ||
168 | ptype = mdata->ptype; | ||
169 | if (likely(!ptype)) { | ||
170 | marker_probe_func *func; | ||
171 | /* Must read the ptype before ptr. They are not data dependent, | ||
172 | * so we put an explicit smp_rmb() here. */ | ||
173 | smp_rmb(); | ||
174 | func = mdata->single.func; | ||
175 | /* Must read the ptr before private data. They are not data | ||
176 | * dependent, so we put an explicit smp_rmb() here. */ | ||
177 | smp_rmb(); | ||
178 | func(mdata->single.probe_private, call_private, mdata->format, | ||
179 | &args); | ||
180 | } else { | ||
181 | struct marker_probe_closure *multi; | ||
182 | int i; | ||
183 | /* | ||
184 | * Read mdata->ptype before mdata->multi. | ||
185 | */ | ||
186 | smp_rmb(); | ||
187 | multi = mdata->multi; | ||
188 | /* | ||
189 | * multi points to an array, therefore accessing the array | ||
190 | * depends on reading multi. However, even in this case, | ||
191 | * we must ensure that the pointer is read _before_ the array | ||
192 | * data. Same as rcu_dereference, but we need a full smp_rmb() | ||
193 | * in the fast path, so put the explicit barrier here. | ||
194 | */ | ||
195 | smp_read_barrier_depends(); | ||
196 | for (i = 0; multi[i].func; i++) | ||
197 | multi[i].func(multi[i].probe_private, call_private, | ||
198 | mdata->format, &args); | ||
199 | } | ||
200 | rcu_read_unlock_sched_notrace(); | ||
201 | } | ||
202 | |||
203 | static void free_old_closure(struct rcu_head *head) | ||
204 | { | ||
205 | struct marker_entry *entry = container_of(head, | ||
206 | struct marker_entry, rcu); | ||
207 | kfree(entry->oldptr); | ||
208 | /* Make sure we free the data before setting the pending flag to 0 */ | ||
209 | smp_wmb(); | ||
210 | entry->rcu_pending = 0; | ||
211 | } | ||
212 | |||
213 | static void debug_print_probes(struct marker_entry *entry) | ||
214 | { | ||
215 | int i; | ||
216 | |||
217 | if (!marker_debug) | ||
218 | return; | ||
219 | |||
220 | if (!entry->ptype) { | ||
221 | printk(KERN_DEBUG "Single probe : %p %p\n", | ||
222 | entry->single.func, | ||
223 | entry->single.probe_private); | ||
224 | } else { | ||
225 | for (i = 0; entry->multi[i].func; i++) | ||
226 | printk(KERN_DEBUG "Multi probe %d : %p %p\n", i, | ||
227 | entry->multi[i].func, | ||
228 | entry->multi[i].probe_private); | ||
229 | } | ||
230 | } | ||
231 | |||
232 | static struct marker_probe_closure * | ||
233 | marker_entry_add_probe(struct marker_entry *entry, | ||
234 | marker_probe_func *probe, void *probe_private) | ||
235 | { | ||
236 | int nr_probes = 0; | ||
237 | struct marker_probe_closure *old, *new; | ||
238 | |||
239 | WARN_ON(!probe); | ||
240 | |||
241 | debug_print_probes(entry); | ||
242 | old = entry->multi; | ||
243 | if (!entry->ptype) { | ||
244 | if (entry->single.func == probe && | ||
245 | entry->single.probe_private == probe_private) | ||
246 | return ERR_PTR(-EBUSY); | ||
247 | if (entry->single.func == __mark_empty_function) { | ||
248 | /* 0 -> 1 probes */ | ||
249 | entry->single.func = probe; | ||
250 | entry->single.probe_private = probe_private; | ||
251 | entry->refcount = 1; | ||
252 | entry->ptype = 0; | ||
253 | debug_print_probes(entry); | ||
254 | return NULL; | ||
255 | } else { | ||
256 | /* 1 -> 2 probes */ | ||
257 | nr_probes = 1; | ||
258 | old = NULL; | ||
259 | } | ||
260 | } else { | ||
261 | /* (N -> N+1), (N != 0, 1) probes */ | ||
262 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) | ||
263 | if (old[nr_probes].func == probe | ||
264 | && old[nr_probes].probe_private | ||
265 | == probe_private) | ||
266 | return ERR_PTR(-EBUSY); | ||
267 | } | ||
268 | /* + 2 : one for new probe, one for NULL func */ | ||
269 | new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure), | ||
270 | GFP_KERNEL); | ||
271 | if (new == NULL) | ||
272 | return ERR_PTR(-ENOMEM); | ||
273 | if (!old) | ||
274 | new[0] = entry->single; | ||
275 | else | ||
276 | memcpy(new, old, | ||
277 | nr_probes * sizeof(struct marker_probe_closure)); | ||
278 | new[nr_probes].func = probe; | ||
279 | new[nr_probes].probe_private = probe_private; | ||
280 | entry->refcount = nr_probes + 1; | ||
281 | entry->multi = new; | ||
282 | entry->ptype = 1; | ||
283 | debug_print_probes(entry); | ||
284 | return old; | ||
285 | } | ||
286 | |||
287 | static struct marker_probe_closure * | ||
288 | marker_entry_remove_probe(struct marker_entry *entry, | ||
289 | marker_probe_func *probe, void *probe_private) | ||
290 | { | ||
291 | int nr_probes = 0, nr_del = 0, i; | ||
292 | struct marker_probe_closure *old, *new; | ||
293 | |||
294 | old = entry->multi; | ||
295 | |||
296 | debug_print_probes(entry); | ||
297 | if (!entry->ptype) { | ||
298 | /* 0 -> N is an error */ | ||
299 | WARN_ON(entry->single.func == __mark_empty_function); | ||
300 | /* 1 -> 0 probes */ | ||
301 | WARN_ON(probe && entry->single.func != probe); | ||
302 | WARN_ON(entry->single.probe_private != probe_private); | ||
303 | entry->single.func = __mark_empty_function; | ||
304 | entry->refcount = 0; | ||
305 | entry->ptype = 0; | ||
306 | debug_print_probes(entry); | ||
307 | return NULL; | ||
308 | } else { | ||
309 | /* (N -> M), (N > 1, M >= 0) probes */ | ||
310 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) { | ||
311 | if ((!probe || old[nr_probes].func == probe) | ||
312 | && old[nr_probes].probe_private | ||
313 | == probe_private) | ||
314 | nr_del++; | ||
315 | } | ||
316 | } | ||
317 | |||
318 | if (nr_probes - nr_del == 0) { | ||
319 | /* N -> 0, (N > 1) */ | ||
320 | entry->single.func = __mark_empty_function; | ||
321 | entry->refcount = 0; | ||
322 | entry->ptype = 0; | ||
323 | } else if (nr_probes - nr_del == 1) { | ||
324 | /* N -> 1, (N > 1) */ | ||
325 | for (i = 0; old[i].func; i++) | ||
326 | if ((probe && old[i].func != probe) || | ||
327 | old[i].probe_private != probe_private) | ||
328 | entry->single = old[i]; | ||
329 | entry->refcount = 1; | ||
330 | entry->ptype = 0; | ||
331 | } else { | ||
332 | int j = 0; | ||
333 | /* N -> M, (N > 1, M > 1) */ | ||
334 | /* + 1 for NULL */ | ||
335 | new = kzalloc((nr_probes - nr_del + 1) | ||
336 | * sizeof(struct marker_probe_closure), GFP_KERNEL); | ||
337 | if (new == NULL) | ||
338 | return ERR_PTR(-ENOMEM); | ||
339 | for (i = 0; old[i].func; i++) | ||
340 | if ((probe && old[i].func != probe) || | ||
341 | old[i].probe_private != probe_private) | ||
342 | new[j++] = old[i]; | ||
343 | entry->refcount = nr_probes - nr_del; | ||
344 | entry->ptype = 1; | ||
345 | entry->multi = new; | ||
346 | } | ||
347 | debug_print_probes(entry); | ||
348 | return old; | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Get marker if the marker is present in the marker hash table. | ||
353 | * Must be called with markers_mutex held. | ||
354 | * Returns NULL if not present. | ||
355 | */ | ||
356 | static struct marker_entry *get_marker(const char *name) | ||
357 | { | ||
358 | struct hlist_head *head; | ||
359 | struct hlist_node *node; | ||
360 | struct marker_entry *e; | ||
361 | u32 hash = jhash(name, strlen(name), 0); | ||
362 | |||
363 | head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; | ||
364 | hlist_for_each_entry(e, node, head, hlist) { | ||
365 | if (!strcmp(name, e->name)) | ||
366 | return e; | ||
367 | } | ||
368 | return NULL; | ||
369 | } | ||
370 | |||
371 | /* | ||
372 | * Add the marker to the marker hash table. Must be called with markers_mutex | ||
373 | * held. | ||
374 | */ | ||
375 | static struct marker_entry *add_marker(const char *name, const char *format) | ||
376 | { | ||
377 | struct hlist_head *head; | ||
378 | struct hlist_node *node; | ||
379 | struct marker_entry *e; | ||
380 | size_t name_len = strlen(name) + 1; | ||
381 | size_t format_len = 0; | ||
382 | u32 hash = jhash(name, name_len-1, 0); | ||
383 | |||
384 | if (format) | ||
385 | format_len = strlen(format) + 1; | ||
386 | head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; | ||
387 | hlist_for_each_entry(e, node, head, hlist) { | ||
388 | if (!strcmp(name, e->name)) { | ||
389 | printk(KERN_NOTICE | ||
390 | "Marker %s busy\n", name); | ||
391 | return ERR_PTR(-EBUSY); /* Already there */ | ||
392 | } | ||
393 | } | ||
394 | /* | ||
395 | * Using kmalloc here to allocate a variable length element. Could | ||
396 | * cause some memory fragmentation if overused. | ||
397 | */ | ||
398 | e = kmalloc(sizeof(struct marker_entry) + name_len + format_len, | ||
399 | GFP_KERNEL); | ||
400 | if (!e) | ||
401 | return ERR_PTR(-ENOMEM); | ||
402 | memcpy(&e->name[0], name, name_len); | ||
403 | if (format) { | ||
404 | e->format = &e->name[name_len]; | ||
405 | memcpy(e->format, format, format_len); | ||
406 | if (strcmp(e->format, MARK_NOARGS) == 0) | ||
407 | e->call = marker_probe_cb_noarg; | ||
408 | else | ||
409 | e->call = marker_probe_cb; | ||
410 | trace_mark(core_marker_format, "name %s format %s", | ||
411 | e->name, e->format); | ||
412 | } else { | ||
413 | e->format = NULL; | ||
414 | e->call = marker_probe_cb; | ||
415 | } | ||
416 | e->single.func = __mark_empty_function; | ||
417 | e->single.probe_private = NULL; | ||
418 | e->multi = NULL; | ||
419 | e->ptype = 0; | ||
420 | e->format_allocated = 0; | ||
421 | e->refcount = 0; | ||
422 | e->rcu_pending = 0; | ||
423 | hlist_add_head(&e->hlist, head); | ||
424 | return e; | ||
425 | } | ||
426 | |||
427 | /* | ||
428 | * Remove the marker from the marker hash table. Must be called with mutex_lock | ||
429 | * held. | ||
430 | */ | ||
431 | static int remove_marker(const char *name) | ||
432 | { | ||
433 | struct hlist_head *head; | ||
434 | struct hlist_node *node; | ||
435 | struct marker_entry *e; | ||
436 | int found = 0; | ||
437 | size_t len = strlen(name) + 1; | ||
438 | u32 hash = jhash(name, len-1, 0); | ||
439 | |||
440 | head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; | ||
441 | hlist_for_each_entry(e, node, head, hlist) { | ||
442 | if (!strcmp(name, e->name)) { | ||
443 | found = 1; | ||
444 | break; | ||
445 | } | ||
446 | } | ||
447 | if (!found) | ||
448 | return -ENOENT; | ||
449 | if (e->single.func != __mark_empty_function) | ||
450 | return -EBUSY; | ||
451 | hlist_del(&e->hlist); | ||
452 | if (e->format_allocated) | ||
453 | kfree(e->format); | ||
454 | /* Make sure the call_rcu has been executed */ | ||
455 | if (e->rcu_pending) | ||
456 | rcu_barrier_sched(); | ||
457 | kfree(e); | ||
458 | return 0; | ||
459 | } | ||
460 | |||
461 | /* | ||
462 | * Set the mark_entry format to the format found in the element. | ||
463 | */ | ||
464 | static int marker_set_format(struct marker_entry *entry, const char *format) | ||
465 | { | ||
466 | entry->format = kstrdup(format, GFP_KERNEL); | ||
467 | if (!entry->format) | ||
468 | return -ENOMEM; | ||
469 | entry->format_allocated = 1; | ||
470 | |||
471 | trace_mark(core_marker_format, "name %s format %s", | ||
472 | entry->name, entry->format); | ||
473 | return 0; | ||
474 | } | ||
475 | |||
476 | /* | ||
477 | * Sets the probe callback corresponding to one marker. | ||
478 | */ | ||
479 | static int set_marker(struct marker_entry *entry, struct marker *elem, | ||
480 | int active) | ||
481 | { | ||
482 | int ret = 0; | ||
483 | WARN_ON(strcmp(entry->name, elem->name) != 0); | ||
484 | |||
485 | if (entry->format) { | ||
486 | if (strcmp(entry->format, elem->format) != 0) { | ||
487 | printk(KERN_NOTICE | ||
488 | "Format mismatch for probe %s " | ||
489 | "(%s), marker (%s)\n", | ||
490 | entry->name, | ||
491 | entry->format, | ||
492 | elem->format); | ||
493 | return -EPERM; | ||
494 | } | ||
495 | } else { | ||
496 | ret = marker_set_format(entry, elem->format); | ||
497 | if (ret) | ||
498 | return ret; | ||
499 | } | ||
500 | |||
501 | /* | ||
502 | * probe_cb setup (statically known) is done here. It is | ||
503 | * asynchronous with the rest of execution, therefore we only | ||
504 | * pass from a "safe" callback (with argument) to an "unsafe" | ||
505 | * callback (does not set arguments). | ||
506 | */ | ||
507 | elem->call = entry->call; | ||
508 | /* | ||
509 | * Sanity check : | ||
510 | * We only update the single probe private data when the ptr is | ||
511 | * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1) | ||
512 | */ | ||
513 | WARN_ON(elem->single.func != __mark_empty_function | ||
514 | && elem->single.probe_private != entry->single.probe_private | ||
515 | && !elem->ptype); | ||
516 | elem->single.probe_private = entry->single.probe_private; | ||
517 | /* | ||
518 | * Make sure the private data is valid when we update the | ||
519 | * single probe ptr. | ||
520 | */ | ||
521 | smp_wmb(); | ||
522 | elem->single.func = entry->single.func; | ||
523 | /* | ||
524 | * We also make sure that the new probe callbacks array is consistent | ||
525 | * before setting a pointer to it. | ||
526 | */ | ||
527 | rcu_assign_pointer(elem->multi, entry->multi); | ||
528 | /* | ||
529 | * Update the function or multi probe array pointer before setting the | ||
530 | * ptype. | ||
531 | */ | ||
532 | smp_wmb(); | ||
533 | elem->ptype = entry->ptype; | ||
534 | |||
535 | if (elem->tp_name && (active ^ elem->state)) { | ||
536 | WARN_ON(!elem->tp_cb); | ||
537 | /* | ||
538 | * It is ok to directly call the probe registration because type | ||
539 | * checking has been done in the __trace_mark_tp() macro. | ||
540 | */ | ||
541 | |||
542 | if (active) { | ||
543 | /* | ||
544 | * try_module_get should always succeed because we hold | ||
545 | * lock_module() to get the tp_cb address. | ||
546 | */ | ||
547 | ret = try_module_get(__module_text_address( | ||
548 | (unsigned long)elem->tp_cb)); | ||
549 | BUG_ON(!ret); | ||
550 | ret = tracepoint_probe_register_noupdate( | ||
551 | elem->tp_name, | ||
552 | elem->tp_cb); | ||
553 | } else { | ||
554 | ret = tracepoint_probe_unregister_noupdate( | ||
555 | elem->tp_name, | ||
556 | elem->tp_cb); | ||
557 | /* | ||
558 | * tracepoint_probe_update_all() must be called | ||
559 | * before the module containing tp_cb is unloaded. | ||
560 | */ | ||
561 | module_put(__module_text_address( | ||
562 | (unsigned long)elem->tp_cb)); | ||
563 | } | ||
564 | } | ||
565 | elem->state = active; | ||
566 | |||
567 | return ret; | ||
568 | } | ||
569 | |||
570 | /* | ||
571 | * Disable a marker and its probe callback. | ||
572 | * Note: only waiting an RCU period after setting elem->call to the empty | ||
573 | * function ensures that the original callback is not used anymore. This is ensured | ||
574 | * by rcu_read_lock_sched around the call site. | ||
575 | */ | ||
576 | static void disable_marker(struct marker *elem) | ||
577 | { | ||
578 | int ret; | ||
579 | |||
580 | /* leave "call" as is. It is known statically. */ | ||
581 | if (elem->tp_name && elem->state) { | ||
582 | WARN_ON(!elem->tp_cb); | ||
583 | /* | ||
584 | * It is ok to directly call the probe registration because type | ||
585 | * checking has been done in the __trace_mark_tp() macro. | ||
586 | */ | ||
587 | ret = tracepoint_probe_unregister_noupdate(elem->tp_name, | ||
588 | elem->tp_cb); | ||
589 | WARN_ON(ret); | ||
590 | /* | ||
591 | * tracepoint_probe_update_all() must be called | ||
592 | * before the module containing tp_cb is unloaded. | ||
593 | */ | ||
594 | module_put(__module_text_address((unsigned long)elem->tp_cb)); | ||
595 | } | ||
596 | elem->state = 0; | ||
597 | elem->single.func = __mark_empty_function; | ||
598 | /* Update the function before setting the ptype */ | ||
599 | smp_wmb(); | ||
600 | elem->ptype = 0; /* single probe */ | ||
601 | /* | ||
602 | * Leave the private data and id there, because removal is racy and | ||
603 | * should be done only after an RCU period. These are never used until | ||
604 | * the next initialization anyway. | ||
605 | */ | ||
606 | } | ||
607 | |||
608 | /** | ||
609 | * marker_update_probe_range - Update a probe range | ||
610 | * @begin: beginning of the range | ||
611 | * @end: end of the range | ||
612 | * | ||
613 | * Updates the probe callback corresponding to a range of markers. | ||
614 | */ | ||
615 | void marker_update_probe_range(struct marker *begin, | ||
616 | struct marker *end) | ||
617 | { | ||
618 | struct marker *iter; | ||
619 | struct marker_entry *mark_entry; | ||
620 | |||
621 | mutex_lock(&markers_mutex); | ||
622 | for (iter = begin; iter < end; iter++) { | ||
623 | mark_entry = get_marker(iter->name); | ||
624 | if (mark_entry) { | ||
625 | set_marker(mark_entry, iter, !!mark_entry->refcount); | ||
626 | /* | ||
627 | * ignore error, continue | ||
628 | */ | ||
629 | } else { | ||
630 | disable_marker(iter); | ||
631 | } | ||
632 | } | ||
633 | mutex_unlock(&markers_mutex); | ||
634 | } | ||
635 | |||
636 | /* | ||
637 | * Update probes, removing the faulty probes. | ||
638 | * | ||
639 | * Internal callback only changed before the first probe is connected to it. | ||
640 | * Single probe private data can only be changed on 0 -> 1 and 2 -> 1 | ||
641 | * transitions. All other transitions will leave the old private data valid. | ||
642 | * This makes the non-atomicity of the callback/private data updates valid. | ||
643 | * | ||
644 | * "special case" updates : | ||
645 | * 0 -> 1 callback | ||
646 | * 1 -> 0 callback | ||
647 | * 1 -> 2 callbacks | ||
648 | * 2 -> 1 callbacks | ||
649 | * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates. | ||
650 | * Side effect: marker_set_format may delete the marker entry (creating a | ||
651 | * replacement). | ||
652 | */ | ||
653 | static void marker_update_probes(void) | ||
654 | { | ||
655 | /* Core kernel markers */ | ||
656 | marker_update_probe_range(__start___markers, __stop___markers); | ||
657 | /* Markers in modules. */ | ||
658 | module_update_markers(); | ||
659 | tracepoint_probe_update_all(); | ||
660 | } | ||
661 | |||
662 | /** | ||
663 | * marker_probe_register - Connect a probe to a marker | ||
664 | * @name: marker name | ||
665 | * @format: format string | ||
666 | * @probe: probe handler | ||
667 | * @probe_private: probe private data | ||
668 | * | ||
669 | * private data must be a valid allocated memory address, or NULL. | ||
670 | * Returns 0 if ok, error value on error. | ||
671 | * The probe address must at least be aligned on the architecture pointer size. | ||
672 | */ | ||
673 | int marker_probe_register(const char *name, const char *format, | ||
674 | marker_probe_func *probe, void *probe_private) | ||
675 | { | ||
676 | struct marker_entry *entry; | ||
677 | int ret = 0; | ||
678 | struct marker_probe_closure *old; | ||
679 | |||
680 | mutex_lock(&markers_mutex); | ||
681 | entry = get_marker(name); | ||
682 | if (!entry) { | ||
683 | entry = add_marker(name, format); | ||
684 | if (IS_ERR(entry)) | ||
685 | ret = PTR_ERR(entry); | ||
686 | } else if (format) { | ||
687 | if (!entry->format) | ||
688 | ret = marker_set_format(entry, format); | ||
689 | else if (strcmp(entry->format, format)) | ||
690 | ret = -EPERM; | ||
691 | } | ||
692 | if (ret) | ||
693 | goto end; | ||
694 | |||
695 | /* | ||
696 | * If we detect that a call_rcu is pending for this marker, | ||
697 | * make sure it's executed now. | ||
698 | */ | ||
699 | if (entry->rcu_pending) | ||
700 | rcu_barrier_sched(); | ||
701 | old = marker_entry_add_probe(entry, probe, probe_private); | ||
702 | if (IS_ERR(old)) { | ||
703 | ret = PTR_ERR(old); | ||
704 | goto end; | ||
705 | } | ||
706 | mutex_unlock(&markers_mutex); | ||
707 | marker_update_probes(); | ||
708 | mutex_lock(&markers_mutex); | ||
709 | entry = get_marker(name); | ||
710 | if (!entry) | ||
711 | goto end; | ||
712 | if (entry->rcu_pending) | ||
713 | rcu_barrier_sched(); | ||
714 | entry->oldptr = old; | ||
715 | entry->rcu_pending = 1; | ||
716 | /* write rcu_pending before calling the RCU callback */ | ||
717 | smp_wmb(); | ||
718 | call_rcu_sched(&entry->rcu, free_old_closure); | ||
719 | end: | ||
720 | mutex_unlock(&markers_mutex); | ||
721 | return ret; | ||
722 | } | ||
723 | EXPORT_SYMBOL_GPL(marker_probe_register); | ||
724 | |||
725 | /** | ||
726 | * marker_probe_unregister - Disconnect a probe from a marker | ||
727 | * @name: marker name | ||
728 | * @probe: probe function pointer | ||
729 | * @probe_private: probe private data | ||
730 | * | ||
731 | * Returns 0 on success, or an error value. | ||
732 | * We do not need to call a synchronize_sched to make sure the probes have | ||
733 | * finished running before doing a module unload, because the module unload | ||
734 | * itself uses stop_machine(), which ensures that every preempt-disabled section | ||
735 | * has finished. | ||
736 | */ | ||
737 | int marker_probe_unregister(const char *name, | ||
738 | marker_probe_func *probe, void *probe_private) | ||
739 | { | ||
740 | struct marker_entry *entry; | ||
741 | struct marker_probe_closure *old; | ||
742 | int ret = -ENOENT; | ||
743 | |||
744 | mutex_lock(&markers_mutex); | ||
745 | entry = get_marker(name); | ||
746 | if (!entry) | ||
747 | goto end; | ||
748 | if (entry->rcu_pending) | ||
749 | rcu_barrier_sched(); | ||
750 | old = marker_entry_remove_probe(entry, probe, probe_private); | ||
751 | mutex_unlock(&markers_mutex); | ||
752 | marker_update_probes(); | ||
753 | mutex_lock(&markers_mutex); | ||
754 | entry = get_marker(name); | ||
755 | if (!entry) | ||
756 | goto end; | ||
757 | if (entry->rcu_pending) | ||
758 | rcu_barrier_sched(); | ||
759 | entry->oldptr = old; | ||
760 | entry->rcu_pending = 1; | ||
761 | /* write rcu_pending before calling the RCU callback */ | ||
762 | smp_wmb(); | ||
763 | call_rcu_sched(&entry->rcu, free_old_closure); | ||
764 | remove_marker(name); /* Ignore busy error message */ | ||
765 | ret = 0; | ||
766 | end: | ||
767 | mutex_unlock(&markers_mutex); | ||
768 | return ret; | ||
769 | } | ||
770 | EXPORT_SYMBOL_GPL(marker_probe_unregister); | ||
771 | |||
772 | static struct marker_entry * | ||
773 | get_marker_from_private_data(marker_probe_func *probe, void *probe_private) | ||
774 | { | ||
775 | struct marker_entry *entry; | ||
776 | unsigned int i; | ||
777 | struct hlist_head *head; | ||
778 | struct hlist_node *node; | ||
779 | |||
780 | for (i = 0; i < MARKER_TABLE_SIZE; i++) { | ||
781 | head = &marker_table[i]; | ||
782 | hlist_for_each_entry(entry, node, head, hlist) { | ||
783 | if (!entry->ptype) { | ||
784 | if (entry->single.func == probe | ||
785 | && entry->single.probe_private | ||
786 | == probe_private) | ||
787 | return entry; | ||
788 | } else { | ||
789 | struct marker_probe_closure *closure; | ||
790 | closure = entry->multi; | ||
791 | for (i = 0; closure[i].func; i++) { | ||
792 | if (closure[i].func == probe && | ||
793 | closure[i].probe_private | ||
794 | == probe_private) | ||
795 | return entry; | ||
796 | } | ||
797 | } | ||
798 | } | ||
799 | } | ||
800 | return NULL; | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * marker_probe_unregister_private_data - Disconnect a probe from a marker | ||
805 | * @probe: probe function | ||
806 | * @probe_private: probe private data | ||
807 | * | ||
808 | * Unregister a probe by providing the registered private data. | ||
809 | * Only removes the first marker found in hash table. | ||
810 | * Return 0 on success or error value. | ||
811 | * We do not need to call a synchronize_sched to make sure the probes have | ||
812 | * finished running before doing a module unload, because the module unload | ||
813 | * itself uses stop_machine(), which ensures that every preempt-disabled section | ||
814 | * has finished. | ||
815 | */ | ||
816 | int marker_probe_unregister_private_data(marker_probe_func *probe, | ||
817 | void *probe_private) | ||
818 | { | ||
819 | struct marker_entry *entry; | ||
820 | int ret = 0; | ||
821 | struct marker_probe_closure *old; | ||
822 | |||
823 | mutex_lock(&markers_mutex); | ||
824 | entry = get_marker_from_private_data(probe, probe_private); | ||
825 | if (!entry) { | ||
826 | ret = -ENOENT; | ||
827 | goto end; | ||
828 | } | ||
829 | if (entry->rcu_pending) | ||
830 | rcu_barrier_sched(); | ||
831 | old = marker_entry_remove_probe(entry, NULL, probe_private); | ||
832 | mutex_unlock(&markers_mutex); | ||
833 | marker_update_probes(); | ||
834 | mutex_lock(&markers_mutex); | ||
835 | entry = get_marker_from_private_data(probe, probe_private); | ||
836 | if (!entry) | ||
837 | goto end; | ||
838 | if (entry->rcu_pending) | ||
839 | rcu_barrier_sched(); | ||
840 | entry->oldptr = old; | ||
841 | entry->rcu_pending = 1; | ||
842 | /* write rcu_pending before calling the RCU callback */ | ||
843 | smp_wmb(); | ||
844 | call_rcu_sched(&entry->rcu, free_old_closure); | ||
845 | remove_marker(entry->name); /* Ignore busy error message */ | ||
846 | end: | ||
847 | mutex_unlock(&markers_mutex); | ||
848 | return ret; | ||
849 | } | ||
850 | EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data); | ||
851 | |||
852 | /** | ||
853 | * marker_get_private_data - Get a marker's probe private data | ||
854 | * @name: marker name | ||
855 | * @probe: probe to match | ||
856 | * @num: get the nth matching probe's private data | ||
857 | * | ||
858 | * Returns the nth private data pointer (starting from 0) matching, or an | ||
859 | * ERR_PTR. | ||
860 | * Returns the private data pointer, or an ERR_PTR. | ||
861 | * The private data pointer should _only_ be dereferenced if the caller is the | ||
862 | * owner of the data, or its content could vanish. This is mostly used to | ||
863 | * confirm that a caller is the owner of a registered probe. | ||
864 | */ | ||
865 | void *marker_get_private_data(const char *name, marker_probe_func *probe, | ||
866 | int num) | ||
867 | { | ||
868 | struct hlist_head *head; | ||
869 | struct hlist_node *node; | ||
870 | struct marker_entry *e; | ||
871 | size_t name_len = strlen(name) + 1; | ||
872 | u32 hash = jhash(name, name_len-1, 0); | ||
873 | int i; | ||
874 | |||
875 | head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; | ||
876 | hlist_for_each_entry(e, node, head, hlist) { | ||
877 | if (!strcmp(name, e->name)) { | ||
878 | if (!e->ptype) { | ||
879 | if (num == 0 && e->single.func == probe) | ||
880 | return e->single.probe_private; | ||
881 | } else { | ||
882 | struct marker_probe_closure *closure; | ||
883 | int match = 0; | ||
884 | closure = e->multi; | ||
885 | for (i = 0; closure[i].func; i++) { | ||
886 | if (closure[i].func != probe) | ||
887 | continue; | ||
888 | if (match++ == num) | ||
889 | return closure[i].probe_private; | ||
890 | } | ||
891 | } | ||
892 | break; | ||
893 | } | ||
894 | } | ||
895 | return ERR_PTR(-ENOENT); | ||
896 | } | ||
897 | EXPORT_SYMBOL_GPL(marker_get_private_data); | ||
898 | |||
899 | #ifdef CONFIG_MODULES | ||
900 | |||
901 | int marker_module_notify(struct notifier_block *self, | ||
902 | unsigned long val, void *data) | ||
903 | { | ||
904 | struct module *mod = data; | ||
905 | |||
906 | switch (val) { | ||
907 | case MODULE_STATE_COMING: | ||
908 | marker_update_probe_range(mod->markers, | ||
909 | mod->markers + mod->num_markers); | ||
910 | break; | ||
911 | case MODULE_STATE_GOING: | ||
912 | marker_update_probe_range(mod->markers, | ||
913 | mod->markers + mod->num_markers); | ||
914 | break; | ||
915 | } | ||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | struct notifier_block marker_module_nb = { | ||
920 | .notifier_call = marker_module_notify, | ||
921 | .priority = 0, | ||
922 | }; | ||
923 | |||
924 | static int init_markers(void) | ||
925 | { | ||
926 | return register_module_notifier(&marker_module_nb); | ||
927 | } | ||
928 | __initcall(init_markers); | ||
929 | |||
930 | #endif /* CONFIG_MODULES */ | ||
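
For context on the API that disappears with this file: a probe was attached to a named marker by marker_probe_register() and received the format string plus a va_list; the matching CONFIG_MARKERS hooks in kernel/module.c are removed below, while tracepoints stay. A minimal sketch of a consumer of the old, now-removed interface, reconstructed from the signatures in the deleted code (marker name and format string are illustrative):

#include <linux/marker.h>
#include <linux/module.h>

/* matches marker_probe_func: private data, call-site data, format, args */
static void demo_probe(void *probe_private, void *call_private,
                       const char *fmt, va_list *args)
{
        /* consume the arguments described by fmt here */
}

static int __init demo_init(void)
{
        return marker_probe_register("demo_event", "value %d",
                                     demo_probe, NULL);
}

static void __exit demo_exit(void)
{
        marker_probe_unregister("demo_event", demo_probe, NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
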
diff --git a/kernel/module.c b/kernel/module.c index 05ce49ced8f6..8b7d8805819d 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/rculist.h> | 47 | #include <linux/rculist.h> |
48 | #include <asm/uaccess.h> | 48 | #include <asm/uaccess.h> |
49 | #include <asm/cacheflush.h> | 49 | #include <asm/cacheflush.h> |
50 | #include <asm/mmu_context.h> | ||
50 | #include <linux/license.h> | 51 | #include <linux/license.h> |
51 | #include <asm/sections.h> | 52 | #include <asm/sections.h> |
52 | #include <linux/tracepoint.h> | 53 | #include <linux/tracepoint.h> |
@@ -1535,6 +1536,10 @@ static void free_module(struct module *mod) | |||
1535 | 1536 | ||
1536 | /* Finally, free the core (containing the module structure) */ | 1537 | /* Finally, free the core (containing the module structure) */ |
1537 | module_free(mod, mod->module_core); | 1538 | module_free(mod, mod->module_core); |
1539 | |||
1540 | #ifdef CONFIG_MPU | ||
1541 | update_protections(current->mm); | ||
1542 | #endif | ||
1538 | } | 1543 | } |
1539 | 1544 | ||
1540 | void *__symbol_get(const char *symbol) | 1545 | void *__symbol_get(const char *symbol) |
@@ -1792,6 +1797,17 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs, | |||
1792 | } | 1797 | } |
1793 | } | 1798 | } |
1794 | 1799 | ||
1800 | static void free_modinfo(struct module *mod) | ||
1801 | { | ||
1802 | struct module_attribute *attr; | ||
1803 | int i; | ||
1804 | |||
1805 | for (i = 0; (attr = modinfo_attrs[i]); i++) { | ||
1806 | if (attr->free) | ||
1807 | attr->free(mod); | ||
1808 | } | ||
1809 | } | ||
1810 | |||
1795 | #ifdef CONFIG_KALLSYMS | 1811 | #ifdef CONFIG_KALLSYMS |
1796 | 1812 | ||
1797 | /* lookup symbol in given range of kernel_symbols */ | 1813 | /* lookup symbol in given range of kernel_symbols */ |
@@ -1857,13 +1873,93 @@ static char elf_type(const Elf_Sym *sym, | |||
1857 | return '?'; | 1873 | return '?'; |
1858 | } | 1874 | } |
1859 | 1875 | ||
1876 | static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, | ||
1877 | unsigned int shnum) | ||
1878 | { | ||
1879 | const Elf_Shdr *sec; | ||
1880 | |||
1881 | if (src->st_shndx == SHN_UNDEF | ||
1882 | || src->st_shndx >= shnum | ||
1883 | || !src->st_name) | ||
1884 | return false; | ||
1885 | |||
1886 | sec = sechdrs + src->st_shndx; | ||
1887 | if (!(sec->sh_flags & SHF_ALLOC) | ||
1888 | #ifndef CONFIG_KALLSYMS_ALL | ||
1889 | || !(sec->sh_flags & SHF_EXECINSTR) | ||
1890 | #endif | ||
1891 | || (sec->sh_entsize & INIT_OFFSET_MASK)) | ||
1892 | return false; | ||
1893 | |||
1894 | return true; | ||
1895 | } | ||
1896 | |||
1897 | static unsigned long layout_symtab(struct module *mod, | ||
1898 | Elf_Shdr *sechdrs, | ||
1899 | unsigned int symindex, | ||
1900 | unsigned int strindex, | ||
1901 | const Elf_Ehdr *hdr, | ||
1902 | const char *secstrings, | ||
1903 | unsigned long *pstroffs, | ||
1904 | unsigned long *strmap) | ||
1905 | { | ||
1906 | unsigned long symoffs; | ||
1907 | Elf_Shdr *symsect = sechdrs + symindex; | ||
1908 | Elf_Shdr *strsect = sechdrs + strindex; | ||
1909 | const Elf_Sym *src; | ||
1910 | const char *strtab; | ||
1911 | unsigned int i, nsrc, ndst; | ||
1912 | |||
1913 | /* Put symbol section at end of init part of module. */ | ||
1914 | symsect->sh_flags |= SHF_ALLOC; | ||
1915 | symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, | ||
1916 | symindex) | INIT_OFFSET_MASK; | ||
1917 | DEBUGP("\t%s\n", secstrings + symsect->sh_name); | ||
1918 | |||
1919 | src = (void *)hdr + symsect->sh_offset; | ||
1920 | nsrc = symsect->sh_size / sizeof(*src); | ||
1921 | strtab = (void *)hdr + strsect->sh_offset; | ||
1922 | for (ndst = i = 1; i < nsrc; ++i, ++src) | ||
1923 | if (is_core_symbol(src, sechdrs, hdr->e_shnum)) { | ||
1924 | unsigned int j = src->st_name; | ||
1925 | |||
1926 | while(!__test_and_set_bit(j, strmap) && strtab[j]) | ||
1927 | ++j; | ||
1928 | ++ndst; | ||
1929 | } | ||
1930 | |||
1931 | /* Append room for core symbols at end of core part. */ | ||
1932 | symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); | ||
1933 | mod->core_size = symoffs + ndst * sizeof(Elf_Sym); | ||
1934 | |||
1935 | /* Put string table section at end of init part of module. */ | ||
1936 | strsect->sh_flags |= SHF_ALLOC; | ||
1937 | strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, | ||
1938 | strindex) | INIT_OFFSET_MASK; | ||
1939 | DEBUGP("\t%s\n", secstrings + strsect->sh_name); | ||
1940 | |||
1941 | /* Append room for core symbols' strings at end of core part. */ | ||
1942 | *pstroffs = mod->core_size; | ||
1943 | __set_bit(0, strmap); | ||
1944 | mod->core_size += bitmap_weight(strmap, strsect->sh_size); | ||
1945 | |||
1946 | return symoffs; | ||
1947 | } | ||
1948 | |||
1860 | static void add_kallsyms(struct module *mod, | 1949 | static void add_kallsyms(struct module *mod, |
1861 | Elf_Shdr *sechdrs, | 1950 | Elf_Shdr *sechdrs, |
1951 | unsigned int shnum, | ||
1862 | unsigned int symindex, | 1952 | unsigned int symindex, |
1863 | unsigned int strindex, | 1953 | unsigned int strindex, |
1864 | const char *secstrings) | 1954 | unsigned long symoffs, |
1955 | unsigned long stroffs, | ||
1956 | const char *secstrings, | ||
1957 | unsigned long *strmap) | ||
1865 | { | 1958 | { |
1866 | unsigned int i; | 1959 | unsigned int i, ndst; |
1960 | const Elf_Sym *src; | ||
1961 | Elf_Sym *dst; | ||
1962 | char *s; | ||
1867 | 1963 | ||
1868 | mod->symtab = (void *)sechdrs[symindex].sh_addr; | 1964 | mod->symtab = (void *)sechdrs[symindex].sh_addr; |
1869 | mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | 1965 | mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym); |
@@ -1873,13 +1969,46 @@ static void add_kallsyms(struct module *mod, | |||
1873 | for (i = 0; i < mod->num_symtab; i++) | 1969 | for (i = 0; i < mod->num_symtab; i++) |
1874 | mod->symtab[i].st_info | 1970 | mod->symtab[i].st_info |
1875 | = elf_type(&mod->symtab[i], sechdrs, secstrings, mod); | 1971 | = elf_type(&mod->symtab[i], sechdrs, secstrings, mod); |
1972 | |||
1973 | mod->core_symtab = dst = mod->module_core + symoffs; | ||
1974 | src = mod->symtab; | ||
1975 | *dst = *src; | ||
1976 | for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) { | ||
1977 | if (!is_core_symbol(src, sechdrs, shnum)) | ||
1978 | continue; | ||
1979 | dst[ndst] = *src; | ||
1980 | dst[ndst].st_name = bitmap_weight(strmap, dst[ndst].st_name); | ||
1981 | ++ndst; | ||
1982 | } | ||
1983 | mod->core_num_syms = ndst; | ||
1984 | |||
1985 | mod->core_strtab = s = mod->module_core + stroffs; | ||
1986 | for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i) | ||
1987 | if (test_bit(i, strmap)) | ||
1988 | *++s = mod->strtab[i]; | ||
1876 | } | 1989 | } |
1877 | #else | 1990 | #else |
1991 | static inline unsigned long layout_symtab(struct module *mod, | ||
1992 | Elf_Shdr *sechdrs, | ||
1993 | unsigned int symindex, | ||
1994 | unsigned int strindex, | ||
1995 | const Elf_Ehdr *hdr, | ||
1996 | const char *secstrings, | ||
1997 | unsigned long *pstroffs, | ||
1998 | unsigned long *strmap) | ||
1999 | { | ||
2000 | return 0; | ||
2001 | } | ||
2002 | |||
1878 | static inline void add_kallsyms(struct module *mod, | 2003 | static inline void add_kallsyms(struct module *mod, |
1879 | Elf_Shdr *sechdrs, | 2004 | Elf_Shdr *sechdrs, |
2005 | unsigned int shnum, | ||
1880 | unsigned int symindex, | 2006 | unsigned int symindex, |
1881 | unsigned int strindex, | 2007 | unsigned int strindex, |
1882 | const char *secstrings) | 2008 | unsigned long symoffs, |
2009 | unsigned long stroffs, | ||
2010 | const char *secstrings, | ||
2011 | const unsigned long *strmap) | ||
1883 | { | 2012 | { |
1884 | } | 2013 | } |
1885 | #endif /* CONFIG_KALLSYMS */ | 2014 | #endif /* CONFIG_KALLSYMS */ |
@@ -1954,6 +2083,8 @@ static noinline struct module *load_module(void __user *umod, | |||
1954 | struct module *mod; | 2083 | struct module *mod; |
1955 | long err = 0; | 2084 | long err = 0; |
1956 | void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ | 2085 | void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ |
2086 | unsigned long symoffs, stroffs, *strmap; | ||
2087 | |||
1957 | mm_segment_t old_fs; | 2088 | mm_segment_t old_fs; |
1958 | 2089 | ||
1959 | DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", | 2090 | DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", |
@@ -2035,11 +2166,6 @@ static noinline struct module *load_module(void __user *umod, | |||
2035 | /* Don't keep modinfo and version sections. */ | 2166 | /* Don't keep modinfo and version sections. */ |
2036 | sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC; | 2167 | sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC; |
2037 | sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC; | 2168 | sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC; |
2038 | #ifdef CONFIG_KALLSYMS | ||
2039 | /* Keep symbol and string tables for decoding later. */ | ||
2040 | sechdrs[symindex].sh_flags |= SHF_ALLOC; | ||
2041 | sechdrs[strindex].sh_flags |= SHF_ALLOC; | ||
2042 | #endif | ||
2043 | 2169 | ||
2044 | /* Check module struct version now, before we try to use module. */ | 2170 | /* Check module struct version now, before we try to use module. */ |
2045 | if (!check_modstruct_version(sechdrs, versindex, mod)) { | 2171 | if (!check_modstruct_version(sechdrs, versindex, mod)) { |
@@ -2075,6 +2201,13 @@ static noinline struct module *load_module(void __user *umod, | |||
2075 | goto free_hdr; | 2201 | goto free_hdr; |
2076 | } | 2202 | } |
2077 | 2203 | ||
2204 | strmap = kzalloc(BITS_TO_LONGS(sechdrs[strindex].sh_size) | ||
2205 | * sizeof(long), GFP_KERNEL); | ||
2206 | if (!strmap) { | ||
2207 | err = -ENOMEM; | ||
2208 | goto free_mod; | ||
2209 | } | ||
2210 | |||
2078 | if (find_module(mod->name)) { | 2211 | if (find_module(mod->name)) { |
2079 | err = -EEXIST; | 2212 | err = -EEXIST; |
2080 | goto free_mod; | 2213 | goto free_mod; |
@@ -2104,6 +2237,8 @@ static noinline struct module *load_module(void __user *umod, | |||
2104 | this is done generically; there doesn't appear to be any | 2237 | this is done generically; there doesn't appear to be any |
2105 | special cases for the architectures. */ | 2238 | special cases for the architectures. */ |
2106 | layout_sections(mod, hdr, sechdrs, secstrings); | 2239 | layout_sections(mod, hdr, sechdrs, secstrings); |
2240 | symoffs = layout_symtab(mod, sechdrs, symindex, strindex, hdr, | ||
2241 | secstrings, &stroffs, strmap); | ||
2107 | 2242 | ||
2108 | /* Do the allocs. */ | 2243 | /* Do the allocs. */ |
2109 | ptr = module_alloc_update_bounds(mod->core_size); | 2244 | ptr = module_alloc_update_bounds(mod->core_size); |
@@ -2237,10 +2372,6 @@ static noinline struct module *load_module(void __user *umod, | |||
2237 | sizeof(*mod->ctors), &mod->num_ctors); | 2372 | sizeof(*mod->ctors), &mod->num_ctors); |
2238 | #endif | 2373 | #endif |
2239 | 2374 | ||
2240 | #ifdef CONFIG_MARKERS | ||
2241 | mod->markers = section_objs(hdr, sechdrs, secstrings, "__markers", | ||
2242 | sizeof(*mod->markers), &mod->num_markers); | ||
2243 | #endif | ||
2244 | #ifdef CONFIG_TRACEPOINTS | 2375 | #ifdef CONFIG_TRACEPOINTS |
2245 | mod->tracepoints = section_objs(hdr, sechdrs, secstrings, | 2376 | mod->tracepoints = section_objs(hdr, sechdrs, secstrings, |
2246 | "__tracepoints", | 2377 | "__tracepoints", |
@@ -2312,7 +2443,10 @@ static noinline struct module *load_module(void __user *umod, | |||
2312 | percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr, | 2443 | percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr, |
2313 | sechdrs[pcpuindex].sh_size); | 2444 | sechdrs[pcpuindex].sh_size); |
2314 | 2445 | ||
2315 | add_kallsyms(mod, sechdrs, symindex, strindex, secstrings); | 2446 | add_kallsyms(mod, sechdrs, hdr->e_shnum, symindex, strindex, |
2447 | symoffs, stroffs, secstrings, strmap); | ||
2448 | kfree(strmap); | ||
2449 | strmap = NULL; | ||
2316 | 2450 | ||
2317 | if (!mod->taints) { | 2451 | if (!mod->taints) { |
2318 | struct _ddebug *debug; | 2452 | struct _ddebug *debug; |
@@ -2384,13 +2518,14 @@ static noinline struct module *load_module(void __user *umod, | |||
2384 | synchronize_sched(); | 2518 | synchronize_sched(); |
2385 | module_arch_cleanup(mod); | 2519 | module_arch_cleanup(mod); |
2386 | cleanup: | 2520 | cleanup: |
2521 | free_modinfo(mod); | ||
2387 | kobject_del(&mod->mkobj.kobj); | 2522 | kobject_del(&mod->mkobj.kobj); |
2388 | kobject_put(&mod->mkobj.kobj); | 2523 | kobject_put(&mod->mkobj.kobj); |
2389 | free_unload: | 2524 | free_unload: |
2390 | module_unload_free(mod); | 2525 | module_unload_free(mod); |
2391 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) | 2526 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) |
2392 | free_init: | ||
2393 | percpu_modfree(mod->refptr); | 2527 | percpu_modfree(mod->refptr); |
2528 | free_init: | ||
2394 | #endif | 2529 | #endif |
2395 | module_free(mod, mod->module_init); | 2530 | module_free(mod, mod->module_init); |
2396 | free_core: | 2531 | free_core: |
@@ -2401,6 +2536,7 @@ static noinline struct module *load_module(void __user *umod, | |||
2401 | percpu_modfree(percpu); | 2536 | percpu_modfree(percpu); |
2402 | free_mod: | 2537 | free_mod: |
2403 | kfree(args); | 2538 | kfree(args); |
2539 | kfree(strmap); | ||
2404 | free_hdr: | 2540 | free_hdr: |
2405 | vfree(hdr); | 2541 | vfree(hdr); |
2406 | return ERR_PTR(err); | 2542 | return ERR_PTR(err); |
@@ -2490,6 +2626,11 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, | |||
2490 | /* Drop initial reference. */ | 2626 | /* Drop initial reference. */ |
2491 | module_put(mod); | 2627 | module_put(mod); |
2492 | trim_init_extable(mod); | 2628 | trim_init_extable(mod); |
2629 | #ifdef CONFIG_KALLSYMS | ||
2630 | mod->num_symtab = mod->core_num_syms; | ||
2631 | mod->symtab = mod->core_symtab; | ||
2632 | mod->strtab = mod->core_strtab; | ||
2633 | #endif | ||
2493 | module_free(mod, mod->module_init); | 2634 | module_free(mod, mod->module_init); |
2494 | mod->module_init = NULL; | 2635 | mod->module_init = NULL; |
2495 | mod->init_size = 0; | 2636 | mod->init_size = 0; |
@@ -2951,27 +3092,12 @@ void module_layout(struct module *mod, | |||
2951 | struct modversion_info *ver, | 3092 | struct modversion_info *ver, |
2952 | struct kernel_param *kp, | 3093 | struct kernel_param *kp, |
2953 | struct kernel_symbol *ks, | 3094 | struct kernel_symbol *ks, |
2954 | struct marker *marker, | ||
2955 | struct tracepoint *tp) | 3095 | struct tracepoint *tp) |
2956 | { | 3096 | { |
2957 | } | 3097 | } |
2958 | EXPORT_SYMBOL(module_layout); | 3098 | EXPORT_SYMBOL(module_layout); |
2959 | #endif | 3099 | #endif |
2960 | 3100 | ||
2961 | #ifdef CONFIG_MARKERS | ||
2962 | void module_update_markers(void) | ||
2963 | { | ||
2964 | struct module *mod; | ||
2965 | |||
2966 | mutex_lock(&module_mutex); | ||
2967 | list_for_each_entry(mod, &modules, list) | ||
2968 | if (!mod->taints) | ||
2969 | marker_update_probe_range(mod->markers, | ||
2970 | mod->markers + mod->num_markers); | ||
2971 | mutex_unlock(&module_mutex); | ||
2972 | } | ||
2973 | #endif | ||
2974 | |||
2975 | #ifdef CONFIG_TRACEPOINTS | 3101 | #ifdef CONFIG_TRACEPOINTS |
2976 | void module_update_tracepoints(void) | 3102 | void module_update_tracepoints(void) |
2977 | { | 3103 | { |
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c index 50d022e5a560..ec815a960b5d 100644 --- a/kernel/mutex-debug.c +++ b/kernel/mutex-debug.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/poison.h> | 18 | #include <linux/poison.h> |
19 | #include <linux/sched.h> | ||
19 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
20 | #include <linux/kallsyms.h> | 21 | #include <linux/kallsyms.h> |
21 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c index 5aa854f9e5ae..2a5dfec8efe0 100644 --- a/kernel/ns_cgroup.c +++ b/kernel/ns_cgroup.c | |||
@@ -42,8 +42,8 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid) | |||
42 | * (hence either you are in the same cgroup as task, or in an | 42 | * (hence either you are in the same cgroup as task, or in an |
43 | * ancestor cgroup thereof) | 43 | * ancestor cgroup thereof) |
44 | */ | 44 | */ |
45 | static int ns_can_attach(struct cgroup_subsys *ss, | 45 | static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup, |
46 | struct cgroup *new_cgroup, struct task_struct *task) | 46 | struct task_struct *task, bool threadgroup) |
47 | { | 47 | { |
48 | if (current != task) { | 48 | if (current != task) { |
49 | if (!capable(CAP_SYS_ADMIN)) | 49 | if (!capable(CAP_SYS_ADMIN)) |
@@ -56,6 +56,18 @@ static int ns_can_attach(struct cgroup_subsys *ss, | |||
56 | if (!cgroup_is_descendant(new_cgroup, task)) | 56 | if (!cgroup_is_descendant(new_cgroup, task)) |
57 | return -EPERM; | 57 | return -EPERM; |
58 | 58 | ||
59 | if (threadgroup) { | ||
60 | struct task_struct *c; | ||
61 | rcu_read_lock(); | ||
62 | list_for_each_entry_rcu(c, &task->thread_group, thread_group) { | ||
63 | if (!cgroup_is_descendant(new_cgroup, c)) { | ||
64 | rcu_read_unlock(); | ||
65 | return -EPERM; | ||
66 | } | ||
67 | } | ||
68 | rcu_read_unlock(); | ||
69 | } | ||
70 | |||
59 | return 0; | 71 | return 0; |
60 | } | 72 | } |
61 | 73 | ||
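The new threadgroup flag makes ns_can_attach() validate every thread in the group, not just the task named in the request, so a move either succeeds for the whole group or is rejected outright. A minimal user-space sketch of that all-or-nothing rule, with a plain array standing in for the RCU-protected thread_group list and path prefixes standing in for cgroup_is_descendant() (struct thread, is_descendant() and can_attach_threadgroup() are illustrative names, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in: each "thread" just records its current cgroup path. */
struct thread { const char *name; const char *cgroup; };

/* new_cgroup is a descendant of (or equal to) the thread's cgroup when the
 * thread's cgroup path is a prefix of the new path. */
static bool is_descendant(const char *new_cgroup, const struct thread *t)
{
	return strncmp(new_cgroup, t->cgroup, strlen(t->cgroup)) == 0;
}

/* Mirror of the all-or-nothing check: if any thread in the group would
 * escape its namespace hierarchy, refuse the whole attach. */
static int can_attach_threadgroup(const char *new_cgroup,
				  const struct thread *group, int nr)
{
	for (int i = 0; i < nr; i++)
		if (!is_descendant(new_cgroup, &group[i]))
			return -1;	/* -EPERM in the kernel */
	return 0;
}

int main(void)
{
	struct thread group[] = {
		{ "main",   "/ns/a" },
		{ "worker", "/ns/a" },
		{ "helper", "/ns/b" },	/* this one blocks the move */
	};
	printf("attach to /ns/a/child: %d\n",
	       can_attach_threadgroup("/ns/a/child", group, 3));
	return 0;
}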
diff --git a/kernel/panic.c b/kernel/panic.c index 512ab73b0ca3..96b45d0b4ba5 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -90,6 +90,8 @@ NORET_TYPE void panic(const char * fmt, ...) | |||
90 | 90 | ||
91 | atomic_notifier_call_chain(&panic_notifier_list, 0, buf); | 91 | atomic_notifier_call_chain(&panic_notifier_list, 0, buf); |
92 | 92 | ||
93 | bust_spinlocks(0); | ||
94 | |||
93 | if (!panic_blink) | 95 | if (!panic_blink) |
94 | panic_blink = no_blink; | 96 | panic_blink = no_blink; |
95 | 97 | ||
@@ -136,7 +138,6 @@ NORET_TYPE void panic(const char * fmt, ...) | |||
136 | mdelay(1); | 138 | mdelay(1); |
137 | i++; | 139 | i++; |
138 | } | 140 | } |
139 | bust_spinlocks(0); | ||
140 | } | 141 | } |
141 | 142 | ||
142 | EXPORT_SYMBOL(panic); | 143 | EXPORT_SYMBOL(panic); |
@@ -177,7 +178,7 @@ static const struct tnt tnts[] = { | |||
177 | * 'W' - Taint on warning. | 178 | * 'W' - Taint on warning. |
178 | * 'C' - modules from drivers/staging are loaded. | 179 | * 'C' - modules from drivers/staging are loaded. |
179 | * | 180 | * |
180 | * The string is overwritten by the next call to print_taint(). | 181 | * The string is overwritten by the next call to print_tainted(). |
181 | */ | 182 | */ |
182 | const char *print_tainted(void) | 183 | const char *print_tainted(void) |
183 | { | 184 | { |
diff --git a/kernel/params.c b/kernel/params.c index 7f6912ced2ba..d656c276508d 100644 --- a/kernel/params.c +++ b/kernel/params.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/ctype.h> | ||
26 | 27 | ||
27 | #if 0 | 28 | #if 0 |
28 | #define DEBUGP printk | 29 | #define DEBUGP printk |
@@ -87,7 +88,7 @@ static char *next_arg(char *args, char **param, char **val) | |||
87 | } | 88 | } |
88 | 89 | ||
89 | for (i = 0; args[i]; i++) { | 90 | for (i = 0; args[i]; i++) { |
90 | if (args[i] == ' ' && !in_quote) | 91 | if (isspace(args[i]) && !in_quote) |
91 | break; | 92 | break; |
92 | if (equals == 0) { | 93 | if (equals == 0) { |
93 | if (args[i] == '=') | 94 | if (args[i] == '=') |
@@ -121,7 +122,7 @@ static char *next_arg(char *args, char **param, char **val) | |||
121 | next = args + i; | 122 | next = args + i; |
122 | 123 | ||
123 | /* Chew up trailing spaces. */ | 124 | /* Chew up trailing spaces. */ |
124 | while (*next == ' ') | 125 | while (isspace(*next)) |
125 | next++; | 126 | next++; |
126 | return next; | 127 | return next; |
127 | } | 128 | } |
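Switching from a literal ' ' test to isspace() means tabs and newlines on the command line now terminate arguments as well. A stand-alone sketch of the same tokenizing rule, simplified to drop the quote and '=' handling (next_token() and the sample command line are made up for the demo):

#include <ctype.h>
#include <stdio.h>

/* NUL-terminate the current argument and return a pointer to the next one.
 * Any whitespace, not just a space, splits arguments. */
static char *next_token(char *args)
{
	int i;

	for (i = 0; args[i]; i++)
		if (isspace((unsigned char)args[i]))
			break;
	if (args[i]) {
		args[i] = '\0';
		i++;
	}
	/* Chew up trailing whitespace. */
	while (isspace((unsigned char)args[i]))
		i++;
	return args + i;
}

int main(void)
{
	char cmdline[] = "root=/dev/sda1\tquiet\nloglevel=3";
	char *p = cmdline;

	while (*p) {
		char *next = next_token(p);
		printf("arg: '%s'\n", p);
		p = next;
	}
	return 0;
}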
@@ -138,7 +139,7 @@ int parse_args(const char *name, | |||
138 | DEBUGP("Parsing ARGS: %s\n", args); | 139 | DEBUGP("Parsing ARGS: %s\n", args); |
139 | 140 | ||
140 | /* Chew leading spaces */ | 141 | /* Chew leading spaces */ |
141 | while (*args == ' ') | 142 | while (isspace(*args)) |
142 | args++; | 143 | args++; |
143 | 144 | ||
144 | while (*args) { | 145 | while (*args) { |
@@ -217,15 +218,11 @@ int param_set_charp(const char *val, struct kernel_param *kp) | |||
217 | return -ENOSPC; | 218 | return -ENOSPC; |
218 | } | 219 | } |
219 | 220 | ||
220 | if (kp->flags & KPARAM_KMALLOCED) | ||
221 | kfree(*(char **)kp->arg); | ||
222 | |||
223 | /* This is a hack. We can't need to strdup in early boot, and we | 221 | /* This is a hack. We can't need to strdup in early boot, and we |
224 | * don't need to; this mangled commandline is preserved. */ | 222 | * don't need to; this mangled commandline is preserved. */ |
225 | if (slab_is_available()) { | 223 | if (slab_is_available()) { |
226 | kp->flags |= KPARAM_KMALLOCED; | ||
227 | *(char **)kp->arg = kstrdup(val, GFP_KERNEL); | 224 | *(char **)kp->arg = kstrdup(val, GFP_KERNEL); |
228 | if (!kp->arg) | 225 | if (!*(char **)kp->arg) |
229 | return -ENOMEM; | 226 | return -ENOMEM; |
230 | } else | 227 | } else |
231 | *(const char **)kp->arg = val; | 228 | *(const char **)kp->arg = val; |
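Note the corrected error check in the new version: the old code tested kp->arg, which is never NULL here, instead of the pointer kstrdup() actually stored. A tiny user-space reproduction of that distinction, using strdup() in place of kstrdup() (struct kernel_param_like and set_charp() are illustrative stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct kernel_param_like { void *arg; };	/* points at the storage slot */

static int set_charp(const char *val, struct kernel_param_like *kp)
{
	*(char **)kp->arg = strdup(val);
	/* The fix: check the pointer we just stored, not kp->arg itself,
	 * which always points at valid storage and so never catches an
	 * allocation failure. */
	if (!*(char **)kp->arg)
		return -1;	/* -ENOMEM in the kernel */
	return 0;
}

int main(void)
{
	char *value = NULL;
	struct kernel_param_like kp = { .arg = &value };

	if (set_charp("noop", &kp) == 0)
		printf("param = %s\n", value);
	free(value);
	return 0;
}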
@@ -303,6 +300,7 @@ static int param_array(const char *name, | |||
303 | unsigned int min, unsigned int max, | 300 | unsigned int min, unsigned int max, |
304 | void *elem, int elemsize, | 301 | void *elem, int elemsize, |
305 | int (*set)(const char *, struct kernel_param *kp), | 302 | int (*set)(const char *, struct kernel_param *kp), |
303 | u16 flags, | ||
306 | unsigned int *num) | 304 | unsigned int *num) |
307 | { | 305 | { |
308 | int ret; | 306 | int ret; |
@@ -312,6 +310,7 @@ static int param_array(const char *name, | |||
312 | /* Get the name right for errors. */ | 310 | /* Get the name right for errors. */ |
313 | kp.name = name; | 311 | kp.name = name; |
314 | kp.arg = elem; | 312 | kp.arg = elem; |
313 | kp.flags = flags; | ||
315 | 314 | ||
316 | /* No equals sign? */ | 315 | /* No equals sign? */ |
317 | if (!val) { | 316 | if (!val) { |
@@ -357,7 +356,8 @@ int param_array_set(const char *val, struct kernel_param *kp) | |||
357 | unsigned int temp_num; | 356 | unsigned int temp_num; |
358 | 357 | ||
359 | return param_array(kp->name, val, 1, arr->max, arr->elem, | 358 | return param_array(kp->name, val, 1, arr->max, arr->elem, |
360 | arr->elemsize, arr->set, arr->num ?: &temp_num); | 359 | arr->elemsize, arr->set, kp->flags, |
360 | arr->num ?: &temp_num); | ||
361 | } | 361 | } |
362 | 362 | ||
363 | int param_array_get(char *buffer, struct kernel_param *kp) | 363 | int param_array_get(char *buffer, struct kernel_param *kp) |
@@ -604,11 +604,7 @@ void module_param_sysfs_remove(struct module *mod) | |||
604 | 604 | ||
605 | void destroy_params(const struct kernel_param *params, unsigned num) | 605 | void destroy_params(const struct kernel_param *params, unsigned num) |
606 | { | 606 | { |
607 | unsigned int i; | 607 | /* FIXME: This should free kmalloced charp parameters. It doesn't. */ |
608 | |||
609 | for (i = 0; i < num; i++) | ||
610 | if (params[i].flags & KPARAM_KMALLOCED) | ||
611 | kfree(*(char **)params[i].arg); | ||
612 | } | 608 | } |
613 | 609 | ||
614 | static void __init kernel_add_sysfs_param(const char *name, | 610 | static void __init kernel_add_sysfs_param(const char *name, |
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c deleted file mode 100644 index 8cb94a52d1bb..000000000000 --- a/kernel/perf_counter.c +++ /dev/null | |||
@@ -1,4963 +0,0 @@ | |||
1 | /* | ||
2 | * Performance counter core code | ||
3 | * | ||
4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | ||
5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | ||
6 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
7 | * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
8 | * | ||
9 | * For licensing details see kernel-base/COPYING | ||
10 | */ | ||
11 | |||
12 | #include <linux/fs.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/cpu.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/file.h> | ||
17 | #include <linux/poll.h> | ||
18 | #include <linux/sysfs.h> | ||
19 | #include <linux/dcache.h> | ||
20 | #include <linux/percpu.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <linux/vmstat.h> | ||
23 | #include <linux/hardirq.h> | ||
24 | #include <linux/rculist.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <linux/syscalls.h> | ||
27 | #include <linux/anon_inodes.h> | ||
28 | #include <linux/kernel_stat.h> | ||
29 | #include <linux/perf_counter.h> | ||
30 | |||
31 | #include <asm/irq_regs.h> | ||
32 | |||
33 | /* | ||
34 | * Each CPU has a list of per CPU counters: | ||
35 | */ | ||
36 | DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); | ||
37 | |||
38 | int perf_max_counters __read_mostly = 1; | ||
39 | static int perf_reserved_percpu __read_mostly; | ||
40 | static int perf_overcommit __read_mostly = 1; | ||
41 | |||
42 | static atomic_t nr_counters __read_mostly; | ||
43 | static atomic_t nr_mmap_counters __read_mostly; | ||
44 | static atomic_t nr_comm_counters __read_mostly; | ||
45 | static atomic_t nr_task_counters __read_mostly; | ||
46 | |||
47 | /* | ||
48 | * perf counter paranoia level: | ||
49 | * -1 - not paranoid at all | ||
50 | * 0 - disallow raw tracepoint access for unpriv | ||
51 | * 1 - disallow cpu counters for unpriv | ||
52 | * 2 - disallow kernel profiling for unpriv | ||
53 | */ | ||
54 | int sysctl_perf_counter_paranoid __read_mostly = 1; | ||
55 | |||
56 | static inline bool perf_paranoid_tracepoint_raw(void) | ||
57 | { | ||
58 | return sysctl_perf_counter_paranoid > -1; | ||
59 | } | ||
60 | |||
61 | static inline bool perf_paranoid_cpu(void) | ||
62 | { | ||
63 | return sysctl_perf_counter_paranoid > 0; | ||
64 | } | ||
65 | |||
66 | static inline bool perf_paranoid_kernel(void) | ||
67 | { | ||
68 | return sysctl_perf_counter_paranoid > 1; | ||
69 | } | ||
70 | |||
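The paranoia sysctl is a single integer read as three nested thresholds, as the helpers above show. A quick user-space illustration of how one value gates the three restrictions for unprivileged users (the thresholds come from the comment above; the demo itself is just the comparisons):

#include <stdbool.h>
#include <stdio.h>

static int paranoid = 1;	/* default level, per the comment above */

static bool deny_tracepoint_raw(void) { return paranoid > -1; }
static bool deny_cpu_counters(void)   { return paranoid > 0;  }
static bool deny_kernel_prof(void)    { return paranoid > 1;  }

int main(void)
{
	for (paranoid = -1; paranoid <= 2; paranoid++)
		printf("level %2d: raw tracepoints %s, cpu counters %s, "
		       "kernel profiling %s\n", paranoid,
		       deny_tracepoint_raw() ? "denied" : "ok",
		       deny_cpu_counters()   ? "denied" : "ok",
		       deny_kernel_prof()    ? "denied" : "ok");
	return 0;
}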
71 | int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ | ||
72 | |||
73 | /* | ||
74 | * max perf counter sample rate | ||
75 | */ | ||
76 | int sysctl_perf_counter_sample_rate __read_mostly = 100000; | ||
77 | |||
78 | static atomic64_t perf_counter_id; | ||
79 | |||
80 | /* | ||
81 | * Lock for (sysadmin-configurable) counter reservations: | ||
82 | */ | ||
83 | static DEFINE_SPINLOCK(perf_resource_lock); | ||
84 | |||
85 | /* | ||
86 | * Architecture provided APIs - weak aliases: | ||
87 | */ | ||
88 | extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter) | ||
89 | { | ||
90 | return NULL; | ||
91 | } | ||
92 | |||
93 | void __weak hw_perf_disable(void) { barrier(); } | ||
94 | void __weak hw_perf_enable(void) { barrier(); } | ||
95 | |||
96 | void __weak hw_perf_counter_setup(int cpu) { barrier(); } | ||
97 | void __weak hw_perf_counter_setup_online(int cpu) { barrier(); } | ||
98 | |||
99 | int __weak | ||
100 | hw_perf_group_sched_in(struct perf_counter *group_leader, | ||
101 | struct perf_cpu_context *cpuctx, | ||
102 | struct perf_counter_context *ctx, int cpu) | ||
103 | { | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | void __weak perf_counter_print_debug(void) { } | ||
108 | |||
109 | static DEFINE_PER_CPU(int, perf_disable_count); | ||
110 | |||
111 | void __perf_disable(void) | ||
112 | { | ||
113 | __get_cpu_var(perf_disable_count)++; | ||
114 | } | ||
115 | |||
116 | bool __perf_enable(void) | ||
117 | { | ||
118 | return !--__get_cpu_var(perf_disable_count); | ||
119 | } | ||
120 | |||
121 | void perf_disable(void) | ||
122 | { | ||
123 | __perf_disable(); | ||
124 | hw_perf_disable(); | ||
125 | } | ||
126 | |||
127 | void perf_enable(void) | ||
128 | { | ||
129 | if (__perf_enable()) | ||
130 | hw_perf_enable(); | ||
131 | } | ||
132 | |||
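perf_disable()/perf_enable() nest: each disable bumps a per-CPU count and the hardware is only re-enabled when the count drops back to zero. A single-threaded user-space sketch of the same nesting discipline (a plain int stands in for the per-CPU variable; hw_off()/hw_on() are hypothetical and assumed safe to call repeatedly, as the arch hooks are):

#include <stdio.h>

static int disable_count;	/* stands in for the per-CPU nesting count */

static void hw_off(void) { printf("  hw: counters stopped\n"); }
static void hw_on(void)  { printf("  hw: counters restarted\n"); }

static void demo_disable(void)
{
	disable_count++;
	hw_off();
}

static void demo_enable(void)
{
	/* Only the outermost enable actually restarts the hardware. */
	if (--disable_count == 0)
		hw_on();
}

int main(void)
{
	printf("outer disable\n");  demo_disable();
	printf("nested disable\n"); demo_disable();
	printf("nested enable\n");  demo_enable();
	printf("outer enable\n");   demo_enable();
	return 0;
}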
133 | static void get_ctx(struct perf_counter_context *ctx) | ||
134 | { | ||
135 | WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); | ||
136 | } | ||
137 | |||
138 | static void free_ctx(struct rcu_head *head) | ||
139 | { | ||
140 | struct perf_counter_context *ctx; | ||
141 | |||
142 | ctx = container_of(head, struct perf_counter_context, rcu_head); | ||
143 | kfree(ctx); | ||
144 | } | ||
145 | |||
146 | static void put_ctx(struct perf_counter_context *ctx) | ||
147 | { | ||
148 | if (atomic_dec_and_test(&ctx->refcount)) { | ||
149 | if (ctx->parent_ctx) | ||
150 | put_ctx(ctx->parent_ctx); | ||
151 | if (ctx->task) | ||
152 | put_task_struct(ctx->task); | ||
153 | call_rcu(&ctx->rcu_head, free_ctx); | ||
154 | } | ||
155 | } | ||
156 | |||
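put_ctx() drops one reference and, on the last one, releases the pin on the parent context before freeing itself, so a chain of cloned contexts unwinds from the bottom up. A condensed user-space model of that parent-chain refcounting (plain ints instead of atomics, free() instead of call_rcu(); purely illustrative):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int refcount;
	struct ctx *parent;
	const char *name;
};

static void put_ctx(struct ctx *c)
{
	if (--c->refcount == 0) {
		/* A cloned context pins its parent; drop that pin before
		 * freeing ourselves, mirroring the code above. */
		if (c->parent)
			put_ctx(c->parent);
		printf("freeing %s\n", c->name);
		free(c);
	}
}

static struct ctx *new_ctx(const char *name, struct ctx *parent)
{
	struct ctx *c = malloc(sizeof(*c));
	if (!c)
		exit(1);
	c->refcount = 1;
	c->parent = parent;
	c->name = name;
	if (parent)
		parent->refcount++;	/* the clone holds a reference */
	return c;
}

int main(void)
{
	struct ctx *parent = new_ctx("parent", NULL);
	struct ctx *child  = new_ctx("child", parent);

	put_ctx(parent);	/* child's pin keeps the parent alive */
	put_ctx(child);		/* frees the child, then the parent */
	return 0;
}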
157 | static void unclone_ctx(struct perf_counter_context *ctx) | ||
158 | { | ||
159 | if (ctx->parent_ctx) { | ||
160 | put_ctx(ctx->parent_ctx); | ||
161 | ctx->parent_ctx = NULL; | ||
162 | } | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * If we inherit counters we want to return the parent counter id | ||
167 | * to userspace. | ||
168 | */ | ||
169 | static u64 primary_counter_id(struct perf_counter *counter) | ||
170 | { | ||
171 | u64 id = counter->id; | ||
172 | |||
173 | if (counter->parent) | ||
174 | id = counter->parent->id; | ||
175 | |||
176 | return id; | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * Get the perf_counter_context for a task and lock it. | ||
181 | * This has to cope with the fact that until it is locked, | ||
182 | * the context could get moved to another task. | ||
183 | */ | ||
184 | static struct perf_counter_context * | ||
185 | perf_lock_task_context(struct task_struct *task, unsigned long *flags) | ||
186 | { | ||
187 | struct perf_counter_context *ctx; | ||
188 | |||
189 | rcu_read_lock(); | ||
190 | retry: | ||
191 | ctx = rcu_dereference(task->perf_counter_ctxp); | ||
192 | if (ctx) { | ||
193 | /* | ||
194 | * If this context is a clone of another, it might | ||
195 | * get swapped for another underneath us by | ||
196 | * perf_counter_task_sched_out, though the | ||
197 | * rcu_read_lock() protects us from any context | ||
198 | * getting freed. Lock the context and check if it | ||
199 | * got swapped before we could get the lock, and retry | ||
200 | * if so. If we locked the right context, then it | ||
201 | * can't get swapped on us any more. | ||
202 | */ | ||
203 | spin_lock_irqsave(&ctx->lock, *flags); | ||
204 | if (ctx != rcu_dereference(task->perf_counter_ctxp)) { | ||
205 | spin_unlock_irqrestore(&ctx->lock, *flags); | ||
206 | goto retry; | ||
207 | } | ||
208 | |||
209 | if (!atomic_inc_not_zero(&ctx->refcount)) { | ||
210 | spin_unlock_irqrestore(&ctx->lock, *flags); | ||
211 | ctx = NULL; | ||
212 | } | ||
213 | } | ||
214 | rcu_read_unlock(); | ||
215 | return ctx; | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * Get the context for a task and increment its pin_count so it | ||
220 | * can't get swapped to another task. This also increments its | ||
221 | * reference count so that the context can't get freed. | ||
222 | */ | ||
223 | static struct perf_counter_context *perf_pin_task_context(struct task_struct *task) | ||
224 | { | ||
225 | struct perf_counter_context *ctx; | ||
226 | unsigned long flags; | ||
227 | |||
228 | ctx = perf_lock_task_context(task, &flags); | ||
229 | if (ctx) { | ||
230 | ++ctx->pin_count; | ||
231 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
232 | } | ||
233 | return ctx; | ||
234 | } | ||
235 | |||
236 | static void perf_unpin_context(struct perf_counter_context *ctx) | ||
237 | { | ||
238 | unsigned long flags; | ||
239 | |||
240 | spin_lock_irqsave(&ctx->lock, flags); | ||
241 | --ctx->pin_count; | ||
242 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
243 | put_ctx(ctx); | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | * Add a counter to the lists for its context. | ||

248 | * Must be called with ctx->mutex and ctx->lock held. | ||
249 | */ | ||
250 | static void | ||
251 | list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) | ||
252 | { | ||
253 | struct perf_counter *group_leader = counter->group_leader; | ||
254 | |||
255 | /* | ||
256 | * Depending on whether it is a standalone or sibling counter, | ||
257 | * add it straight to the context's counter list, or to the group | ||
258 | * leader's sibling list: | ||
259 | */ | ||
260 | if (group_leader == counter) | ||
261 | list_add_tail(&counter->list_entry, &ctx->counter_list); | ||
262 | else { | ||
263 | list_add_tail(&counter->list_entry, &group_leader->sibling_list); | ||
264 | group_leader->nr_siblings++; | ||
265 | } | ||
266 | |||
267 | list_add_rcu(&counter->event_entry, &ctx->event_list); | ||
268 | ctx->nr_counters++; | ||
269 | if (counter->attr.inherit_stat) | ||
270 | ctx->nr_stat++; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Remove a counter from the lists for its context. | ||
275 | * Must be called with ctx->mutex and ctx->lock held. | ||
276 | */ | ||
277 | static void | ||
278 | list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) | ||
279 | { | ||
280 | struct perf_counter *sibling, *tmp; | ||
281 | |||
282 | if (list_empty(&counter->list_entry)) | ||
283 | return; | ||
284 | ctx->nr_counters--; | ||
285 | if (counter->attr.inherit_stat) | ||
286 | ctx->nr_stat--; | ||
287 | |||
288 | list_del_init(&counter->list_entry); | ||
289 | list_del_rcu(&counter->event_entry); | ||
290 | |||
291 | if (counter->group_leader != counter) | ||
292 | counter->group_leader->nr_siblings--; | ||
293 | |||
294 | /* | ||
295 | * If this was a group counter with sibling counters then | ||
296 | * upgrade the siblings to singleton counters by adding them | ||
297 | * to the context list directly: | ||
298 | */ | ||
299 | list_for_each_entry_safe(sibling, tmp, | ||
300 | &counter->sibling_list, list_entry) { | ||
301 | |||
302 | list_move_tail(&sibling->list_entry, &ctx->counter_list); | ||
303 | sibling->group_leader = sibling; | ||
304 | } | ||
305 | } | ||
306 | |||
307 | static void | ||
308 | counter_sched_out(struct perf_counter *counter, | ||
309 | struct perf_cpu_context *cpuctx, | ||
310 | struct perf_counter_context *ctx) | ||
311 | { | ||
312 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
313 | return; | ||
314 | |||
315 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
316 | if (counter->pending_disable) { | ||
317 | counter->pending_disable = 0; | ||
318 | counter->state = PERF_COUNTER_STATE_OFF; | ||
319 | } | ||
320 | counter->tstamp_stopped = ctx->time; | ||
321 | counter->pmu->disable(counter); | ||
322 | counter->oncpu = -1; | ||
323 | |||
324 | if (!is_software_counter(counter)) | ||
325 | cpuctx->active_oncpu--; | ||
326 | ctx->nr_active--; | ||
327 | if (counter->attr.exclusive || !cpuctx->active_oncpu) | ||
328 | cpuctx->exclusive = 0; | ||
329 | } | ||
330 | |||
331 | static void | ||
332 | group_sched_out(struct perf_counter *group_counter, | ||
333 | struct perf_cpu_context *cpuctx, | ||
334 | struct perf_counter_context *ctx) | ||
335 | { | ||
336 | struct perf_counter *counter; | ||
337 | |||
338 | if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
339 | return; | ||
340 | |||
341 | counter_sched_out(group_counter, cpuctx, ctx); | ||
342 | |||
343 | /* | ||
344 | * Schedule out siblings (if any): | ||
345 | */ | ||
346 | list_for_each_entry(counter, &group_counter->sibling_list, list_entry) | ||
347 | counter_sched_out(counter, cpuctx, ctx); | ||
348 | |||
349 | if (group_counter->attr.exclusive) | ||
350 | cpuctx->exclusive = 0; | ||
351 | } | ||
352 | |||
353 | /* | ||
354 | * Cross CPU call to remove a performance counter | ||
355 | * | ||
356 | * We disable the counter on the hardware level first. After that we | ||
357 | * remove it from the context list. | ||
358 | */ | ||
359 | static void __perf_counter_remove_from_context(void *info) | ||
360 | { | ||
361 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
362 | struct perf_counter *counter = info; | ||
363 | struct perf_counter_context *ctx = counter->ctx; | ||
364 | |||
365 | /* | ||
366 | * If this is a task context, we need to check whether it is | ||
367 | * the current task context of this cpu. If not it has been | ||
368 | * scheduled out before the smp call arrived. | ||
369 | */ | ||
370 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
371 | return; | ||
372 | |||
373 | spin_lock(&ctx->lock); | ||
374 | /* | ||
375 | * Protect the list operation against NMI by disabling the | ||
376 | * counters on a global level. | ||
377 | */ | ||
378 | perf_disable(); | ||
379 | |||
380 | counter_sched_out(counter, cpuctx, ctx); | ||
381 | |||
382 | list_del_counter(counter, ctx); | ||
383 | |||
384 | if (!ctx->task) { | ||
385 | /* | ||
386 | * Allow more per task counters with respect to the | ||
387 | * reservation: | ||
388 | */ | ||
389 | cpuctx->max_pertask = | ||
390 | min(perf_max_counters - ctx->nr_counters, | ||
391 | perf_max_counters - perf_reserved_percpu); | ||
392 | } | ||
393 | |||
394 | perf_enable(); | ||
395 | spin_unlock(&ctx->lock); | ||
396 | } | ||
397 | |||
398 | |||
399 | /* | ||
400 | * Remove the counter from a task's (or a CPU's) list of counters. | ||
401 | * | ||
402 | * Must be called with ctx->mutex held. | ||
403 | * | ||
404 | * CPU counters are removed with an smp call. For task counters we only | ||
405 | * call when the task is on a CPU. | ||
406 | * | ||
407 | * If counter->ctx is a cloned context, callers must make sure that | ||
408 | * every task struct that counter->ctx->task could possibly point to | ||
409 | * remains valid. This is OK when called from perf_release since | ||
410 | * that only calls us on the top-level context, which can't be a clone. | ||
411 | * When called from perf_counter_exit_task, it's OK because the | ||
412 | * context has been detached from its task. | ||
413 | */ | ||
414 | static void perf_counter_remove_from_context(struct perf_counter *counter) | ||
415 | { | ||
416 | struct perf_counter_context *ctx = counter->ctx; | ||
417 | struct task_struct *task = ctx->task; | ||
418 | |||
419 | if (!task) { | ||
420 | /* | ||
421 | * Per cpu counters are removed via an smp call and | ||
422 | * the removal is always successful. | ||
423 | */ | ||
424 | smp_call_function_single(counter->cpu, | ||
425 | __perf_counter_remove_from_context, | ||
426 | counter, 1); | ||
427 | return; | ||
428 | } | ||
429 | |||
430 | retry: | ||
431 | task_oncpu_function_call(task, __perf_counter_remove_from_context, | ||
432 | counter); | ||
433 | |||
434 | spin_lock_irq(&ctx->lock); | ||
435 | /* | ||
436 | * If the context is active we need to retry the smp call. | ||
437 | */ | ||
438 | if (ctx->nr_active && !list_empty(&counter->list_entry)) { | ||
439 | spin_unlock_irq(&ctx->lock); | ||
440 | goto retry; | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * The lock prevents this context from being scheduled in, so | ||
445 | * we can remove the counter safely if the call above did not | ||
446 | * succeed. | ||
447 | */ | ||
448 | if (!list_empty(&counter->list_entry)) { | ||
449 | list_del_counter(counter, ctx); | ||
450 | } | ||
451 | spin_unlock_irq(&ctx->lock); | ||
452 | } | ||
453 | |||
454 | static inline u64 perf_clock(void) | ||
455 | { | ||
456 | return cpu_clock(smp_processor_id()); | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * Update the record of the current time in a context. | ||
461 | */ | ||
462 | static void update_context_time(struct perf_counter_context *ctx) | ||
463 | { | ||
464 | u64 now = perf_clock(); | ||
465 | |||
466 | ctx->time += now - ctx->timestamp; | ||
467 | ctx->timestamp = now; | ||
468 | } | ||
469 | |||
470 | /* | ||
471 | * Update the total_time_enabled and total_time_running fields for a counter. | ||
472 | */ | ||
473 | static void update_counter_times(struct perf_counter *counter) | ||
474 | { | ||
475 | struct perf_counter_context *ctx = counter->ctx; | ||
476 | u64 run_end; | ||
477 | |||
478 | if (counter->state < PERF_COUNTER_STATE_INACTIVE || | ||
479 | counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE) | ||
480 | return; | ||
481 | |||
482 | counter->total_time_enabled = ctx->time - counter->tstamp_enabled; | ||
483 | |||
484 | if (counter->state == PERF_COUNTER_STATE_INACTIVE) | ||
485 | run_end = counter->tstamp_stopped; | ||
486 | else | ||
487 | run_end = ctx->time; | ||
488 | |||
489 | counter->total_time_running = run_end - counter->tstamp_running; | ||
490 | } | ||
491 | |||
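The time accounting above boils down to two subtractions against the context clock: enabled time runs from tstamp_enabled to now, and running time ends either at tstamp_stopped (inactive) or at now (active). A worked user-space example of the same arithmetic (times are arbitrary nanosecond values invented for the demo; the field names mirror the counter fields):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Context clock and counter timestamps, arbitrary values. */
	uint64_t ctx_time       = 10000;
	uint64_t tstamp_enabled =  1000;	/* when the counter was enabled */
	uint64_t tstamp_running =  2500;	/* last time it went on the PMU */
	uint64_t tstamp_stopped =  9000;	/* last time it came off the PMU */
	bool     inactive       = true;		/* INACTIVE vs. ACTIVE */

	uint64_t total_time_enabled = ctx_time - tstamp_enabled;
	uint64_t run_end            = inactive ? tstamp_stopped : ctx_time;
	uint64_t total_time_running = run_end - tstamp_running;

	printf("enabled for %llu ns, running for %llu ns\n",
	       (unsigned long long)total_time_enabled,
	       (unsigned long long)total_time_running);
	return 0;
}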
492 | /* | ||
493 | * Update total_time_enabled and total_time_running for all counters in a group. | ||
494 | */ | ||
495 | static void update_group_times(struct perf_counter *leader) | ||
496 | { | ||
497 | struct perf_counter *counter; | ||
498 | |||
499 | update_counter_times(leader); | ||
500 | list_for_each_entry(counter, &leader->sibling_list, list_entry) | ||
501 | update_counter_times(counter); | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * Cross CPU call to disable a performance counter | ||
506 | */ | ||
507 | static void __perf_counter_disable(void *info) | ||
508 | { | ||
509 | struct perf_counter *counter = info; | ||
510 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
511 | struct perf_counter_context *ctx = counter->ctx; | ||
512 | |||
513 | /* | ||
514 | * If this is a per-task counter, need to check whether this | ||
515 | * counter's task is the current task on this cpu. | ||
516 | */ | ||
517 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
518 | return; | ||
519 | |||
520 | spin_lock(&ctx->lock); | ||
521 | |||
522 | /* | ||
523 | * If the counter is on, turn it off. | ||
524 | * If it is in error state, leave it in error state. | ||
525 | */ | ||
526 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { | ||
527 | update_context_time(ctx); | ||
528 | update_group_times(counter); | ||
529 | if (counter == counter->group_leader) | ||
530 | group_sched_out(counter, cpuctx, ctx); | ||
531 | else | ||
532 | counter_sched_out(counter, cpuctx, ctx); | ||
533 | counter->state = PERF_COUNTER_STATE_OFF; | ||
534 | } | ||
535 | |||
536 | spin_unlock(&ctx->lock); | ||
537 | } | ||
538 | |||
539 | /* | ||
540 | * Disable a counter. | ||
541 | * | ||
542 | * If counter->ctx is a cloned context, callers must make sure that | ||
543 | * every task struct that counter->ctx->task could possibly point to | ||
544 | * remains valid. This condition is satisfied when called through | ||
545 | * perf_counter_for_each_child or perf_counter_for_each because they | ||
546 | * hold the top-level counter's child_mutex, so any descendant that | ||
547 | * goes to exit will block in sync_child_counter. | ||
548 | * When called from perf_pending_counter it's OK because counter->ctx | ||
549 | * is the current context on this CPU and preemption is disabled, | ||
550 | * hence we can't get into perf_counter_task_sched_out for this context. | ||
551 | */ | ||
552 | static void perf_counter_disable(struct perf_counter *counter) | ||
553 | { | ||
554 | struct perf_counter_context *ctx = counter->ctx; | ||
555 | struct task_struct *task = ctx->task; | ||
556 | |||
557 | if (!task) { | ||
558 | /* | ||
559 | * Disable the counter on the cpu that it's on | ||
560 | */ | ||
561 | smp_call_function_single(counter->cpu, __perf_counter_disable, | ||
562 | counter, 1); | ||
563 | return; | ||
564 | } | ||
565 | |||
566 | retry: | ||
567 | task_oncpu_function_call(task, __perf_counter_disable, counter); | ||
568 | |||
569 | spin_lock_irq(&ctx->lock); | ||
570 | /* | ||
571 | * If the counter is still active, we need to retry the cross-call. | ||
572 | */ | ||
573 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) { | ||
574 | spin_unlock_irq(&ctx->lock); | ||
575 | goto retry; | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * Since we have the lock this context can't be scheduled | ||
580 | * in, so we can change the state safely. | ||
581 | */ | ||
582 | if (counter->state == PERF_COUNTER_STATE_INACTIVE) { | ||
583 | update_group_times(counter); | ||
584 | counter->state = PERF_COUNTER_STATE_OFF; | ||
585 | } | ||
586 | |||
587 | spin_unlock_irq(&ctx->lock); | ||
588 | } | ||
589 | |||
590 | static int | ||
591 | counter_sched_in(struct perf_counter *counter, | ||
592 | struct perf_cpu_context *cpuctx, | ||
593 | struct perf_counter_context *ctx, | ||
594 | int cpu) | ||
595 | { | ||
596 | if (counter->state <= PERF_COUNTER_STATE_OFF) | ||
597 | return 0; | ||
598 | |||
599 | counter->state = PERF_COUNTER_STATE_ACTIVE; | ||
600 | counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ | ||
601 | /* | ||
602 | * The new state must be visible before we turn it on in the hardware: | ||
603 | */ | ||
604 | smp_wmb(); | ||
605 | |||
606 | if (counter->pmu->enable(counter)) { | ||
607 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
608 | counter->oncpu = -1; | ||
609 | return -EAGAIN; | ||
610 | } | ||
611 | |||
612 | counter->tstamp_running += ctx->time - counter->tstamp_stopped; | ||
613 | |||
614 | if (!is_software_counter(counter)) | ||
615 | cpuctx->active_oncpu++; | ||
616 | ctx->nr_active++; | ||
617 | |||
618 | if (counter->attr.exclusive) | ||
619 | cpuctx->exclusive = 1; | ||
620 | |||
621 | return 0; | ||
622 | } | ||
623 | |||
624 | static int | ||
625 | group_sched_in(struct perf_counter *group_counter, | ||
626 | struct perf_cpu_context *cpuctx, | ||
627 | struct perf_counter_context *ctx, | ||
628 | int cpu) | ||
629 | { | ||
630 | struct perf_counter *counter, *partial_group; | ||
631 | int ret; | ||
632 | |||
633 | if (group_counter->state == PERF_COUNTER_STATE_OFF) | ||
634 | return 0; | ||
635 | |||
636 | ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu); | ||
637 | if (ret) | ||
638 | return ret < 0 ? ret : 0; | ||
639 | |||
640 | if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) | ||
641 | return -EAGAIN; | ||
642 | |||
643 | /* | ||
644 | * Schedule in siblings as one group (if any): | ||
645 | */ | ||
646 | list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { | ||
647 | if (counter_sched_in(counter, cpuctx, ctx, cpu)) { | ||
648 | partial_group = counter; | ||
649 | goto group_error; | ||
650 | } | ||
651 | } | ||
652 | |||
653 | return 0; | ||
654 | |||
655 | group_error: | ||
656 | /* | ||
657 | * Groups can be scheduled in as one unit only, so undo any | ||
658 | * partial group before returning: | ||
659 | */ | ||
660 | list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { | ||
661 | if (counter == partial_group) | ||
662 | break; | ||
663 | counter_sched_out(counter, cpuctx, ctx); | ||
664 | } | ||
665 | counter_sched_out(group_counter, cpuctx, ctx); | ||
666 | |||
667 | return -EAGAIN; | ||
668 | } | ||
669 | |||
670 | /* | ||
671 | * Return 1 for a group consisting entirely of software counters, | ||
672 | * 0 if the group contains any hardware counters. | ||
673 | */ | ||
674 | static int is_software_only_group(struct perf_counter *leader) | ||
675 | { | ||
676 | struct perf_counter *counter; | ||
677 | |||
678 | if (!is_software_counter(leader)) | ||
679 | return 0; | ||
680 | |||
681 | list_for_each_entry(counter, &leader->sibling_list, list_entry) | ||
682 | if (!is_software_counter(counter)) | ||
683 | return 0; | ||
684 | |||
685 | return 1; | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * Work out whether we can put this counter group on the CPU now. | ||
690 | */ | ||
691 | static int group_can_go_on(struct perf_counter *counter, | ||
692 | struct perf_cpu_context *cpuctx, | ||
693 | int can_add_hw) | ||
694 | { | ||
695 | /* | ||
696 | * Groups consisting entirely of software counters can always go on. | ||
697 | */ | ||
698 | if (is_software_only_group(counter)) | ||
699 | return 1; | ||
700 | /* | ||
701 | * If an exclusive group is already on, no other hardware | ||
702 | * counters can go on. | ||
703 | */ | ||
704 | if (cpuctx->exclusive) | ||
705 | return 0; | ||
706 | /* | ||
707 | * If this group is exclusive and there are already | ||
708 | * counters on the CPU, it can't go on. | ||
709 | */ | ||
710 | if (counter->attr.exclusive && cpuctx->active_oncpu) | ||
711 | return 0; | ||
712 | /* | ||
713 | * Otherwise, try to add it if all previous groups were able | ||
714 | * to go on. | ||
715 | */ | ||
716 | return can_add_hw; | ||
717 | } | ||
718 | |||
719 | static void add_counter_to_ctx(struct perf_counter *counter, | ||
720 | struct perf_counter_context *ctx) | ||
721 | { | ||
722 | list_add_counter(counter, ctx); | ||
723 | counter->tstamp_enabled = ctx->time; | ||
724 | counter->tstamp_running = ctx->time; | ||
725 | counter->tstamp_stopped = ctx->time; | ||
726 | } | ||
727 | |||
728 | /* | ||
729 | * Cross CPU call to install and enable a performance counter | ||
730 | * | ||
731 | * Must be called with ctx->mutex held | ||
732 | */ | ||
733 | static void __perf_install_in_context(void *info) | ||
734 | { | ||
735 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
736 | struct perf_counter *counter = info; | ||
737 | struct perf_counter_context *ctx = counter->ctx; | ||
738 | struct perf_counter *leader = counter->group_leader; | ||
739 | int cpu = smp_processor_id(); | ||
740 | int err; | ||
741 | |||
742 | /* | ||
743 | * If this is a task context, we need to check whether it is | ||
744 | * the current task context of this cpu. If not it has been | ||
745 | * scheduled out before the smp call arrived. | ||
746 | * Or possibly this is the right context but it isn't | ||
747 | * on this cpu because it had no counters. | ||
748 | */ | ||
749 | if (ctx->task && cpuctx->task_ctx != ctx) { | ||
750 | if (cpuctx->task_ctx || ctx->task != current) | ||
751 | return; | ||
752 | cpuctx->task_ctx = ctx; | ||
753 | } | ||
754 | |||
755 | spin_lock(&ctx->lock); | ||
756 | ctx->is_active = 1; | ||
757 | update_context_time(ctx); | ||
758 | |||
759 | /* | ||
760 | * Protect the list operation against NMI by disabling the | ||
761 | * counters on a global level. NOP for non NMI based counters. | ||
762 | */ | ||
763 | perf_disable(); | ||
764 | |||
765 | add_counter_to_ctx(counter, ctx); | ||
766 | |||
767 | /* | ||
768 | * Don't put the counter on if it is disabled or if | ||
769 | * it is in a group and the group isn't on. | ||
770 | */ | ||
771 | if (counter->state != PERF_COUNTER_STATE_INACTIVE || | ||
772 | (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)) | ||
773 | goto unlock; | ||
774 | |||
775 | /* | ||
776 | * An exclusive counter can't go on if there are already active | ||
777 | * hardware counters, and no hardware counter can go on if there | ||
778 | * is already an exclusive counter on. | ||
779 | */ | ||
780 | if (!group_can_go_on(counter, cpuctx, 1)) | ||
781 | err = -EEXIST; | ||
782 | else | ||
783 | err = counter_sched_in(counter, cpuctx, ctx, cpu); | ||
784 | |||
785 | if (err) { | ||
786 | /* | ||
787 | * This counter couldn't go on. If it is in a group | ||
788 | * then we have to pull the whole group off. | ||
789 | * If the counter group is pinned then put it in error state. | ||
790 | */ | ||
791 | if (leader != counter) | ||
792 | group_sched_out(leader, cpuctx, ctx); | ||
793 | if (leader->attr.pinned) { | ||
794 | update_group_times(leader); | ||
795 | leader->state = PERF_COUNTER_STATE_ERROR; | ||
796 | } | ||
797 | } | ||
798 | |||
799 | if (!err && !ctx->task && cpuctx->max_pertask) | ||
800 | cpuctx->max_pertask--; | ||
801 | |||
802 | unlock: | ||
803 | perf_enable(); | ||
804 | |||
805 | spin_unlock(&ctx->lock); | ||
806 | } | ||
807 | |||
808 | /* | ||
809 | * Attach a performance counter to a context | ||
810 | * | ||
811 | * First we add the counter to the list with the hardware enable bit | ||
812 | * in counter->hw_config cleared. | ||
813 | * | ||
814 | * If the counter is attached to a task which is on a CPU we use a smp | ||
815 | * call to enable it in the task context. The task might have been | ||
816 | * scheduled away, but we check this in the smp call again. | ||
817 | * | ||
818 | * Must be called with ctx->mutex held. | ||
819 | */ | ||
820 | static void | ||
821 | perf_install_in_context(struct perf_counter_context *ctx, | ||
822 | struct perf_counter *counter, | ||
823 | int cpu) | ||
824 | { | ||
825 | struct task_struct *task = ctx->task; | ||
826 | |||
827 | if (!task) { | ||
828 | /* | ||
829 | * Per cpu counters are installed via an smp call and | ||
831 | * the install is always successful. | ||
831 | */ | ||
832 | smp_call_function_single(cpu, __perf_install_in_context, | ||
833 | counter, 1); | ||
834 | return; | ||
835 | } | ||
836 | |||
837 | retry: | ||
838 | task_oncpu_function_call(task, __perf_install_in_context, | ||
839 | counter); | ||
840 | |||
841 | spin_lock_irq(&ctx->lock); | ||
842 | /* | ||
843 | * If the context is active we need to retry the smp call. | ||
844 | */ | ||
845 | if (ctx->is_active && list_empty(&counter->list_entry)) { | ||
846 | spin_unlock_irq(&ctx->lock); | ||
847 | goto retry; | ||
848 | } | ||
849 | |||
850 | /* | ||
851 | * The lock prevents this context from being scheduled in, so | ||
852 | * we can add the counter safely if the call above did not | ||
853 | * succeed. | ||
854 | */ | ||
855 | if (list_empty(&counter->list_entry)) | ||
856 | add_counter_to_ctx(counter, ctx); | ||
857 | spin_unlock_irq(&ctx->lock); | ||
858 | } | ||
859 | |||
860 | /* | ||
861 | * Put a counter into inactive state and update time fields. | ||
862 | * Enabling the leader of a group effectively enables all | ||
863 | * the group members that aren't explicitly disabled, so we | ||
864 | * have to update their ->tstamp_enabled also. | ||
865 | * Note: this works for group members as well as group leaders | ||
866 | * since the non-leader members' sibling_lists will be empty. | ||
867 | */ | ||
868 | static void __perf_counter_mark_enabled(struct perf_counter *counter, | ||
869 | struct perf_counter_context *ctx) | ||
870 | { | ||
871 | struct perf_counter *sub; | ||
872 | |||
873 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
874 | counter->tstamp_enabled = ctx->time - counter->total_time_enabled; | ||
875 | list_for_each_entry(sub, &counter->sibling_list, list_entry) | ||
876 | if (sub->state >= PERF_COUNTER_STATE_INACTIVE) | ||
877 | sub->tstamp_enabled = | ||
878 | ctx->time - sub->total_time_enabled; | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * Cross CPU call to enable a performance counter | ||
883 | */ | ||
884 | static void __perf_counter_enable(void *info) | ||
885 | { | ||
886 | struct perf_counter *counter = info; | ||
887 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
888 | struct perf_counter_context *ctx = counter->ctx; | ||
889 | struct perf_counter *leader = counter->group_leader; | ||
890 | int err; | ||
891 | |||
892 | /* | ||
893 | * If this is a per-task counter, need to check whether this | ||
894 | * counter's task is the current task on this cpu. | ||
895 | */ | ||
896 | if (ctx->task && cpuctx->task_ctx != ctx) { | ||
897 | if (cpuctx->task_ctx || ctx->task != current) | ||
898 | return; | ||
899 | cpuctx->task_ctx = ctx; | ||
900 | } | ||
901 | |||
902 | spin_lock(&ctx->lock); | ||
903 | ctx->is_active = 1; | ||
904 | update_context_time(ctx); | ||
905 | |||
906 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
907 | goto unlock; | ||
908 | __perf_counter_mark_enabled(counter, ctx); | ||
909 | |||
910 | /* | ||
911 | * If the counter is in a group and isn't the group leader, | ||
912 | * then don't put it on unless the group is on. | ||
913 | */ | ||
914 | if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE) | ||
915 | goto unlock; | ||
916 | |||
917 | if (!group_can_go_on(counter, cpuctx, 1)) { | ||
918 | err = -EEXIST; | ||
919 | } else { | ||
920 | perf_disable(); | ||
921 | if (counter == leader) | ||
922 | err = group_sched_in(counter, cpuctx, ctx, | ||
923 | smp_processor_id()); | ||
924 | else | ||
925 | err = counter_sched_in(counter, cpuctx, ctx, | ||
926 | smp_processor_id()); | ||
927 | perf_enable(); | ||
928 | } | ||
929 | |||
930 | if (err) { | ||
931 | /* | ||
932 | * If this counter can't go on and it's part of a | ||
933 | * group, then the whole group has to come off. | ||
934 | */ | ||
935 | if (leader != counter) | ||
936 | group_sched_out(leader, cpuctx, ctx); | ||
937 | if (leader->attr.pinned) { | ||
938 | update_group_times(leader); | ||
939 | leader->state = PERF_COUNTER_STATE_ERROR; | ||
940 | } | ||
941 | } | ||
942 | |||
943 | unlock: | ||
944 | spin_unlock(&ctx->lock); | ||
945 | } | ||
946 | |||
947 | /* | ||
948 | * Enable a counter. | ||
949 | * | ||
950 | * If counter->ctx is a cloned context, callers must make sure that | ||
951 | * every task struct that counter->ctx->task could possibly point to | ||
952 | * remains valid. This condition is satisfied when called through | ||
953 | * perf_counter_for_each_child or perf_counter_for_each as described | ||
954 | * for perf_counter_disable. | ||
955 | */ | ||
956 | static void perf_counter_enable(struct perf_counter *counter) | ||
957 | { | ||
958 | struct perf_counter_context *ctx = counter->ctx; | ||
959 | struct task_struct *task = ctx->task; | ||
960 | |||
961 | if (!task) { | ||
962 | /* | ||
963 | * Enable the counter on the cpu that it's on | ||
964 | */ | ||
965 | smp_call_function_single(counter->cpu, __perf_counter_enable, | ||
966 | counter, 1); | ||
967 | return; | ||
968 | } | ||
969 | |||
970 | spin_lock_irq(&ctx->lock); | ||
971 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
972 | goto out; | ||
973 | |||
974 | /* | ||
975 | * If the counter is in error state, clear that first. | ||
976 | * That way, if we see the counter in error state below, we | ||
977 | * know that it has gone back into error state, as distinct | ||
978 | * from the task having been scheduled away before the | ||
979 | * cross-call arrived. | ||
980 | */ | ||
981 | if (counter->state == PERF_COUNTER_STATE_ERROR) | ||
982 | counter->state = PERF_COUNTER_STATE_OFF; | ||
983 | |||
984 | retry: | ||
985 | spin_unlock_irq(&ctx->lock); | ||
986 | task_oncpu_function_call(task, __perf_counter_enable, counter); | ||
987 | |||
988 | spin_lock_irq(&ctx->lock); | ||
989 | |||
990 | /* | ||
991 | * If the context is active and the counter is still off, | ||
992 | * we need to retry the cross-call. | ||
993 | */ | ||
994 | if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF) | ||
995 | goto retry; | ||
996 | |||
997 | /* | ||
998 | * Since we have the lock this context can't be scheduled | ||
999 | * in, so we can change the state safely. | ||
1000 | */ | ||
1001 | if (counter->state == PERF_COUNTER_STATE_OFF) | ||
1002 | __perf_counter_mark_enabled(counter, ctx); | ||
1003 | |||
1004 | out: | ||
1005 | spin_unlock_irq(&ctx->lock); | ||
1006 | } | ||
1007 | |||
1008 | static int perf_counter_refresh(struct perf_counter *counter, int refresh) | ||
1009 | { | ||
1010 | /* | ||
1011 | * not supported on inherited counters | ||
1012 | */ | ||
1013 | if (counter->attr.inherit) | ||
1014 | return -EINVAL; | ||
1015 | |||
1016 | atomic_add(refresh, &counter->event_limit); | ||
1017 | perf_counter_enable(counter); | ||
1018 | |||
1019 | return 0; | ||
1020 | } | ||
1021 | |||
1022 | void __perf_counter_sched_out(struct perf_counter_context *ctx, | ||
1023 | struct perf_cpu_context *cpuctx) | ||
1024 | { | ||
1025 | struct perf_counter *counter; | ||
1026 | |||
1027 | spin_lock(&ctx->lock); | ||
1028 | ctx->is_active = 0; | ||
1029 | if (likely(!ctx->nr_counters)) | ||
1030 | goto out; | ||
1031 | update_context_time(ctx); | ||
1032 | |||
1033 | perf_disable(); | ||
1034 | if (ctx->nr_active) { | ||
1035 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1036 | if (counter != counter->group_leader) | ||
1037 | counter_sched_out(counter, cpuctx, ctx); | ||
1038 | else | ||
1039 | group_sched_out(counter, cpuctx, ctx); | ||
1040 | } | ||
1041 | } | ||
1042 | perf_enable(); | ||
1043 | out: | ||
1044 | spin_unlock(&ctx->lock); | ||
1045 | } | ||
1046 | |||
1047 | /* | ||
1048 | * Test whether two contexts are equivalent, i.e. whether they | ||
1049 | * have both been cloned from the same version of the same context | ||
1050 | * and they both have the same number of enabled counters. | ||
1051 | * If the number of enabled counters is the same, then the set | ||
1052 | * of enabled counters should be the same, because these are both | ||
1053 | * inherited contexts, therefore we can't access individual counters | ||
1054 | * in them directly with an fd; we can only enable/disable all | ||
1055 | * counters via prctl, or enable/disable all counters in a family | ||
1056 | * via ioctl, which will have the same effect on both contexts. | ||
1057 | */ | ||
1058 | static int context_equiv(struct perf_counter_context *ctx1, | ||
1059 | struct perf_counter_context *ctx2) | ||
1060 | { | ||
1061 | return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx | ||
1062 | && ctx1->parent_gen == ctx2->parent_gen | ||
1063 | && !ctx1->pin_count && !ctx2->pin_count; | ||
1064 | } | ||
1065 | |||
1066 | static void __perf_counter_read(void *counter); | ||
1067 | |||
1068 | static void __perf_counter_sync_stat(struct perf_counter *counter, | ||
1069 | struct perf_counter *next_counter) | ||
1070 | { | ||
1071 | u64 value; | ||
1072 | |||
1073 | if (!counter->attr.inherit_stat) | ||
1074 | return; | ||
1075 | |||
1076 | /* | ||
1077 | * Update the counter value; we cannot use perf_counter_read() | ||
1078 | * because we're in the middle of a context switch and have IRQs | ||
1079 | * disabled, which upsets smp_call_function_single(). However, | ||
1080 | * we know the counter must be on the current CPU, so we | ||
1081 | * don't need to use it. | ||
1082 | */ | ||
1083 | switch (counter->state) { | ||
1084 | case PERF_COUNTER_STATE_ACTIVE: | ||
1085 | __perf_counter_read(counter); | ||
1086 | break; | ||
1087 | |||
1088 | case PERF_COUNTER_STATE_INACTIVE: | ||
1089 | update_counter_times(counter); | ||
1090 | break; | ||
1091 | |||
1092 | default: | ||
1093 | break; | ||
1094 | } | ||
1095 | |||
1096 | /* | ||
1097 | * In order to keep per-task stats reliable we need to flip the counter | ||
1098 | * values when we flip the contexts. | ||
1099 | */ | ||
1100 | value = atomic64_read(&next_counter->count); | ||
1101 | value = atomic64_xchg(&counter->count, value); | ||
1102 | atomic64_set(&next_counter->count, value); | ||
1103 | |||
1104 | swap(counter->total_time_enabled, next_counter->total_time_enabled); | ||
1105 | swap(counter->total_time_running, next_counter->total_time_running); | ||
1106 | |||
1107 | /* | ||
1108 | * Since we swizzled the values, update the user visible data too. | ||
1109 | */ | ||
1110 | perf_counter_update_userpage(counter); | ||
1111 | perf_counter_update_userpage(next_counter); | ||
1112 | } | ||
1113 | |||
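The swap in __perf_counter_sync_stat() is what keeps per-task statistics stable when two cloned contexts trade places on a context switch: the accumulated values follow the task rather than the context object. A small user-space demonstration of the same value/time swap (plain 64-bit variables instead of atomics; struct counter and sync_stat() are illustrative names):

#include <stdint.h>
#include <stdio.h>

struct counter { uint64_t count, time_enabled, time_running; };

static void swap64(uint64_t *a, uint64_t *b)
{
	uint64_t t = *a; *a = *b; *b = t;
}

/* Flip the accumulated statistics between a counter and its clone in the
 * next context, so each task keeps the numbers it accumulated. */
static void sync_stat(struct counter *c, struct counter *next)
{
	swap64(&c->count, &next->count);
	swap64(&c->time_enabled, &next->time_enabled);
	swap64(&c->time_running, &next->time_running);
}

int main(void)
{
	struct counter a = { 1000, 50, 40 }, b = { 7, 5, 4 };

	sync_stat(&a, &b);
	printf("a: %llu/%llu/%llu  b: %llu/%llu/%llu\n",
	       (unsigned long long)a.count,
	       (unsigned long long)a.time_enabled,
	       (unsigned long long)a.time_running,
	       (unsigned long long)b.count,
	       (unsigned long long)b.time_enabled,
	       (unsigned long long)b.time_running);
	return 0;
}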
1114 | #define list_next_entry(pos, member) \ | ||
1115 | list_entry(pos->member.next, typeof(*pos), member) | ||
1116 | |||
1117 | static void perf_counter_sync_stat(struct perf_counter_context *ctx, | ||
1118 | struct perf_counter_context *next_ctx) | ||
1119 | { | ||
1120 | struct perf_counter *counter, *next_counter; | ||
1121 | |||
1122 | if (!ctx->nr_stat) | ||
1123 | return; | ||
1124 | |||
1125 | counter = list_first_entry(&ctx->event_list, | ||
1126 | struct perf_counter, event_entry); | ||
1127 | |||
1128 | next_counter = list_first_entry(&next_ctx->event_list, | ||
1129 | struct perf_counter, event_entry); | ||
1130 | |||
1131 | while (&counter->event_entry != &ctx->event_list && | ||
1132 | &next_counter->event_entry != &next_ctx->event_list) { | ||
1133 | |||
1134 | __perf_counter_sync_stat(counter, next_counter); | ||
1135 | |||
1136 | counter = list_next_entry(counter, event_entry); | ||
1137 | next_counter = list_next_entry(next_counter, event_entry); | ||
1138 | } | ||
1139 | } | ||
1140 | |||
1141 | /* | ||
1142 | * Called from scheduler to remove the counters of the current task, | ||
1143 | * with interrupts disabled. | ||
1144 | * | ||
1145 | * We stop each counter and update the counter value in counter->count. | ||
1146 | * | ||
1147 | * This does not protect us against NMI, but disable() | ||
1148 | * sets the disabled bit in the control field of counter _before_ | ||
1149 | * accessing the counter control register. If a NMI hits, then it will | ||
1150 | * not restart the counter. | ||
1151 | */ | ||
1152 | void perf_counter_task_sched_out(struct task_struct *task, | ||
1153 | struct task_struct *next, int cpu) | ||
1154 | { | ||
1155 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1156 | struct perf_counter_context *ctx = task->perf_counter_ctxp; | ||
1157 | struct perf_counter_context *next_ctx; | ||
1158 | struct perf_counter_context *parent; | ||
1159 | struct pt_regs *regs; | ||
1160 | int do_switch = 1; | ||
1161 | |||
1162 | regs = task_pt_regs(task); | ||
1163 | perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); | ||
1164 | |||
1165 | if (likely(!ctx || !cpuctx->task_ctx)) | ||
1166 | return; | ||
1167 | |||
1168 | update_context_time(ctx); | ||
1169 | |||
1170 | rcu_read_lock(); | ||
1171 | parent = rcu_dereference(ctx->parent_ctx); | ||
1172 | next_ctx = next->perf_counter_ctxp; | ||
1173 | if (parent && next_ctx && | ||
1174 | rcu_dereference(next_ctx->parent_ctx) == parent) { | ||
1175 | /* | ||
1176 | * Looks like the two contexts are clones, so we might be | ||
1177 | * able to optimize the context switch. We lock both | ||
1178 | * contexts and check that they are clones under the | ||
1179 | * lock (including re-checking that neither has been | ||
1180 | * uncloned in the meantime). It doesn't matter which | ||
1181 | * order we take the locks because no other cpu could | ||
1182 | * be trying to lock both of these tasks. | ||
1183 | */ | ||
1184 | spin_lock(&ctx->lock); | ||
1185 | spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); | ||
1186 | if (context_equiv(ctx, next_ctx)) { | ||
1187 | /* | ||
1188 | * XXX do we need a memory barrier of sorts | ||
1190 | * w.r.t. rcu_dereference() of perf_counter_ctxp | ||
1190 | */ | ||
1191 | task->perf_counter_ctxp = next_ctx; | ||
1192 | next->perf_counter_ctxp = ctx; | ||
1193 | ctx->task = next; | ||
1194 | next_ctx->task = task; | ||
1195 | do_switch = 0; | ||
1196 | |||
1197 | perf_counter_sync_stat(ctx, next_ctx); | ||
1198 | } | ||
1199 | spin_unlock(&next_ctx->lock); | ||
1200 | spin_unlock(&ctx->lock); | ||
1201 | } | ||
1202 | rcu_read_unlock(); | ||
1203 | |||
1204 | if (do_switch) { | ||
1205 | __perf_counter_sched_out(ctx, cpuctx); | ||
1206 | cpuctx->task_ctx = NULL; | ||
1207 | } | ||
1208 | } | ||
1209 | |||
1210 | /* | ||
1211 | * Called with IRQs disabled | ||
1212 | */ | ||
1213 | static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) | ||
1214 | { | ||
1215 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
1216 | |||
1217 | if (!cpuctx->task_ctx) | ||
1218 | return; | ||
1219 | |||
1220 | if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) | ||
1221 | return; | ||
1222 | |||
1223 | __perf_counter_sched_out(ctx, cpuctx); | ||
1224 | cpuctx->task_ctx = NULL; | ||
1225 | } | ||
1226 | |||
1227 | /* | ||
1228 | * Called with IRQs disabled | ||
1229 | */ | ||
1230 | static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) | ||
1231 | { | ||
1232 | __perf_counter_sched_out(&cpuctx->ctx, cpuctx); | ||
1233 | } | ||
1234 | |||
1235 | static void | ||
1236 | __perf_counter_sched_in(struct perf_counter_context *ctx, | ||
1237 | struct perf_cpu_context *cpuctx, int cpu) | ||
1238 | { | ||
1239 | struct perf_counter *counter; | ||
1240 | int can_add_hw = 1; | ||
1241 | |||
1242 | spin_lock(&ctx->lock); | ||
1243 | ctx->is_active = 1; | ||
1244 | if (likely(!ctx->nr_counters)) | ||
1245 | goto out; | ||
1246 | |||
1247 | ctx->timestamp = perf_clock(); | ||
1248 | |||
1249 | perf_disable(); | ||
1250 | |||
1251 | /* | ||
1252 | * First go through the list and put on any pinned groups | ||
1253 | * in order to give them the best chance of going on. | ||
1254 | */ | ||
1255 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1256 | if (counter->state <= PERF_COUNTER_STATE_OFF || | ||
1257 | !counter->attr.pinned) | ||
1258 | continue; | ||
1259 | if (counter->cpu != -1 && counter->cpu != cpu) | ||
1260 | continue; | ||
1261 | |||
1262 | if (counter != counter->group_leader) | ||
1263 | counter_sched_in(counter, cpuctx, ctx, cpu); | ||
1264 | else { | ||
1265 | if (group_can_go_on(counter, cpuctx, 1)) | ||
1266 | group_sched_in(counter, cpuctx, ctx, cpu); | ||
1267 | } | ||
1268 | |||
1269 | /* | ||
1270 | * If this pinned group hasn't been scheduled, | ||
1271 | * put it in error state. | ||
1272 | */ | ||
1273 | if (counter->state == PERF_COUNTER_STATE_INACTIVE) { | ||
1274 | update_group_times(counter); | ||
1275 | counter->state = PERF_COUNTER_STATE_ERROR; | ||
1276 | } | ||
1277 | } | ||
1278 | |||
1279 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1280 | /* | ||
1281 | * Ignore counters in OFF or ERROR state, and | ||
1282 | * ignore pinned counters since we did them already. | ||
1283 | */ | ||
1284 | if (counter->state <= PERF_COUNTER_STATE_OFF || | ||
1285 | counter->attr.pinned) | ||
1286 | continue; | ||
1287 | |||
1288 | /* | ||
1289 | * Listen to the 'cpu' scheduling filter constraint | ||
1290 | * of counters: | ||
1291 | */ | ||
1292 | if (counter->cpu != -1 && counter->cpu != cpu) | ||
1293 | continue; | ||
1294 | |||
1295 | if (counter != counter->group_leader) { | ||
1296 | if (counter_sched_in(counter, cpuctx, ctx, cpu)) | ||
1297 | can_add_hw = 0; | ||
1298 | } else { | ||
1299 | if (group_can_go_on(counter, cpuctx, can_add_hw)) { | ||
1300 | if (group_sched_in(counter, cpuctx, ctx, cpu)) | ||
1301 | can_add_hw = 0; | ||
1302 | } | ||
1303 | } | ||
1304 | } | ||
1305 | perf_enable(); | ||
1306 | out: | ||
1307 | spin_unlock(&ctx->lock); | ||
1308 | } | ||
1309 | |||
1310 | /* | ||
1311 | * Called from scheduler to add the counters of the current task | ||
1312 | * with interrupts disabled. | ||
1313 | * | ||
1314 | * We restore the counter value and then enable it. | ||
1315 | * | ||
1316 | * This does not protect us against NMI, but enable() | ||
1317 | * sets the enabled bit in the control field of counter _before_ | ||
1318 | * accessing the counter control register. If a NMI hits, then it will | ||
1319 | * keep the counter running. | ||
1320 | */ | ||
1321 | void perf_counter_task_sched_in(struct task_struct *task, int cpu) | ||
1322 | { | ||
1323 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1324 | struct perf_counter_context *ctx = task->perf_counter_ctxp; | ||
1325 | |||
1326 | if (likely(!ctx)) | ||
1327 | return; | ||
1328 | if (cpuctx->task_ctx == ctx) | ||
1329 | return; | ||
1330 | __perf_counter_sched_in(ctx, cpuctx, cpu); | ||
1331 | cpuctx->task_ctx = ctx; | ||
1332 | } | ||
1333 | |||
1334 | static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) | ||
1335 | { | ||
1336 | struct perf_counter_context *ctx = &cpuctx->ctx; | ||
1337 | |||
1338 | __perf_counter_sched_in(ctx, cpuctx, cpu); | ||
1339 | } | ||
1340 | |||
1341 | #define MAX_INTERRUPTS (~0ULL) | ||
1342 | |||
1343 | static void perf_log_throttle(struct perf_counter *counter, int enable); | ||
1344 | |||
1345 | static void perf_adjust_period(struct perf_counter *counter, u64 events) | ||
1346 | { | ||
1347 | struct hw_perf_counter *hwc = &counter->hw; | ||
1348 | u64 period, sample_period; | ||
1349 | s64 delta; | ||
1350 | |||
1351 | events *= hwc->sample_period; | ||
1352 | period = div64_u64(events, counter->attr.sample_freq); | ||
1353 | |||
1354 | delta = (s64)(period - hwc->sample_period); | ||
1355 | delta = (delta + 7) / 8; /* low pass filter */ | ||
1356 | |||
1357 | sample_period = hwc->sample_period + delta; | ||
1358 | |||
1359 | if (!sample_period) | ||
1360 | sample_period = 1; | ||
1361 | |||
1362 | hwc->sample_period = sample_period; | ||
1363 | } | ||
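perf_adjust_period() above is a small proportional controller: the interrupt rate observed over the last interval is scaled by the current sample period to estimate the event rate, that is divided by the requested sample_freq to get the period that would hit the target, and only one eighth of the error is applied per step to damp oscillation. A stand-alone sketch of the same arithmetic (ordinary user-space C, not kernel code; the numbers in main() are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* 'rate' is the observed interrupt rate (interrupts/sec), so
 * rate * sample_period estimates the event rate, and only 1/8 of the
 * resulting error is applied per step. */
static uint64_t adjust_period(uint64_t sample_period, uint64_t sample_freq,
			      uint64_t rate)
{
	uint64_t events = rate * sample_period;	/* estimated events per second */
	uint64_t period = events / sample_freq;	/* period that would hit the target */
	int64_t delta = (int64_t)(period - sample_period);

	delta = (delta + 7) / 8;		/* low-pass filter */
	sample_period += delta;

	return sample_period ? sample_period : 1;
}

int main(void)
{
	/* current period 10000, target 1000 samples/sec, observed 4000 irqs/sec */
	printf("%llu\n", (unsigned long long)adjust_period(10000, 1000, 4000));
	return 0;
}

With those inputs one step moves the period from 10000 to 13750 rather than jumping straight to the ideal 40000, which is the point of the 1/8 filter.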
1364 | |||
1365 | static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) | ||
1366 | { | ||
1367 | struct perf_counter *counter; | ||
1368 | struct hw_perf_counter *hwc; | ||
1369 | u64 interrupts, freq; | ||
1370 | |||
1371 | spin_lock(&ctx->lock); | ||
1372 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1373 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
1374 | continue; | ||
1375 | |||
1376 | hwc = &counter->hw; | ||
1377 | |||
1378 | interrupts = hwc->interrupts; | ||
1379 | hwc->interrupts = 0; | ||
1380 | |||
1381 | /* | ||
1382 | * unthrottle counters on the tick | ||
1383 | */ | ||
1384 | if (interrupts == MAX_INTERRUPTS) { | ||
1385 | perf_log_throttle(counter, 1); | ||
1386 | counter->pmu->unthrottle(counter); | ||
1387 | interrupts = 2*sysctl_perf_counter_sample_rate/HZ; | ||
1388 | } | ||
1389 | |||
1390 | if (!counter->attr.freq || !counter->attr.sample_freq) | ||
1391 | continue; | ||
1392 | |||
1393 | /* | ||
1394 | * if the specified freq < HZ then we need to skip ticks | ||
1395 | */ | ||
1396 | if (counter->attr.sample_freq < HZ) { | ||
1397 | freq = counter->attr.sample_freq; | ||
1398 | |||
1399 | hwc->freq_count += freq; | ||
1400 | hwc->freq_interrupts += interrupts; | ||
1401 | |||
1402 | if (hwc->freq_count < HZ) | ||
1403 | continue; | ||
1404 | |||
1405 | interrupts = hwc->freq_interrupts; | ||
1406 | hwc->freq_interrupts = 0; | ||
1407 | hwc->freq_count -= HZ; | ||
1408 | } else | ||
1409 | freq = HZ; | ||
1410 | |||
1411 | perf_adjust_period(counter, freq * interrupts); | ||
1412 | |||
1413 | /* | ||
1414 | * In order to avoid being stalled by an (accidental) huge | ||
1415 | * sample period, force reset the sample period if we didn't | ||
1416 | * get any events in this freq period. | ||
1417 | */ | ||
1418 | if (!interrupts) { | ||
1419 | perf_disable(); | ||
1420 | counter->pmu->disable(counter); | ||
1421 | atomic64_set(&hwc->period_left, 0); | ||
1422 | counter->pmu->enable(counter); | ||
1423 | perf_enable(); | ||
1424 | } | ||
1425 | } | ||
1426 | spin_unlock(&ctx->lock); | ||
1427 | } | ||
1428 | |||
1429 | /* | ||
1430 | * Round-robin a context's counters: | ||
1431 | */ | ||
1432 | static void rotate_ctx(struct perf_counter_context *ctx) | ||
1433 | { | ||
1434 | struct perf_counter *counter; | ||
1435 | |||
1436 | if (!ctx->nr_counters) | ||
1437 | return; | ||
1438 | |||
1439 | spin_lock(&ctx->lock); | ||
1440 | /* | ||
1441 | * Rotate the first entry last (works just fine for group counters too): | ||
1442 | */ | ||
1443 | perf_disable(); | ||
1444 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1445 | list_move_tail(&counter->list_entry, &ctx->counter_list); | ||
1446 | break; | ||
1447 | } | ||
1448 | perf_enable(); | ||
1449 | |||
1450 | spin_unlock(&ctx->lock); | ||
1451 | } | ||
1452 | |||
1453 | void perf_counter_task_tick(struct task_struct *curr, int cpu) | ||
1454 | { | ||
1455 | struct perf_cpu_context *cpuctx; | ||
1456 | struct perf_counter_context *ctx; | ||
1457 | |||
1458 | if (!atomic_read(&nr_counters)) | ||
1459 | return; | ||
1460 | |||
1461 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1462 | ctx = curr->perf_counter_ctxp; | ||
1463 | |||
1464 | perf_ctx_adjust_freq(&cpuctx->ctx); | ||
1465 | if (ctx) | ||
1466 | perf_ctx_adjust_freq(ctx); | ||
1467 | |||
1468 | perf_counter_cpu_sched_out(cpuctx); | ||
1469 | if (ctx) | ||
1470 | __perf_counter_task_sched_out(ctx); | ||
1471 | |||
1472 | rotate_ctx(&cpuctx->ctx); | ||
1473 | if (ctx) | ||
1474 | rotate_ctx(ctx); | ||
1475 | |||
1476 | perf_counter_cpu_sched_in(cpuctx, cpu); | ||
1477 | if (ctx) | ||
1478 | perf_counter_task_sched_in(curr, cpu); | ||
1479 | } | ||
1480 | |||
1481 | /* | ||
1482 | * Enable all of a task's counters that have been marked enable-on-exec. | ||
1483 | * This expects task == current. | ||
1484 | */ | ||
1485 | static void perf_counter_enable_on_exec(struct task_struct *task) | ||
1486 | { | ||
1487 | struct perf_counter_context *ctx; | ||
1488 | struct perf_counter *counter; | ||
1489 | unsigned long flags; | ||
1490 | int enabled = 0; | ||
1491 | |||
1492 | local_irq_save(flags); | ||
1493 | ctx = task->perf_counter_ctxp; | ||
1494 | if (!ctx || !ctx->nr_counters) | ||
1495 | goto out; | ||
1496 | |||
1497 | __perf_counter_task_sched_out(ctx); | ||
1498 | |||
1499 | spin_lock(&ctx->lock); | ||
1500 | |||
1501 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1502 | if (!counter->attr.enable_on_exec) | ||
1503 | continue; | ||
1504 | counter->attr.enable_on_exec = 0; | ||
1505 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
1506 | continue; | ||
1507 | __perf_counter_mark_enabled(counter, ctx); | ||
1508 | enabled = 1; | ||
1509 | } | ||
1510 | |||
1511 | /* | ||
1512 | * Unclone this context if we enabled any counter. | ||
1513 | */ | ||
1514 | if (enabled) | ||
1515 | unclone_ctx(ctx); | ||
1516 | |||
1517 | spin_unlock(&ctx->lock); | ||
1518 | |||
1519 | perf_counter_task_sched_in(task, smp_processor_id()); | ||
1520 | out: | ||
1521 | local_irq_restore(flags); | ||
1522 | } | ||
1523 | |||
1524 | /* | ||
1525 | * Cross CPU call to read the hardware counter | ||
1526 | */ | ||
1527 | static void __perf_counter_read(void *info) | ||
1528 | { | ||
1529 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
1530 | struct perf_counter *counter = info; | ||
1531 | struct perf_counter_context *ctx = counter->ctx; | ||
1532 | unsigned long flags; | ||
1533 | |||
1534 | /* | ||
1535 | * If this is a task context, we need to check whether it is | ||
1536 | * the current task context of this cpu. If not it has been | ||
1537 | * scheduled out before the smp call arrived. In that case | ||
1538 | * counter->count would have been updated to a recent sample | ||
1539 | * when the counter was scheduled out. | ||
1540 | */ | ||
1541 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
1542 | return; | ||
1543 | |||
1544 | local_irq_save(flags); | ||
1545 | if (ctx->is_active) | ||
1546 | update_context_time(ctx); | ||
1547 | counter->pmu->read(counter); | ||
1548 | update_counter_times(counter); | ||
1549 | local_irq_restore(flags); | ||
1550 | } | ||
1551 | |||
1552 | static u64 perf_counter_read(struct perf_counter *counter) | ||
1553 | { | ||
1554 | /* | ||
1555 | * If counter is enabled and currently active on a CPU, update the | ||
1556 | * value in the counter structure: | ||
1557 | */ | ||
1558 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) { | ||
1559 | smp_call_function_single(counter->oncpu, | ||
1560 | __perf_counter_read, counter, 1); | ||
1561 | } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { | ||
1562 | update_counter_times(counter); | ||
1563 | } | ||
1564 | |||
1565 | return atomic64_read(&counter->count); | ||
1566 | } | ||
1567 | |||
1568 | /* | ||
1569 | * Initialize the perf_counter context in a task_struct: | ||
1570 | */ | ||
1571 | static void | ||
1572 | __perf_counter_init_context(struct perf_counter_context *ctx, | ||
1573 | struct task_struct *task) | ||
1574 | { | ||
1575 | memset(ctx, 0, sizeof(*ctx)); | ||
1576 | spin_lock_init(&ctx->lock); | ||
1577 | mutex_init(&ctx->mutex); | ||
1578 | INIT_LIST_HEAD(&ctx->counter_list); | ||
1579 | INIT_LIST_HEAD(&ctx->event_list); | ||
1580 | atomic_set(&ctx->refcount, 1); | ||
1581 | ctx->task = task; | ||
1582 | } | ||
1583 | |||
1584 | static struct perf_counter_context *find_get_context(pid_t pid, int cpu) | ||
1585 | { | ||
1586 | struct perf_counter_context *ctx; | ||
1587 | struct perf_cpu_context *cpuctx; | ||
1588 | struct task_struct *task; | ||
1589 | unsigned long flags; | ||
1590 | int err; | ||
1591 | |||
1592 | /* | ||
1593 | * If cpu is not a wildcard then this is a percpu counter: | ||
1594 | */ | ||
1595 | if (cpu != -1) { | ||
1596 | /* Must be root to operate on a CPU counter: */ | ||
1597 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | ||
1598 | return ERR_PTR(-EACCES); | ||
1599 | |||
1600 | if (cpu < 0 || cpu > num_possible_cpus()) | ||
1601 | return ERR_PTR(-EINVAL); | ||
1602 | |||
1603 | /* | ||
1604 | * We could be clever and allow attaching a counter to an | ||
1605 | * offline CPU, activating it when the CPU comes up, but | ||
1606 | * that's for later. | ||
1607 | */ | ||
1608 | if (!cpu_isset(cpu, cpu_online_map)) | ||
1609 | return ERR_PTR(-ENODEV); | ||
1610 | |||
1611 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1612 | ctx = &cpuctx->ctx; | ||
1613 | get_ctx(ctx); | ||
1614 | |||
1615 | return ctx; | ||
1616 | } | ||
1617 | |||
1618 | rcu_read_lock(); | ||
1619 | if (!pid) | ||
1620 | task = current; | ||
1621 | else | ||
1622 | task = find_task_by_vpid(pid); | ||
1623 | if (task) | ||
1624 | get_task_struct(task); | ||
1625 | rcu_read_unlock(); | ||
1626 | |||
1627 | if (!task) | ||
1628 | return ERR_PTR(-ESRCH); | ||
1629 | |||
1630 | /* | ||
1631 | * Can't attach counters to a dying task. | ||
1632 | */ | ||
1633 | err = -ESRCH; | ||
1634 | if (task->flags & PF_EXITING) | ||
1635 | goto errout; | ||
1636 | |||
1637 | /* Reuse ptrace permission checks for now. */ | ||
1638 | err = -EACCES; | ||
1639 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) | ||
1640 | goto errout; | ||
1641 | |||
1642 | retry: | ||
1643 | ctx = perf_lock_task_context(task, &flags); | ||
1644 | if (ctx) { | ||
1645 | unclone_ctx(ctx); | ||
1646 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
1647 | } | ||
1648 | |||
1649 | if (!ctx) { | ||
1650 | ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); | ||
1651 | err = -ENOMEM; | ||
1652 | if (!ctx) | ||
1653 | goto errout; | ||
1654 | __perf_counter_init_context(ctx, task); | ||
1655 | get_ctx(ctx); | ||
1656 | if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) { | ||
1657 | /* | ||
1658 | * We raced with some other task; use | ||
1659 | * the context they set. | ||
1660 | */ | ||
1661 | kfree(ctx); | ||
1662 | goto retry; | ||
1663 | } | ||
1664 | get_task_struct(task); | ||
1665 | } | ||
1666 | |||
1667 | put_task_struct(task); | ||
1668 | return ctx; | ||
1669 | |||
1670 | errout: | ||
1671 | put_task_struct(task); | ||
1672 | return ERR_PTR(err); | ||
1673 | } | ||
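The allocation path above is the usual lock-free install-or-discard pattern: build a candidate context, publish it with a single cmpxchg() against a NULL pointer, and if another thread raced us and won, free the candidate and adopt the published one. A hedged stand-alone sketch of that idiom, using GCC's __atomic builtins in place of the kernel's cmpxchg() and a hypothetical struct ctx:

#include <stdlib.h>

struct ctx { int refcount; };			/* hypothetical payload */

/* Install-or-discard: publish our candidate with one CAS, or adopt the winner's. */
struct ctx *install_ctx(struct ctx **slot)
{
	struct ctx *cur = __atomic_load_n(slot, __ATOMIC_ACQUIRE);

	while (!cur) {
		struct ctx *fresh = calloc(1, sizeof(*fresh));

		if (!fresh)
			return NULL;

		/* Succeeds only if *slot is still NULL; on failure 'cur' is
		 * updated to whatever the winning thread installed. */
		if (__atomic_compare_exchange_n(slot, &cur, fresh, 0,
						__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
			return fresh;

		free(fresh);			/* lost the race, use theirs */
	}

	return cur;
}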
1674 | |||
1675 | static void free_counter_rcu(struct rcu_head *head) | ||
1676 | { | ||
1677 | struct perf_counter *counter; | ||
1678 | |||
1679 | counter = container_of(head, struct perf_counter, rcu_head); | ||
1680 | if (counter->ns) | ||
1681 | put_pid_ns(counter->ns); | ||
1682 | kfree(counter); | ||
1683 | } | ||
1684 | |||
1685 | static void perf_pending_sync(struct perf_counter *counter); | ||
1686 | |||
1687 | static void free_counter(struct perf_counter *counter) | ||
1688 | { | ||
1689 | perf_pending_sync(counter); | ||
1690 | |||
1691 | if (!counter->parent) { | ||
1692 | atomic_dec(&nr_counters); | ||
1693 | if (counter->attr.mmap) | ||
1694 | atomic_dec(&nr_mmap_counters); | ||
1695 | if (counter->attr.comm) | ||
1696 | atomic_dec(&nr_comm_counters); | ||
1697 | if (counter->attr.task) | ||
1698 | atomic_dec(&nr_task_counters); | ||
1699 | } | ||
1700 | |||
1701 | if (counter->output) { | ||
1702 | fput(counter->output->filp); | ||
1703 | counter->output = NULL; | ||
1704 | } | ||
1705 | |||
1706 | if (counter->destroy) | ||
1707 | counter->destroy(counter); | ||
1708 | |||
1709 | put_ctx(counter->ctx); | ||
1710 | call_rcu(&counter->rcu_head, free_counter_rcu); | ||
1711 | } | ||
1712 | |||
1713 | /* | ||
1714 | * Called when the last reference to the file is gone. | ||
1715 | */ | ||
1716 | static int perf_release(struct inode *inode, struct file *file) | ||
1717 | { | ||
1718 | struct perf_counter *counter = file->private_data; | ||
1719 | struct perf_counter_context *ctx = counter->ctx; | ||
1720 | |||
1721 | file->private_data = NULL; | ||
1722 | |||
1723 | WARN_ON_ONCE(ctx->parent_ctx); | ||
1724 | mutex_lock(&ctx->mutex); | ||
1725 | perf_counter_remove_from_context(counter); | ||
1726 | mutex_unlock(&ctx->mutex); | ||
1727 | |||
1728 | mutex_lock(&counter->owner->perf_counter_mutex); | ||
1729 | list_del_init(&counter->owner_entry); | ||
1730 | mutex_unlock(&counter->owner->perf_counter_mutex); | ||
1731 | put_task_struct(counter->owner); | ||
1732 | |||
1733 | free_counter(counter); | ||
1734 | |||
1735 | return 0; | ||
1736 | } | ||
1737 | |||
1738 | static int perf_counter_read_size(struct perf_counter *counter) | ||
1739 | { | ||
1740 | int entry = sizeof(u64); /* value */ | ||
1741 | int size = 0; | ||
1742 | int nr = 1; | ||
1743 | |||
1744 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
1745 | size += sizeof(u64); | ||
1746 | |||
1747 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
1748 | size += sizeof(u64); | ||
1749 | |||
1750 | if (counter->attr.read_format & PERF_FORMAT_ID) | ||
1751 | entry += sizeof(u64); | ||
1752 | |||
1753 | if (counter->attr.read_format & PERF_FORMAT_GROUP) { | ||
1754 | nr += counter->group_leader->nr_siblings; | ||
1755 | size += sizeof(u64); | ||
1756 | } | ||
1757 | |||
1758 | size += entry * nr; | ||
1759 | |||
1760 | return size; | ||
1761 | } | ||
1762 | |||
1763 | static u64 perf_counter_read_value(struct perf_counter *counter) | ||
1764 | { | ||
1765 | struct perf_counter *child; | ||
1766 | u64 total = 0; | ||
1767 | |||
1768 | total += perf_counter_read(counter); | ||
1769 | list_for_each_entry(child, &counter->child_list, child_list) | ||
1770 | total += perf_counter_read(child); | ||
1771 | |||
1772 | return total; | ||
1773 | } | ||
1774 | |||
1775 | static int perf_counter_read_entry(struct perf_counter *counter, | ||
1776 | u64 read_format, char __user *buf) | ||
1777 | { | ||
1778 | int n = 0, count = 0; | ||
1779 | u64 values[2]; | ||
1780 | |||
1781 | values[n++] = perf_counter_read_value(counter); | ||
1782 | if (read_format & PERF_FORMAT_ID) | ||
1783 | values[n++] = primary_counter_id(counter); | ||
1784 | |||
1785 | count = n * sizeof(u64); | ||
1786 | |||
1787 | if (copy_to_user(buf, values, count)) | ||
1788 | return -EFAULT; | ||
1789 | |||
1790 | return count; | ||
1791 | } | ||
1792 | |||
1793 | static int perf_counter_read_group(struct perf_counter *counter, | ||
1794 | u64 read_format, char __user *buf) | ||
1795 | { | ||
1796 | struct perf_counter *leader = counter->group_leader, *sub; | ||
1797 | int n = 0, size = 0, err = -EFAULT; | ||
1798 | u64 values[3]; | ||
1799 | |||
1800 | values[n++] = 1 + leader->nr_siblings; | ||
1801 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
1802 | values[n++] = leader->total_time_enabled + | ||
1803 | atomic64_read(&leader->child_total_time_enabled); | ||
1804 | } | ||
1805 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1806 | values[n++] = leader->total_time_running + | ||
1807 | atomic64_read(&leader->child_total_time_running); | ||
1808 | } | ||
1809 | |||
1810 | size = n * sizeof(u64); | ||
1811 | |||
1812 | if (copy_to_user(buf, values, size)) | ||
1813 | return -EFAULT; | ||
1814 | |||
1815 | err = perf_counter_read_entry(leader, read_format, buf + size); | ||
1816 | if (err < 0) | ||
1817 | return err; | ||
1818 | |||
1819 | size += err; | ||
1820 | |||
1821 | list_for_each_entry(sub, &leader->sibling_list, list_entry) { | ||
1822 | err = perf_counter_read_entry(sub, read_format, | ||
1823 | buf + size); | ||
1824 | if (err < 0) | ||
1825 | return err; | ||
1826 | |||
1827 | size += err; | ||
1828 | } | ||
1829 | |||
1830 | return size; | ||
1831 | } | ||
1832 | |||
1833 | static int perf_counter_read_one(struct perf_counter *counter, | ||
1834 | u64 read_format, char __user *buf) | ||
1835 | { | ||
1836 | u64 values[4]; | ||
1837 | int n = 0; | ||
1838 | |||
1839 | values[n++] = perf_counter_read_value(counter); | ||
1840 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
1841 | values[n++] = counter->total_time_enabled + | ||
1842 | atomic64_read(&counter->child_total_time_enabled); | ||
1843 | } | ||
1844 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1845 | values[n++] = counter->total_time_running + | ||
1846 | atomic64_read(&counter->child_total_time_running); | ||
1847 | } | ||
1848 | if (read_format & PERF_FORMAT_ID) | ||
1849 | values[n++] = primary_counter_id(counter); | ||
1850 | |||
1851 | if (copy_to_user(buf, values, n * sizeof(u64))) | ||
1852 | return -EFAULT; | ||
1853 | |||
1854 | return n * sizeof(u64); | ||
1855 | } | ||
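Taken together, the helpers above define the layout a read() on a counter fd returns: without PERF_FORMAT_GROUP it is { value, time_enabled?, time_running?, id? }, and with PERF_FORMAT_GROUP it is { nr, time_enabled?, time_running? } followed by one { value, id? } entry for the leader and each sibling. A rough user-space view of the non-group case with all optional fields requested (a sketch for illustration, not a uapi header):

#include <stdint.h>

/* What perf_counter_read_one() emits when read_format requests
 * TOTAL_TIME_ENABLED | TOTAL_TIME_RUNNING | ID. */
struct read_one {
	uint64_t value;		/* counter value, summed over children   */
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED        */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING        */
	uint64_t id;		/* PERF_FORMAT_ID                        */
};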
1856 | |||
1857 | /* | ||
1858 | * Read the performance counter - simple non-blocking version for now | ||
1859 | */ | ||
1860 | static ssize_t | ||
1861 | perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) | ||
1862 | { | ||
1863 | u64 read_format = counter->attr.read_format; | ||
1864 | int ret; | ||
1865 | |||
1866 | /* | ||
1867 | * Return end-of-file for a read on a counter that is in | ||
1868 | * error state (i.e. because it was pinned but it couldn't be | ||
1869 | * scheduled on to the CPU at some point). | ||
1870 | */ | ||
1871 | if (counter->state == PERF_COUNTER_STATE_ERROR) | ||
1872 | return 0; | ||
1873 | |||
1874 | if (count < perf_counter_read_size(counter)) | ||
1875 | return -ENOSPC; | ||
1876 | |||
1877 | WARN_ON_ONCE(counter->ctx->parent_ctx); | ||
1878 | mutex_lock(&counter->child_mutex); | ||
1879 | if (read_format & PERF_FORMAT_GROUP) | ||
1880 | ret = perf_counter_read_group(counter, read_format, buf); | ||
1881 | else | ||
1882 | ret = perf_counter_read_one(counter, read_format, buf); | ||
1883 | mutex_unlock(&counter->child_mutex); | ||
1884 | |||
1885 | return ret; | ||
1886 | } | ||
1887 | |||
1888 | static ssize_t | ||
1889 | perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | ||
1890 | { | ||
1891 | struct perf_counter *counter = file->private_data; | ||
1892 | |||
1893 | return perf_read_hw(counter, buf, count); | ||
1894 | } | ||
1895 | |||
1896 | static unsigned int perf_poll(struct file *file, poll_table *wait) | ||
1897 | { | ||
1898 | struct perf_counter *counter = file->private_data; | ||
1899 | struct perf_mmap_data *data; | ||
1900 | unsigned int events = POLLHUP; | ||
1901 | |||
1902 | rcu_read_lock(); | ||
1903 | data = rcu_dereference(counter->data); | ||
1904 | if (data) | ||
1905 | events = atomic_xchg(&data->poll, 0); | ||
1906 | rcu_read_unlock(); | ||
1907 | |||
1908 | poll_wait(file, &counter->waitq, wait); | ||
1909 | |||
1910 | return events; | ||
1911 | } | ||
1912 | |||
1913 | static void perf_counter_reset(struct perf_counter *counter) | ||
1914 | { | ||
1915 | (void)perf_counter_read(counter); | ||
1916 | atomic64_set(&counter->count, 0); | ||
1917 | perf_counter_update_userpage(counter); | ||
1918 | } | ||
1919 | |||
1920 | /* | ||
1921 | * Holding the top-level counter's child_mutex means that any | ||
1922 | * descendant process that has inherited this counter will block | ||
1923 | * in sync_child_counter if it goes to exit, thus satisfying the | ||
1924 | * task existence requirements of perf_counter_enable/disable. | ||
1925 | */ | ||
1926 | static void perf_counter_for_each_child(struct perf_counter *counter, | ||
1927 | void (*func)(struct perf_counter *)) | ||
1928 | { | ||
1929 | struct perf_counter *child; | ||
1930 | |||
1931 | WARN_ON_ONCE(counter->ctx->parent_ctx); | ||
1932 | mutex_lock(&counter->child_mutex); | ||
1933 | func(counter); | ||
1934 | list_for_each_entry(child, &counter->child_list, child_list) | ||
1935 | func(child); | ||
1936 | mutex_unlock(&counter->child_mutex); | ||
1937 | } | ||
1938 | |||
1939 | static void perf_counter_for_each(struct perf_counter *counter, | ||
1940 | void (*func)(struct perf_counter *)) | ||
1941 | { | ||
1942 | struct perf_counter_context *ctx = counter->ctx; | ||
1943 | struct perf_counter *sibling; | ||
1944 | |||
1945 | WARN_ON_ONCE(ctx->parent_ctx); | ||
1946 | mutex_lock(&ctx->mutex); | ||
1947 | counter = counter->group_leader; | ||
1948 | |||
1949 | perf_counter_for_each_child(counter, func); | ||
1950 | func(counter); | ||
1951 | list_for_each_entry(sibling, &counter->sibling_list, list_entry) | ||
1952 | perf_counter_for_each_child(sibling, func); | ||
1953 | mutex_unlock(&ctx->mutex); | ||
1954 | } | ||
1955 | |||
1956 | static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) | ||
1957 | { | ||
1958 | struct perf_counter_context *ctx = counter->ctx; | ||
1959 | unsigned long size; | ||
1960 | int ret = 0; | ||
1961 | u64 value; | ||
1962 | |||
1963 | if (!counter->attr.sample_period) | ||
1964 | return -EINVAL; | ||
1965 | |||
1966 | size = copy_from_user(&value, arg, sizeof(value)); | ||
1967 | if (size) | ||
1968 | return -EFAULT; | ||
1969 | |||
1970 | if (!value) | ||
1971 | return -EINVAL; | ||
1972 | |||
1973 | spin_lock_irq(&ctx->lock); | ||
1974 | if (counter->attr.freq) { | ||
1975 | if (value > sysctl_perf_counter_sample_rate) { | ||
1976 | ret = -EINVAL; | ||
1977 | goto unlock; | ||
1978 | } | ||
1979 | |||
1980 | counter->attr.sample_freq = value; | ||
1981 | } else { | ||
1982 | counter->attr.sample_period = value; | ||
1983 | counter->hw.sample_period = value; | ||
1984 | } | ||
1985 | unlock: | ||
1986 | spin_unlock_irq(&ctx->lock); | ||
1987 | |||
1988 | return ret; | ||
1989 | } | ||
1990 | |||
1991 | int perf_counter_set_output(struct perf_counter *counter, int output_fd); | ||
1992 | |||
1993 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
1994 | { | ||
1995 | struct perf_counter *counter = file->private_data; | ||
1996 | void (*func)(struct perf_counter *); | ||
1997 | u32 flags = arg; | ||
1998 | |||
1999 | switch (cmd) { | ||
2000 | case PERF_COUNTER_IOC_ENABLE: | ||
2001 | func = perf_counter_enable; | ||
2002 | break; | ||
2003 | case PERF_COUNTER_IOC_DISABLE: | ||
2004 | func = perf_counter_disable; | ||
2005 | break; | ||
2006 | case PERF_COUNTER_IOC_RESET: | ||
2007 | func = perf_counter_reset; | ||
2008 | break; | ||
2009 | |||
2010 | case PERF_COUNTER_IOC_REFRESH: | ||
2011 | return perf_counter_refresh(counter, arg); | ||
2012 | |||
2013 | case PERF_COUNTER_IOC_PERIOD: | ||
2014 | return perf_counter_period(counter, (u64 __user *)arg); | ||
2015 | |||
2016 | case PERF_COUNTER_IOC_SET_OUTPUT: | ||
2017 | return perf_counter_set_output(counter, arg); | ||
2018 | |||
2019 | default: | ||
2020 | return -ENOTTY; | ||
2021 | } | ||
2022 | |||
2023 | if (flags & PERF_IOC_FLAG_GROUP) | ||
2024 | perf_counter_for_each(counter, func); | ||
2025 | else | ||
2026 | perf_counter_for_each_child(counter, func); | ||
2027 | |||
2028 | return 0; | ||
2029 | } | ||
2030 | |||
2031 | int perf_counter_task_enable(void) | ||
2032 | { | ||
2033 | struct perf_counter *counter; | ||
2034 | |||
2035 | mutex_lock(&current->perf_counter_mutex); | ||
2036 | list_for_each_entry(counter, &current->perf_counter_list, owner_entry) | ||
2037 | perf_counter_for_each_child(counter, perf_counter_enable); | ||
2038 | mutex_unlock(&current->perf_counter_mutex); | ||
2039 | |||
2040 | return 0; | ||
2041 | } | ||
2042 | |||
2043 | int perf_counter_task_disable(void) | ||
2044 | { | ||
2045 | struct perf_counter *counter; | ||
2046 | |||
2047 | mutex_lock(&current->perf_counter_mutex); | ||
2048 | list_for_each_entry(counter, &current->perf_counter_list, owner_entry) | ||
2049 | perf_counter_for_each_child(counter, perf_counter_disable); | ||
2050 | mutex_unlock(&current->perf_counter_mutex); | ||
2051 | |||
2052 | return 0; | ||
2053 | } | ||
2054 | |||
2055 | #ifndef PERF_COUNTER_INDEX_OFFSET | ||
2056 | # define PERF_COUNTER_INDEX_OFFSET 0 | ||
2057 | #endif | ||
2058 | |||
2059 | static int perf_counter_index(struct perf_counter *counter) | ||
2060 | { | ||
2061 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
2062 | return 0; | ||
2063 | |||
2064 | return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; | ||
2065 | } | ||
2066 | |||
2067 | /* | ||
2068 | * Callers need to ensure there can be no nesting of this function, otherwise | ||
2069 | * the seqlock logic goes bad. We cannot serialize this because the arch | ||
2070 | * code calls this from NMI context. | ||
2071 | */ | ||
2072 | void perf_counter_update_userpage(struct perf_counter *counter) | ||
2073 | { | ||
2074 | struct perf_counter_mmap_page *userpg; | ||
2075 | struct perf_mmap_data *data; | ||
2076 | |||
2077 | rcu_read_lock(); | ||
2078 | data = rcu_dereference(counter->data); | ||
2079 | if (!data) | ||
2080 | goto unlock; | ||
2081 | |||
2082 | userpg = data->user_page; | ||
2083 | |||
2084 | /* | ||
2085 | * Disable preemption so as to not let the corresponding user-space | ||
2086 | * spin too long if we get preempted. | ||
2087 | */ | ||
2088 | preempt_disable(); | ||
2089 | ++userpg->lock; | ||
2090 | barrier(); | ||
2091 | userpg->index = perf_counter_index(counter); | ||
2092 | userpg->offset = atomic64_read(&counter->count); | ||
2093 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) | ||
2094 | userpg->offset -= atomic64_read(&counter->hw.prev_count); | ||
2095 | |||
2096 | userpg->time_enabled = counter->total_time_enabled + | ||
2097 | atomic64_read(&counter->child_total_time_enabled); | ||
2098 | |||
2099 | userpg->time_running = counter->total_time_running + | ||
2100 | atomic64_read(&counter->child_total_time_running); | ||
2101 | |||
2102 | barrier(); | ||
2103 | ++userpg->lock; | ||
2104 | preempt_enable(); | ||
2105 | unlock: | ||
2106 | rcu_read_unlock(); | ||
2107 | } | ||
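The ++userpg->lock / barrier() pair above is a seqcount: the count is odd while the kernel is mid-update and advances by two per update, so a user-space reader of the mmap'ed page must retry whenever the count is odd or changes across its reads. A minimal read-side loop, assuming a struct that mirrors only the fields touched above (not the real page layout) and using a plain compiler barrier where a real consumer may also want an explicit read barrier:

#include <stdint.h>

/* Mirrors only the fields updated above; a sketch, not the actual layout. */
struct mmap_page {
	volatile uint32_t lock;		/* seqcount: odd while an update is in flight */
	volatile uint32_t index;	/* hw counter index + 1, 0 means not active    */
	volatile int64_t  offset;	/* software count to add to the hw value       */
	volatile uint64_t time_enabled;
	volatile uint64_t time_running;
};

/* Retry until we read a consistent snapshot (even, unchanged seqcount). */
void read_times(struct mmap_page *pg, uint64_t *ena, uint64_t *run)
{
	uint32_t seq;

	do {
		seq = pg->lock;
		__asm__ __volatile__("" ::: "memory");	/* compiler barrier */
		*ena = pg->time_enabled;
		*run = pg->time_running;
		__asm__ __volatile__("" ::: "memory");
	} while (pg->lock != seq || (seq & 1));
}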
2108 | |||
2109 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
2110 | { | ||
2111 | struct perf_counter *counter = vma->vm_file->private_data; | ||
2112 | struct perf_mmap_data *data; | ||
2113 | int ret = VM_FAULT_SIGBUS; | ||
2114 | |||
2115 | if (vmf->flags & FAULT_FLAG_MKWRITE) { | ||
2116 | if (vmf->pgoff == 0) | ||
2117 | ret = 0; | ||
2118 | return ret; | ||
2119 | } | ||
2120 | |||
2121 | rcu_read_lock(); | ||
2122 | data = rcu_dereference(counter->data); | ||
2123 | if (!data) | ||
2124 | goto unlock; | ||
2125 | |||
2126 | if (vmf->pgoff == 0) { | ||
2127 | vmf->page = virt_to_page(data->user_page); | ||
2128 | } else { | ||
2129 | int nr = vmf->pgoff - 1; | ||
2130 | |||
2131 | if ((unsigned)nr > data->nr_pages) | ||
2132 | goto unlock; | ||
2133 | |||
2134 | if (vmf->flags & FAULT_FLAG_WRITE) | ||
2135 | goto unlock; | ||
2136 | |||
2137 | vmf->page = virt_to_page(data->data_pages[nr]); | ||
2138 | } | ||
2139 | |||
2140 | get_page(vmf->page); | ||
2141 | vmf->page->mapping = vma->vm_file->f_mapping; | ||
2142 | vmf->page->index = vmf->pgoff; | ||
2143 | |||
2144 | ret = 0; | ||
2145 | unlock: | ||
2146 | rcu_read_unlock(); | ||
2147 | |||
2148 | return ret; | ||
2149 | } | ||
2150 | |||
2151 | static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) | ||
2152 | { | ||
2153 | struct perf_mmap_data *data; | ||
2154 | unsigned long size; | ||
2155 | int i; | ||
2156 | |||
2157 | WARN_ON(atomic_read(&counter->mmap_count)); | ||
2158 | |||
2159 | size = sizeof(struct perf_mmap_data); | ||
2160 | size += nr_pages * sizeof(void *); | ||
2161 | |||
2162 | data = kzalloc(size, GFP_KERNEL); | ||
2163 | if (!data) | ||
2164 | goto fail; | ||
2165 | |||
2166 | data->user_page = (void *)get_zeroed_page(GFP_KERNEL); | ||
2167 | if (!data->user_page) | ||
2168 | goto fail_user_page; | ||
2169 | |||
2170 | for (i = 0; i < nr_pages; i++) { | ||
2171 | data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); | ||
2172 | if (!data->data_pages[i]) | ||
2173 | goto fail_data_pages; | ||
2174 | } | ||
2175 | |||
2176 | data->nr_pages = nr_pages; | ||
2177 | atomic_set(&data->lock, -1); | ||
2178 | |||
2179 | rcu_assign_pointer(counter->data, data); | ||
2180 | |||
2181 | return 0; | ||
2182 | |||
2183 | fail_data_pages: | ||
2184 | for (i--; i >= 0; i--) | ||
2185 | free_page((unsigned long)data->data_pages[i]); | ||
2186 | |||
2187 | free_page((unsigned long)data->user_page); | ||
2188 | |||
2189 | fail_user_page: | ||
2190 | kfree(data); | ||
2191 | |||
2192 | fail: | ||
2193 | return -ENOMEM; | ||
2194 | } | ||
2195 | |||
2196 | static void perf_mmap_free_page(unsigned long addr) | ||
2197 | { | ||
2198 | struct page *page = virt_to_page((void *)addr); | ||
2199 | |||
2200 | page->mapping = NULL; | ||
2201 | __free_page(page); | ||
2202 | } | ||
2203 | |||
2204 | static void __perf_mmap_data_free(struct rcu_head *rcu_head) | ||
2205 | { | ||
2206 | struct perf_mmap_data *data; | ||
2207 | int i; | ||
2208 | |||
2209 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | ||
2210 | |||
2211 | perf_mmap_free_page((unsigned long)data->user_page); | ||
2212 | for (i = 0; i < data->nr_pages; i++) | ||
2213 | perf_mmap_free_page((unsigned long)data->data_pages[i]); | ||
2214 | |||
2215 | kfree(data); | ||
2216 | } | ||
2217 | |||
2218 | static void perf_mmap_data_free(struct perf_counter *counter) | ||
2219 | { | ||
2220 | struct perf_mmap_data *data = counter->data; | ||
2221 | |||
2222 | WARN_ON(atomic_read(&counter->mmap_count)); | ||
2223 | |||
2224 | rcu_assign_pointer(counter->data, NULL); | ||
2225 | call_rcu(&data->rcu_head, __perf_mmap_data_free); | ||
2226 | } | ||
2227 | |||
2228 | static void perf_mmap_open(struct vm_area_struct *vma) | ||
2229 | { | ||
2230 | struct perf_counter *counter = vma->vm_file->private_data; | ||
2231 | |||
2232 | atomic_inc(&counter->mmap_count); | ||
2233 | } | ||
2234 | |||
2235 | static void perf_mmap_close(struct vm_area_struct *vma) | ||
2236 | { | ||
2237 | struct perf_counter *counter = vma->vm_file->private_data; | ||
2238 | |||
2239 | WARN_ON_ONCE(counter->ctx->parent_ctx); | ||
2240 | if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { | ||
2241 | struct user_struct *user = current_user(); | ||
2242 | |||
2243 | atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); | ||
2244 | vma->vm_mm->locked_vm -= counter->data->nr_locked; | ||
2245 | perf_mmap_data_free(counter); | ||
2246 | mutex_unlock(&counter->mmap_mutex); | ||
2247 | } | ||
2248 | } | ||
2249 | |||
2250 | static struct vm_operations_struct perf_mmap_vmops = { | ||
2251 | .open = perf_mmap_open, | ||
2252 | .close = perf_mmap_close, | ||
2253 | .fault = perf_mmap_fault, | ||
2254 | .page_mkwrite = perf_mmap_fault, | ||
2255 | }; | ||
2256 | |||
2257 | static int perf_mmap(struct file *file, struct vm_area_struct *vma) | ||
2258 | { | ||
2259 | struct perf_counter *counter = file->private_data; | ||
2260 | unsigned long user_locked, user_lock_limit; | ||
2261 | struct user_struct *user = current_user(); | ||
2262 | unsigned long locked, lock_limit; | ||
2263 | unsigned long vma_size; | ||
2264 | unsigned long nr_pages; | ||
2265 | long user_extra, extra; | ||
2266 | int ret = 0; | ||
2267 | |||
2268 | if (!(vma->vm_flags & VM_SHARED)) | ||
2269 | return -EINVAL; | ||
2270 | |||
2271 | vma_size = vma->vm_end - vma->vm_start; | ||
2272 | nr_pages = (vma_size / PAGE_SIZE) - 1; | ||
2273 | |||
2274 | /* | ||
2275 | * If we have data pages ensure they're a power-of-two number, so we | ||
2276 | * can do bitmasks instead of modulo. | ||
2277 | */ | ||
2278 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) | ||
2279 | return -EINVAL; | ||
2280 | |||
2281 | if (vma_size != PAGE_SIZE * (1 + nr_pages)) | ||
2282 | return -EINVAL; | ||
2283 | |||
2284 | if (vma->vm_pgoff != 0) | ||
2285 | return -EINVAL; | ||
2286 | |||
2287 | WARN_ON_ONCE(counter->ctx->parent_ctx); | ||
2288 | mutex_lock(&counter->mmap_mutex); | ||
2289 | if (counter->output) { | ||
2290 | ret = -EINVAL; | ||
2291 | goto unlock; | ||
2292 | } | ||
2293 | |||
2294 | if (atomic_inc_not_zero(&counter->mmap_count)) { | ||
2295 | if (nr_pages != counter->data->nr_pages) | ||
2296 | ret = -EINVAL; | ||
2297 | goto unlock; | ||
2298 | } | ||
2299 | |||
2300 | user_extra = nr_pages + 1; | ||
2301 | user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); | ||
2302 | |||
2303 | /* | ||
2304 | * Increase the limit linearly with more CPUs: | ||
2305 | */ | ||
2306 | user_lock_limit *= num_online_cpus(); | ||
2307 | |||
2308 | user_locked = atomic_long_read(&user->locked_vm) + user_extra; | ||
2309 | |||
2310 | extra = 0; | ||
2311 | if (user_locked > user_lock_limit) | ||
2312 | extra = user_locked - user_lock_limit; | ||
2313 | |||
2314 | lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; | ||
2315 | lock_limit >>= PAGE_SHIFT; | ||
2316 | locked = vma->vm_mm->locked_vm + extra; | ||
2317 | |||
2318 | if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { | ||
2319 | ret = -EPERM; | ||
2320 | goto unlock; | ||
2321 | } | ||
2322 | |||
2323 | WARN_ON(counter->data); | ||
2324 | ret = perf_mmap_data_alloc(counter, nr_pages); | ||
2325 | if (ret) | ||
2326 | goto unlock; | ||
2327 | |||
2328 | atomic_set(&counter->mmap_count, 1); | ||
2329 | atomic_long_add(user_extra, &user->locked_vm); | ||
2330 | vma->vm_mm->locked_vm += extra; | ||
2331 | counter->data->nr_locked = extra; | ||
2332 | if (vma->vm_flags & VM_WRITE) | ||
2333 | counter->data->writable = 1; | ||
2334 | |||
2335 | unlock: | ||
2336 | mutex_unlock(&counter->mmap_mutex); | ||
2337 | |||
2338 | vma->vm_flags |= VM_RESERVED; | ||
2339 | vma->vm_ops = &perf_mmap_vmops; | ||
2340 | |||
2341 | return ret; | ||
2342 | } | ||
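So a mapping is always one metadata page plus a power-of-two number of data pages, and those pages are charged first against a per-user budget (sysctl_perf_counter_mlock in KiB, scaled by the number of online CPUs) and only the remainder against the task's RLIMIT_MEMLOCK. A small worked example of that accounting, assuming 4 KiB pages, four online CPUs and a 512 KiB sysctl value (all three are assumptions for illustration only):

#include <stdio.h>

int main(void)
{
	unsigned long page_shift  = 12;		/* 4 KiB pages (assumption)      */
	unsigned long nr_pages    = 128;	/* data pages, power of two      */
	unsigned long vma_size    = (nr_pages + 1) << page_shift;

	unsigned long mlock_kb    = 512;	/* sysctl value (assumption)     */
	unsigned long online_cpus = 4;
	unsigned long user_limit  = (mlock_kb >> (page_shift - 10)) * online_cpus;

	printf("mmap length %lu bytes, charges %lu pages, user budget %lu pages\n",
	       vma_size, nr_pages + 1, user_limit);
	return 0;
}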
2343 | |||
2344 | static int perf_fasync(int fd, struct file *filp, int on) | ||
2345 | { | ||
2346 | struct inode *inode = filp->f_path.dentry->d_inode; | ||
2347 | struct perf_counter *counter = filp->private_data; | ||
2348 | int retval; | ||
2349 | |||
2350 | mutex_lock(&inode->i_mutex); | ||
2351 | retval = fasync_helper(fd, filp, on, &counter->fasync); | ||
2352 | mutex_unlock(&inode->i_mutex); | ||
2353 | |||
2354 | if (retval < 0) | ||
2355 | return retval; | ||
2356 | |||
2357 | return 0; | ||
2358 | } | ||
2359 | |||
2360 | static const struct file_operations perf_fops = { | ||
2361 | .release = perf_release, | ||
2362 | .read = perf_read, | ||
2363 | .poll = perf_poll, | ||
2364 | .unlocked_ioctl = perf_ioctl, | ||
2365 | .compat_ioctl = perf_ioctl, | ||
2366 | .mmap = perf_mmap, | ||
2367 | .fasync = perf_fasync, | ||
2368 | }; | ||
2369 | |||
2370 | /* | ||
2371 | * Perf counter wakeup | ||
2372 | * | ||
2373 | * If there's data, ensure we set the poll() state and publish everything | ||
2374 | * to user-space before waking everybody up. | ||
2375 | */ | ||
2376 | |||
2377 | void perf_counter_wakeup(struct perf_counter *counter) | ||
2378 | { | ||
2379 | wake_up_all(&counter->waitq); | ||
2380 | |||
2381 | if (counter->pending_kill) { | ||
2382 | kill_fasync(&counter->fasync, SIGIO, counter->pending_kill); | ||
2383 | counter->pending_kill = 0; | ||
2384 | } | ||
2385 | } | ||
2386 | |||
2387 | /* | ||
2388 | * Pending wakeups | ||
2389 | * | ||
2390 | * Handle the case where we need to wake up from NMI (or rq->lock) context. | ||
2391 | * | ||
2392 | * The NMI bit means we cannot possibly take locks. Therefore, maintain a | ||
2393 | * singly-linked list and use cmpxchg() to add entries locklessly. | ||
2394 | */ | ||
2395 | |||
2396 | static void perf_pending_counter(struct perf_pending_entry *entry) | ||
2397 | { | ||
2398 | struct perf_counter *counter = container_of(entry, | ||
2399 | struct perf_counter, pending); | ||
2400 | |||
2401 | if (counter->pending_disable) { | ||
2402 | counter->pending_disable = 0; | ||
2403 | __perf_counter_disable(counter); | ||
2404 | } | ||
2405 | |||
2406 | if (counter->pending_wakeup) { | ||
2407 | counter->pending_wakeup = 0; | ||
2408 | perf_counter_wakeup(counter); | ||
2409 | } | ||
2410 | } | ||
2411 | |||
2412 | #define PENDING_TAIL ((struct perf_pending_entry *)-1UL) | ||
2413 | |||
2414 | static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { | ||
2415 | PENDING_TAIL, | ||
2416 | }; | ||
2417 | |||
2418 | static void perf_pending_queue(struct perf_pending_entry *entry, | ||
2419 | void (*func)(struct perf_pending_entry *)) | ||
2420 | { | ||
2421 | struct perf_pending_entry **head; | ||
2422 | |||
2423 | if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) | ||
2424 | return; | ||
2425 | |||
2426 | entry->func = func; | ||
2427 | |||
2428 | head = &get_cpu_var(perf_pending_head); | ||
2429 | |||
2430 | do { | ||
2431 | entry->next = *head; | ||
2432 | } while (cmpxchg(head, entry->next, entry) != entry->next); | ||
2433 | |||
2434 | set_perf_counter_pending(); | ||
2435 | |||
2436 | put_cpu_var(perf_pending_head); | ||
2437 | } | ||
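perf_pending_queue() is a lock-free LIFO push: the first cmpxchg() claims the entry (its next pointer goes from NULL to the PENDING_TAIL sentinel, so it can only be queued once), and the retry loop then splices it onto the per-CPU list head without ever taking a lock, which is what makes it usable from NMI context. A stand-alone sketch of the same push using GCC's __atomic builtins (struct and names here are illustrative, not the kernel's):

#include <stddef.h>

struct pending {
	struct pending *next;
	void (*func)(struct pending *);
};

#define PENDING_TAIL ((struct pending *)-1UL)	/* list terminator, as above */

/* Lock-free LIFO push; never blocks, so it is usable where locks are not. */
int pending_push(struct pending **head, struct pending *entry,
		 void (*func)(struct pending *))
{
	struct pending *expected = NULL;
	struct pending *old;

	/* Claim the entry: only the first caller actually queues it. */
	if (!__atomic_compare_exchange_n(&entry->next, &expected, PENDING_TAIL,
					 0, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
		return 0;

	entry->func = func;

	/* Classic cmpxchg push: point at the current head, retry if it moved. */
	old = __atomic_load_n(head, __ATOMIC_RELAXED);
	do {
		entry->next = old;
	} while (!__atomic_compare_exchange_n(head, &old, entry, 0,
					      __ATOMIC_RELEASE, __ATOMIC_RELAXED));
	return 1;
}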
2438 | |||
2439 | static int __perf_pending_run(void) | ||
2440 | { | ||
2441 | struct perf_pending_entry *list; | ||
2442 | int nr = 0; | ||
2443 | |||
2444 | list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); | ||
2445 | while (list != PENDING_TAIL) { | ||
2446 | void (*func)(struct perf_pending_entry *); | ||
2447 | struct perf_pending_entry *entry = list; | ||
2448 | |||
2449 | list = list->next; | ||
2450 | |||
2451 | func = entry->func; | ||
2452 | entry->next = NULL; | ||
2453 | /* | ||
2454 | * Ensure we observe the unqueue before we issue the wakeup, | ||
2455 | * so that we won't be waiting forever. | ||
2456 | * -- see perf_not_pending(). | ||
2457 | */ | ||
2458 | smp_wmb(); | ||
2459 | |||
2460 | func(entry); | ||
2461 | nr++; | ||
2462 | } | ||
2463 | |||
2464 | return nr; | ||
2465 | } | ||
2466 | |||
2467 | static inline int perf_not_pending(struct perf_counter *counter) | ||
2468 | { | ||
2469 | /* | ||
2470 | * If we flush on whatever cpu we run, there is a chance we don't | ||
2471 | * need to wait. | ||
2472 | */ | ||
2473 | get_cpu(); | ||
2474 | __perf_pending_run(); | ||
2475 | put_cpu(); | ||
2476 | |||
2477 | /* | ||
2478 | * Ensure we see the proper queue state before going to sleep | ||
2479 | * so that we do not miss the wakeup. -- see perf_pending_handle() | ||
2480 | */ | ||
2481 | smp_rmb(); | ||
2482 | return counter->pending.next == NULL; | ||
2483 | } | ||
2484 | |||
2485 | static void perf_pending_sync(struct perf_counter *counter) | ||
2486 | { | ||
2487 | wait_event(counter->waitq, perf_not_pending(counter)); | ||
2488 | } | ||
2489 | |||
2490 | void perf_counter_do_pending(void) | ||
2491 | { | ||
2492 | __perf_pending_run(); | ||
2493 | } | ||
2494 | |||
2495 | /* | ||
2496 | * Callchain support -- arch specific | ||
2497 | */ | ||
2498 | |||
2499 | __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
2500 | { | ||
2501 | return NULL; | ||
2502 | } | ||
2503 | |||
2504 | /* | ||
2505 | * Output | ||
2506 | */ | ||
2507 | |||
2508 | struct perf_output_handle { | ||
2509 | struct perf_counter *counter; | ||
2510 | struct perf_mmap_data *data; | ||
2511 | unsigned long head; | ||
2512 | unsigned long offset; | ||
2513 | int nmi; | ||
2514 | int sample; | ||
2515 | int locked; | ||
2516 | unsigned long flags; | ||
2517 | }; | ||
2518 | |||
2519 | static bool perf_output_space(struct perf_mmap_data *data, | ||
2520 | unsigned int offset, unsigned int head) | ||
2521 | { | ||
2522 | unsigned long tail; | ||
2523 | unsigned long mask; | ||
2524 | |||
2525 | if (!data->writable) | ||
2526 | return true; | ||
2527 | |||
2528 | mask = (data->nr_pages << PAGE_SHIFT) - 1; | ||
2529 | /* | ||
2530 | * Userspace could choose to issue an mb() before updating the tail | ||
2531 | * pointer, so that all reads will be completed before the write is | ||
2532 | * issued. | ||
2533 | */ | ||
2534 | tail = ACCESS_ONCE(data->user_page->data_tail); | ||
2535 | smp_rmb(); | ||
2536 | |||
2537 | offset = (offset - tail) & mask; | ||
2538 | head = (head - tail) & mask; | ||
2539 | |||
2540 | if ((int)(head - offset) < 0) | ||
2541 | return false; | ||
2542 | |||
2543 | return true; | ||
2544 | } | ||
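perf_output_space() works in tail-relative coordinates: both the current write offset and the proposed new head are reduced modulo the buffer size relative to the reader's data_tail, and the write is allowed only if the new head does not move past the reader. The same check as a stand-alone function with two worked cases (the buffer size must be a power of two, as perf_mmap() enforces):

#include <stdbool.h>
#include <stdio.h>

/* Does a write advancing 'offset' to 'head' stay behind the reader at 'tail'?
 * 'size' must be a power of two. */
static bool output_space(unsigned long size, unsigned long tail,
			 unsigned long offset, unsigned long head)
{
	unsigned long mask = size - 1;

	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	return (long)(head - offset) >= 0;
}

int main(void)
{
	/* 16 KiB buffer, reader at byte 100: a 64-byte record at offset 8000
	 * fits, but a 200-byte record at 16380 would wrap past the reader. */
	printf("%d %d\n",
	       output_space(16384, 100, 8000, 8064),
	       output_space(16384, 100, 16380, 16380 + 200));
	return 0;
}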
2545 | |||
2546 | static void perf_output_wakeup(struct perf_output_handle *handle) | ||
2547 | { | ||
2548 | atomic_set(&handle->data->poll, POLL_IN); | ||
2549 | |||
2550 | if (handle->nmi) { | ||
2551 | handle->counter->pending_wakeup = 1; | ||
2552 | perf_pending_queue(&handle->counter->pending, | ||
2553 | perf_pending_counter); | ||
2554 | } else | ||
2555 | perf_counter_wakeup(handle->counter); | ||
2556 | } | ||
2557 | |||
2558 | /* | ||
2559 | * Curious locking construct. | ||
2560 | * | ||
2561 | * We need to ensure a later event doesn't publish a head when a former | ||
2562 | * event isn't done writing. However since we need to deal with NMIs we | ||
2563 | * cannot fully serialize things. | ||
2564 | * | ||
2565 | * What we do is serialize between CPUs so we only have to deal with NMI | ||
2566 | * nesting on a single CPU. | ||
2567 | * | ||
2568 | * We only publish the head (and generate a wakeup) when the outer-most | ||
2569 | * event completes. | ||
2570 | */ | ||
2571 | static void perf_output_lock(struct perf_output_handle *handle) | ||
2572 | { | ||
2573 | struct perf_mmap_data *data = handle->data; | ||
2574 | int cpu; | ||
2575 | |||
2576 | handle->locked = 0; | ||
2577 | |||
2578 | local_irq_save(handle->flags); | ||
2579 | cpu = smp_processor_id(); | ||
2580 | |||
2581 | if (in_nmi() && atomic_read(&data->lock) == cpu) | ||
2582 | return; | ||
2583 | |||
2584 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | ||
2585 | cpu_relax(); | ||
2586 | |||
2587 | handle->locked = 1; | ||
2588 | } | ||
2589 | |||
2590 | static void perf_output_unlock(struct perf_output_handle *handle) | ||
2591 | { | ||
2592 | struct perf_mmap_data *data = handle->data; | ||
2593 | unsigned long head; | ||
2594 | int cpu; | ||
2595 | |||
2596 | data->done_head = data->head; | ||
2597 | |||
2598 | if (!handle->locked) | ||
2599 | goto out; | ||
2600 | |||
2601 | again: | ||
2602 | /* | ||
2603 | * The xchg implies a full barrier that ensures all writes are done | ||
2604 | * before we publish the new head, matched by a rmb() in userspace when | ||
2605 | * reading this position. | ||
2606 | */ | ||
2607 | while ((head = atomic_long_xchg(&data->done_head, 0))) | ||
2608 | data->user_page->data_head = head; | ||
2609 | |||
2610 | /* | ||
2611 | * NMI can happen here, which means we can miss a done_head update. | ||
2612 | */ | ||
2613 | |||
2614 | cpu = atomic_xchg(&data->lock, -1); | ||
2615 | WARN_ON_ONCE(cpu != smp_processor_id()); | ||
2616 | |||
2617 | /* | ||
2618 | * Therefore we have to validate we did not indeed do so. | ||
2619 | */ | ||
2620 | if (unlikely(atomic_long_read(&data->done_head))) { | ||
2621 | /* | ||
2622 | * Since we had it locked, we can lock it again. | ||
2623 | */ | ||
2624 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | ||
2625 | cpu_relax(); | ||
2626 | |||
2627 | goto again; | ||
2628 | } | ||
2629 | |||
2630 | if (atomic_xchg(&data->wakeup, 0)) | ||
2631 | perf_output_wakeup(handle); | ||
2632 | out: | ||
2633 | local_irq_restore(handle->flags); | ||
2634 | } | ||
2635 | |||
2636 | static void perf_output_copy(struct perf_output_handle *handle, | ||
2637 | const void *buf, unsigned int len) | ||
2638 | { | ||
2639 | unsigned int pages_mask; | ||
2640 | unsigned int offset; | ||
2641 | unsigned int size; | ||
2642 | void **pages; | ||
2643 | |||
2644 | offset = handle->offset; | ||
2645 | pages_mask = handle->data->nr_pages - 1; | ||
2646 | pages = handle->data->data_pages; | ||
2647 | |||
2648 | do { | ||
2649 | unsigned int page_offset; | ||
2650 | int nr; | ||
2651 | |||
2652 | nr = (offset >> PAGE_SHIFT) & pages_mask; | ||
2653 | page_offset = offset & (PAGE_SIZE - 1); | ||
2654 | size = min_t(unsigned int, PAGE_SIZE - page_offset, len); | ||
2655 | |||
2656 | memcpy(pages[nr] + page_offset, buf, size); | ||
2657 | |||
2658 | len -= size; | ||
2659 | buf += size; | ||
2660 | offset += size; | ||
2661 | } while (len); | ||
2662 | |||
2663 | handle->offset = offset; | ||
2664 | |||
2665 | /* | ||
2666 | * Check we didn't copy past our reservation window, taking the | ||
2667 | * possible unsigned int wrap into account. | ||
2668 | */ | ||
2669 | WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); | ||
2670 | } | ||
2671 | |||
2672 | #define perf_output_put(handle, x) \ | ||
2673 | perf_output_copy((handle), &(x), sizeof(x)) | ||
2674 | |||
2675 | static int perf_output_begin(struct perf_output_handle *handle, | ||
2676 | struct perf_counter *counter, unsigned int size, | ||
2677 | int nmi, int sample) | ||
2678 | { | ||
2679 | struct perf_counter *output_counter; | ||
2680 | struct perf_mmap_data *data; | ||
2681 | unsigned int offset, head; | ||
2682 | int have_lost; | ||
2683 | struct { | ||
2684 | struct perf_event_header header; | ||
2685 | u64 id; | ||
2686 | u64 lost; | ||
2687 | } lost_event; | ||
2688 | |||
2689 | rcu_read_lock(); | ||
2690 | /* | ||
2691 | * For inherited counters we send all the output towards the parent. | ||
2692 | */ | ||
2693 | if (counter->parent) | ||
2694 | counter = counter->parent; | ||
2695 | |||
2696 | output_counter = rcu_dereference(counter->output); | ||
2697 | if (output_counter) | ||
2698 | counter = output_counter; | ||
2699 | |||
2700 | data = rcu_dereference(counter->data); | ||
2701 | if (!data) | ||
2702 | goto out; | ||
2703 | |||
2704 | handle->data = data; | ||
2705 | handle->counter = counter; | ||
2706 | handle->nmi = nmi; | ||
2707 | handle->sample = sample; | ||
2708 | |||
2709 | if (!data->nr_pages) | ||
2710 | goto fail; | ||
2711 | |||
2712 | have_lost = atomic_read(&data->lost); | ||
2713 | if (have_lost) | ||
2714 | size += sizeof(lost_event); | ||
2715 | |||
2716 | perf_output_lock(handle); | ||
2717 | |||
2718 | do { | ||
2719 | offset = head = atomic_long_read(&data->head); | ||
2720 | head += size; | ||
2721 | if (unlikely(!perf_output_space(data, offset, head))) | ||
2722 | goto fail; | ||
2723 | } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); | ||
2724 | |||
2725 | handle->offset = offset; | ||
2726 | handle->head = head; | ||
2727 | |||
2728 | if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT)) | ||
2729 | atomic_set(&data->wakeup, 1); | ||
2730 | |||
2731 | if (have_lost) { | ||
2732 | lost_event.header.type = PERF_EVENT_LOST; | ||
2733 | lost_event.header.misc = 0; | ||
2734 | lost_event.header.size = sizeof(lost_event); | ||
2735 | lost_event.id = counter->id; | ||
2736 | lost_event.lost = atomic_xchg(&data->lost, 0); | ||
2737 | |||
2738 | perf_output_put(handle, lost_event); | ||
2739 | } | ||
2740 | |||
2741 | return 0; | ||
2742 | |||
2743 | fail: | ||
2744 | atomic_inc(&data->lost); | ||
2745 | perf_output_unlock(handle); | ||
2746 | out: | ||
2747 | rcu_read_unlock(); | ||
2748 | |||
2749 | return -ENOSPC; | ||
2750 | } | ||
2751 | |||
2752 | static void perf_output_end(struct perf_output_handle *handle) | ||
2753 | { | ||
2754 | struct perf_counter *counter = handle->counter; | ||
2755 | struct perf_mmap_data *data = handle->data; | ||
2756 | |||
2757 | int wakeup_events = counter->attr.wakeup_events; | ||
2758 | |||
2759 | if (handle->sample && wakeup_events) { | ||
2760 | int events = atomic_inc_return(&data->events); | ||
2761 | if (events >= wakeup_events) { | ||
2762 | atomic_sub(wakeup_events, &data->events); | ||
2763 | atomic_set(&data->wakeup, 1); | ||
2764 | } | ||
2765 | } | ||
2766 | |||
2767 | perf_output_unlock(handle); | ||
2768 | rcu_read_unlock(); | ||
2769 | } | ||
2770 | |||
2771 | static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p) | ||
2772 | { | ||
2773 | /* | ||
2774 | * only top level counters have the pid namespace they were created in | ||
2775 | */ | ||
2776 | if (counter->parent) | ||
2777 | counter = counter->parent; | ||
2778 | |||
2779 | return task_tgid_nr_ns(p, counter->ns); | ||
2780 | } | ||
2781 | |||
2782 | static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) | ||
2783 | { | ||
2784 | /* | ||
2785 | * only top level counters have the pid namespace they were created in | ||
2786 | */ | ||
2787 | if (counter->parent) | ||
2788 | counter = counter->parent; | ||
2789 | |||
2790 | return task_pid_nr_ns(p, counter->ns); | ||
2791 | } | ||
2792 | |||
2793 | static void perf_output_read_one(struct perf_output_handle *handle, | ||
2794 | struct perf_counter *counter) | ||
2795 | { | ||
2796 | u64 read_format = counter->attr.read_format; | ||
2797 | u64 values[4]; | ||
2798 | int n = 0; | ||
2799 | |||
2800 | values[n++] = atomic64_read(&counter->count); | ||
2801 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
2802 | values[n++] = counter->total_time_enabled + | ||
2803 | atomic64_read(&counter->child_total_time_enabled); | ||
2804 | } | ||
2805 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
2806 | values[n++] = counter->total_time_running + | ||
2807 | atomic64_read(&counter->child_total_time_running); | ||
2808 | } | ||
2809 | if (read_format & PERF_FORMAT_ID) | ||
2810 | values[n++] = primary_counter_id(counter); | ||
2811 | |||
2812 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2813 | } | ||
2814 | |||
2815 | /* | ||
2816 | * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult. | ||
2817 | */ | ||
2818 | static void perf_output_read_group(struct perf_output_handle *handle, | ||
2819 | struct perf_counter *counter) | ||
2820 | { | ||
2821 | struct perf_counter *leader = counter->group_leader, *sub; | ||
2822 | u64 read_format = counter->attr.read_format; | ||
2823 | u64 values[5]; | ||
2824 | int n = 0; | ||
2825 | |||
2826 | values[n++] = 1 + leader->nr_siblings; | ||
2827 | |||
2828 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
2829 | values[n++] = leader->total_time_enabled; | ||
2830 | |||
2831 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
2832 | values[n++] = leader->total_time_running; | ||
2833 | |||
2834 | if (leader != counter) | ||
2835 | leader->pmu->read(leader); | ||
2836 | |||
2837 | values[n++] = atomic64_read(&leader->count); | ||
2838 | if (read_format & PERF_FORMAT_ID) | ||
2839 | values[n++] = primary_counter_id(leader); | ||
2840 | |||
2841 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2842 | |||
2843 | list_for_each_entry(sub, &leader->sibling_list, list_entry) { | ||
2844 | n = 0; | ||
2845 | |||
2846 | if (sub != counter) | ||
2847 | sub->pmu->read(sub); | ||
2848 | |||
2849 | values[n++] = atomic64_read(&sub->count); | ||
2850 | if (read_format & PERF_FORMAT_ID) | ||
2851 | values[n++] = primary_counter_id(sub); | ||
2852 | |||
2853 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2854 | } | ||
2855 | } | ||
2856 | |||
2857 | static void perf_output_read(struct perf_output_handle *handle, | ||
2858 | struct perf_counter *counter) | ||
2859 | { | ||
2860 | if (counter->attr.read_format & PERF_FORMAT_GROUP) | ||
2861 | perf_output_read_group(handle, counter); | ||
2862 | else | ||
2863 | perf_output_read_one(handle, counter); | ||
2864 | } | ||
2865 | |||
2866 | void perf_counter_output(struct perf_counter *counter, int nmi, | ||
2867 | struct perf_sample_data *data) | ||
2868 | { | ||
2869 | int ret; | ||
2870 | u64 sample_type = counter->attr.sample_type; | ||
2871 | struct perf_output_handle handle; | ||
2872 | struct perf_event_header header; | ||
2873 | u64 ip; | ||
2874 | struct { | ||
2875 | u32 pid, tid; | ||
2876 | } tid_entry; | ||
2877 | struct perf_callchain_entry *callchain = NULL; | ||
2878 | int callchain_size = 0; | ||
2879 | u64 time; | ||
2880 | struct { | ||
2881 | u32 cpu, reserved; | ||
2882 | } cpu_entry; | ||
2883 | |||
2884 | header.type = PERF_EVENT_SAMPLE; | ||
2885 | header.size = sizeof(header); | ||
2886 | |||
2887 | header.misc = 0; | ||
2888 | header.misc |= perf_misc_flags(data->regs); | ||
2889 | |||
2890 | if (sample_type & PERF_SAMPLE_IP) { | ||
2891 | ip = perf_instruction_pointer(data->regs); | ||
2892 | header.size += sizeof(ip); | ||
2893 | } | ||
2894 | |||
2895 | if (sample_type & PERF_SAMPLE_TID) { | ||
2896 | /* namespace issues */ | ||
2897 | tid_entry.pid = perf_counter_pid(counter, current); | ||
2898 | tid_entry.tid = perf_counter_tid(counter, current); | ||
2899 | |||
2900 | header.size += sizeof(tid_entry); | ||
2901 | } | ||
2902 | |||
2903 | if (sample_type & PERF_SAMPLE_TIME) { | ||
2904 | /* | ||
2905 | * Maybe do better on x86 and provide cpu_clock_nmi() | ||
2906 | */ | ||
2907 | time = sched_clock(); | ||
2908 | |||
2909 | header.size += sizeof(u64); | ||
2910 | } | ||
2911 | |||
2912 | if (sample_type & PERF_SAMPLE_ADDR) | ||
2913 | header.size += sizeof(u64); | ||
2914 | |||
2915 | if (sample_type & PERF_SAMPLE_ID) | ||
2916 | header.size += sizeof(u64); | ||
2917 | |||
2918 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
2919 | header.size += sizeof(u64); | ||
2920 | |||
2921 | if (sample_type & PERF_SAMPLE_CPU) { | ||
2922 | header.size += sizeof(cpu_entry); | ||
2923 | |||
2924 | cpu_entry.cpu = raw_smp_processor_id(); | ||
2925 | cpu_entry.reserved = 0; | ||
2926 | } | ||
2927 | |||
2928 | if (sample_type & PERF_SAMPLE_PERIOD) | ||
2929 | header.size += sizeof(u64); | ||
2930 | |||
2931 | if (sample_type & PERF_SAMPLE_READ) | ||
2932 | header.size += perf_counter_read_size(counter); | ||
2933 | |||
2934 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | ||
2935 | callchain = perf_callchain(data->regs); | ||
2936 | |||
2937 | if (callchain) { | ||
2938 | callchain_size = (1 + callchain->nr) * sizeof(u64); | ||
2939 | header.size += callchain_size; | ||
2940 | } else | ||
2941 | header.size += sizeof(u64); | ||
2942 | } | ||
2943 | |||
2944 | if (sample_type & PERF_SAMPLE_RAW) { | ||
2945 | int size = sizeof(u32); | ||
2946 | |||
2947 | if (data->raw) | ||
2948 | size += data->raw->size; | ||
2949 | else | ||
2950 | size += sizeof(u32); | ||
2951 | |||
2952 | WARN_ON_ONCE(size & (sizeof(u64)-1)); | ||
2953 | header.size += size; | ||
2954 | } | ||
2955 | |||
2956 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); | ||
2957 | if (ret) | ||
2958 | return; | ||
2959 | |||
2960 | perf_output_put(&handle, header); | ||
2961 | |||
2962 | if (sample_type & PERF_SAMPLE_IP) | ||
2963 | perf_output_put(&handle, ip); | ||
2964 | |||
2965 | if (sample_type & PERF_SAMPLE_TID) | ||
2966 | perf_output_put(&handle, tid_entry); | ||
2967 | |||
2968 | if (sample_type & PERF_SAMPLE_TIME) | ||
2969 | perf_output_put(&handle, time); | ||
2970 | |||
2971 | if (sample_type & PERF_SAMPLE_ADDR) | ||
2972 | perf_output_put(&handle, data->addr); | ||
2973 | |||
2974 | if (sample_type & PERF_SAMPLE_ID) { | ||
2975 | u64 id = primary_counter_id(counter); | ||
2976 | |||
2977 | perf_output_put(&handle, id); | ||
2978 | } | ||
2979 | |||
2980 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
2981 | perf_output_put(&handle, counter->id); | ||
2982 | |||
2983 | if (sample_type & PERF_SAMPLE_CPU) | ||
2984 | perf_output_put(&handle, cpu_entry); | ||
2985 | |||
2986 | if (sample_type & PERF_SAMPLE_PERIOD) | ||
2987 | perf_output_put(&handle, data->period); | ||
2988 | |||
2989 | if (sample_type & PERF_SAMPLE_READ) | ||
2990 | perf_output_read(&handle, counter); | ||
2991 | |||
2992 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | ||
2993 | if (callchain) | ||
2994 | perf_output_copy(&handle, callchain, callchain_size); | ||
2995 | else { | ||
2996 | u64 nr = 0; | ||
2997 | perf_output_put(&handle, nr); | ||
2998 | } | ||
2999 | } | ||
3000 | |||
3001 | if (sample_type & PERF_SAMPLE_RAW) { | ||
3002 | if (data->raw) { | ||
3003 | perf_output_put(&handle, data->raw->size); | ||
3004 | perf_output_copy(&handle, data->raw->data, data->raw->size); | ||
3005 | } else { | ||
3006 | struct { | ||
3007 | u32 size; | ||
3008 | u32 data; | ||
3009 | } raw = { | ||
3010 | .size = sizeof(u32), | ||
3011 | .data = 0, | ||
3012 | }; | ||
3013 | perf_output_put(&handle, raw); | ||
3014 | } | ||
3015 | } | ||
3016 | |||
3017 | perf_output_end(&handle); | ||
3018 | } | ||
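perf_counter_output() first sizes the record by walking sample_type and then emits the fields in that same fixed order. For a concrete picture, a record with sample_type = IP | TID | TIME | CPU | PERIOD would lay out roughly as below; this is a sketch for illustration, not a uapi definition, and the header layout (u32 type, u16 misc, u16 size) is taken from the perf_event_header declared elsewhere in the tree:

#include <stdint.h>

struct sample_record {
	struct {
		uint32_t type;		/* PERF_EVENT_SAMPLE                   */
		uint16_t misc;		/* perf_misc_flags() of the regs       */
		uint16_t size;		/* total record size, header included  */
	} header;
	uint64_t ip;			/* PERF_SAMPLE_IP     */
	uint32_t pid, tid;		/* PERF_SAMPLE_TID    */
	uint64_t time;			/* PERF_SAMPLE_TIME   */
	uint32_t cpu, reserved;		/* PERF_SAMPLE_CPU    */
	uint64_t period;		/* PERF_SAMPLE_PERIOD */
};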
3019 | |||
3020 | /* | ||
3021 | * read event | ||
3022 | */ | ||
3023 | |||
3024 | struct perf_read_event { | ||
3025 | struct perf_event_header header; | ||
3026 | |||
3027 | u32 pid; | ||
3028 | u32 tid; | ||
3029 | }; | ||
3030 | |||
3031 | static void | ||
3032 | perf_counter_read_event(struct perf_counter *counter, | ||
3033 | struct task_struct *task) | ||
3034 | { | ||
3035 | struct perf_output_handle handle; | ||
3036 | struct perf_read_event event = { | ||
3037 | .header = { | ||
3038 | .type = PERF_EVENT_READ, | ||
3039 | .misc = 0, | ||
3040 | .size = sizeof(event) + perf_counter_read_size(counter), | ||
3041 | }, | ||
3042 | .pid = perf_counter_pid(counter, task), | ||
3043 | .tid = perf_counter_tid(counter, task), | ||
3044 | }; | ||
3045 | int ret; | ||
3046 | |||
3047 | ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); | ||
3048 | if (ret) | ||
3049 | return; | ||
3050 | |||
3051 | perf_output_put(&handle, event); | ||
3052 | perf_output_read(&handle, counter); | ||
3053 | |||
3054 | perf_output_end(&handle); | ||
3055 | } | ||
3056 | |||
3057 | /* | ||
3058 | * task tracking -- fork/exit | ||
3059 | * | ||
3060 | * enabled by: attr.comm | attr.mmap | attr.task | ||
3061 | */ | ||
3062 | |||
3063 | struct perf_task_event { | ||
3064 | struct task_struct *task; | ||
3065 | struct perf_counter_context *task_ctx; | ||
3066 | |||
3067 | struct { | ||
3068 | struct perf_event_header header; | ||
3069 | |||
3070 | u32 pid; | ||
3071 | u32 ppid; | ||
3072 | u32 tid; | ||
3073 | u32 ptid; | ||
3074 | } event; | ||
3075 | }; | ||
3076 | |||
3077 | static void perf_counter_task_output(struct perf_counter *counter, | ||
3078 | struct perf_task_event *task_event) | ||
3079 | { | ||
3080 | struct perf_output_handle handle; | ||
3081 | int size = task_event->event.header.size; | ||
3082 | struct task_struct *task = task_event->task; | ||
3083 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | ||
3084 | |||
3085 | if (ret) | ||
3086 | return; | ||
3087 | |||
3088 | task_event->event.pid = perf_counter_pid(counter, task); | ||
3089 | task_event->event.ppid = perf_counter_pid(counter, current); | ||
3090 | |||
3091 | task_event->event.tid = perf_counter_tid(counter, task); | ||
3092 | task_event->event.ptid = perf_counter_tid(counter, current); | ||
3093 | |||
3094 | perf_output_put(&handle, task_event->event); | ||
3095 | perf_output_end(&handle); | ||
3096 | } | ||
3097 | |||
3098 | static int perf_counter_task_match(struct perf_counter *counter) | ||
3099 | { | ||
3100 | if (counter->attr.comm || counter->attr.mmap || counter->attr.task) | ||
3101 | return 1; | ||
3102 | |||
3103 | return 0; | ||
3104 | } | ||
3105 | |||
3106 | static void perf_counter_task_ctx(struct perf_counter_context *ctx, | ||
3107 | struct perf_task_event *task_event) | ||
3108 | { | ||
3109 | struct perf_counter *counter; | ||
3110 | |||
3111 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3112 | return; | ||
3113 | |||
3114 | rcu_read_lock(); | ||
3115 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | ||
3116 | if (perf_counter_task_match(counter)) | ||
3117 | perf_counter_task_output(counter, task_event); | ||
3118 | } | ||
3119 | rcu_read_unlock(); | ||
3120 | } | ||
3121 | |||
3122 | static void perf_counter_task_event(struct perf_task_event *task_event) | ||
3123 | { | ||
3124 | struct perf_cpu_context *cpuctx; | ||
3125 | struct perf_counter_context *ctx = task_event->task_ctx; | ||
3126 | |||
3127 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
3128 | perf_counter_task_ctx(&cpuctx->ctx, task_event); | ||
3129 | put_cpu_var(perf_cpu_context); | ||
3130 | |||
3131 | rcu_read_lock(); | ||
3132 | if (!ctx) | ||
3133 | ctx = rcu_dereference(task_event->task->perf_counter_ctxp); | ||
3134 | if (ctx) | ||
3135 | perf_counter_task_ctx(ctx, task_event); | ||
3136 | rcu_read_unlock(); | ||
3137 | } | ||
3138 | |||
3139 | static void perf_counter_task(struct task_struct *task, | ||
3140 | struct perf_counter_context *task_ctx, | ||
3141 | int new) | ||
3142 | { | ||
3143 | struct perf_task_event task_event; | ||
3144 | |||
3145 | if (!atomic_read(&nr_comm_counters) && | ||
3146 | !atomic_read(&nr_mmap_counters) && | ||
3147 | !atomic_read(&nr_task_counters)) | ||
3148 | return; | ||
3149 | |||
3150 | task_event = (struct perf_task_event){ | ||
3151 | .task = task, | ||
3152 | .task_ctx = task_ctx, | ||
3153 | .event = { | ||
3154 | .header = { | ||
3155 | .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, | ||
3156 | .misc = 0, | ||
3157 | .size = sizeof(task_event.event), | ||
3158 | }, | ||
3159 | /* .pid */ | ||
3160 | /* .ppid */ | ||
3161 | /* .tid */ | ||
3162 | /* .ptid */ | ||
3163 | }, | ||
3164 | }; | ||
3165 | |||
3166 | perf_counter_task_event(&task_event); | ||
3167 | } | ||
3168 | |||
3169 | void perf_counter_fork(struct task_struct *task) | ||
3170 | { | ||
3171 | perf_counter_task(task, NULL, 1); | ||
3172 | } | ||
3173 | |||
3174 | /* | ||
3175 | * comm tracking | ||
3176 | */ | ||
3177 | |||
3178 | struct perf_comm_event { | ||
3179 | struct task_struct *task; | ||
3180 | char *comm; | ||
3181 | int comm_size; | ||
3182 | |||
3183 | struct { | ||
3184 | struct perf_event_header header; | ||
3185 | |||
3186 | u32 pid; | ||
3187 | u32 tid; | ||
3188 | } event; | ||
3189 | }; | ||
3190 | |||
3191 | static void perf_counter_comm_output(struct perf_counter *counter, | ||
3192 | struct perf_comm_event *comm_event) | ||
3193 | { | ||
3194 | struct perf_output_handle handle; | ||
3195 | int size = comm_event->event.header.size; | ||
3196 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | ||
3197 | |||
3198 | if (ret) | ||
3199 | return; | ||
3200 | |||
3201 | comm_event->event.pid = perf_counter_pid(counter, comm_event->task); | ||
3202 | comm_event->event.tid = perf_counter_tid(counter, comm_event->task); | ||
3203 | |||
3204 | perf_output_put(&handle, comm_event->event); | ||
3205 | perf_output_copy(&handle, comm_event->comm, | ||
3206 | comm_event->comm_size); | ||
3207 | perf_output_end(&handle); | ||
3208 | } | ||
3209 | |||
3210 | static int perf_counter_comm_match(struct perf_counter *counter) | ||
3211 | { | ||
3212 | if (counter->attr.comm) | ||
3213 | return 1; | ||
3214 | |||
3215 | return 0; | ||
3216 | } | ||
3217 | |||
3218 | static void perf_counter_comm_ctx(struct perf_counter_context *ctx, | ||
3219 | struct perf_comm_event *comm_event) | ||
3220 | { | ||
3221 | struct perf_counter *counter; | ||
3222 | |||
3223 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3224 | return; | ||
3225 | |||
3226 | rcu_read_lock(); | ||
3227 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | ||
3228 | if (perf_counter_comm_match(counter)) | ||
3229 | perf_counter_comm_output(counter, comm_event); | ||
3230 | } | ||
3231 | rcu_read_unlock(); | ||
3232 | } | ||
3233 | |||
3234 | static void perf_counter_comm_event(struct perf_comm_event *comm_event) | ||
3235 | { | ||
3236 | struct perf_cpu_context *cpuctx; | ||
3237 | struct perf_counter_context *ctx; | ||
3238 | unsigned int size; | ||
3239 | char comm[TASK_COMM_LEN]; | ||
3240 | |||
3241 | memset(comm, 0, sizeof(comm)); | ||
3242 | strncpy(comm, comm_event->task->comm, sizeof(comm)); | ||
3243 | size = ALIGN(strlen(comm)+1, sizeof(u64)); | ||
3244 | |||
3245 | comm_event->comm = comm; | ||
3246 | comm_event->comm_size = size; | ||
3247 | |||
3248 | comm_event->event.header.size = sizeof(comm_event->event) + size; | ||
3249 | |||
3250 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
3251 | perf_counter_comm_ctx(&cpuctx->ctx, comm_event); | ||
3252 | put_cpu_var(perf_cpu_context); | ||
3253 | |||
3254 | rcu_read_lock(); | ||
3255 | /* | ||
3256 | * doesn't really matter which of the child contexts the | ||
3257 | * event ends up in. | ||
3258 | */ | ||
3259 | ctx = rcu_dereference(current->perf_counter_ctxp); | ||
3260 | if (ctx) | ||
3261 | perf_counter_comm_ctx(ctx, comm_event); | ||
3262 | rcu_read_unlock(); | ||
3263 | } | ||
3264 | |||
3265 | void perf_counter_comm(struct task_struct *task) | ||
3266 | { | ||
3267 | struct perf_comm_event comm_event; | ||
3268 | |||
3269 | if (task->perf_counter_ctxp) | ||
3270 | perf_counter_enable_on_exec(task); | ||
3271 | |||
3272 | if (!atomic_read(&nr_comm_counters)) | ||
3273 | return; | ||
3274 | |||
3275 | comm_event = (struct perf_comm_event){ | ||
3276 | .task = task, | ||
3277 | /* .comm */ | ||
3278 | /* .comm_size */ | ||
3279 | .event = { | ||
3280 | .header = { | ||
3281 | .type = PERF_EVENT_COMM, | ||
3282 | .misc = 0, | ||
3283 | /* .size */ | ||
3284 | }, | ||
3285 | /* .pid */ | ||
3286 | /* .tid */ | ||
3287 | }, | ||
3288 | }; | ||
3289 | |||
3290 | perf_counter_comm_event(&comm_event); | ||
3291 | } | ||
3292 | |||
3293 | /* | ||
3294 | * mmap tracking | ||
3295 | */ | ||
3296 | |||
3297 | struct perf_mmap_event { | ||
3298 | struct vm_area_struct *vma; | ||
3299 | |||
3300 | const char *file_name; | ||
3301 | int file_size; | ||
3302 | |||
3303 | struct { | ||
3304 | struct perf_event_header header; | ||
3305 | |||
3306 | u32 pid; | ||
3307 | u32 tid; | ||
3308 | u64 start; | ||
3309 | u64 len; | ||
3310 | u64 pgoff; | ||
3311 | } event; | ||
3312 | }; | ||
3313 | |||
3314 | static void perf_counter_mmap_output(struct perf_counter *counter, | ||
3315 | struct perf_mmap_event *mmap_event) | ||
3316 | { | ||
3317 | struct perf_output_handle handle; | ||
3318 | int size = mmap_event->event.header.size; | ||
3319 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | ||
3320 | |||
3321 | if (ret) | ||
3322 | return; | ||
3323 | |||
3324 | mmap_event->event.pid = perf_counter_pid(counter, current); | ||
3325 | mmap_event->event.tid = perf_counter_tid(counter, current); | ||
3326 | |||
3327 | perf_output_put(&handle, mmap_event->event); | ||
3328 | perf_output_copy(&handle, mmap_event->file_name, | ||
3329 | mmap_event->file_size); | ||
3330 | perf_output_end(&handle); | ||
3331 | } | ||
3332 | |||
3333 | static int perf_counter_mmap_match(struct perf_counter *counter, | ||
3334 | struct perf_mmap_event *mmap_event) | ||
3335 | { | ||
3336 | if (counter->attr.mmap) | ||
3337 | return 1; | ||
3338 | |||
3339 | return 0; | ||
3340 | } | ||
3341 | |||
3342 | static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, | ||
3343 | struct perf_mmap_event *mmap_event) | ||
3344 | { | ||
3345 | struct perf_counter *counter; | ||
3346 | |||
3347 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3348 | return; | ||
3349 | |||
3350 | rcu_read_lock(); | ||
3351 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | ||
3352 | if (perf_counter_mmap_match(counter, mmap_event)) | ||
3353 | perf_counter_mmap_output(counter, mmap_event); | ||
3354 | } | ||
3355 | rcu_read_unlock(); | ||
3356 | } | ||
3357 | |||
3358 | static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) | ||
3359 | { | ||
3360 | struct perf_cpu_context *cpuctx; | ||
3361 | struct perf_counter_context *ctx; | ||
3362 | struct vm_area_struct *vma = mmap_event->vma; | ||
3363 | struct file *file = vma->vm_file; | ||
3364 | unsigned int size; | ||
3365 | char tmp[16]; | ||
3366 | char *buf = NULL; | ||
3367 | const char *name; | ||
3368 | |||
3369 | memset(tmp, 0, sizeof(tmp)); | ||
3370 | |||
3371 | if (file) { | ||
3372 | /* | ||
3373 | * d_path works from the end of the buffer backwards, so we | ||
3374 | * need to add enough zero bytes after the string to handle | ||
3375 | * the 64bit alignment we do later. | ||
3376 | */ | ||
3377 | buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); | ||
3378 | if (!buf) { | ||
3379 | name = strncpy(tmp, "//enomem", sizeof(tmp)); | ||
3380 | goto got_name; | ||
3381 | } | ||
3382 | name = d_path(&file->f_path, buf, PATH_MAX); | ||
3383 | if (IS_ERR(name)) { | ||
3384 | name = strncpy(tmp, "//toolong", sizeof(tmp)); | ||
3385 | goto got_name; | ||
3386 | } | ||
3387 | } else { | ||
3388 | if (arch_vma_name(mmap_event->vma)) { | ||
3389 | name = strncpy(tmp, arch_vma_name(mmap_event->vma), | ||
3390 | sizeof(tmp)); | ||
3391 | goto got_name; | ||
3392 | } | ||
3393 | |||
3394 | if (!vma->vm_mm) { | ||
3395 | name = strncpy(tmp, "[vdso]", sizeof(tmp)); | ||
3396 | goto got_name; | ||
3397 | } | ||
3398 | |||
3399 | name = strncpy(tmp, "//anon", sizeof(tmp)); | ||
3400 | goto got_name; | ||
3401 | } | ||
3402 | |||
3403 | got_name: | ||
3404 | size = ALIGN(strlen(name)+1, sizeof(u64)); | ||
3405 | |||
3406 | mmap_event->file_name = name; | ||
3407 | mmap_event->file_size = size; | ||
3408 | |||
3409 | mmap_event->event.header.size = sizeof(mmap_event->event) + size; | ||
3410 | |||
3411 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
3412 | perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); | ||
3413 | put_cpu_var(perf_cpu_context); | ||
3414 | |||
3415 | rcu_read_lock(); | ||
3416 | /* | ||
3417 | * doesn't really matter which of the child contexts the | ||
3418 | * event ends up in. | ||
3419 | */ | ||
3420 | ctx = rcu_dereference(current->perf_counter_ctxp); | ||
3421 | if (ctx) | ||
3422 | perf_counter_mmap_ctx(ctx, mmap_event); | ||
3423 | rcu_read_unlock(); | ||
3424 | |||
3425 | kfree(buf); | ||
3426 | } | ||
3427 | |||
3428 | void __perf_counter_mmap(struct vm_area_struct *vma) | ||
3429 | { | ||
3430 | struct perf_mmap_event mmap_event; | ||
3431 | |||
3432 | if (!atomic_read(&nr_mmap_counters)) | ||
3433 | return; | ||
3434 | |||
3435 | mmap_event = (struct perf_mmap_event){ | ||
3436 | .vma = vma, | ||
3437 | /* .file_name */ | ||
3438 | /* .file_size */ | ||
3439 | .event = { | ||
3440 | .header = { | ||
3441 | .type = PERF_EVENT_MMAP, | ||
3442 | .misc = 0, | ||
3443 | /* .size */ | ||
3444 | }, | ||
3445 | /* .pid */ | ||
3446 | /* .tid */ | ||
3447 | .start = vma->vm_start, | ||
3448 | .len = vma->vm_end - vma->vm_start, | ||
3449 | .pgoff = vma->vm_pgoff, | ||
3450 | }, | ||
3451 | }; | ||
3452 | |||
3453 | perf_counter_mmap_event(&mmap_event); | ||
3454 | } | ||
3455 | |||
3456 | /* | ||
3457 | * IRQ throttle logging | ||
3458 | */ | ||
3459 | |||
3460 | static void perf_log_throttle(struct perf_counter *counter, int enable) | ||
3461 | { | ||
3462 | struct perf_output_handle handle; | ||
3463 | int ret; | ||
3464 | |||
3465 | struct { | ||
3466 | struct perf_event_header header; | ||
3467 | u64 time; | ||
3468 | u64 id; | ||
3469 | u64 stream_id; | ||
3470 | } throttle_event = { | ||
3471 | .header = { | ||
3472 | .type = PERF_EVENT_THROTTLE, | ||
3473 | .misc = 0, | ||
3474 | .size = sizeof(throttle_event), | ||
3475 | }, | ||
3476 | .time = sched_clock(), | ||
3477 | .id = primary_counter_id(counter), | ||
3478 | .stream_id = counter->id, | ||
3479 | }; | ||
3480 | |||
3481 | if (enable) | ||
3482 | throttle_event.header.type = PERF_EVENT_UNTHROTTLE; | ||
3483 | |||
3484 | ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); | ||
3485 | if (ret) | ||
3486 | return; | ||
3487 | |||
3488 | perf_output_put(&handle, throttle_event); | ||
3489 | perf_output_end(&handle); | ||
3490 | } | ||
3491 | |||
3492 | /* | ||
3493 | * Generic counter overflow handling, sampling. | ||
3494 | */ | ||
3495 | |||
3496 | int perf_counter_overflow(struct perf_counter *counter, int nmi, | ||
3497 | struct perf_sample_data *data) | ||
3498 | { | ||
3499 | int events = atomic_read(&counter->event_limit); | ||
3500 | int throttle = counter->pmu->unthrottle != NULL; | ||
3501 | struct hw_perf_counter *hwc = &counter->hw; | ||
3502 | int ret = 0; | ||
3503 | |||
3504 | if (!throttle) { | ||
3505 | hwc->interrupts++; | ||
3506 | } else { | ||
3507 | if (hwc->interrupts != MAX_INTERRUPTS) { | ||
3508 | hwc->interrupts++; | ||
3509 | if (HZ * hwc->interrupts > | ||
3510 | (u64)sysctl_perf_counter_sample_rate) { | ||
3511 | hwc->interrupts = MAX_INTERRUPTS; | ||
3512 | perf_log_throttle(counter, 0); | ||
3513 | ret = 1; | ||
3514 | } | ||
3515 | } else { | ||
3516 | /* | ||
3517 | * Keep re-disabling the counter even though we disabled it | ||
3518 | * on the previous pass - just in case we raced with a | ||
3519 | * sched-in and it got enabled again: | ||
3520 | */ | ||
3521 | ret = 1; | ||
3522 | } | ||
3523 | } | ||
3524 | |||
3525 | if (counter->attr.freq) { | ||
3526 | u64 now = sched_clock(); | ||
3527 | s64 delta = now - hwc->freq_stamp; | ||
3528 | |||
3529 | hwc->freq_stamp = now; | ||
3530 | |||
3531 | if (delta > 0 && delta < TICK_NSEC) | ||
3532 | perf_adjust_period(counter, NSEC_PER_SEC / (int)delta); | ||
3533 | } | ||
3534 | |||
3535 | /* | ||
3536 | * XXX event_limit might not quite work as expected on inherited | ||
3537 | * counters | ||
3538 | */ | ||
3539 | |||
3540 | counter->pending_kill = POLL_IN; | ||
3541 | if (events && atomic_dec_and_test(&counter->event_limit)) { | ||
3542 | ret = 1; | ||
3543 | counter->pending_kill = POLL_HUP; | ||
3544 | if (nmi) { | ||
3545 | counter->pending_disable = 1; | ||
3546 | perf_pending_queue(&counter->pending, | ||
3547 | perf_pending_counter); | ||
3548 | } else | ||
3549 | perf_counter_disable(counter); | ||
3550 | } | ||
3551 | |||
3552 | perf_counter_output(counter, nmi, data); | ||
3553 | return ret; | ||
3554 | } | ||
3555 | |||
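To make the throttling condition in perf_counter_overflow() concrete, here is a small standalone sketch; the HZ and sample-rate values are illustrative assumptions (both are configuration dependent), not the defaults of any particular build.

/*
 * Illustrative, userspace-buildable sketch of the check
 * "HZ * hwc->interrupts > sysctl_perf_counter_sample_rate":
 * with the assumed values below, a counter gets throttled once it
 * takes more than 100 overflow interrupts within the window.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long hz = 1000;			/* assumed HZ */
	const unsigned long sample_rate = 100000;	/* assumed sysctl value */
	unsigned long interrupts;

	for (interrupts = 99; interrupts <= 101; interrupts++)
		printf("%3lu interrupts -> %s\n", interrupts,
		       hz * interrupts > sample_rate ? "throttle" : "ok");
	return 0;
}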
3556 | /* | ||
3557 | * Generic software counter infrastructure | ||
3558 | */ | ||
3559 | |||
3560 | /* | ||
3561 | * We directly increment counter->count and keep a second value in | ||
3562 | * counter->hw.period_left to count intervals. This period counter | ||
3563 | * is kept in the range [-sample_period, 0] so that we can use the | ||
3564 | * sign as a trigger. | ||
3565 | */ | ||
3566 | |||
3567 | static u64 perf_swcounter_set_period(struct perf_counter *counter) | ||
3568 | { | ||
3569 | struct hw_perf_counter *hwc = &counter->hw; | ||
3570 | u64 period = hwc->last_period; | ||
3571 | u64 nr, offset; | ||
3572 | s64 old, val; | ||
3573 | |||
3574 | hwc->last_period = hwc->sample_period; | ||
3575 | |||
3576 | again: | ||
3577 | old = val = atomic64_read(&hwc->period_left); | ||
3578 | if (val < 0) | ||
3579 | return 0; | ||
3580 | |||
3581 | nr = div64_u64(period + val, period); | ||
3582 | offset = nr * period; | ||
3583 | val -= offset; | ||
3584 | if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) | ||
3585 | goto again; | ||
3586 | |||
3587 | return nr; | ||
3588 | } | ||
3589 | |||
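A standalone sketch of the bookkeeping above, with illustrative numbers: period_left sits in [-sample_period, 0]; once enough events push it to zero or above, the number of whole periods that elapsed becomes the overflow count and the remainder re-arms the counter.

/*
 * Sketch only, with assumed values: period = 10, the counter is 3
 * events short of the next overflow, and a batch of 25 events lands.
 * set_period() then reports 3 overflows and re-arms at -8.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int64_t period = 10;	/* assumed sample_period */
	int64_t period_left = -3;	/* 3 events away from the next overflow */
	int64_t batch = 25;		/* events added in one perf_swcounter_add() */

	period_left += batch;
	if (period_left >= 0) {
		int64_t nr = (period + period_left) / period;

		period_left -= nr * period;
		printf("overflows=%lld new period_left=%lld\n",
		       (long long)nr, (long long)period_left);
	}
	return 0;
}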
3590 | static void perf_swcounter_overflow(struct perf_counter *counter, | ||
3591 | int nmi, struct perf_sample_data *data) | ||
3592 | { | ||
3593 | struct hw_perf_counter *hwc = &counter->hw; | ||
3594 | u64 overflow; | ||
3595 | |||
3596 | data->period = counter->hw.last_period; | ||
3597 | overflow = perf_swcounter_set_period(counter); | ||
3598 | |||
3599 | if (hwc->interrupts == MAX_INTERRUPTS) | ||
3600 | return; | ||
3601 | |||
3602 | for (; overflow; overflow--) { | ||
3603 | if (perf_counter_overflow(counter, nmi, data)) { | ||
3604 | /* | ||
3605 | * We inhibit the overflow from happening when | ||
3606 | * hwc->interrupts == MAX_INTERRUPTS. | ||
3607 | */ | ||
3608 | break; | ||
3609 | } | ||
3610 | } | ||
3611 | } | ||
3612 | |||
3613 | static void perf_swcounter_unthrottle(struct perf_counter *counter) | ||
3614 | { | ||
3615 | /* | ||
3616 | * Nothing to do, we already reset hwc->interrupts. | ||
3617 | */ | ||
3618 | } | ||
3619 | |||
3620 | static void perf_swcounter_add(struct perf_counter *counter, u64 nr, | ||
3621 | int nmi, struct perf_sample_data *data) | ||
3622 | { | ||
3623 | struct hw_perf_counter *hwc = &counter->hw; | ||
3624 | |||
3625 | atomic64_add(nr, &counter->count); | ||
3626 | |||
3627 | if (!hwc->sample_period) | ||
3628 | return; | ||
3629 | |||
3630 | if (!data->regs) | ||
3631 | return; | ||
3632 | |||
3633 | if (!atomic64_add_negative(nr, &hwc->period_left)) | ||
3634 | perf_swcounter_overflow(counter, nmi, data); | ||
3635 | } | ||
3636 | |||
3637 | static int perf_swcounter_is_counting(struct perf_counter *counter) | ||
3638 | { | ||
3639 | /* | ||
3640 | * The counter is active, we're good! | ||
3641 | */ | ||
3642 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) | ||
3643 | return 1; | ||
3644 | |||
3645 | /* | ||
3646 | * The counter is off/error, not counting. | ||
3647 | */ | ||
3648 | if (counter->state != PERF_COUNTER_STATE_INACTIVE) | ||
3649 | return 0; | ||
3650 | |||
3651 | /* | ||
3652 | * The counter is inactive; if the context is active | ||
3653 | * we're part of a group that didn't make it onto the 'pmu', | ||
3654 | * so we're not counting. | ||
3655 | */ | ||
3656 | if (counter->ctx->is_active) | ||
3657 | return 0; | ||
3658 | |||
3659 | /* | ||
3660 | * We're inactive and the context is too; this means the | ||
3661 | * task is scheduled out, and we're counting events that happen | ||
3662 | * to us, like migration events. | ||
3663 | */ | ||
3664 | return 1; | ||
3665 | } | ||
3666 | |||
3667 | static int perf_swcounter_match(struct perf_counter *counter, | ||
3668 | enum perf_type_id type, | ||
3669 | u32 event, struct pt_regs *regs) | ||
3670 | { | ||
3671 | if (!perf_swcounter_is_counting(counter)) | ||
3672 | return 0; | ||
3673 | |||
3674 | if (counter->attr.type != type) | ||
3675 | return 0; | ||
3676 | if (counter->attr.config != event) | ||
3677 | return 0; | ||
3678 | |||
3679 | if (regs) { | ||
3680 | if (counter->attr.exclude_user && user_mode(regs)) | ||
3681 | return 0; | ||
3682 | |||
3683 | if (counter->attr.exclude_kernel && !user_mode(regs)) | ||
3684 | return 0; | ||
3685 | } | ||
3686 | |||
3687 | return 1; | ||
3688 | } | ||
3689 | |||
3690 | static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, | ||
3691 | enum perf_type_id type, | ||
3692 | u32 event, u64 nr, int nmi, | ||
3693 | struct perf_sample_data *data) | ||
3694 | { | ||
3695 | struct perf_counter *counter; | ||
3696 | |||
3697 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3698 | return; | ||
3699 | |||
3700 | rcu_read_lock(); | ||
3701 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | ||
3702 | if (perf_swcounter_match(counter, type, event, data->regs)) | ||
3703 | perf_swcounter_add(counter, nr, nmi, data); | ||
3704 | } | ||
3705 | rcu_read_unlock(); | ||
3706 | } | ||
3707 | |||
3708 | static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) | ||
3709 | { | ||
3710 | if (in_nmi()) | ||
3711 | return &cpuctx->recursion[3]; | ||
3712 | |||
3713 | if (in_irq()) | ||
3714 | return &cpuctx->recursion[2]; | ||
3715 | |||
3716 | if (in_softirq()) | ||
3717 | return &cpuctx->recursion[1]; | ||
3718 | |||
3719 | return &cpuctx->recursion[0]; | ||
3720 | } | ||
3721 | |||
3722 | static void do_perf_swcounter_event(enum perf_type_id type, u32 event, | ||
3723 | u64 nr, int nmi, | ||
3724 | struct perf_sample_data *data) | ||
3725 | { | ||
3726 | struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); | ||
3727 | int *recursion = perf_swcounter_recursion_context(cpuctx); | ||
3728 | struct perf_counter_context *ctx; | ||
3729 | |||
3730 | if (*recursion) | ||
3731 | goto out; | ||
3732 | |||
3733 | (*recursion)++; | ||
3734 | barrier(); | ||
3735 | |||
3736 | perf_swcounter_ctx_event(&cpuctx->ctx, type, event, | ||
3737 | nr, nmi, data); | ||
3738 | rcu_read_lock(); | ||
3739 | /* | ||
3740 | * doesn't really matter which of the child contexts the | ||
3741 | * event ends up in. | ||
3742 | */ | ||
3743 | ctx = rcu_dereference(current->perf_counter_ctxp); | ||
3744 | if (ctx) | ||
3745 | perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data); | ||
3746 | rcu_read_unlock(); | ||
3747 | |||
3748 | barrier(); | ||
3749 | (*recursion)--; | ||
3750 | |||
3751 | out: | ||
3752 | put_cpu_var(perf_cpu_context); | ||
3753 | } | ||
3754 | |||
3755 | void __perf_swcounter_event(u32 event, u64 nr, int nmi, | ||
3756 | struct pt_regs *regs, u64 addr) | ||
3757 | { | ||
3758 | struct perf_sample_data data = { | ||
3759 | .regs = regs, | ||
3760 | .addr = addr, | ||
3761 | }; | ||
3762 | |||
3763 | do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data); | ||
3764 | } | ||
3765 | |||
3766 | static void perf_swcounter_read(struct perf_counter *counter) | ||
3767 | { | ||
3768 | } | ||
3769 | |||
3770 | static int perf_swcounter_enable(struct perf_counter *counter) | ||
3771 | { | ||
3772 | struct hw_perf_counter *hwc = &counter->hw; | ||
3773 | |||
3774 | if (hwc->sample_period) { | ||
3775 | hwc->last_period = hwc->sample_period; | ||
3776 | perf_swcounter_set_period(counter); | ||
3777 | } | ||
3778 | return 0; | ||
3779 | } | ||
3780 | |||
3781 | static void perf_swcounter_disable(struct perf_counter *counter) | ||
3782 | { | ||
3783 | } | ||
3784 | |||
3785 | static const struct pmu perf_ops_generic = { | ||
3786 | .enable = perf_swcounter_enable, | ||
3787 | .disable = perf_swcounter_disable, | ||
3788 | .read = perf_swcounter_read, | ||
3789 | .unthrottle = perf_swcounter_unthrottle, | ||
3790 | }; | ||
3791 | |||
3792 | /* | ||
3793 | * hrtimer based swcounter callback | ||
3794 | */ | ||
3795 | |||
3796 | static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) | ||
3797 | { | ||
3798 | enum hrtimer_restart ret = HRTIMER_RESTART; | ||
3799 | struct perf_sample_data data; | ||
3800 | struct perf_counter *counter; | ||
3801 | u64 period; | ||
3802 | |||
3803 | counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); | ||
3804 | counter->pmu->read(counter); | ||
3805 | |||
3806 | data.addr = 0; | ||
3807 | data.regs = get_irq_regs(); | ||
3808 | /* | ||
3809 | * In case we exclude kernel IPs or are somehow not in interrupt | ||
3810 | * context, provide the next best thing, the user IP. | ||
3811 | */ | ||
3812 | if ((counter->attr.exclude_kernel || !data.regs) && | ||
3813 | !counter->attr.exclude_user) | ||
3814 | data.regs = task_pt_regs(current); | ||
3815 | |||
3816 | if (data.regs) { | ||
3817 | if (perf_counter_overflow(counter, 0, &data)) | ||
3818 | ret = HRTIMER_NORESTART; | ||
3819 | } | ||
3820 | |||
3821 | period = max_t(u64, 10000, counter->hw.sample_period); | ||
3822 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | ||
3823 | |||
3824 | return ret; | ||
3825 | } | ||
3826 | |||
3827 | /* | ||
3828 | * Software counter: cpu wall time clock | ||
3829 | */ | ||
3830 | |||
3831 | static void cpu_clock_perf_counter_update(struct perf_counter *counter) | ||
3832 | { | ||
3833 | int cpu = raw_smp_processor_id(); | ||
3834 | s64 prev; | ||
3835 | u64 now; | ||
3836 | |||
3837 | now = cpu_clock(cpu); | ||
3838 | prev = atomic64_read(&counter->hw.prev_count); | ||
3839 | atomic64_set(&counter->hw.prev_count, now); | ||
3840 | atomic64_add(now - prev, &counter->count); | ||
3841 | } | ||
3842 | |||
3843 | static int cpu_clock_perf_counter_enable(struct perf_counter *counter) | ||
3844 | { | ||
3845 | struct hw_perf_counter *hwc = &counter->hw; | ||
3846 | int cpu = raw_smp_processor_id(); | ||
3847 | |||
3848 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); | ||
3849 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
3850 | hwc->hrtimer.function = perf_swcounter_hrtimer; | ||
3851 | if (hwc->sample_period) { | ||
3852 | u64 period = max_t(u64, 10000, hwc->sample_period); | ||
3853 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
3854 | ns_to_ktime(period), 0, | ||
3855 | HRTIMER_MODE_REL, 0); | ||
3856 | } | ||
3857 | |||
3858 | return 0; | ||
3859 | } | ||
3860 | |||
3861 | static void cpu_clock_perf_counter_disable(struct perf_counter *counter) | ||
3862 | { | ||
3863 | if (counter->hw.sample_period) | ||
3864 | hrtimer_cancel(&counter->hw.hrtimer); | ||
3865 | cpu_clock_perf_counter_update(counter); | ||
3866 | } | ||
3867 | |||
3868 | static void cpu_clock_perf_counter_read(struct perf_counter *counter) | ||
3869 | { | ||
3870 | cpu_clock_perf_counter_update(counter); | ||
3871 | } | ||
3872 | |||
3873 | static const struct pmu perf_ops_cpu_clock = { | ||
3874 | .enable = cpu_clock_perf_counter_enable, | ||
3875 | .disable = cpu_clock_perf_counter_disable, | ||
3876 | .read = cpu_clock_perf_counter_read, | ||
3877 | }; | ||
3878 | |||
3879 | /* | ||
3880 | * Software counter: task time clock | ||
3881 | */ | ||
3882 | |||
3883 | static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) | ||
3884 | { | ||
3885 | u64 prev; | ||
3886 | s64 delta; | ||
3887 | |||
3888 | prev = atomic64_xchg(&counter->hw.prev_count, now); | ||
3889 | delta = now - prev; | ||
3890 | atomic64_add(delta, &counter->count); | ||
3891 | } | ||
3892 | |||
3893 | static int task_clock_perf_counter_enable(struct perf_counter *counter) | ||
3894 | { | ||
3895 | struct hw_perf_counter *hwc = &counter->hw; | ||
3896 | u64 now; | ||
3897 | |||
3898 | now = counter->ctx->time; | ||
3899 | |||
3900 | atomic64_set(&hwc->prev_count, now); | ||
3901 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
3902 | hwc->hrtimer.function = perf_swcounter_hrtimer; | ||
3903 | if (hwc->sample_period) { | ||
3904 | u64 period = max_t(u64, 10000, hwc->sample_period); | ||
3905 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
3906 | ns_to_ktime(period), 0, | ||
3907 | HRTIMER_MODE_REL, 0); | ||
3908 | } | ||
3909 | |||
3910 | return 0; | ||
3911 | } | ||
3912 | |||
3913 | static void task_clock_perf_counter_disable(struct perf_counter *counter) | ||
3914 | { | ||
3915 | if (counter->hw.sample_period) | ||
3916 | hrtimer_cancel(&counter->hw.hrtimer); | ||
3917 | task_clock_perf_counter_update(counter, counter->ctx->time); | ||
3918 | |||
3919 | } | ||
3920 | |||
3921 | static void task_clock_perf_counter_read(struct perf_counter *counter) | ||
3922 | { | ||
3923 | u64 time; | ||
3924 | |||
3925 | if (!in_nmi()) { | ||
3926 | update_context_time(counter->ctx); | ||
3927 | time = counter->ctx->time; | ||
3928 | } else { | ||
3929 | u64 now = perf_clock(); | ||
3930 | u64 delta = now - counter->ctx->timestamp; | ||
3931 | time = counter->ctx->time + delta; | ||
3932 | } | ||
3933 | |||
3934 | task_clock_perf_counter_update(counter, time); | ||
3935 | } | ||
3936 | |||
3937 | static const struct pmu perf_ops_task_clock = { | ||
3938 | .enable = task_clock_perf_counter_enable, | ||
3939 | .disable = task_clock_perf_counter_disable, | ||
3940 | .read = task_clock_perf_counter_read, | ||
3941 | }; | ||
3942 | |||
3943 | #ifdef CONFIG_EVENT_PROFILE | ||
3944 | void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, | ||
3945 | int entry_size) | ||
3946 | { | ||
3947 | struct perf_raw_record raw = { | ||
3948 | .size = entry_size, | ||
3949 | .data = record, | ||
3950 | }; | ||
3951 | |||
3952 | struct perf_sample_data data = { | ||
3953 | .regs = get_irq_regs(), | ||
3954 | .addr = addr, | ||
3955 | .raw = &raw, | ||
3956 | }; | ||
3957 | |||
3958 | if (!data.regs) | ||
3959 | data.regs = task_pt_regs(current); | ||
3960 | |||
3961 | do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data); | ||
3962 | } | ||
3963 | EXPORT_SYMBOL_GPL(perf_tpcounter_event); | ||
3964 | |||
3965 | extern int ftrace_profile_enable(int); | ||
3966 | extern void ftrace_profile_disable(int); | ||
3967 | |||
3968 | static void tp_perf_counter_destroy(struct perf_counter *counter) | ||
3969 | { | ||
3970 | ftrace_profile_disable(counter->attr.config); | ||
3971 | } | ||
3972 | |||
3973 | static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) | ||
3974 | { | ||
3975 | /* | ||
3976 | * Raw tracepoint data is a severe data leak; only allow root to | ||
3977 | * have these. | ||
3978 | */ | ||
3979 | if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && | ||
3980 | perf_paranoid_tracepoint_raw() && | ||
3981 | !capable(CAP_SYS_ADMIN)) | ||
3982 | return ERR_PTR(-EPERM); | ||
3983 | |||
3984 | if (ftrace_profile_enable(counter->attr.config)) | ||
3985 | return NULL; | ||
3986 | |||
3987 | counter->destroy = tp_perf_counter_destroy; | ||
3988 | |||
3989 | return &perf_ops_generic; | ||
3990 | } | ||
3991 | #else | ||
3992 | static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) | ||
3993 | { | ||
3994 | return NULL; | ||
3995 | } | ||
3996 | #endif | ||
3997 | |||
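For context, a hedged userspace sketch of how a tracepoint counter is typically selected: attr.type is PERF_TYPE_TRACEPOINT and attr.config carries the event id exposed by the tracing infrastructure. The debugfs path below is an assumption about where that id is mounted on a given system.

/* Sketch only: reads a tracepoint id to use as attr.config. */
#include <stdio.h>

static long long read_tracepoint_id(const char *path)
{
	FILE *f = fopen(path, "r");
	long long id = -1;

	if (f) {
		if (fscanf(f, "%lld", &id) != 1)
			id = -1;
		fclose(f);
	}
	return id;
}

int main(void)
{
	/* Assumed debugfs mount point and event; adjust for the system. */
	long long id = read_tracepoint_id(
		"/sys/kernel/debug/tracing/events/sched/sched_switch/id");

	if (id < 0) {
		fprintf(stderr, "tracepoint id not available\n");
		return 1;
	}
	printf("attr.type = PERF_TYPE_TRACEPOINT, attr.config = %lld\n", id);
	return 0;
}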
3998 | atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; | ||
3999 | |||
4000 | static void sw_perf_counter_destroy(struct perf_counter *counter) | ||
4001 | { | ||
4002 | u64 event = counter->attr.config; | ||
4003 | |||
4004 | WARN_ON(counter->parent); | ||
4005 | |||
4006 | atomic_dec(&perf_swcounter_enabled[event]); | ||
4007 | } | ||
4008 | |||
4009 | static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) | ||
4010 | { | ||
4011 | const struct pmu *pmu = NULL; | ||
4012 | u64 event = counter->attr.config; | ||
4013 | |||
4014 | /* | ||
4015 | * Software counters (currently) can't in general distinguish | ||
4016 | * between user, kernel and hypervisor events. | ||
4017 | * However, context switches and cpu migrations are considered | ||
4018 | * to be kernel events, and page faults are never hypervisor | ||
4019 | * events. | ||
4020 | */ | ||
4021 | switch (event) { | ||
4022 | case PERF_COUNT_SW_CPU_CLOCK: | ||
4023 | pmu = &perf_ops_cpu_clock; | ||
4024 | |||
4025 | break; | ||
4026 | case PERF_COUNT_SW_TASK_CLOCK: | ||
4027 | /* | ||
4028 | * If the user instantiates this as a per-cpu counter, | ||
4029 | * use the cpu_clock counter instead. | ||
4030 | */ | ||
4031 | if (counter->ctx->task) | ||
4032 | pmu = &perf_ops_task_clock; | ||
4033 | else | ||
4034 | pmu = &perf_ops_cpu_clock; | ||
4035 | |||
4036 | break; | ||
4037 | case PERF_COUNT_SW_PAGE_FAULTS: | ||
4038 | case PERF_COUNT_SW_PAGE_FAULTS_MIN: | ||
4039 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: | ||
4040 | case PERF_COUNT_SW_CONTEXT_SWITCHES: | ||
4041 | case PERF_COUNT_SW_CPU_MIGRATIONS: | ||
4042 | if (!counter->parent) { | ||
4043 | atomic_inc(&perf_swcounter_enabled[event]); | ||
4044 | counter->destroy = sw_perf_counter_destroy; | ||
4045 | } | ||
4046 | pmu = &perf_ops_generic; | ||
4047 | break; | ||
4048 | } | ||
4049 | |||
4050 | return pmu; | ||
4051 | } | ||
4052 | |||
4053 | /* | ||
4054 | * Allocate and initialize a counter structure | ||
4055 | */ | ||
4056 | static struct perf_counter * | ||
4057 | perf_counter_alloc(struct perf_counter_attr *attr, | ||
4058 | int cpu, | ||
4059 | struct perf_counter_context *ctx, | ||
4060 | struct perf_counter *group_leader, | ||
4061 | struct perf_counter *parent_counter, | ||
4062 | gfp_t gfpflags) | ||
4063 | { | ||
4064 | const struct pmu *pmu; | ||
4065 | struct perf_counter *counter; | ||
4066 | struct hw_perf_counter *hwc; | ||
4067 | long err; | ||
4068 | |||
4069 | counter = kzalloc(sizeof(*counter), gfpflags); | ||
4070 | if (!counter) | ||
4071 | return ERR_PTR(-ENOMEM); | ||
4072 | |||
4073 | /* | ||
4074 | * Single counters are their own group leaders, with an | ||
4075 | * empty sibling list: | ||
4076 | */ | ||
4077 | if (!group_leader) | ||
4078 | group_leader = counter; | ||
4079 | |||
4080 | mutex_init(&counter->child_mutex); | ||
4081 | INIT_LIST_HEAD(&counter->child_list); | ||
4082 | |||
4083 | INIT_LIST_HEAD(&counter->list_entry); | ||
4084 | INIT_LIST_HEAD(&counter->event_entry); | ||
4085 | INIT_LIST_HEAD(&counter->sibling_list); | ||
4086 | init_waitqueue_head(&counter->waitq); | ||
4087 | |||
4088 | mutex_init(&counter->mmap_mutex); | ||
4089 | |||
4090 | counter->cpu = cpu; | ||
4091 | counter->attr = *attr; | ||
4092 | counter->group_leader = group_leader; | ||
4093 | counter->pmu = NULL; | ||
4094 | counter->ctx = ctx; | ||
4095 | counter->oncpu = -1; | ||
4096 | |||
4097 | counter->parent = parent_counter; | ||
4098 | |||
4099 | counter->ns = get_pid_ns(current->nsproxy->pid_ns); | ||
4100 | counter->id = atomic64_inc_return(&perf_counter_id); | ||
4101 | |||
4102 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
4103 | |||
4104 | if (attr->disabled) | ||
4105 | counter->state = PERF_COUNTER_STATE_OFF; | ||
4106 | |||
4107 | pmu = NULL; | ||
4108 | |||
4109 | hwc = &counter->hw; | ||
4110 | hwc->sample_period = attr->sample_period; | ||
4111 | if (attr->freq && attr->sample_freq) | ||
4112 | hwc->sample_period = 1; | ||
4113 | hwc->last_period = hwc->sample_period; | ||
4114 | |||
4115 | atomic64_set(&hwc->period_left, hwc->sample_period); | ||
4116 | |||
4117 | /* | ||
4118 | * we currently do not support PERF_FORMAT_GROUP on inherited counters | ||
4119 | */ | ||
4120 | if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) | ||
4121 | goto done; | ||
4122 | |||
4123 | switch (attr->type) { | ||
4124 | case PERF_TYPE_RAW: | ||
4125 | case PERF_TYPE_HARDWARE: | ||
4126 | case PERF_TYPE_HW_CACHE: | ||
4127 | pmu = hw_perf_counter_init(counter); | ||
4128 | break; | ||
4129 | |||
4130 | case PERF_TYPE_SOFTWARE: | ||
4131 | pmu = sw_perf_counter_init(counter); | ||
4132 | break; | ||
4133 | |||
4134 | case PERF_TYPE_TRACEPOINT: | ||
4135 | pmu = tp_perf_counter_init(counter); | ||
4136 | break; | ||
4137 | |||
4138 | default: | ||
4139 | break; | ||
4140 | } | ||
4141 | done: | ||
4142 | err = 0; | ||
4143 | if (!pmu) | ||
4144 | err = -EINVAL; | ||
4145 | else if (IS_ERR(pmu)) | ||
4146 | err = PTR_ERR(pmu); | ||
4147 | |||
4148 | if (err) { | ||
4149 | if (counter->ns) | ||
4150 | put_pid_ns(counter->ns); | ||
4151 | kfree(counter); | ||
4152 | return ERR_PTR(err); | ||
4153 | } | ||
4154 | |||
4155 | counter->pmu = pmu; | ||
4156 | |||
4157 | if (!counter->parent) { | ||
4158 | atomic_inc(&nr_counters); | ||
4159 | if (counter->attr.mmap) | ||
4160 | atomic_inc(&nr_mmap_counters); | ||
4161 | if (counter->attr.comm) | ||
4162 | atomic_inc(&nr_comm_counters); | ||
4163 | if (counter->attr.task) | ||
4164 | atomic_inc(&nr_task_counters); | ||
4165 | } | ||
4166 | |||
4167 | return counter; | ||
4168 | } | ||
4169 | |||
4170 | static int perf_copy_attr(struct perf_counter_attr __user *uattr, | ||
4171 | struct perf_counter_attr *attr) | ||
4172 | { | ||
4173 | int ret; | ||
4174 | u32 size; | ||
4175 | |||
4176 | if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) | ||
4177 | return -EFAULT; | ||
4178 | |||
4179 | /* | ||
4180 | * zero the full structure, so that a short copy leaves the rest zeroed. | ||
4181 | */ | ||
4182 | memset(attr, 0, sizeof(*attr)); | ||
4183 | |||
4184 | ret = get_user(size, &uattr->size); | ||
4185 | if (ret) | ||
4186 | return ret; | ||
4187 | |||
4188 | if (size > PAGE_SIZE) /* silly large */ | ||
4189 | goto err_size; | ||
4190 | |||
4191 | if (!size) /* abi compat */ | ||
4192 | size = PERF_ATTR_SIZE_VER0; | ||
4193 | |||
4194 | if (size < PERF_ATTR_SIZE_VER0) | ||
4195 | goto err_size; | ||
4196 | |||
4197 | /* | ||
4198 | * If we're handed a bigger struct than we know of, | ||
4199 | * ensure all the unknown bits are 0. | ||
4200 | */ | ||
4201 | if (size > sizeof(*attr)) { | ||
4202 | unsigned long val; | ||
4203 | unsigned long __user *addr; | ||
4204 | unsigned long __user *end; | ||
4205 | |||
4206 | addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr), | ||
4207 | sizeof(unsigned long)); | ||
4208 | end = PTR_ALIGN((void __user *)uattr + size, | ||
4209 | sizeof(unsigned long)); | ||
4210 | |||
4211 | for (; addr < end; addr++) { | ||
4212 | ret = get_user(val, addr); | ||
4213 | if (ret) | ||
4214 | return ret; | ||
4215 | if (val) | ||
4216 | goto err_size; | ||
4217 | } | ||
4218 | size = sizeof(*attr); | ||
4219 | } | ||
4220 | |||
4221 | ret = copy_from_user(attr, uattr, size); | ||
4222 | if (ret) | ||
4223 | return -EFAULT; | ||
4224 | |||
4225 | /* | ||
4226 | * If the type exists, the corresponding creation will verify | ||
4227 | * the attr->config. | ||
4228 | */ | ||
4229 | if (attr->type >= PERF_TYPE_MAX) | ||
4230 | return -EINVAL; | ||
4231 | |||
4232 | if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) | ||
4233 | return -EINVAL; | ||
4234 | |||
4235 | if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) | ||
4236 | return -EINVAL; | ||
4237 | |||
4238 | if (attr->read_format & ~(PERF_FORMAT_MAX-1)) | ||
4239 | return -EINVAL; | ||
4240 | |||
4241 | out: | ||
4242 | return ret; | ||
4243 | |||
4244 | err_size: | ||
4245 | put_user(sizeof(*attr), &uattr->size); | ||
4246 | ret = -E2BIG; | ||
4247 | goto out; | ||
4248 | } | ||
4249 | |||
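The compat rule enforced above (any bytes beyond the structure this kernel knows about must be zero, otherwise the call fails with E2BIG) can be modelled in isolation. The sketch below uses illustrative sizes and plain memory instead of get_user(); it is not the kernel routine itself.

/* Minimal userland model of the "unknown trailing bytes must be 0" rule. */
#include <stdio.h>
#include <string.h>

/* Returns 0 if every byte in [known, given) is zero, -1 otherwise
 * (where the kernel would answer -E2BIG and write its size back).
 */
static int check_tail_zero(const unsigned char *buf, size_t known, size_t given)
{
	size_t i;

	for (i = known; i < given; i++)
		if (buf[i])
			return -1;
	return 0;
}

int main(void)
{
	unsigned char attr[128];	/* illustrative sizes only */

	memset(attr, 0, sizeof(attr));
	printf("clean tail: %d\n", check_tail_zero(attr, 64, 128));
	attr[100] = 1;
	printf("dirty tail: %d\n", check_tail_zero(attr, 64, 128));
	return 0;
}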
4250 | int perf_counter_set_output(struct perf_counter *counter, int output_fd) | ||
4251 | { | ||
4252 | struct perf_counter *output_counter = NULL; | ||
4253 | struct file *output_file = NULL; | ||
4254 | struct perf_counter *old_output; | ||
4255 | int fput_needed = 0; | ||
4256 | int ret = -EINVAL; | ||
4257 | |||
4258 | if (!output_fd) | ||
4259 | goto set; | ||
4260 | |||
4261 | output_file = fget_light(output_fd, &fput_needed); | ||
4262 | if (!output_file) | ||
4263 | return -EBADF; | ||
4264 | |||
4265 | if (output_file->f_op != &perf_fops) | ||
4266 | goto out; | ||
4267 | |||
4268 | output_counter = output_file->private_data; | ||
4269 | |||
4270 | /* Don't chain output fds */ | ||
4271 | if (output_counter->output) | ||
4272 | goto out; | ||
4273 | |||
4274 | /* Don't set an output fd when we already have an output channel */ | ||
4275 | if (counter->data) | ||
4276 | goto out; | ||
4277 | |||
4278 | atomic_long_inc(&output_file->f_count); | ||
4279 | |||
4280 | set: | ||
4281 | mutex_lock(&counter->mmap_mutex); | ||
4282 | old_output = counter->output; | ||
4283 | rcu_assign_pointer(counter->output, output_counter); | ||
4284 | mutex_unlock(&counter->mmap_mutex); | ||
4285 | |||
4286 | if (old_output) { | ||
4287 | /* | ||
4288 | * we need to make sure no existing perf_output_*() | ||
4289 | * is still referencing this counter. | ||
4290 | */ | ||
4291 | synchronize_rcu(); | ||
4292 | fput(old_output->filp); | ||
4293 | } | ||
4294 | |||
4295 | ret = 0; | ||
4296 | out: | ||
4297 | fput_light(output_file, fput_needed); | ||
4298 | return ret; | ||
4299 | } | ||
4300 | |||
4301 | /** | ||
4302 | * sys_perf_counter_open - open a performance counter, associate it to a task/cpu | ||
4303 | * | ||
4304 | * @attr_uptr: event type attributes for monitoring/sampling | ||
4305 | * @pid: target pid | ||
4306 | * @cpu: target cpu | ||
4307 | * @group_fd: group leader counter fd | ||
4308 | */ | ||
4309 | SYSCALL_DEFINE5(perf_counter_open, | ||
4310 | struct perf_counter_attr __user *, attr_uptr, | ||
4311 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) | ||
4312 | { | ||
4313 | struct perf_counter *counter, *group_leader; | ||
4314 | struct perf_counter_attr attr; | ||
4315 | struct perf_counter_context *ctx; | ||
4316 | struct file *counter_file = NULL; | ||
4317 | struct file *group_file = NULL; | ||
4318 | int fput_needed = 0; | ||
4319 | int fput_needed2 = 0; | ||
4320 | int err; | ||
4321 | |||
4322 | /* for future expandability... */ | ||
4323 | if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT)) | ||
4324 | return -EINVAL; | ||
4325 | |||
4326 | err = perf_copy_attr(attr_uptr, &attr); | ||
4327 | if (err) | ||
4328 | return err; | ||
4329 | |||
4330 | if (!attr.exclude_kernel) { | ||
4331 | if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) | ||
4332 | return -EACCES; | ||
4333 | } | ||
4334 | |||
4335 | if (attr.freq) { | ||
4336 | if (attr.sample_freq > sysctl_perf_counter_sample_rate) | ||
4337 | return -EINVAL; | ||
4338 | } | ||
4339 | |||
4340 | /* | ||
4341 | * Get the target context (task or percpu): | ||
4342 | */ | ||
4343 | ctx = find_get_context(pid, cpu); | ||
4344 | if (IS_ERR(ctx)) | ||
4345 | return PTR_ERR(ctx); | ||
4346 | |||
4347 | /* | ||
4348 | * Look up the group leader (we will attach this counter to it): | ||
4349 | */ | ||
4350 | group_leader = NULL; | ||
4351 | if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { | ||
4352 | err = -EINVAL; | ||
4353 | group_file = fget_light(group_fd, &fput_needed); | ||
4354 | if (!group_file) | ||
4355 | goto err_put_context; | ||
4356 | if (group_file->f_op != &perf_fops) | ||
4357 | goto err_put_context; | ||
4358 | |||
4359 | group_leader = group_file->private_data; | ||
4360 | /* | ||
4361 | * Do not allow a recursive hierarchy (this new sibling | ||
4362 | * becoming part of another group-sibling): | ||
4363 | */ | ||
4364 | if (group_leader->group_leader != group_leader) | ||
4365 | goto err_put_context; | ||
4366 | /* | ||
4367 | * Do not allow attaching to a group in a different | ||
4368 | * task or CPU context: | ||
4369 | */ | ||
4370 | if (group_leader->ctx != ctx) | ||
4371 | goto err_put_context; | ||
4372 | /* | ||
4373 | * Only a group leader can be exclusive or pinned | ||
4374 | */ | ||
4375 | if (attr.exclusive || attr.pinned) | ||
4376 | goto err_put_context; | ||
4377 | } | ||
4378 | |||
4379 | counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, | ||
4380 | NULL, GFP_KERNEL); | ||
4381 | err = PTR_ERR(counter); | ||
4382 | if (IS_ERR(counter)) | ||
4383 | goto err_put_context; | ||
4384 | |||
4385 | err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); | ||
4386 | if (err < 0) | ||
4387 | goto err_free_put_context; | ||
4388 | |||
4389 | counter_file = fget_light(err, &fput_needed2); | ||
4390 | if (!counter_file) | ||
4391 | goto err_free_put_context; | ||
4392 | |||
4393 | if (flags & PERF_FLAG_FD_OUTPUT) { | ||
4394 | err = perf_counter_set_output(counter, group_fd); | ||
4395 | if (err) | ||
4396 | goto err_fput_free_put_context; | ||
4397 | } | ||
4398 | |||
4399 | counter->filp = counter_file; | ||
4400 | WARN_ON_ONCE(ctx->parent_ctx); | ||
4401 | mutex_lock(&ctx->mutex); | ||
4402 | perf_install_in_context(ctx, counter, cpu); | ||
4403 | ++ctx->generation; | ||
4404 | mutex_unlock(&ctx->mutex); | ||
4405 | |||
4406 | counter->owner = current; | ||
4407 | get_task_struct(current); | ||
4408 | mutex_lock(¤t->perf_counter_mutex); | ||
4409 | list_add_tail(&counter->owner_entry, ¤t->perf_counter_list); | ||
4410 | mutex_unlock(¤t->perf_counter_mutex); | ||
4411 | |||
4412 | err_fput_free_put_context: | ||
4413 | fput_light(counter_file, fput_needed2); | ||
4414 | |||
4415 | err_free_put_context: | ||
4416 | if (err < 0) | ||
4417 | kfree(counter); | ||
4418 | |||
4419 | err_put_context: | ||
4420 | if (err < 0) | ||
4421 | put_ctx(ctx); | ||
4422 | |||
4423 | fput_light(group_file, fput_needed); | ||
4424 | |||
4425 | return err; | ||
4426 | } | ||
4427 | |||
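As a usage reference, a minimal userspace sketch of calling this syscall and reading the count; the header location and the availability of __NR_perf_counter_open are architecture- and tree-specific assumptions, and no sampling or mmap buffer is set up.

/* Sketch: count instructions for the current task, then read the value. */
#include <linux/perf_counter.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_counter_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;

	/* pid 0 = current task, cpu -1 = any cpu, no group leader, no flags */
	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_counter_open");
		return 1;
	}

	/* ... run the code to be measured ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}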
4428 | /* | ||
4429 | * inherit a counter from parent task to child task: | ||
4430 | */ | ||
4431 | static struct perf_counter * | ||
4432 | inherit_counter(struct perf_counter *parent_counter, | ||
4433 | struct task_struct *parent, | ||
4434 | struct perf_counter_context *parent_ctx, | ||
4435 | struct task_struct *child, | ||
4436 | struct perf_counter *group_leader, | ||
4437 | struct perf_counter_context *child_ctx) | ||
4438 | { | ||
4439 | struct perf_counter *child_counter; | ||
4440 | |||
4441 | /* | ||
4442 | * Instead of creating recursive hierarchies of counters, | ||
4443 | * we link inherited counters back to the original parent, | ||
4444 | * which is guaranteed to have a filp, which we use as the reference | ||
4445 | * count: | ||
4446 | */ | ||
4447 | if (parent_counter->parent) | ||
4448 | parent_counter = parent_counter->parent; | ||
4449 | |||
4450 | child_counter = perf_counter_alloc(&parent_counter->attr, | ||
4451 | parent_counter->cpu, child_ctx, | ||
4452 | group_leader, parent_counter, | ||
4453 | GFP_KERNEL); | ||
4454 | if (IS_ERR(child_counter)) | ||
4455 | return child_counter; | ||
4456 | get_ctx(child_ctx); | ||
4457 | |||
4458 | /* | ||
4459 | * Make the child state follow the state of the parent counter, | ||
4460 | * not its attr.disabled bit. We hold the parent's mutex, | ||
4461 | * so we won't race with perf_counter_{en, dis}able_family. | ||
4462 | */ | ||
4463 | if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
4464 | child_counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
4465 | else | ||
4466 | child_counter->state = PERF_COUNTER_STATE_OFF; | ||
4467 | |||
4468 | if (parent_counter->attr.freq) | ||
4469 | child_counter->hw.sample_period = parent_counter->hw.sample_period; | ||
4470 | |||
4471 | /* | ||
4472 | * Link it up in the child's context: | ||
4473 | */ | ||
4474 | add_counter_to_ctx(child_counter, child_ctx); | ||
4475 | |||
4476 | /* | ||
4477 | * Get a reference to the parent filp - we will fput it | ||
4478 | * when the child counter exits. This is safe to do because | ||
4479 | * we are in the parent and we know that the filp still | ||
4480 | * exists and has a nonzero count: | ||
4481 | */ | ||
4482 | atomic_long_inc(&parent_counter->filp->f_count); | ||
4483 | |||
4484 | /* | ||
4485 | * Link this into the parent counter's child list | ||
4486 | */ | ||
4487 | WARN_ON_ONCE(parent_counter->ctx->parent_ctx); | ||
4488 | mutex_lock(&parent_counter->child_mutex); | ||
4489 | list_add_tail(&child_counter->child_list, &parent_counter->child_list); | ||
4490 | mutex_unlock(&parent_counter->child_mutex); | ||
4491 | |||
4492 | return child_counter; | ||
4493 | } | ||
4494 | |||
4495 | static int inherit_group(struct perf_counter *parent_counter, | ||
4496 | struct task_struct *parent, | ||
4497 | struct perf_counter_context *parent_ctx, | ||
4498 | struct task_struct *child, | ||
4499 | struct perf_counter_context *child_ctx) | ||
4500 | { | ||
4501 | struct perf_counter *leader; | ||
4502 | struct perf_counter *sub; | ||
4503 | struct perf_counter *child_ctr; | ||
4504 | |||
4505 | leader = inherit_counter(parent_counter, parent, parent_ctx, | ||
4506 | child, NULL, child_ctx); | ||
4507 | if (IS_ERR(leader)) | ||
4508 | return PTR_ERR(leader); | ||
4509 | list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { | ||
4510 | child_ctr = inherit_counter(sub, parent, parent_ctx, | ||
4511 | child, leader, child_ctx); | ||
4512 | if (IS_ERR(child_ctr)) | ||
4513 | return PTR_ERR(child_ctr); | ||
4514 | } | ||
4515 | return 0; | ||
4516 | } | ||
4517 | |||
4518 | static void sync_child_counter(struct perf_counter *child_counter, | ||
4519 | struct task_struct *child) | ||
4520 | { | ||
4521 | struct perf_counter *parent_counter = child_counter->parent; | ||
4522 | u64 child_val; | ||
4523 | |||
4524 | if (child_counter->attr.inherit_stat) | ||
4525 | perf_counter_read_event(child_counter, child); | ||
4526 | |||
4527 | child_val = atomic64_read(&child_counter->count); | ||
4528 | |||
4529 | /* | ||
4530 | * Add back the child's count to the parent's count: | ||
4531 | */ | ||
4532 | atomic64_add(child_val, &parent_counter->count); | ||
4533 | atomic64_add(child_counter->total_time_enabled, | ||
4534 | &parent_counter->child_total_time_enabled); | ||
4535 | atomic64_add(child_counter->total_time_running, | ||
4536 | &parent_counter->child_total_time_running); | ||
4537 | |||
4538 | /* | ||
4539 | * Remove this counter from the parent's list | ||
4540 | */ | ||
4541 | WARN_ON_ONCE(parent_counter->ctx->parent_ctx); | ||
4542 | mutex_lock(&parent_counter->child_mutex); | ||
4543 | list_del_init(&child_counter->child_list); | ||
4544 | mutex_unlock(&parent_counter->child_mutex); | ||
4545 | |||
4546 | /* | ||
4547 | * Release the parent counter, if this was the last | ||
4548 | * reference to it. | ||
4549 | */ | ||
4550 | fput(parent_counter->filp); | ||
4551 | } | ||
4552 | |||
4553 | static void | ||
4554 | __perf_counter_exit_task(struct perf_counter *child_counter, | ||
4555 | struct perf_counter_context *child_ctx, | ||
4556 | struct task_struct *child) | ||
4557 | { | ||
4558 | struct perf_counter *parent_counter; | ||
4559 | |||
4560 | update_counter_times(child_counter); | ||
4561 | perf_counter_remove_from_context(child_counter); | ||
4562 | |||
4563 | parent_counter = child_counter->parent; | ||
4564 | /* | ||
4565 | * It can happen that the parent exits first, and has counters | ||
4566 | * that are still around due to the child reference. These | ||
4567 | * counters need to be zapped - but otherwise linger. | ||
4568 | */ | ||
4569 | if (parent_counter) { | ||
4570 | sync_child_counter(child_counter, child); | ||
4571 | free_counter(child_counter); | ||
4572 | } | ||
4573 | } | ||
4574 | |||
4575 | /* | ||
4576 | * When a child task exits, feed back counter values to parent counters. | ||
4577 | */ | ||
4578 | void perf_counter_exit_task(struct task_struct *child) | ||
4579 | { | ||
4580 | struct perf_counter *child_counter, *tmp; | ||
4581 | struct perf_counter_context *child_ctx; | ||
4582 | unsigned long flags; | ||
4583 | |||
4584 | if (likely(!child->perf_counter_ctxp)) { | ||
4585 | perf_counter_task(child, NULL, 0); | ||
4586 | return; | ||
4587 | } | ||
4588 | |||
4589 | local_irq_save(flags); | ||
4590 | /* | ||
4591 | * We can't reschedule here because interrupts are disabled, | ||
4592 | * and either the child is current or it is a task that can't be | ||
4593 | * scheduled, so we are now safe from rescheduling changing | ||
4594 | * our context. | ||
4595 | */ | ||
4596 | child_ctx = child->perf_counter_ctxp; | ||
4597 | __perf_counter_task_sched_out(child_ctx); | ||
4598 | |||
4599 | /* | ||
4600 | * Take the context lock here so that if find_get_context is | ||
4601 | * reading child->perf_counter_ctxp, we wait until it has | ||
4602 | * incremented the context's refcount before we do put_ctx below. | ||
4603 | */ | ||
4604 | spin_lock(&child_ctx->lock); | ||
4605 | child->perf_counter_ctxp = NULL; | ||
4606 | /* | ||
4607 | * If this context is a clone; unclone it so it can't get | ||
4608 | * swapped to another process while we're removing all | ||
4609 | * the counters from it. | ||
4610 | */ | ||
4611 | unclone_ctx(child_ctx); | ||
4612 | spin_unlock_irqrestore(&child_ctx->lock, flags); | ||
4613 | |||
4614 | /* | ||
4615 | * Report the task dead after unscheduling the counters so that we | ||
4616 | * won't get any samples after PERF_EVENT_EXIT. We can however still | ||
4617 | * get a few PERF_EVENT_READ events. | ||
4618 | */ | ||
4619 | perf_counter_task(child, child_ctx, 0); | ||
4620 | |||
4621 | /* | ||
4622 | * We can recurse on the same lock type through: | ||
4623 | * | ||
4624 | * __perf_counter_exit_task() | ||
4625 | * sync_child_counter() | ||
4626 | * fput(parent_counter->filp) | ||
4627 | * perf_release() | ||
4628 | * mutex_lock(&ctx->mutex) | ||
4629 | * | ||
4630 | * But since it's the parent context it won't be the same instance. | ||
4631 | */ | ||
4632 | mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); | ||
4633 | |||
4634 | again: | ||
4635 | list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, | ||
4636 | list_entry) | ||
4637 | __perf_counter_exit_task(child_counter, child_ctx, child); | ||
4638 | |||
4639 | /* | ||
4640 | * If the last counter was a group counter, it will have appended all | ||
4641 | * its siblings to the list, but we obtained 'tmp' before that, which | ||
4642 | * will still point to the list head, terminating the iteration. | ||
4643 | */ | ||
4644 | if (!list_empty(&child_ctx->counter_list)) | ||
4645 | goto again; | ||
4646 | |||
4647 | mutex_unlock(&child_ctx->mutex); | ||
4648 | |||
4649 | put_ctx(child_ctx); | ||
4650 | } | ||
4651 | |||
4652 | /* | ||
4653 | * Free an unexposed, unused context, as created by inheritance in | ||
4654 | * perf_counter_init_task below; used by fork() in case of failure. | ||
4655 | */ | ||
4656 | void perf_counter_free_task(struct task_struct *task) | ||
4657 | { | ||
4658 | struct perf_counter_context *ctx = task->perf_counter_ctxp; | ||
4659 | struct perf_counter *counter, *tmp; | ||
4660 | |||
4661 | if (!ctx) | ||
4662 | return; | ||
4663 | |||
4664 | mutex_lock(&ctx->mutex); | ||
4665 | again: | ||
4666 | list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) { | ||
4667 | struct perf_counter *parent = counter->parent; | ||
4668 | |||
4669 | if (WARN_ON_ONCE(!parent)) | ||
4670 | continue; | ||
4671 | |||
4672 | mutex_lock(&parent->child_mutex); | ||
4673 | list_del_init(&counter->child_list); | ||
4674 | mutex_unlock(&parent->child_mutex); | ||
4675 | |||
4676 | fput(parent->filp); | ||
4677 | |||
4678 | list_del_counter(counter, ctx); | ||
4679 | free_counter(counter); | ||
4680 | } | ||
4681 | |||
4682 | if (!list_empty(&ctx->counter_list)) | ||
4683 | goto again; | ||
4684 | |||
4685 | mutex_unlock(&ctx->mutex); | ||
4686 | |||
4687 | put_ctx(ctx); | ||
4688 | } | ||
4689 | |||
4690 | /* | ||
4691 | * Initialize the perf_counter context in task_struct | ||
4692 | */ | ||
4693 | int perf_counter_init_task(struct task_struct *child) | ||
4694 | { | ||
4695 | struct perf_counter_context *child_ctx, *parent_ctx; | ||
4696 | struct perf_counter_context *cloned_ctx; | ||
4697 | struct perf_counter *counter; | ||
4698 | struct task_struct *parent = current; | ||
4699 | int inherited_all = 1; | ||
4700 | int ret = 0; | ||
4701 | |||
4702 | child->perf_counter_ctxp = NULL; | ||
4703 | |||
4704 | mutex_init(&child->perf_counter_mutex); | ||
4705 | INIT_LIST_HEAD(&child->perf_counter_list); | ||
4706 | |||
4707 | if (likely(!parent->perf_counter_ctxp)) | ||
4708 | return 0; | ||
4709 | |||
4710 | /* | ||
4711 | * This is executed from the parent task context, so inherit | ||
4712 | * counters that have been marked for cloning. | ||
4713 | * First allocate and initialize a context for the child. | ||
4714 | */ | ||
4715 | |||
4716 | child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); | ||
4717 | if (!child_ctx) | ||
4718 | return -ENOMEM; | ||
4719 | |||
4720 | __perf_counter_init_context(child_ctx, child); | ||
4721 | child->perf_counter_ctxp = child_ctx; | ||
4722 | get_task_struct(child); | ||
4723 | |||
4724 | /* | ||
4725 | * If the parent's context is a clone, pin it so it won't get | ||
4726 | * swapped under us. | ||
4727 | */ | ||
4728 | parent_ctx = perf_pin_task_context(parent); | ||
4729 | |||
4730 | /* | ||
4731 | * No need to check if parent_ctx != NULL here; since we saw | ||
4732 | * it non-NULL earlier, the only reason for it to become NULL | ||
4733 | * is if we exit, and since we're currently in the middle of | ||
4734 | * a fork we can't be exiting at the same time. | ||
4735 | */ | ||
4736 | |||
4737 | /* | ||
4738 | * Lock the parent list. No need to lock the child - not PID | ||
4739 | * hashed yet and not running, so nobody can access it. | ||
4740 | */ | ||
4741 | mutex_lock(&parent_ctx->mutex); | ||
4742 | |||
4743 | /* | ||
4744 | * We don't have to disable NMIs - we are only looking at | ||
4745 | * the list, not manipulating it: | ||
4746 | */ | ||
4747 | list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) { | ||
4748 | if (counter != counter->group_leader) | ||
4749 | continue; | ||
4750 | |||
4751 | if (!counter->attr.inherit) { | ||
4752 | inherited_all = 0; | ||
4753 | continue; | ||
4754 | } | ||
4755 | |||
4756 | ret = inherit_group(counter, parent, parent_ctx, | ||
4757 | child, child_ctx); | ||
4758 | if (ret) { | ||
4759 | inherited_all = 0; | ||
4760 | break; | ||
4761 | } | ||
4762 | } | ||
4763 | |||
4764 | if (inherited_all) { | ||
4765 | /* | ||
4766 | * Mark the child context as a clone of the parent | ||
4767 | * context, or of whatever the parent is a clone of. | ||
4768 | * Note that if the parent is a clone, it could get | ||
4769 | * uncloned at any point, but that doesn't matter | ||
4770 | * because the list of counters and the generation | ||
4771 | * count can't have changed since we took the mutex. | ||
4772 | */ | ||
4773 | cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); | ||
4774 | if (cloned_ctx) { | ||
4775 | child_ctx->parent_ctx = cloned_ctx; | ||
4776 | child_ctx->parent_gen = parent_ctx->parent_gen; | ||
4777 | } else { | ||
4778 | child_ctx->parent_ctx = parent_ctx; | ||
4779 | child_ctx->parent_gen = parent_ctx->generation; | ||
4780 | } | ||
4781 | get_ctx(child_ctx->parent_ctx); | ||
4782 | } | ||
4783 | |||
4784 | mutex_unlock(&parent_ctx->mutex); | ||
4785 | |||
4786 | perf_unpin_context(parent_ctx); | ||
4787 | |||
4788 | return ret; | ||
4789 | } | ||
4790 | |||
4791 | static void __cpuinit perf_counter_init_cpu(int cpu) | ||
4792 | { | ||
4793 | struct perf_cpu_context *cpuctx; | ||
4794 | |||
4795 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
4796 | __perf_counter_init_context(&cpuctx->ctx, NULL); | ||
4797 | |||
4798 | spin_lock(&perf_resource_lock); | ||
4799 | cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; | ||
4800 | spin_unlock(&perf_resource_lock); | ||
4801 | |||
4802 | hw_perf_counter_setup(cpu); | ||
4803 | } | ||
4804 | |||
4805 | #ifdef CONFIG_HOTPLUG_CPU | ||
4806 | static void __perf_counter_exit_cpu(void *info) | ||
4807 | { | ||
4808 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
4809 | struct perf_counter_context *ctx = &cpuctx->ctx; | ||
4810 | struct perf_counter *counter, *tmp; | ||
4811 | |||
4812 | list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) | ||
4813 | __perf_counter_remove_from_context(counter); | ||
4814 | } | ||
4815 | static void perf_counter_exit_cpu(int cpu) | ||
4816 | { | ||
4817 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
4818 | struct perf_counter_context *ctx = &cpuctx->ctx; | ||
4819 | |||
4820 | mutex_lock(&ctx->mutex); | ||
4821 | smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); | ||
4822 | mutex_unlock(&ctx->mutex); | ||
4823 | } | ||
4824 | #else | ||
4825 | static inline void perf_counter_exit_cpu(int cpu) { } | ||
4826 | #endif | ||
4827 | |||
4828 | static int __cpuinit | ||
4829 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | ||
4830 | { | ||
4831 | unsigned int cpu = (long)hcpu; | ||
4832 | |||
4833 | switch (action) { | ||
4834 | |||
4835 | case CPU_UP_PREPARE: | ||
4836 | case CPU_UP_PREPARE_FROZEN: | ||
4837 | perf_counter_init_cpu(cpu); | ||
4838 | break; | ||
4839 | |||
4840 | case CPU_ONLINE: | ||
4841 | case CPU_ONLINE_FROZEN: | ||
4842 | hw_perf_counter_setup_online(cpu); | ||
4843 | break; | ||
4844 | |||
4845 | case CPU_DOWN_PREPARE: | ||
4846 | case CPU_DOWN_PREPARE_FROZEN: | ||
4847 | perf_counter_exit_cpu(cpu); | ||
4848 | break; | ||
4849 | |||
4850 | default: | ||
4851 | break; | ||
4852 | } | ||
4853 | |||
4854 | return NOTIFY_OK; | ||
4855 | } | ||
4856 | |||
4857 | /* | ||
4858 | * This has to have a higher priority than migration_notifier in sched.c. | ||
4859 | */ | ||
4860 | static struct notifier_block __cpuinitdata perf_cpu_nb = { | ||
4861 | .notifier_call = perf_cpu_notify, | ||
4862 | .priority = 20, | ||
4863 | }; | ||
4864 | |||
4865 | void __init perf_counter_init(void) | ||
4866 | { | ||
4867 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, | ||
4868 | (void *)(long)smp_processor_id()); | ||
4869 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, | ||
4870 | (void *)(long)smp_processor_id()); | ||
4871 | register_cpu_notifier(&perf_cpu_nb); | ||
4872 | } | ||
4873 | |||
4874 | static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) | ||
4875 | { | ||
4876 | return sprintf(buf, "%d\n", perf_reserved_percpu); | ||
4877 | } | ||
4878 | |||
4879 | static ssize_t | ||
4880 | perf_set_reserve_percpu(struct sysdev_class *class, | ||
4881 | const char *buf, | ||
4882 | size_t count) | ||
4883 | { | ||
4884 | struct perf_cpu_context *cpuctx; | ||
4885 | unsigned long val; | ||
4886 | int err, cpu, mpt; | ||
4887 | |||
4888 | err = strict_strtoul(buf, 10, &val); | ||
4889 | if (err) | ||
4890 | return err; | ||
4891 | if (val > perf_max_counters) | ||
4892 | return -EINVAL; | ||
4893 | |||
4894 | spin_lock(&perf_resource_lock); | ||
4895 | perf_reserved_percpu = val; | ||
4896 | for_each_online_cpu(cpu) { | ||
4897 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
4898 | spin_lock_irq(&cpuctx->ctx.lock); | ||
4899 | mpt = min(perf_max_counters - cpuctx->ctx.nr_counters, | ||
4900 | perf_max_counters - perf_reserved_percpu); | ||
4901 | cpuctx->max_pertask = mpt; | ||
4902 | spin_unlock_irq(&cpuctx->ctx.lock); | ||
4903 | } | ||
4904 | spin_unlock(&perf_resource_lock); | ||
4905 | |||
4906 | return count; | ||
4907 | } | ||
4908 | |||
4909 | static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf) | ||
4910 | { | ||
4911 | return sprintf(buf, "%d\n", perf_overcommit); | ||
4912 | } | ||
4913 | |||
4914 | static ssize_t | ||
4915 | perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) | ||
4916 | { | ||
4917 | unsigned long val; | ||
4918 | int err; | ||
4919 | |||
4920 | err = strict_strtoul(buf, 10, &val); | ||
4921 | if (err) | ||
4922 | return err; | ||
4923 | if (val > 1) | ||
4924 | return -EINVAL; | ||
4925 | |||
4926 | spin_lock(&perf_resource_lock); | ||
4927 | perf_overcommit = val; | ||
4928 | spin_unlock(&perf_resource_lock); | ||
4929 | |||
4930 | return count; | ||
4931 | } | ||
4932 | |||
4933 | static SYSDEV_CLASS_ATTR( | ||
4934 | reserve_percpu, | ||
4935 | 0644, | ||
4936 | perf_show_reserve_percpu, | ||
4937 | perf_set_reserve_percpu | ||
4938 | ); | ||
4939 | |||
4940 | static SYSDEV_CLASS_ATTR( | ||
4941 | overcommit, | ||
4942 | 0644, | ||
4943 | perf_show_overcommit, | ||
4944 | perf_set_overcommit | ||
4945 | ); | ||
4946 | |||
4947 | static struct attribute *perfclass_attrs[] = { | ||
4948 | &attr_reserve_percpu.attr, | ||
4949 | &attr_overcommit.attr, | ||
4950 | NULL | ||
4951 | }; | ||
4952 | |||
4953 | static struct attribute_group perfclass_attr_group = { | ||
4954 | .attrs = perfclass_attrs, | ||
4955 | .name = "perf_counters", | ||
4956 | }; | ||
4957 | |||
4958 | static int __init perf_counter_sysfs_init(void) | ||
4959 | { | ||
4960 | return sysfs_create_group(&cpu_sysdev_class.kset.kobj, | ||
4961 | &perfclass_attr_group); | ||
4962 | } | ||
4963 | device_initcall(perf_counter_sysfs_init); | ||
diff --git a/kernel/perf_event.c b/kernel/perf_event.c new file mode 100644 index 000000000000..7f29643c8985 --- /dev/null +++ b/kernel/perf_event.c | |||
@@ -0,0 +1,5130 @@ | |||
1 | /* | ||
2 | * Performance events core code: | ||
3 | * | ||
4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | ||
5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | ||
6 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
7 | * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
8 | * | ||
9 | * For licensing details see kernel-base/COPYING | ||
10 | */ | ||
11 | |||
12 | #include <linux/fs.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/cpu.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/file.h> | ||
17 | #include <linux/poll.h> | ||
18 | #include <linux/sysfs.h> | ||
19 | #include <linux/dcache.h> | ||
20 | #include <linux/percpu.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <linux/vmstat.h> | ||
23 | #include <linux/vmalloc.h> | ||
24 | #include <linux/hardirq.h> | ||
25 | #include <linux/rculist.h> | ||
26 | #include <linux/uaccess.h> | ||
27 | #include <linux/syscalls.h> | ||
28 | #include <linux/anon_inodes.h> | ||
29 | #include <linux/kernel_stat.h> | ||
30 | #include <linux/perf_event.h> | ||
31 | |||
32 | #include <asm/irq_regs.h> | ||
33 | |||
34 | /* | ||
35 | * Each CPU has a list of per CPU events: | ||
36 | */ | ||
37 | DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); | ||
38 | |||
39 | int perf_max_events __read_mostly = 1; | ||
40 | static int perf_reserved_percpu __read_mostly; | ||
41 | static int perf_overcommit __read_mostly = 1; | ||
42 | |||
43 | static atomic_t nr_events __read_mostly; | ||
44 | static atomic_t nr_mmap_events __read_mostly; | ||
45 | static atomic_t nr_comm_events __read_mostly; | ||
46 | static atomic_t nr_task_events __read_mostly; | ||
47 | |||
48 | /* | ||
49 | * perf event paranoia level: | ||
50 | * -1 - not paranoid at all | ||
51 | * 0 - disallow raw tracepoint access for unpriv | ||
52 | * 1 - disallow cpu events for unpriv | ||
53 | * 2 - disallow kernel profiling for unpriv | ||
54 | */ | ||
55 | int sysctl_perf_event_paranoid __read_mostly = 1; | ||
56 | |||
57 | static inline bool perf_paranoid_tracepoint_raw(void) | ||
58 | { | ||
59 | return sysctl_perf_event_paranoid > -1; | ||
60 | } | ||
61 | |||
62 | static inline bool perf_paranoid_cpu(void) | ||
63 | { | ||
64 | return sysctl_perf_event_paranoid > 0; | ||
65 | } | ||
66 | |||
67 | static inline bool perf_paranoid_kernel(void) | ||
68 | { | ||
69 | return sysctl_perf_event_paranoid > 1; | ||
70 | } | ||
71 | |||
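A minimal sketch of how the three helpers above are meant to be consumed (the call site below is hypothetical and not part of this patch; only perf_paranoid_cpu(), capable() and CAP_SYS_ADMIN are real kernel symbols): an unprivileged request for a CPU-wide event is refused at the middle paranoia level.

static int example_cpu_event_allowed(void)
{
	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	return 0;
}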
72 | int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ | ||
73 | |||
74 | /* | ||
75 | * max perf event sample rate | ||
76 | */ | ||
77 | int sysctl_perf_event_sample_rate __read_mostly = 100000; | ||
78 | |||
79 | static atomic64_t perf_event_id; | ||
80 | |||
81 | /* | ||
82 | * Lock for (sysadmin-configurable) event reservations: | ||
83 | */ | ||
84 | static DEFINE_SPINLOCK(perf_resource_lock); | ||
85 | |||
86 | /* | ||
87 | * Architecture provided APIs - weak aliases: | ||
88 | */ | ||
89 | extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) | ||
90 | { | ||
91 | return NULL; | ||
92 | } | ||
93 | |||
94 | void __weak hw_perf_disable(void) { barrier(); } | ||
95 | void __weak hw_perf_enable(void) { barrier(); } | ||
96 | |||
97 | void __weak hw_perf_event_setup(int cpu) { barrier(); } | ||
98 | void __weak hw_perf_event_setup_online(int cpu) { barrier(); } | ||
99 | |||
100 | int __weak | ||
101 | hw_perf_group_sched_in(struct perf_event *group_leader, | ||
102 | struct perf_cpu_context *cpuctx, | ||
103 | struct perf_event_context *ctx, int cpu) | ||
104 | { | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | void __weak perf_event_print_debug(void) { } | ||
109 | |||
110 | static DEFINE_PER_CPU(int, perf_disable_count); | ||
111 | |||
112 | void __perf_disable(void) | ||
113 | { | ||
114 | __get_cpu_var(perf_disable_count)++; | ||
115 | } | ||
116 | |||
117 | bool __perf_enable(void) | ||
118 | { | ||
119 | return !--__get_cpu_var(perf_disable_count); | ||
120 | } | ||
121 | |||
122 | void perf_disable(void) | ||
123 | { | ||
124 | __perf_disable(); | ||
125 | hw_perf_disable(); | ||
126 | } | ||
127 | |||
128 | void perf_enable(void) | ||
129 | { | ||
130 | if (__perf_enable()) | ||
131 | hw_perf_enable(); | ||
132 | } | ||
133 | |||
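A usage sketch (illustrative, not taken from this patch) of how the per-CPU disable count above nests: hw_perf_enable() only runs again once the outermost perf_enable() brings the count back to zero.

	perf_disable();		/* count 0 -> 1, hw_perf_disable()       */
	perf_disable();		/* count 1 -> 2, hw_perf_disable() again */
	/* ... manipulate event state with the PMU quiesced ... */
	perf_enable();		/* count 2 -> 1, hardware stays disabled */
	perf_enable();		/* count 1 -> 0, hw_perf_enable() runs   */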
134 | static void get_ctx(struct perf_event_context *ctx) | ||
135 | { | ||
136 | WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); | ||
137 | } | ||
138 | |||
139 | static void free_ctx(struct rcu_head *head) | ||
140 | { | ||
141 | struct perf_event_context *ctx; | ||
142 | |||
143 | ctx = container_of(head, struct perf_event_context, rcu_head); | ||
144 | kfree(ctx); | ||
145 | } | ||
146 | |||
147 | static void put_ctx(struct perf_event_context *ctx) | ||
148 | { | ||
149 | if (atomic_dec_and_test(&ctx->refcount)) { | ||
150 | if (ctx->parent_ctx) | ||
151 | put_ctx(ctx->parent_ctx); | ||
152 | if (ctx->task) | ||
153 | put_task_struct(ctx->task); | ||
154 | call_rcu(&ctx->rcu_head, free_ctx); | ||
155 | } | ||
156 | } | ||
157 | |||
158 | static void unclone_ctx(struct perf_event_context *ctx) | ||
159 | { | ||
160 | if (ctx->parent_ctx) { | ||
161 | put_ctx(ctx->parent_ctx); | ||
162 | ctx->parent_ctx = NULL; | ||
163 | } | ||
164 | } | ||
165 | |||
166 | /* | ||
167 | * If we inherit events we want to return the parent event id | ||
168 | * to userspace. | ||
169 | */ | ||
170 | static u64 primary_event_id(struct perf_event *event) | ||
171 | { | ||
172 | u64 id = event->id; | ||
173 | |||
174 | if (event->parent) | ||
175 | id = event->parent->id; | ||
176 | |||
177 | return id; | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * Get the perf_event_context for a task and lock it. | ||
182 | * This has to cope with the fact that until it is locked, | ||
183 | * the context could get moved to another task. | ||
184 | */ | ||
185 | static struct perf_event_context * | ||
186 | perf_lock_task_context(struct task_struct *task, unsigned long *flags) | ||
187 | { | ||
188 | struct perf_event_context *ctx; | ||
189 | |||
190 | rcu_read_lock(); | ||
191 | retry: | ||
192 | ctx = rcu_dereference(task->perf_event_ctxp); | ||
193 | if (ctx) { | ||
194 | /* | ||
195 | * If this context is a clone of another, it might | ||
196 | * get swapped for another underneath us by | ||
197 | * perf_event_task_sched_out, though the | ||
198 | * rcu_read_lock() protects us from any context | ||
199 | * getting freed. Lock the context and check if it | ||
200 | * got swapped before we could get the lock, and retry | ||
201 | * if so. If we locked the right context, then it | ||
202 | * can't get swapped on us any more. | ||
203 | */ | ||
204 | spin_lock_irqsave(&ctx->lock, *flags); | ||
205 | if (ctx != rcu_dereference(task->perf_event_ctxp)) { | ||
206 | spin_unlock_irqrestore(&ctx->lock, *flags); | ||
207 | goto retry; | ||
208 | } | ||
209 | |||
210 | if (!atomic_inc_not_zero(&ctx->refcount)) { | ||
211 | spin_unlock_irqrestore(&ctx->lock, *flags); | ||
212 | ctx = NULL; | ||
213 | } | ||
214 | } | ||
215 | rcu_read_unlock(); | ||
216 | return ctx; | ||
217 | } | ||
218 | |||
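A hedged usage sketch for the helper above (the caller shown is illustrative): on success the context comes back with ctx->lock held and its refcount raised, so a caller that only needs a brief look releases both.

	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		/* ctx can be neither swapped nor freed while the lock is held */
		/* ... inspect ctx ... */
		spin_unlock_irqrestore(&ctx->lock, flags);
		put_ctx(ctx);	/* drop the reference taken by the lookup */
	}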
219 | /* | ||
220 | * Get the context for a task and increment its pin_count so it | ||
221 | * can't get swapped to another task. This also increments its | ||
222 | * reference count so that the context can't get freed. | ||
223 | */ | ||
224 | static struct perf_event_context *perf_pin_task_context(struct task_struct *task) | ||
225 | { | ||
226 | struct perf_event_context *ctx; | ||
227 | unsigned long flags; | ||
228 | |||
229 | ctx = perf_lock_task_context(task, &flags); | ||
230 | if (ctx) { | ||
231 | ++ctx->pin_count; | ||
232 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
233 | } | ||
234 | return ctx; | ||
235 | } | ||
236 | |||
237 | static void perf_unpin_context(struct perf_event_context *ctx) | ||
238 | { | ||
239 | unsigned long flags; | ||
240 | |||
241 | spin_lock_irqsave(&ctx->lock, flags); | ||
242 | --ctx->pin_count; | ||
243 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
244 | put_ctx(ctx); | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * Add an event to the lists for its context. | ||
249 | * Must be called with ctx->mutex and ctx->lock held. | ||
250 | */ | ||
251 | static void | ||
252 | list_add_event(struct perf_event *event, struct perf_event_context *ctx) | ||
253 | { | ||
254 | struct perf_event *group_leader = event->group_leader; | ||
255 | |||
256 | /* | ||
257 | * Depending on whether it is a standalone or sibling event, | ||
258 | * add it straight to the context's event list, or to the group | ||
259 | * leader's sibling list: | ||
260 | */ | ||
261 | if (group_leader == event) | ||
262 | list_add_tail(&event->group_entry, &ctx->group_list); | ||
263 | else { | ||
264 | list_add_tail(&event->group_entry, &group_leader->sibling_list); | ||
265 | group_leader->nr_siblings++; | ||
266 | } | ||
267 | |||
268 | list_add_rcu(&event->event_entry, &ctx->event_list); | ||
269 | ctx->nr_events++; | ||
270 | if (event->attr.inherit_stat) | ||
271 | ctx->nr_stat++; | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * Remove an event from the lists for its context. | ||
276 | * Must be called with ctx->mutex and ctx->lock held. | ||
277 | */ | ||
278 | static void | ||
279 | list_del_event(struct perf_event *event, struct perf_event_context *ctx) | ||
280 | { | ||
281 | struct perf_event *sibling, *tmp; | ||
282 | |||
283 | if (list_empty(&event->group_entry)) | ||
284 | return; | ||
285 | ctx->nr_events--; | ||
286 | if (event->attr.inherit_stat) | ||
287 | ctx->nr_stat--; | ||
288 | |||
289 | list_del_init(&event->group_entry); | ||
290 | list_del_rcu(&event->event_entry); | ||
291 | |||
292 | if (event->group_leader != event) | ||
293 | event->group_leader->nr_siblings--; | ||
294 | |||
295 | /* | ||
296 | * If this was a group event with sibling events then | ||
297 | * upgrade the siblings to singleton events by adding them | ||
298 | * to the context list directly: | ||
299 | */ | ||
300 | list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { | ||
301 | |||
302 | list_move_tail(&sibling->group_entry, &ctx->group_list); | ||
303 | sibling->group_leader = sibling; | ||
304 | } | ||
305 | } | ||
306 | |||
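Given the layout that list_add_event() establishes above (group leaders on ctx->group_list, members on their leader's sibling_list, every event on ctx->event_list), a full walk of one context looks roughly like this sketch:

	struct perf_event *leader, *sub;

	list_for_each_entry(leader, &ctx->group_list, group_entry) {
		/* each entry here is a group leader (or a singleton event) */
		list_for_each_entry(sub, &leader->sibling_list, group_entry) {
			/* each entry here is a sibling within that group */
		}
	}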
307 | static void | ||
308 | event_sched_out(struct perf_event *event, | ||
309 | struct perf_cpu_context *cpuctx, | ||
310 | struct perf_event_context *ctx) | ||
311 | { | ||
312 | if (event->state != PERF_EVENT_STATE_ACTIVE) | ||
313 | return; | ||
314 | |||
315 | event->state = PERF_EVENT_STATE_INACTIVE; | ||
316 | if (event->pending_disable) { | ||
317 | event->pending_disable = 0; | ||
318 | event->state = PERF_EVENT_STATE_OFF; | ||
319 | } | ||
320 | event->tstamp_stopped = ctx->time; | ||
321 | event->pmu->disable(event); | ||
322 | event->oncpu = -1; | ||
323 | |||
324 | if (!is_software_event(event)) | ||
325 | cpuctx->active_oncpu--; | ||
326 | ctx->nr_active--; | ||
327 | if (event->attr.exclusive || !cpuctx->active_oncpu) | ||
328 | cpuctx->exclusive = 0; | ||
329 | } | ||
330 | |||
331 | static void | ||
332 | group_sched_out(struct perf_event *group_event, | ||
333 | struct perf_cpu_context *cpuctx, | ||
334 | struct perf_event_context *ctx) | ||
335 | { | ||
336 | struct perf_event *event; | ||
337 | |||
338 | if (group_event->state != PERF_EVENT_STATE_ACTIVE) | ||
339 | return; | ||
340 | |||
341 | event_sched_out(group_event, cpuctx, ctx); | ||
342 | |||
343 | /* | ||
344 | * Schedule out siblings (if any): | ||
345 | */ | ||
346 | list_for_each_entry(event, &group_event->sibling_list, group_entry) | ||
347 | event_sched_out(event, cpuctx, ctx); | ||
348 | |||
349 | if (group_event->attr.exclusive) | ||
350 | cpuctx->exclusive = 0; | ||
351 | } | ||
352 | |||
353 | /* | ||
354 | * Cross CPU call to remove a performance event | ||
355 | * | ||
356 | * We disable the event on the hardware level first. After that we | ||
357 | * remove it from the context list. | ||
358 | */ | ||
359 | static void __perf_event_remove_from_context(void *info) | ||
360 | { | ||
361 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
362 | struct perf_event *event = info; | ||
363 | struct perf_event_context *ctx = event->ctx; | ||
364 | |||
365 | /* | ||
366 | * If this is a task context, we need to check whether it is | ||
367 | * the current task context of this cpu. If not it has been | ||
368 | * scheduled out before the smp call arrived. | ||
369 | */ | ||
370 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
371 | return; | ||
372 | |||
373 | spin_lock(&ctx->lock); | ||
374 | /* | ||
375 | * Protect the list operation against NMI by disabling the | ||
376 | * events on a global level. | ||
377 | */ | ||
378 | perf_disable(); | ||
379 | |||
380 | event_sched_out(event, cpuctx, ctx); | ||
381 | |||
382 | list_del_event(event, ctx); | ||
383 | |||
384 | if (!ctx->task) { | ||
385 | /* | ||
386 | * Allow more per task events with respect to the | ||
387 | * reservation: | ||
388 | */ | ||
389 | cpuctx->max_pertask = | ||
390 | min(perf_max_events - ctx->nr_events, | ||
391 | perf_max_events - perf_reserved_percpu); | ||
392 | } | ||
393 | |||
394 | perf_enable(); | ||
395 | spin_unlock(&ctx->lock); | ||
396 | } | ||
397 | |||
398 | |||
399 | /* | ||
400 | * Remove the event from a task's (or a CPU's) list of events. | ||
401 | * | ||
402 | * Must be called with ctx->mutex held. | ||
403 | * | ||
404 | * CPU events are removed with an smp call. For task events we only | ||
405 | * call when the task is on a CPU. | ||
406 | * | ||
407 | * If event->ctx is a cloned context, callers must make sure that | ||
408 | * every task struct that event->ctx->task could possibly point to | ||
409 | * remains valid. This is OK when called from perf_release since | ||
410 | * that only calls us on the top-level context, which can't be a clone. | ||
411 | * When called from perf_event_exit_task, it's OK because the | ||
412 | * context has been detached from its task. | ||
413 | */ | ||
414 | static void perf_event_remove_from_context(struct perf_event *event) | ||
415 | { | ||
416 | struct perf_event_context *ctx = event->ctx; | ||
417 | struct task_struct *task = ctx->task; | ||
418 | |||
419 | if (!task) { | ||
420 | /* | ||
421 | * Per cpu events are removed via an smp call and | ||
422 | * the removal is always successful. | ||
423 | */ | ||
424 | smp_call_function_single(event->cpu, | ||
425 | __perf_event_remove_from_context, | ||
426 | event, 1); | ||
427 | return; | ||
428 | } | ||
429 | |||
430 | retry: | ||
431 | task_oncpu_function_call(task, __perf_event_remove_from_context, | ||
432 | event); | ||
433 | |||
434 | spin_lock_irq(&ctx->lock); | ||
435 | /* | ||
436 | * If the context is active we need to retry the smp call. | ||
437 | */ | ||
438 | if (ctx->nr_active && !list_empty(&event->group_entry)) { | ||
439 | spin_unlock_irq(&ctx->lock); | ||
440 | goto retry; | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * The lock prevents this context from being scheduled in, so we | ||
445 | * can remove the event safely if the call above did not | ||
446 | * succeed. | ||
447 | */ | ||
448 | if (!list_empty(&event->group_entry)) { | ||
449 | list_del_event(event, ctx); | ||
450 | } | ||
451 | spin_unlock_irq(&ctx->lock); | ||
452 | } | ||
453 | |||
454 | static inline u64 perf_clock(void) | ||
455 | { | ||
456 | return cpu_clock(smp_processor_id()); | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * Update the record of the current time in a context. | ||
461 | */ | ||
462 | static void update_context_time(struct perf_event_context *ctx) | ||
463 | { | ||
464 | u64 now = perf_clock(); | ||
465 | |||
466 | ctx->time += now - ctx->timestamp; | ||
467 | ctx->timestamp = now; | ||
468 | } | ||
469 | |||
470 | /* | ||
471 | * Update the total_time_enabled and total_time_running fields for an event. | ||
472 | */ | ||
473 | static void update_event_times(struct perf_event *event) | ||
474 | { | ||
475 | struct perf_event_context *ctx = event->ctx; | ||
476 | u64 run_end; | ||
477 | |||
478 | if (event->state < PERF_EVENT_STATE_INACTIVE || | ||
479 | event->group_leader->state < PERF_EVENT_STATE_INACTIVE) | ||
480 | return; | ||
481 | |||
482 | event->total_time_enabled = ctx->time - event->tstamp_enabled; | ||
483 | |||
484 | if (event->state == PERF_EVENT_STATE_INACTIVE) | ||
485 | run_end = event->tstamp_stopped; | ||
486 | else | ||
487 | run_end = ctx->time; | ||
488 | |||
489 | event->total_time_running = run_end - event->tstamp_running; | ||
490 | } | ||
491 | |||
492 | /* | ||
493 | * Update total_time_enabled and total_time_running for all events in a group. | ||
494 | */ | ||
495 | static void update_group_times(struct perf_event *leader) | ||
496 | { | ||
497 | struct perf_event *event; | ||
498 | |||
499 | update_event_times(leader); | ||
500 | list_for_each_entry(event, &leader->sibling_list, group_entry) | ||
501 | update_event_times(event); | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * Cross CPU call to disable a performance event | ||
506 | */ | ||
507 | static void __perf_event_disable(void *info) | ||
508 | { | ||
509 | struct perf_event *event = info; | ||
510 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
511 | struct perf_event_context *ctx = event->ctx; | ||
512 | |||
513 | /* | ||
514 | * If this is a per-task event, we need to check whether this | ||
515 | * event's task is the current task on this cpu. | ||
516 | */ | ||
517 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
518 | return; | ||
519 | |||
520 | spin_lock(&ctx->lock); | ||
521 | |||
522 | /* | ||
523 | * If the event is on, turn it off. | ||
524 | * If it is in error state, leave it in error state. | ||
525 | */ | ||
526 | if (event->state >= PERF_EVENT_STATE_INACTIVE) { | ||
527 | update_context_time(ctx); | ||
528 | update_group_times(event); | ||
529 | if (event == event->group_leader) | ||
530 | group_sched_out(event, cpuctx, ctx); | ||
531 | else | ||
532 | event_sched_out(event, cpuctx, ctx); | ||
533 | event->state = PERF_EVENT_STATE_OFF; | ||
534 | } | ||
535 | |||
536 | spin_unlock(&ctx->lock); | ||
537 | } | ||
538 | |||
539 | /* | ||
540 | * Disable an event. | ||
541 | * | ||
542 | * If event->ctx is a cloned context, callers must make sure that | ||
543 | * every task struct that event->ctx->task could possibly point to | ||
544 | * remains valid. This condition is satisfied when called through | ||
545 | * perf_event_for_each_child or perf_event_for_each because they | ||
546 | * hold the top-level event's child_mutex, so any descendant that | ||
547 | * goes to exit will block in sync_child_event. | ||
548 | * When called from perf_pending_event it's OK because event->ctx | ||
549 | * is the current context on this CPU and preemption is disabled, | ||
550 | * hence we can't get into perf_event_task_sched_out for this context. | ||
551 | */ | ||
552 | static void perf_event_disable(struct perf_event *event) | ||
553 | { | ||
554 | struct perf_event_context *ctx = event->ctx; | ||
555 | struct task_struct *task = ctx->task; | ||
556 | |||
557 | if (!task) { | ||
558 | /* | ||
559 | * Disable the event on the cpu that it's on | ||
560 | */ | ||
561 | smp_call_function_single(event->cpu, __perf_event_disable, | ||
562 | event, 1); | ||
563 | return; | ||
564 | } | ||
565 | |||
566 | retry: | ||
567 | task_oncpu_function_call(task, __perf_event_disable, event); | ||
568 | |||
569 | spin_lock_irq(&ctx->lock); | ||
570 | /* | ||
571 | * If the event is still active, we need to retry the cross-call. | ||
572 | */ | ||
573 | if (event->state == PERF_EVENT_STATE_ACTIVE) { | ||
574 | spin_unlock_irq(&ctx->lock); | ||
575 | goto retry; | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * Since we have the lock this context can't be scheduled | ||
580 | * in, so we can change the state safely. | ||
581 | */ | ||
582 | if (event->state == PERF_EVENT_STATE_INACTIVE) { | ||
583 | update_group_times(event); | ||
584 | event->state = PERF_EVENT_STATE_OFF; | ||
585 | } | ||
586 | |||
587 | spin_unlock_irq(&ctx->lock); | ||
588 | } | ||
589 | |||
590 | static int | ||
591 | event_sched_in(struct perf_event *event, | ||
592 | struct perf_cpu_context *cpuctx, | ||
593 | struct perf_event_context *ctx, | ||
594 | int cpu) | ||
595 | { | ||
596 | if (event->state <= PERF_EVENT_STATE_OFF) | ||
597 | return 0; | ||
598 | |||
599 | event->state = PERF_EVENT_STATE_ACTIVE; | ||
600 | event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ | ||
601 | /* | ||
602 | * The new state must be visible before we turn it on in the hardware: | ||
603 | */ | ||
604 | smp_wmb(); | ||
605 | |||
606 | if (event->pmu->enable(event)) { | ||
607 | event->state = PERF_EVENT_STATE_INACTIVE; | ||
608 | event->oncpu = -1; | ||
609 | return -EAGAIN; | ||
610 | } | ||
611 | |||
612 | event->tstamp_running += ctx->time - event->tstamp_stopped; | ||
613 | |||
614 | if (!is_software_event(event)) | ||
615 | cpuctx->active_oncpu++; | ||
616 | ctx->nr_active++; | ||
617 | |||
618 | if (event->attr.exclusive) | ||
619 | cpuctx->exclusive = 1; | ||
620 | |||
621 | return 0; | ||
622 | } | ||
623 | |||
624 | static int | ||
625 | group_sched_in(struct perf_event *group_event, | ||
626 | struct perf_cpu_context *cpuctx, | ||
627 | struct perf_event_context *ctx, | ||
628 | int cpu) | ||
629 | { | ||
630 | struct perf_event *event, *partial_group; | ||
631 | int ret; | ||
632 | |||
633 | if (group_event->state == PERF_EVENT_STATE_OFF) | ||
634 | return 0; | ||
635 | |||
636 | ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu); | ||
637 | if (ret) | ||
638 | return ret < 0 ? ret : 0; | ||
639 | |||
640 | if (event_sched_in(group_event, cpuctx, ctx, cpu)) | ||
641 | return -EAGAIN; | ||
642 | |||
643 | /* | ||
644 | * Schedule in siblings as one group (if any): | ||
645 | */ | ||
646 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { | ||
647 | if (event_sched_in(event, cpuctx, ctx, cpu)) { | ||
648 | partial_group = event; | ||
649 | goto group_error; | ||
650 | } | ||
651 | } | ||
652 | |||
653 | return 0; | ||
654 | |||
655 | group_error: | ||
656 | /* | ||
657 | * Groups can be scheduled in as one unit only, so undo any | ||
658 | * partial group before returning: | ||
659 | */ | ||
660 | list_for_each_entry(event, &group_event->sibling_list, group_entry) { | ||
661 | if (event == partial_group) | ||
662 | break; | ||
663 | event_sched_out(event, cpuctx, ctx); | ||
664 | } | ||
665 | event_sched_out(group_event, cpuctx, ctx); | ||
666 | |||
667 | return -EAGAIN; | ||
668 | } | ||
669 | |||
670 | /* | ||
671 | * Return 1 for a group consisting entirely of software events, | ||
672 | * 0 if the group contains any hardware events. | ||
673 | */ | ||
674 | static int is_software_only_group(struct perf_event *leader) | ||
675 | { | ||
676 | struct perf_event *event; | ||
677 | |||
678 | if (!is_software_event(leader)) | ||
679 | return 0; | ||
680 | |||
681 | list_for_each_entry(event, &leader->sibling_list, group_entry) | ||
682 | if (!is_software_event(event)) | ||
683 | return 0; | ||
684 | |||
685 | return 1; | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * Work out whether we can put this event group on the CPU now. | ||
690 | */ | ||
691 | static int group_can_go_on(struct perf_event *event, | ||
692 | struct perf_cpu_context *cpuctx, | ||
693 | int can_add_hw) | ||
694 | { | ||
695 | /* | ||
696 | * Groups consisting entirely of software events can always go on. | ||
697 | */ | ||
698 | if (is_software_only_group(event)) | ||
699 | return 1; | ||
700 | /* | ||
701 | * If an exclusive group is already on, no other hardware | ||
702 | * events can go on. | ||
703 | */ | ||
704 | if (cpuctx->exclusive) | ||
705 | return 0; | ||
706 | /* | ||
707 | * If this group is exclusive and there are already | ||
708 | * events on the CPU, it can't go on. | ||
709 | */ | ||
710 | if (event->attr.exclusive && cpuctx->active_oncpu) | ||
711 | return 0; | ||
712 | /* | ||
713 | * Otherwise, try to add it if all previous groups were able | ||
714 | * to go on. | ||
715 | */ | ||
716 | return can_add_hw; | ||
717 | } | ||
718 | |||
719 | static void add_event_to_ctx(struct perf_event *event, | ||
720 | struct perf_event_context *ctx) | ||
721 | { | ||
722 | list_add_event(event, ctx); | ||
723 | event->tstamp_enabled = ctx->time; | ||
724 | event->tstamp_running = ctx->time; | ||
725 | event->tstamp_stopped = ctx->time; | ||
726 | } | ||
727 | |||
728 | /* | ||
729 | * Cross CPU call to install and enable a performance event | ||
730 | * | ||
731 | * Must be called with ctx->mutex held | ||
732 | */ | ||
733 | static void __perf_install_in_context(void *info) | ||
734 | { | ||
735 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
736 | struct perf_event *event = info; | ||
737 | struct perf_event_context *ctx = event->ctx; | ||
738 | struct perf_event *leader = event->group_leader; | ||
739 | int cpu = smp_processor_id(); | ||
740 | int err; | ||
741 | |||
742 | /* | ||
743 | * If this is a task context, we need to check whether it is | ||
744 | * the current task context of this cpu. If not it has been | ||
745 | * scheduled out before the smp call arrived. | ||
746 | * Or possibly this is the right context but it isn't | ||
747 | * on this cpu because it had no events. | ||
748 | */ | ||
749 | if (ctx->task && cpuctx->task_ctx != ctx) { | ||
750 | if (cpuctx->task_ctx || ctx->task != current) | ||
751 | return; | ||
752 | cpuctx->task_ctx = ctx; | ||
753 | } | ||
754 | |||
755 | spin_lock(&ctx->lock); | ||
756 | ctx->is_active = 1; | ||
757 | update_context_time(ctx); | ||
758 | |||
759 | /* | ||
760 | * Protect the list operation against NMI by disabling the | ||
761 | * events on a global level. NOP for non NMI based events. | ||
762 | */ | ||
763 | perf_disable(); | ||
764 | |||
765 | add_event_to_ctx(event, ctx); | ||
766 | |||
767 | /* | ||
768 | * Don't put the event on if it is disabled or if | ||
769 | * it is in a group and the group isn't on. | ||
770 | */ | ||
771 | if (event->state != PERF_EVENT_STATE_INACTIVE || | ||
772 | (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)) | ||
773 | goto unlock; | ||
774 | |||
775 | /* | ||
776 | * An exclusive event can't go on if there are already active | ||
777 | * hardware events, and no hardware event can go on if there | ||
778 | * is already an exclusive event on. | ||
779 | */ | ||
780 | if (!group_can_go_on(event, cpuctx, 1)) | ||
781 | err = -EEXIST; | ||
782 | else | ||
783 | err = event_sched_in(event, cpuctx, ctx, cpu); | ||
784 | |||
785 | if (err) { | ||
786 | /* | ||
787 | * This event couldn't go on. If it is in a group | ||
788 | * then we have to pull the whole group off. | ||
789 | * If the event group is pinned then put it in error state. | ||
790 | */ | ||
791 | if (leader != event) | ||
792 | group_sched_out(leader, cpuctx, ctx); | ||
793 | if (leader->attr.pinned) { | ||
794 | update_group_times(leader); | ||
795 | leader->state = PERF_EVENT_STATE_ERROR; | ||
796 | } | ||
797 | } | ||
798 | |||
799 | if (!err && !ctx->task && cpuctx->max_pertask) | ||
800 | cpuctx->max_pertask--; | ||
801 | |||
802 | unlock: | ||
803 | perf_enable(); | ||
804 | |||
805 | spin_unlock(&ctx->lock); | ||
806 | } | ||
807 | |||
808 | /* | ||
809 | * Attach a performance event to a context | ||
810 | * | ||
811 | * First we add the event to the list with the hardware enable bit | ||
812 | * in event->hw_config cleared. | ||
813 | * | ||
814 | * If the event is attached to a task which is on a CPU we use an smp | ||
815 | * call to enable it in the task context. The task might have been | ||
816 | * scheduled away, but we check this in the smp call again. | ||
817 | * | ||
818 | * Must be called with ctx->mutex held. | ||
819 | */ | ||
820 | static void | ||
821 | perf_install_in_context(struct perf_event_context *ctx, | ||
822 | struct perf_event *event, | ||
823 | int cpu) | ||
824 | { | ||
825 | struct task_struct *task = ctx->task; | ||
826 | |||
827 | if (!task) { | ||
828 | /* | ||
829 | * Per cpu events are installed via an smp call and | ||
830 | * the install is always successful. | ||
831 | */ | ||
832 | smp_call_function_single(cpu, __perf_install_in_context, | ||
833 | event, 1); | ||
834 | return; | ||
835 | } | ||
836 | |||
837 | retry: | ||
838 | task_oncpu_function_call(task, __perf_install_in_context, | ||
839 | event); | ||
840 | |||
841 | spin_lock_irq(&ctx->lock); | ||
842 | /* | ||
843 | * If the context is active we need to retry the smp call. | ||
844 | */ | ||
845 | if (ctx->is_active && list_empty(&event->group_entry)) { | ||
846 | spin_unlock_irq(&ctx->lock); | ||
847 | goto retry; | ||
848 | } | ||
849 | |||
850 | /* | ||
851 | * The lock prevents this context from being scheduled in, so we | ||
852 | * can add the event safely if the call above did not | ||
853 | * succeed. | ||
854 | */ | ||
855 | if (list_empty(&event->group_entry)) | ||
856 | add_event_to_ctx(event, ctx); | ||
857 | spin_unlock_irq(&ctx->lock); | ||
858 | } | ||
859 | |||
860 | /* | ||
861 | * Put an event into inactive state and update time fields. | ||
862 | * Enabling the leader of a group effectively enables all | ||
863 | * the group members that aren't explicitly disabled, so we | ||
864 | * have to update their ->tstamp_enabled also. | ||
865 | * Note: this works for group members as well as group leaders | ||
866 | * since the non-leader members' sibling_lists will be empty. | ||
867 | */ | ||
868 | static void __perf_event_mark_enabled(struct perf_event *event, | ||
869 | struct perf_event_context *ctx) | ||
870 | { | ||
871 | struct perf_event *sub; | ||
872 | |||
873 | event->state = PERF_EVENT_STATE_INACTIVE; | ||
874 | event->tstamp_enabled = ctx->time - event->total_time_enabled; | ||
875 | list_for_each_entry(sub, &event->sibling_list, group_entry) | ||
876 | if (sub->state >= PERF_EVENT_STATE_INACTIVE) | ||
877 | sub->tstamp_enabled = | ||
878 | ctx->time - sub->total_time_enabled; | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * Cross CPU call to enable a performance event | ||
883 | */ | ||
884 | static void __perf_event_enable(void *info) | ||
885 | { | ||
886 | struct perf_event *event = info; | ||
887 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
888 | struct perf_event_context *ctx = event->ctx; | ||
889 | struct perf_event *leader = event->group_leader; | ||
890 | int err; | ||
891 | |||
892 | /* | ||
893 | * If this is a per-task event, we need to check whether this | ||
894 | * event's task is the current task on this cpu. | ||
895 | */ | ||
896 | if (ctx->task && cpuctx->task_ctx != ctx) { | ||
897 | if (cpuctx->task_ctx || ctx->task != current) | ||
898 | return; | ||
899 | cpuctx->task_ctx = ctx; | ||
900 | } | ||
901 | |||
902 | spin_lock(&ctx->lock); | ||
903 | ctx->is_active = 1; | ||
904 | update_context_time(ctx); | ||
905 | |||
906 | if (event->state >= PERF_EVENT_STATE_INACTIVE) | ||
907 | goto unlock; | ||
908 | __perf_event_mark_enabled(event, ctx); | ||
909 | |||
910 | /* | ||
911 | * If the event is in a group and isn't the group leader, | ||
912 | * then don't put it on unless the group is on. | ||
913 | */ | ||
914 | if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) | ||
915 | goto unlock; | ||
916 | |||
917 | if (!group_can_go_on(event, cpuctx, 1)) { | ||
918 | err = -EEXIST; | ||
919 | } else { | ||
920 | perf_disable(); | ||
921 | if (event == leader) | ||
922 | err = group_sched_in(event, cpuctx, ctx, | ||
923 | smp_processor_id()); | ||
924 | else | ||
925 | err = event_sched_in(event, cpuctx, ctx, | ||
926 | smp_processor_id()); | ||
927 | perf_enable(); | ||
928 | } | ||
929 | |||
930 | if (err) { | ||
931 | /* | ||
932 | * If this event can't go on and it's part of a | ||
933 | * group, then the whole group has to come off. | ||
934 | */ | ||
935 | if (leader != event) | ||
936 | group_sched_out(leader, cpuctx, ctx); | ||
937 | if (leader->attr.pinned) { | ||
938 | update_group_times(leader); | ||
939 | leader->state = PERF_EVENT_STATE_ERROR; | ||
940 | } | ||
941 | } | ||
942 | |||
943 | unlock: | ||
944 | spin_unlock(&ctx->lock); | ||
945 | } | ||
946 | |||
947 | /* | ||
948 | * Enable an event. | ||
949 | * | ||
950 | * If event->ctx is a cloned context, callers must make sure that | ||
951 | * every task struct that event->ctx->task could possibly point to | ||
952 | * remains valid. This condition is satisfied when called through | ||
953 | * perf_event_for_each_child or perf_event_for_each as described | ||
954 | * for perf_event_disable. | ||
955 | */ | ||
956 | static void perf_event_enable(struct perf_event *event) | ||
957 | { | ||
958 | struct perf_event_context *ctx = event->ctx; | ||
959 | struct task_struct *task = ctx->task; | ||
960 | |||
961 | if (!task) { | ||
962 | /* | ||
963 | * Enable the event on the cpu that it's on | ||
964 | */ | ||
965 | smp_call_function_single(event->cpu, __perf_event_enable, | ||
966 | event, 1); | ||
967 | return; | ||
968 | } | ||
969 | |||
970 | spin_lock_irq(&ctx->lock); | ||
971 | if (event->state >= PERF_EVENT_STATE_INACTIVE) | ||
972 | goto out; | ||
973 | |||
974 | /* | ||
975 | * If the event is in error state, clear that first. | ||
976 | * That way, if we see the event in error state below, we | ||
977 | * know that it has gone back into error state, as distinct | ||
978 | * from the task having been scheduled away before the | ||
979 | * cross-call arrived. | ||
980 | */ | ||
981 | if (event->state == PERF_EVENT_STATE_ERROR) | ||
982 | event->state = PERF_EVENT_STATE_OFF; | ||
983 | |||
984 | retry: | ||
985 | spin_unlock_irq(&ctx->lock); | ||
986 | task_oncpu_function_call(task, __perf_event_enable, event); | ||
987 | |||
988 | spin_lock_irq(&ctx->lock); | ||
989 | |||
990 | /* | ||
991 | * If the context is active and the event is still off, | ||
992 | * we need to retry the cross-call. | ||
993 | */ | ||
994 | if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) | ||
995 | goto retry; | ||
996 | |||
997 | /* | ||
998 | * Since we have the lock this context can't be scheduled | ||
999 | * in, so we can change the state safely. | ||
1000 | */ | ||
1001 | if (event->state == PERF_EVENT_STATE_OFF) | ||
1002 | __perf_event_mark_enabled(event, ctx); | ||
1003 | |||
1004 | out: | ||
1005 | spin_unlock_irq(&ctx->lock); | ||
1006 | } | ||
1007 | |||
1008 | static int perf_event_refresh(struct perf_event *event, int refresh) | ||
1009 | { | ||
1010 | /* | ||
1011 | * not supported on inherited events | ||
1012 | */ | ||
1013 | if (event->attr.inherit) | ||
1014 | return -EINVAL; | ||
1015 | |||
1016 | atomic_add(refresh, &event->event_limit); | ||
1017 | perf_event_enable(event); | ||
1018 | |||
1019 | return 0; | ||
1020 | } | ||
1021 | |||
1022 | void __perf_event_sched_out(struct perf_event_context *ctx, | ||
1023 | struct perf_cpu_context *cpuctx) | ||
1024 | { | ||
1025 | struct perf_event *event; | ||
1026 | |||
1027 | spin_lock(&ctx->lock); | ||
1028 | ctx->is_active = 0; | ||
1029 | if (likely(!ctx->nr_events)) | ||
1030 | goto out; | ||
1031 | update_context_time(ctx); | ||
1032 | |||
1033 | perf_disable(); | ||
1034 | if (ctx->nr_active) | ||
1035 | list_for_each_entry(event, &ctx->group_list, group_entry) | ||
1036 | group_sched_out(event, cpuctx, ctx); | ||
1037 | |||
1038 | perf_enable(); | ||
1039 | out: | ||
1040 | spin_unlock(&ctx->lock); | ||
1041 | } | ||
1042 | |||
1043 | /* | ||
1044 | * Test whether two contexts are equivalent, i.e. whether they | ||
1045 | * have both been cloned from the same version of the same context | ||
1046 | * and they both have the same number of enabled events. | ||
1047 | * If the number of enabled events is the same, then the set | ||
1048 | * of enabled events should be the same, because these are both | ||
1049 | * inherited contexts, therefore we can't access individual events | ||
1050 | * in them directly with an fd; we can only enable/disable all | ||
1051 | * events via prctl, or enable/disable all events in a family | ||
1052 | * via ioctl, which will have the same effect on both contexts. | ||
1053 | */ | ||
1054 | static int context_equiv(struct perf_event_context *ctx1, | ||
1055 | struct perf_event_context *ctx2) | ||
1056 | { | ||
1057 | return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx | ||
1058 | && ctx1->parent_gen == ctx2->parent_gen | ||
1059 | && !ctx1->pin_count && !ctx2->pin_count; | ||
1060 | } | ||
1061 | |||
1062 | static void __perf_event_read(void *event); | ||
1063 | |||
1064 | static void __perf_event_sync_stat(struct perf_event *event, | ||
1065 | struct perf_event *next_event) | ||
1066 | { | ||
1067 | u64 value; | ||
1068 | |||
1069 | if (!event->attr.inherit_stat) | ||
1070 | return; | ||
1071 | |||
1072 | /* | ||
1073 | * Update the event value. We cannot use perf_event_read() | ||
1074 | * because we're in the middle of a context switch and have IRQs | ||
1075 | * disabled, which upsets smp_call_function_single(); however, | ||
1076 | * we know the event must be on the current CPU, so we don't | ||
1077 | * need to use it. | ||
1078 | */ | ||
1079 | switch (event->state) { | ||
1080 | case PERF_EVENT_STATE_ACTIVE: | ||
1081 | __perf_event_read(event); | ||
1082 | break; | ||
1083 | |||
1084 | case PERF_EVENT_STATE_INACTIVE: | ||
1085 | update_event_times(event); | ||
1086 | break; | ||
1087 | |||
1088 | default: | ||
1089 | break; | ||
1090 | } | ||
1091 | |||
1092 | /* | ||
1093 | * In order to keep per-task stats reliable we need to flip the event | ||
1094 | * values when we flip the contexts. | ||
1095 | */ | ||
1096 | value = atomic64_read(&next_event->count); | ||
1097 | value = atomic64_xchg(&event->count, value); | ||
1098 | atomic64_set(&next_event->count, value); | ||
1099 | |||
1100 | swap(event->total_time_enabled, next_event->total_time_enabled); | ||
1101 | swap(event->total_time_running, next_event->total_time_running); | ||
1102 | |||
1103 | /* | ||
1104 | * Since we swizzled the values, update the user visible data too. | ||
1105 | */ | ||
1106 | perf_event_update_userpage(event); | ||
1107 | perf_event_update_userpage(next_event); | ||
1108 | } | ||
1109 | |||
1110 | #define list_next_entry(pos, member) \ | ||
1111 | list_entry(pos->member.next, typeof(*pos), member) | ||
1112 | |||
1113 | static void perf_event_sync_stat(struct perf_event_context *ctx, | ||
1114 | struct perf_event_context *next_ctx) | ||
1115 | { | ||
1116 | struct perf_event *event, *next_event; | ||
1117 | |||
1118 | if (!ctx->nr_stat) | ||
1119 | return; | ||
1120 | |||
1121 | event = list_first_entry(&ctx->event_list, | ||
1122 | struct perf_event, event_entry); | ||
1123 | |||
1124 | next_event = list_first_entry(&next_ctx->event_list, | ||
1125 | struct perf_event, event_entry); | ||
1126 | |||
1127 | while (&event->event_entry != &ctx->event_list && | ||
1128 | &next_event->event_entry != &next_ctx->event_list) { | ||
1129 | |||
1130 | __perf_event_sync_stat(event, next_event); | ||
1131 | |||
1132 | event = list_next_entry(event, event_entry); | ||
1133 | next_event = list_next_entry(next_event, event_entry); | ||
1134 | } | ||
1135 | } | ||
1136 | |||
1137 | /* | ||
1138 | * Called from scheduler to remove the events of the current task, | ||
1139 | * with interrupts disabled. | ||
1140 | * | ||
1141 | * We stop each event and update the event value in event->count. | ||
1142 | * | ||
1143 | * This does not protect us against NMI, but disable() | ||
1144 | * sets the disabled bit in the control field of event _before_ | ||
1145 | * accessing the event control register. If an NMI hits, then it will | ||
1146 | * not restart the event. | ||
1147 | */ | ||
1148 | void perf_event_task_sched_out(struct task_struct *task, | ||
1149 | struct task_struct *next, int cpu) | ||
1150 | { | ||
1151 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1152 | struct perf_event_context *ctx = task->perf_event_ctxp; | ||
1153 | struct perf_event_context *next_ctx; | ||
1154 | struct perf_event_context *parent; | ||
1155 | struct pt_regs *regs; | ||
1156 | int do_switch = 1; | ||
1157 | |||
1158 | regs = task_pt_regs(task); | ||
1159 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); | ||
1160 | |||
1161 | if (likely(!ctx || !cpuctx->task_ctx)) | ||
1162 | return; | ||
1163 | |||
1164 | update_context_time(ctx); | ||
1165 | |||
1166 | rcu_read_lock(); | ||
1167 | parent = rcu_dereference(ctx->parent_ctx); | ||
1168 | next_ctx = next->perf_event_ctxp; | ||
1169 | if (parent && next_ctx && | ||
1170 | rcu_dereference(next_ctx->parent_ctx) == parent) { | ||
1171 | /* | ||
1172 | * Looks like the two contexts are clones, so we might be | ||
1173 | * able to optimize the context switch. We lock both | ||
1174 | * contexts and check that they are clones under the | ||
1175 | * lock (including re-checking that neither has been | ||
1176 | * uncloned in the meantime). It doesn't matter which | ||
1177 | * order we take the locks because no other cpu could | ||
1178 | * be trying to lock both of these tasks. | ||
1179 | */ | ||
1180 | spin_lock(&ctx->lock); | ||
1181 | spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); | ||
1182 | if (context_equiv(ctx, next_ctx)) { | ||
1183 | /* | ||
1184 | * XXX do we need a memory barrier of sorts | ||
1185 | * wrt to rcu_dereference() of perf_event_ctxp | ||
1186 | */ | ||
1187 | task->perf_event_ctxp = next_ctx; | ||
1188 | next->perf_event_ctxp = ctx; | ||
1189 | ctx->task = next; | ||
1190 | next_ctx->task = task; | ||
1191 | do_switch = 0; | ||
1192 | |||
1193 | perf_event_sync_stat(ctx, next_ctx); | ||
1194 | } | ||
1195 | spin_unlock(&next_ctx->lock); | ||
1196 | spin_unlock(&ctx->lock); | ||
1197 | } | ||
1198 | rcu_read_unlock(); | ||
1199 | |||
1200 | if (do_switch) { | ||
1201 | __perf_event_sched_out(ctx, cpuctx); | ||
1202 | cpuctx->task_ctx = NULL; | ||
1203 | } | ||
1204 | } | ||
1205 | |||
1206 | /* | ||
1207 | * Called with IRQs disabled | ||
1208 | */ | ||
1209 | static void __perf_event_task_sched_out(struct perf_event_context *ctx) | ||
1210 | { | ||
1211 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
1212 | |||
1213 | if (!cpuctx->task_ctx) | ||
1214 | return; | ||
1215 | |||
1216 | if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) | ||
1217 | return; | ||
1218 | |||
1219 | __perf_event_sched_out(ctx, cpuctx); | ||
1220 | cpuctx->task_ctx = NULL; | ||
1221 | } | ||
1222 | |||
1223 | /* | ||
1224 | * Called with IRQs disabled | ||
1225 | */ | ||
1226 | static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx) | ||
1227 | { | ||
1228 | __perf_event_sched_out(&cpuctx->ctx, cpuctx); | ||
1229 | } | ||
1230 | |||
1231 | static void | ||
1232 | __perf_event_sched_in(struct perf_event_context *ctx, | ||
1233 | struct perf_cpu_context *cpuctx, int cpu) | ||
1234 | { | ||
1235 | struct perf_event *event; | ||
1236 | int can_add_hw = 1; | ||
1237 | |||
1238 | spin_lock(&ctx->lock); | ||
1239 | ctx->is_active = 1; | ||
1240 | if (likely(!ctx->nr_events)) | ||
1241 | goto out; | ||
1242 | |||
1243 | ctx->timestamp = perf_clock(); | ||
1244 | |||
1245 | perf_disable(); | ||
1246 | |||
1247 | /* | ||
1248 | * First go through the list and put on any pinned groups | ||
1249 | * in order to give them the best chance of going on. | ||
1250 | */ | ||
1251 | list_for_each_entry(event, &ctx->group_list, group_entry) { | ||
1252 | if (event->state <= PERF_EVENT_STATE_OFF || | ||
1253 | !event->attr.pinned) | ||
1254 | continue; | ||
1255 | if (event->cpu != -1 && event->cpu != cpu) | ||
1256 | continue; | ||
1257 | |||
1258 | if (group_can_go_on(event, cpuctx, 1)) | ||
1259 | group_sched_in(event, cpuctx, ctx, cpu); | ||
1260 | |||
1261 | /* | ||
1262 | * If this pinned group hasn't been scheduled, | ||
1263 | * put it in error state. | ||
1264 | */ | ||
1265 | if (event->state == PERF_EVENT_STATE_INACTIVE) { | ||
1266 | update_group_times(event); | ||
1267 | event->state = PERF_EVENT_STATE_ERROR; | ||
1268 | } | ||
1269 | } | ||
1270 | |||
1271 | list_for_each_entry(event, &ctx->group_list, group_entry) { | ||
1272 | /* | ||
1273 | * Ignore events in OFF or ERROR state, and | ||
1274 | * ignore pinned events since we did them already. | ||
1275 | */ | ||
1276 | if (event->state <= PERF_EVENT_STATE_OFF || | ||
1277 | event->attr.pinned) | ||
1278 | continue; | ||
1279 | |||
1280 | /* | ||
1281 | * Listen to the 'cpu' scheduling filter constraint | ||
1282 | * of events: | ||
1283 | */ | ||
1284 | if (event->cpu != -1 && event->cpu != cpu) | ||
1285 | continue; | ||
1286 | |||
1287 | if (group_can_go_on(event, cpuctx, can_add_hw)) | ||
1288 | if (group_sched_in(event, cpuctx, ctx, cpu)) | ||
1289 | can_add_hw = 0; | ||
1290 | } | ||
1291 | perf_enable(); | ||
1292 | out: | ||
1293 | spin_unlock(&ctx->lock); | ||
1294 | } | ||
1295 | |||
1296 | /* | ||
1297 | * Called from scheduler to add the events of the current task | ||
1298 | * with interrupts disabled. | ||
1299 | * | ||
1300 | * We restore the event value and then enable it. | ||
1301 | * | ||
1302 | * This does not protect us against NMI, but enable() | ||
1303 | * sets the enabled bit in the control field of event _before_ | ||
1304 | * accessing the event control register. If an NMI hits, then it will | ||
1305 | * keep the event running. | ||
1306 | */ | ||
1307 | void perf_event_task_sched_in(struct task_struct *task, int cpu) | ||
1308 | { | ||
1309 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1310 | struct perf_event_context *ctx = task->perf_event_ctxp; | ||
1311 | |||
1312 | if (likely(!ctx)) | ||
1313 | return; | ||
1314 | if (cpuctx->task_ctx == ctx) | ||
1315 | return; | ||
1316 | __perf_event_sched_in(ctx, cpuctx, cpu); | ||
1317 | cpuctx->task_ctx = ctx; | ||
1318 | } | ||
1319 | |||
1320 | static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) | ||
1321 | { | ||
1322 | struct perf_event_context *ctx = &cpuctx->ctx; | ||
1323 | |||
1324 | __perf_event_sched_in(ctx, cpuctx, cpu); | ||
1325 | } | ||
1326 | |||
1327 | #define MAX_INTERRUPTS (~0ULL) | ||
1328 | |||
1329 | static void perf_log_throttle(struct perf_event *event, int enable); | ||
1330 | |||
1331 | static void perf_adjust_period(struct perf_event *event, u64 events) | ||
1332 | { | ||
1333 | struct hw_perf_event *hwc = &event->hw; | ||
1334 | u64 period, sample_period; | ||
1335 | s64 delta; | ||
1336 | |||
1337 | events *= hwc->sample_period; | ||
1338 | period = div64_u64(events, event->attr.sample_freq); | ||
1339 | |||
1340 | delta = (s64)(period - hwc->sample_period); | ||
1341 | delta = (delta + 7) / 8; /* low pass filter */ | ||
1342 | |||
1343 | sample_period = hwc->sample_period + delta; | ||
1344 | |||
1345 | if (!sample_period) | ||
1346 | sample_period = 1; | ||
1347 | |||
1348 | hwc->sample_period = sample_period; | ||
1349 | } | ||
1350 | |||
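A worked example of the adjustment above, with illustrative numbers only: for hwc->sample_period = 10000, attr.sample_freq = 1000 Hz and an event that fired roughly 2000 times over the last second,

	/*
	 * events        = 2000 * 10000              = 20,000,000
	 * period        = 20,000,000 / 1000         = 20,000
	 * delta         = (20,000 - 10,000 + 7) / 8 = 1,250
	 * sample_period = 10,000 + 1,250            = 11,250
	 *
	 * i.e. the period moves 1/8th of the way towards the value that
	 * would produce the requested 1000 samples/sec, rather than
	 * jumping straight to it.
	 */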
1351 | static void perf_ctx_adjust_freq(struct perf_event_context *ctx) | ||
1352 | { | ||
1353 | struct perf_event *event; | ||
1354 | struct hw_perf_event *hwc; | ||
1355 | u64 interrupts, freq; | ||
1356 | |||
1357 | spin_lock(&ctx->lock); | ||
1358 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
1359 | if (event->state != PERF_EVENT_STATE_ACTIVE) | ||
1360 | continue; | ||
1361 | |||
1362 | hwc = &event->hw; | ||
1363 | |||
1364 | interrupts = hwc->interrupts; | ||
1365 | hwc->interrupts = 0; | ||
1366 | |||
1367 | /* | ||
1368 | * unthrottle events on the tick | ||
1369 | */ | ||
1370 | if (interrupts == MAX_INTERRUPTS) { | ||
1371 | perf_log_throttle(event, 1); | ||
1372 | event->pmu->unthrottle(event); | ||
1373 | interrupts = 2*sysctl_perf_event_sample_rate/HZ; | ||
1374 | } | ||
1375 | |||
1376 | if (!event->attr.freq || !event->attr.sample_freq) | ||
1377 | continue; | ||
1378 | |||
1379 | /* | ||
1380 | * if the specified freq < HZ then we need to skip ticks | ||
1381 | */ | ||
1382 | if (event->attr.sample_freq < HZ) { | ||
1383 | freq = event->attr.sample_freq; | ||
1384 | |||
1385 | hwc->freq_count += freq; | ||
1386 | hwc->freq_interrupts += interrupts; | ||
1387 | |||
1388 | if (hwc->freq_count < HZ) | ||
1389 | continue; | ||
1390 | |||
1391 | interrupts = hwc->freq_interrupts; | ||
1392 | hwc->freq_interrupts = 0; | ||
1393 | hwc->freq_count -= HZ; | ||
1394 | } else | ||
1395 | freq = HZ; | ||
1396 | |||
1397 | perf_adjust_period(event, freq * interrupts); | ||
1398 | |||
1399 | /* | ||
1400 | * In order to avoid being stalled by an (accidental) huge | ||
1401 | * sample period, force reset the sample period if we didn't | ||
1402 | * get any events in this freq period. | ||
1403 | */ | ||
1404 | if (!interrupts) { | ||
1405 | perf_disable(); | ||
1406 | event->pmu->disable(event); | ||
1407 | atomic64_set(&hwc->period_left, 0); | ||
1408 | event->pmu->enable(event); | ||
1409 | perf_enable(); | ||
1410 | } | ||
1411 | } | ||
1412 | spin_unlock(&ctx->lock); | ||
1413 | } | ||
1414 | |||
1415 | /* | ||
1416 | * Round-robin a context's events: | ||
1417 | */ | ||
1418 | static void rotate_ctx(struct perf_event_context *ctx) | ||
1419 | { | ||
1420 | struct perf_event *event; | ||
1421 | |||
1422 | if (!ctx->nr_events) | ||
1423 | return; | ||
1424 | |||
1425 | spin_lock(&ctx->lock); | ||
1426 | /* | ||
1427 | * Rotate the first entry last (works just fine for group events too): | ||
1428 | */ | ||
1429 | perf_disable(); | ||
1430 | list_for_each_entry(event, &ctx->group_list, group_entry) { | ||
1431 | list_move_tail(&event->group_entry, &ctx->group_list); | ||
1432 | break; | ||
1433 | } | ||
1434 | perf_enable(); | ||
1435 | |||
1436 | spin_unlock(&ctx->lock); | ||
1437 | } | ||
1438 | |||
1439 | void perf_event_task_tick(struct task_struct *curr, int cpu) | ||
1440 | { | ||
1441 | struct perf_cpu_context *cpuctx; | ||
1442 | struct perf_event_context *ctx; | ||
1443 | |||
1444 | if (!atomic_read(&nr_events)) | ||
1445 | return; | ||
1446 | |||
1447 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1448 | ctx = curr->perf_event_ctxp; | ||
1449 | |||
1450 | perf_ctx_adjust_freq(&cpuctx->ctx); | ||
1451 | if (ctx) | ||
1452 | perf_ctx_adjust_freq(ctx); | ||
1453 | |||
1454 | perf_event_cpu_sched_out(cpuctx); | ||
1455 | if (ctx) | ||
1456 | __perf_event_task_sched_out(ctx); | ||
1457 | |||
1458 | rotate_ctx(&cpuctx->ctx); | ||
1459 | if (ctx) | ||
1460 | rotate_ctx(ctx); | ||
1461 | |||
1462 | perf_event_cpu_sched_in(cpuctx, cpu); | ||
1463 | if (ctx) | ||
1464 | perf_event_task_sched_in(curr, cpu); | ||
1465 | } | ||
1466 | |||
1467 | /* | ||
1468 | * Enable all of a task's events that have been marked enable-on-exec. | ||
1469 | * This expects task == current. | ||
1470 | */ | ||
1471 | static void perf_event_enable_on_exec(struct task_struct *task) | ||
1472 | { | ||
1473 | struct perf_event_context *ctx; | ||
1474 | struct perf_event *event; | ||
1475 | unsigned long flags; | ||
1476 | int enabled = 0; | ||
1477 | |||
1478 | local_irq_save(flags); | ||
1479 | ctx = task->perf_event_ctxp; | ||
1480 | if (!ctx || !ctx->nr_events) | ||
1481 | goto out; | ||
1482 | |||
1483 | __perf_event_task_sched_out(ctx); | ||
1484 | |||
1485 | spin_lock(&ctx->lock); | ||
1486 | |||
1487 | list_for_each_entry(event, &ctx->group_list, group_entry) { | ||
1488 | if (!event->attr.enable_on_exec) | ||
1489 | continue; | ||
1490 | event->attr.enable_on_exec = 0; | ||
1491 | if (event->state >= PERF_EVENT_STATE_INACTIVE) | ||
1492 | continue; | ||
1493 | __perf_event_mark_enabled(event, ctx); | ||
1494 | enabled = 1; | ||
1495 | } | ||
1496 | |||
1497 | /* | ||
1498 | * Unclone this context if we enabled any event. | ||
1499 | */ | ||
1500 | if (enabled) | ||
1501 | unclone_ctx(ctx); | ||
1502 | |||
1503 | spin_unlock(&ctx->lock); | ||
1504 | |||
1505 | perf_event_task_sched_in(task, smp_processor_id()); | ||
1506 | out: | ||
1507 | local_irq_restore(flags); | ||
1508 | } | ||
1509 | |||
1510 | /* | ||
1511 | * Cross CPU call to read the hardware event | ||
1512 | */ | ||
1513 | static void __perf_event_read(void *info) | ||
1514 | { | ||
1515 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
1516 | struct perf_event *event = info; | ||
1517 | struct perf_event_context *ctx = event->ctx; | ||
1518 | unsigned long flags; | ||
1519 | |||
1520 | /* | ||
1521 | * If this is a task context, we need to check whether it is | ||
1522 | * the current task context of this cpu. If not it has been | ||
1523 | * scheduled out before the smp call arrived. In that case | ||
1524 | * event->count would have been updated to a recent sample | ||
1525 | * when the event was scheduled out. | ||
1526 | */ | ||
1527 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
1528 | return; | ||
1529 | |||
1530 | local_irq_save(flags); | ||
1531 | if (ctx->is_active) | ||
1532 | update_context_time(ctx); | ||
1533 | event->pmu->read(event); | ||
1534 | update_event_times(event); | ||
1535 | local_irq_restore(flags); | ||
1536 | } | ||
1537 | |||
1538 | static u64 perf_event_read(struct perf_event *event) | ||
1539 | { | ||
1540 | /* | ||
1541 | * If event is enabled and currently active on a CPU, update the | ||
1542 | * value in the event structure: | ||
1543 | */ | ||
1544 | if (event->state == PERF_EVENT_STATE_ACTIVE) { | ||
1545 | smp_call_function_single(event->oncpu, | ||
1546 | __perf_event_read, event, 1); | ||
1547 | } else if (event->state == PERF_EVENT_STATE_INACTIVE) { | ||
1548 | update_event_times(event); | ||
1549 | } | ||
1550 | |||
1551 | return atomic64_read(&event->count); | ||
1552 | } | ||
1553 | |||
1554 | /* | ||
1555 | * Initialize the perf_event context in a task_struct: | ||
1556 | */ | ||
1557 | static void | ||
1558 | __perf_event_init_context(struct perf_event_context *ctx, | ||
1559 | struct task_struct *task) | ||
1560 | { | ||
1561 | memset(ctx, 0, sizeof(*ctx)); | ||
1562 | spin_lock_init(&ctx->lock); | ||
1563 | mutex_init(&ctx->mutex); | ||
1564 | INIT_LIST_HEAD(&ctx->group_list); | ||
1565 | INIT_LIST_HEAD(&ctx->event_list); | ||
1566 | atomic_set(&ctx->refcount, 1); | ||
1567 | ctx->task = task; | ||
1568 | } | ||
1569 | |||
1570 | static struct perf_event_context *find_get_context(pid_t pid, int cpu) | ||
1571 | { | ||
1572 | struct perf_event_context *ctx; | ||
1573 | struct perf_cpu_context *cpuctx; | ||
1574 | struct task_struct *task; | ||
1575 | unsigned long flags; | ||
1576 | int err; | ||
1577 | |||
1578 | /* | ||
1579 | * If cpu is not a wildcard then this is a percpu event: | ||
1580 | */ | ||
1581 | if (cpu != -1) { | ||
1582 | /* Must be root to operate on a CPU event: */ | ||
1583 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | ||
1584 | return ERR_PTR(-EACCES); | ||
1585 | |||
1586 | if (cpu < 0 || cpu > num_possible_cpus()) | ||
1587 | return ERR_PTR(-EINVAL); | ||
1588 | |||
1589 | /* | ||
1590 | * We could be clever and allow attaching an event to an | ||
1591 | * offline CPU and activate it when the CPU comes up, but | ||
1592 | * that's for later. | ||
1593 | */ | ||
1594 | if (!cpu_isset(cpu, cpu_online_map)) | ||
1595 | return ERR_PTR(-ENODEV); | ||
1596 | |||
1597 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1598 | ctx = &cpuctx->ctx; | ||
1599 | get_ctx(ctx); | ||
1600 | |||
1601 | return ctx; | ||
1602 | } | ||
1603 | |||
1604 | rcu_read_lock(); | ||
1605 | if (!pid) | ||
1606 | task = current; | ||
1607 | else | ||
1608 | task = find_task_by_vpid(pid); | ||
1609 | if (task) | ||
1610 | get_task_struct(task); | ||
1611 | rcu_read_unlock(); | ||
1612 | |||
1613 | if (!task) | ||
1614 | return ERR_PTR(-ESRCH); | ||
1615 | |||
1616 | /* | ||
1617 | * Can't attach events to a dying task. | ||
1618 | */ | ||
1619 | err = -ESRCH; | ||
1620 | if (task->flags & PF_EXITING) | ||
1621 | goto errout; | ||
1622 | |||
1623 | /* Reuse ptrace permission checks for now. */ | ||
1624 | err = -EACCES; | ||
1625 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) | ||
1626 | goto errout; | ||
1627 | |||
1628 | retry: | ||
1629 | ctx = perf_lock_task_context(task, &flags); | ||
1630 | if (ctx) { | ||
1631 | unclone_ctx(ctx); | ||
1632 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
1633 | } | ||
1634 | |||
1635 | if (!ctx) { | ||
1636 | ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL); | ||
1637 | err = -ENOMEM; | ||
1638 | if (!ctx) | ||
1639 | goto errout; | ||
1640 | __perf_event_init_context(ctx, task); | ||
1641 | get_ctx(ctx); | ||
1642 | if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) { | ||
1643 | /* | ||
1644 | * We raced with some other task; use | ||
1645 | * the context they set. | ||
1646 | */ | ||
1647 | kfree(ctx); | ||
1648 | goto retry; | ||
1649 | } | ||
1650 | get_task_struct(task); | ||
1651 | } | ||
1652 | |||
1653 | put_task_struct(task); | ||
1654 | return ctx; | ||
1655 | |||
1656 | errout: | ||
1657 | put_task_struct(task); | ||
1658 | return ERR_PTR(err); | ||
1659 | } | ||
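Installing the freshly allocated context is lock-free: publish it with cmpxchg(), and whoever loses a concurrent race frees its copy and retries with the winner's context. A minimal user-space analogue of that allocate/publish/retry shape, using C11 atomics instead of the kernel primitives (struct ctx, task_ctx and get_context() are illustrative names only):

```c
#include <stdatomic.h>
#include <stdlib.h>

struct ctx {
	int refcount;
};

static _Atomic(struct ctx *) task_ctx;	/* plays the role of task->perf_event_ctxp */

struct ctx *get_context(void)
{
	struct ctx *ctx, *expected;

	for (;;) {
		ctx = atomic_load(&task_ctx);
		if (ctx)
			return ctx;		/* someone already installed one */

		ctx = calloc(1, sizeof(*ctx));
		if (!ctx)
			return NULL;
		ctx->refcount = 1;

		expected = NULL;
		if (atomic_compare_exchange_strong(&task_ctx, &expected, ctx))
			return ctx;		/* we won the race */

		free(ctx);			/* we lost: drop ours, retry with theirs */
	}
}

int main(void)
{
	return get_context() ? 0 : 1;
}
```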
1660 | |||
1661 | static void free_event_rcu(struct rcu_head *head) | ||
1662 | { | ||
1663 | struct perf_event *event; | ||
1664 | |||
1665 | event = container_of(head, struct perf_event, rcu_head); | ||
1666 | if (event->ns) | ||
1667 | put_pid_ns(event->ns); | ||
1668 | kfree(event); | ||
1669 | } | ||
1670 | |||
1671 | static void perf_pending_sync(struct perf_event *event); | ||
1672 | |||
1673 | static void free_event(struct perf_event *event) | ||
1674 | { | ||
1675 | perf_pending_sync(event); | ||
1676 | |||
1677 | if (!event->parent) { | ||
1678 | atomic_dec(&nr_events); | ||
1679 | if (event->attr.mmap) | ||
1680 | atomic_dec(&nr_mmap_events); | ||
1681 | if (event->attr.comm) | ||
1682 | atomic_dec(&nr_comm_events); | ||
1683 | if (event->attr.task) | ||
1684 | atomic_dec(&nr_task_events); | ||
1685 | } | ||
1686 | |||
1687 | if (event->output) { | ||
1688 | fput(event->output->filp); | ||
1689 | event->output = NULL; | ||
1690 | } | ||
1691 | |||
1692 | if (event->destroy) | ||
1693 | event->destroy(event); | ||
1694 | |||
1695 | put_ctx(event->ctx); | ||
1696 | call_rcu(&event->rcu_head, free_event_rcu); | ||
1697 | } | ||
1698 | |||
1699 | /* | ||
1700 | * Called when the last reference to the file is gone. | ||
1701 | */ | ||
1702 | static int perf_release(struct inode *inode, struct file *file) | ||
1703 | { | ||
1704 | struct perf_event *event = file->private_data; | ||
1705 | struct perf_event_context *ctx = event->ctx; | ||
1706 | |||
1707 | file->private_data = NULL; | ||
1708 | |||
1709 | WARN_ON_ONCE(ctx->parent_ctx); | ||
1710 | mutex_lock(&ctx->mutex); | ||
1711 | perf_event_remove_from_context(event); | ||
1712 | mutex_unlock(&ctx->mutex); | ||
1713 | |||
1714 | mutex_lock(&event->owner->perf_event_mutex); | ||
1715 | list_del_init(&event->owner_entry); | ||
1716 | mutex_unlock(&event->owner->perf_event_mutex); | ||
1717 | put_task_struct(event->owner); | ||
1718 | |||
1719 | free_event(event); | ||
1720 | |||
1721 | return 0; | ||
1722 | } | ||
1723 | |||
1724 | static int perf_event_read_size(struct perf_event *event) | ||
1725 | { | ||
1726 | int entry = sizeof(u64); /* value */ | ||
1727 | int size = 0; | ||
1728 | int nr = 1; | ||
1729 | |||
1730 | if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
1731 | size += sizeof(u64); | ||
1732 | |||
1733 | if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
1734 | size += sizeof(u64); | ||
1735 | |||
1736 | if (event->attr.read_format & PERF_FORMAT_ID) | ||
1737 | entry += sizeof(u64); | ||
1738 | |||
1739 | if (event->attr.read_format & PERF_FORMAT_GROUP) { | ||
1740 | nr += event->group_leader->nr_siblings; | ||
1741 | size += sizeof(u64); | ||
1742 | } | ||
1743 | |||
1744 | size += entry * nr; | ||
1745 | |||
1746 | return size; | ||
1747 | } | ||
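As a worked example of this sizing (a hypothetical flag combination): with PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED on a group whose leader has two siblings, entry is 8 + 8 = 16 bytes, nr is 3, and size is 8 (time_enabled) + 8 (the group's member count) + 3 * 16 = 64 bytes.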
1748 | |||
1749 | static u64 perf_event_read_value(struct perf_event *event) | ||
1750 | { | ||
1751 | struct perf_event *child; | ||
1752 | u64 total = 0; | ||
1753 | |||
1754 | total += perf_event_read(event); | ||
1755 | list_for_each_entry(child, &event->child_list, child_list) | ||
1756 | total += perf_event_read(child); | ||
1757 | |||
1758 | return total; | ||
1759 | } | ||
1760 | |||
1761 | static int perf_event_read_entry(struct perf_event *event, | ||
1762 | u64 read_format, char __user *buf) | ||
1763 | { | ||
1764 | int n = 0, count = 0; | ||
1765 | u64 values[2]; | ||
1766 | |||
1767 | values[n++] = perf_event_read_value(event); | ||
1768 | if (read_format & PERF_FORMAT_ID) | ||
1769 | values[n++] = primary_event_id(event); | ||
1770 | |||
1771 | count = n * sizeof(u64); | ||
1772 | |||
1773 | if (copy_to_user(buf, values, count)) | ||
1774 | return -EFAULT; | ||
1775 | |||
1776 | return count; | ||
1777 | } | ||
1778 | |||
1779 | static int perf_event_read_group(struct perf_event *event, | ||
1780 | u64 read_format, char __user *buf) | ||
1781 | { | ||
1782 | struct perf_event *leader = event->group_leader, *sub; | ||
1783 | int n = 0, size = 0, err = -EFAULT; | ||
1784 | u64 values[3]; | ||
1785 | |||
1786 | values[n++] = 1 + leader->nr_siblings; | ||
1787 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
1788 | values[n++] = leader->total_time_enabled + | ||
1789 | atomic64_read(&leader->child_total_time_enabled); | ||
1790 | } | ||
1791 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1792 | values[n++] = leader->total_time_running + | ||
1793 | atomic64_read(&leader->child_total_time_running); | ||
1794 | } | ||
1795 | |||
1796 | size = n * sizeof(u64); | ||
1797 | |||
1798 | if (copy_to_user(buf, values, size)) | ||
1799 | return -EFAULT; | ||
1800 | |||
1801 | err = perf_event_read_entry(leader, read_format, buf + size); | ||
1802 | if (err < 0) | ||
1803 | return err; | ||
1804 | |||
1805 | size += err; | ||
1806 | |||
1807 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { | ||
1808 | err = perf_event_read_entry(sub, read_format, | ||
1809 | buf + size); | ||
1810 | if (err < 0) | ||
1811 | return err; | ||
1812 | |||
1813 | size += err; | ||
1814 | } | ||
1815 | |||
1816 | return size; | ||
1817 | } | ||
1818 | |||
1819 | static int perf_event_read_one(struct perf_event *event, | ||
1820 | u64 read_format, char __user *buf) | ||
1821 | { | ||
1822 | u64 values[4]; | ||
1823 | int n = 0; | ||
1824 | |||
1825 | values[n++] = perf_event_read_value(event); | ||
1826 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
1827 | values[n++] = event->total_time_enabled + | ||
1828 | atomic64_read(&event->child_total_time_enabled); | ||
1829 | } | ||
1830 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1831 | values[n++] = event->total_time_running + | ||
1832 | atomic64_read(&event->child_total_time_running); | ||
1833 | } | ||
1834 | if (read_format & PERF_FORMAT_ID) | ||
1835 | values[n++] = primary_event_id(event); | ||
1836 | |||
1837 | if (copy_to_user(buf, values, n * sizeof(u64))) | ||
1838 | return -EFAULT; | ||
1839 | |||
1840 | return n * sizeof(u64); | ||
1841 | } | ||
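Seen from user space this is simply an array of u64 values in the order the flags are tested above. A short sketch that opens one counter on the current task and decodes the TOTAL_TIME_ENABLED/RUNNING layout (it assumes <linux/perf_event.h> and a kernel providing the perf_event_open() syscall):

```c
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t buf[3];
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run the workload to be measured ... */

	if (read(fd, buf, sizeof(buf)) != sizeof(buf))
		return 1;

	/* buf[0] = count, buf[1] = time_enabled, buf[2] = time_running,
	 * in the order the read_format flags are tested above. */
	printf("count=%llu enabled=%llu running=%llu\n",
	       (unsigned long long)buf[0],
	       (unsigned long long)buf[1],
	       (unsigned long long)buf[2]);
	close(fd);
	return 0;
}
```

The enabled/running pair is what lets user space scale the raw count when the event was time-multiplexed with others.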
1842 | |||
1843 | /* | ||
1844 | * Read the performance event - simple non blocking version for now | ||
1845 | */ | ||
1846 | static ssize_t | ||
1847 | perf_read_hw(struct perf_event *event, char __user *buf, size_t count) | ||
1848 | { | ||
1849 | u64 read_format = event->attr.read_format; | ||
1850 | int ret; | ||
1851 | |||
1852 | /* | ||
1853 | * Return end-of-file for a read on an event that is in | ||
1854 | * error state (i.e. because it was pinned but it couldn't be | ||
1855 | * scheduled on to the CPU at some point). | ||
1856 | */ | ||
1857 | if (event->state == PERF_EVENT_STATE_ERROR) | ||
1858 | return 0; | ||
1859 | |||
1860 | if (count < perf_event_read_size(event)) | ||
1861 | return -ENOSPC; | ||
1862 | |||
1863 | WARN_ON_ONCE(event->ctx->parent_ctx); | ||
1864 | mutex_lock(&event->child_mutex); | ||
1865 | if (read_format & PERF_FORMAT_GROUP) | ||
1866 | ret = perf_event_read_group(event, read_format, buf); | ||
1867 | else | ||
1868 | ret = perf_event_read_one(event, read_format, buf); | ||
1869 | mutex_unlock(&event->child_mutex); | ||
1870 | |||
1871 | return ret; | ||
1872 | } | ||
1873 | |||
1874 | static ssize_t | ||
1875 | perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | ||
1876 | { | ||
1877 | struct perf_event *event = file->private_data; | ||
1878 | |||
1879 | return perf_read_hw(event, buf, count); | ||
1880 | } | ||
1881 | |||
1882 | static unsigned int perf_poll(struct file *file, poll_table *wait) | ||
1883 | { | ||
1884 | struct perf_event *event = file->private_data; | ||
1885 | struct perf_mmap_data *data; | ||
1886 | unsigned int events = POLLHUP; | ||
1887 | |||
1888 | rcu_read_lock(); | ||
1889 | data = rcu_dereference(event->data); | ||
1890 | if (data) | ||
1891 | events = atomic_xchg(&data->poll, 0); | ||
1892 | rcu_read_unlock(); | ||
1893 | |||
1894 | poll_wait(file, &event->waitq, wait); | ||
1895 | |||
1896 | return events; | ||
1897 | } | ||
1898 | |||
1899 | static void perf_event_reset(struct perf_event *event) | ||
1900 | { | ||
1901 | (void)perf_event_read(event); | ||
1902 | atomic64_set(&event->count, 0); | ||
1903 | perf_event_update_userpage(event); | ||
1904 | } | ||
1905 | |||
1906 | /* | ||
1907 | * Holding the top-level event's child_mutex means that any | ||
1908 | * descendant process that has inherited this event will block | ||
1909 | * in sync_child_event if it goes to exit, thus satisfying the | ||
1910 | * task existence requirements of perf_event_enable/disable. | ||
1911 | */ | ||
1912 | static void perf_event_for_each_child(struct perf_event *event, | ||
1913 | void (*func)(struct perf_event *)) | ||
1914 | { | ||
1915 | struct perf_event *child; | ||
1916 | |||
1917 | WARN_ON_ONCE(event->ctx->parent_ctx); | ||
1918 | mutex_lock(&event->child_mutex); | ||
1919 | func(event); | ||
1920 | list_for_each_entry(child, &event->child_list, child_list) | ||
1921 | func(child); | ||
1922 | mutex_unlock(&event->child_mutex); | ||
1923 | } | ||
1924 | |||
1925 | static void perf_event_for_each(struct perf_event *event, | ||
1926 | void (*func)(struct perf_event *)) | ||
1927 | { | ||
1928 | struct perf_event_context *ctx = event->ctx; | ||
1929 | struct perf_event *sibling; | ||
1930 | |||
1931 | WARN_ON_ONCE(ctx->parent_ctx); | ||
1932 | mutex_lock(&ctx->mutex); | ||
1933 | event = event->group_leader; | ||
1934 | |||
1935 | perf_event_for_each_child(event, func); | ||
1936 | func(event); | ||
1937 | list_for_each_entry(sibling, &event->sibling_list, group_entry) | ||
1938 | perf_event_for_each_child(sibling, func); | ||
1939 | mutex_unlock(&ctx->mutex); | ||
1940 | } | ||
1941 | |||
1942 | static int perf_event_period(struct perf_event *event, u64 __user *arg) | ||
1943 | { | ||
1944 | struct perf_event_context *ctx = event->ctx; | ||
1945 | unsigned long size; | ||
1946 | int ret = 0; | ||
1947 | u64 value; | ||
1948 | |||
1949 | if (!event->attr.sample_period) | ||
1950 | return -EINVAL; | ||
1951 | |||
1952 | size = copy_from_user(&value, arg, sizeof(value)); | ||
1953 | if (size != sizeof(value)) | ||
1954 | return -EFAULT; | ||
1955 | |||
1956 | if (!value) | ||
1957 | return -EINVAL; | ||
1958 | |||
1959 | spin_lock_irq(&ctx->lock); | ||
1960 | if (event->attr.freq) { | ||
1961 | if (value > sysctl_perf_event_sample_rate) { | ||
1962 | ret = -EINVAL; | ||
1963 | goto unlock; | ||
1964 | } | ||
1965 | |||
1966 | event->attr.sample_freq = value; | ||
1967 | } else { | ||
1968 | event->attr.sample_period = value; | ||
1969 | event->hw.sample_period = value; | ||
1970 | } | ||
1971 | unlock: | ||
1972 | spin_unlock_irq(&ctx->lock); | ||
1973 | |||
1974 | return ret; | ||
1975 | } | ||
1976 | |||
1977 | int perf_event_set_output(struct perf_event *event, int output_fd); | ||
1978 | |||
1979 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
1980 | { | ||
1981 | struct perf_event *event = file->private_data; | ||
1982 | void (*func)(struct perf_event *); | ||
1983 | u32 flags = arg; | ||
1984 | |||
1985 | switch (cmd) { | ||
1986 | case PERF_EVENT_IOC_ENABLE: | ||
1987 | func = perf_event_enable; | ||
1988 | break; | ||
1989 | case PERF_EVENT_IOC_DISABLE: | ||
1990 | func = perf_event_disable; | ||
1991 | break; | ||
1992 | case PERF_EVENT_IOC_RESET: | ||
1993 | func = perf_event_reset; | ||
1994 | break; | ||
1995 | |||
1996 | case PERF_EVENT_IOC_REFRESH: | ||
1997 | return perf_event_refresh(event, arg); | ||
1998 | |||
1999 | case PERF_EVENT_IOC_PERIOD: | ||
2000 | return perf_event_period(event, (u64 __user *)arg); | ||
2001 | |||
2002 | case PERF_EVENT_IOC_SET_OUTPUT: | ||
2003 | return perf_event_set_output(event, arg); | ||
2004 | |||
2005 | default: | ||
2006 | return -ENOTTY; | ||
2007 | } | ||
2008 | |||
2009 | if (flags & PERF_IOC_FLAG_GROUP) | ||
2010 | perf_event_for_each(event, func); | ||
2011 | else | ||
2012 | perf_event_for_each_child(event, func); | ||
2013 | |||
2014 | return 0; | ||
2015 | } | ||
2016 | |||
2017 | int perf_event_task_enable(void) | ||
2018 | { | ||
2019 | struct perf_event *event; | ||
2020 | |||
2021 | mutex_lock(¤t->perf_event_mutex); | ||
2022 | list_for_each_entry(event, ¤t->perf_event_list, owner_entry) | ||
2023 | perf_event_for_each_child(event, perf_event_enable); | ||
2024 | mutex_unlock(¤t->perf_event_mutex); | ||
2025 | |||
2026 | return 0; | ||
2027 | } | ||
2028 | |||
2029 | int perf_event_task_disable(void) | ||
2030 | { | ||
2031 | struct perf_event *event; | ||
2032 | |||
2033 | mutex_lock(¤t->perf_event_mutex); | ||
2034 | list_for_each_entry(event, ¤t->perf_event_list, owner_entry) | ||
2035 | perf_event_for_each_child(event, perf_event_disable); | ||
2036 | mutex_unlock(¤t->perf_event_mutex); | ||
2037 | |||
2038 | return 0; | ||
2039 | } | ||
2040 | |||
2041 | #ifndef PERF_EVENT_INDEX_OFFSET | ||
2042 | # define PERF_EVENT_INDEX_OFFSET 0 | ||
2043 | #endif | ||
2044 | |||
2045 | static int perf_event_index(struct perf_event *event) | ||
2046 | { | ||
2047 | if (event->state != PERF_EVENT_STATE_ACTIVE) | ||
2048 | return 0; | ||
2049 | |||
2050 | return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; | ||
2051 | } | ||
2052 | |||
2053 | /* | ||
2054 | * Callers need to ensure there can be no nesting of this function, otherwise | ||
2055 | * the seqlock logic goes bad. We cannot serialize this because the arch | ||
2056 | * code calls this from NMI context. | ||
2057 | */ | ||
2058 | void perf_event_update_userpage(struct perf_event *event) | ||
2059 | { | ||
2060 | struct perf_event_mmap_page *userpg; | ||
2061 | struct perf_mmap_data *data; | ||
2062 | |||
2063 | rcu_read_lock(); | ||
2064 | data = rcu_dereference(event->data); | ||
2065 | if (!data) | ||
2066 | goto unlock; | ||
2067 | |||
2068 | userpg = data->user_page; | ||
2069 | |||
2070 | /* | ||
2071 | * Disable preemption so as to not let the corresponding user-space | ||
2072 | * spin too long if we get preempted. | ||
2073 | */ | ||
2074 | preempt_disable(); | ||
2075 | ++userpg->lock; | ||
2076 | barrier(); | ||
2077 | userpg->index = perf_event_index(event); | ||
2078 | userpg->offset = atomic64_read(&event->count); | ||
2079 | if (event->state == PERF_EVENT_STATE_ACTIVE) | ||
2080 | userpg->offset -= atomic64_read(&event->hw.prev_count); | ||
2081 | |||
2082 | userpg->time_enabled = event->total_time_enabled + | ||
2083 | atomic64_read(&event->child_total_time_enabled); | ||
2084 | |||
2085 | userpg->time_running = event->total_time_running + | ||
2086 | atomic64_read(&event->child_total_time_running); | ||
2087 | |||
2088 | barrier(); | ||
2089 | ++userpg->lock; | ||
2090 | preempt_enable(); | ||
2091 | unlock: | ||
2092 | rcu_read_unlock(); | ||
2093 | } | ||
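The two increments of userpg->lock around the update form a seqcount: user space snapshots index/offset between two reads of lock and retries if the value changed underneath it. A sketch of the matching read loop, with a simplified stand-in for perf_event_mmap_page and a placeholder for the architecture-specific counter read (e.g. RDPMC on x86):

```c
#include <stdint.h>

/* Simplified stand-in for a few fields of perf_event_mmap_page;
 * the real definition lives in <linux/perf_event.h>. */
struct mmap_page {
	volatile uint32_t lock;		/* bumped before and after each update   */
	volatile uint32_t index;	/* hw counter index + 1, or 0 if inactive */
	volatile int64_t  offset;	/* add to the hw value to get the count   */
};

#define barrier() __asm__ __volatile__("" ::: "memory")

/* Placeholder for an architecture-specific user-space counter read;
 * returns 0 here only so the sketch compiles. */
static uint64_t read_hw_counter(uint32_t idx) { (void)idx; return 0; }

static int64_t read_self_count(struct mmap_page *pc)
{
	uint32_t seq;
	int64_t count;

	do {
		seq = pc->lock;
		barrier();

		count = pc->offset;
		if (pc->index)			/* event currently on a counter */
			count += read_hw_counter(pc->index - 1);

		barrier();
	} while (pc->lock != seq);		/* kernel updated it meanwhile: retry */

	return count;
}
```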
2094 | |||
2095 | static unsigned long perf_data_size(struct perf_mmap_data *data) | ||
2096 | { | ||
2097 | return data->nr_pages << (PAGE_SHIFT + data->data_order); | ||
2098 | } | ||
2099 | |||
2100 | #ifndef CONFIG_PERF_USE_VMALLOC | ||
2101 | |||
2102 | /* | ||
2103 | * Back perf_mmap() with regular GFP_KERNEL-0 pages. | ||
2104 | */ | ||
2105 | |||
2106 | static struct page * | ||
2107 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) | ||
2108 | { | ||
2109 | if (pgoff > data->nr_pages) | ||
2110 | return NULL; | ||
2111 | |||
2112 | if (pgoff == 0) | ||
2113 | return virt_to_page(data->user_page); | ||
2114 | |||
2115 | return virt_to_page(data->data_pages[pgoff - 1]); | ||
2116 | } | ||
2117 | |||
2118 | static struct perf_mmap_data * | ||
2119 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | ||
2120 | { | ||
2121 | struct perf_mmap_data *data; | ||
2122 | unsigned long size; | ||
2123 | int i; | ||
2124 | |||
2125 | WARN_ON(atomic_read(&event->mmap_count)); | ||
2126 | |||
2127 | size = sizeof(struct perf_mmap_data); | ||
2128 | size += nr_pages * sizeof(void *); | ||
2129 | |||
2130 | data = kzalloc(size, GFP_KERNEL); | ||
2131 | if (!data) | ||
2132 | goto fail; | ||
2133 | |||
2134 | data->user_page = (void *)get_zeroed_page(GFP_KERNEL); | ||
2135 | if (!data->user_page) | ||
2136 | goto fail_user_page; | ||
2137 | |||
2138 | for (i = 0; i < nr_pages; i++) { | ||
2139 | data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); | ||
2140 | if (!data->data_pages[i]) | ||
2141 | goto fail_data_pages; | ||
2142 | } | ||
2143 | |||
2144 | data->data_order = 0; | ||
2145 | data->nr_pages = nr_pages; | ||
2146 | |||
2147 | return data; | ||
2148 | |||
2149 | fail_data_pages: | ||
2150 | for (i--; i >= 0; i--) | ||
2151 | free_page((unsigned long)data->data_pages[i]); | ||
2152 | |||
2153 | free_page((unsigned long)data->user_page); | ||
2154 | |||
2155 | fail_user_page: | ||
2156 | kfree(data); | ||
2157 | |||
2158 | fail: | ||
2159 | return NULL; | ||
2160 | } | ||
2161 | |||
2162 | static void perf_mmap_free_page(unsigned long addr) | ||
2163 | { | ||
2164 | struct page *page = virt_to_page((void *)addr); | ||
2165 | |||
2166 | page->mapping = NULL; | ||
2167 | __free_page(page); | ||
2168 | } | ||
2169 | |||
2170 | static void perf_mmap_data_free(struct perf_mmap_data *data) | ||
2171 | { | ||
2172 | int i; | ||
2173 | |||
2174 | perf_mmap_free_page((unsigned long)data->user_page); | ||
2175 | for (i = 0; i < data->nr_pages; i++) | ||
2176 | perf_mmap_free_page((unsigned long)data->data_pages[i]); | ||
2177 | } | ||
2178 | |||
2179 | #else | ||
2180 | |||
2181 | /* | ||
2182 | * Back perf_mmap() with vmalloc memory. | ||
2183 | * | ||
2184 | * Required for architectures that have d-cache aliasing issues. | ||
2185 | */ | ||
2186 | |||
2187 | static struct page * | ||
2188 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) | ||
2189 | { | ||
2190 | if (pgoff > (1UL << data->data_order)) | ||
2191 | return NULL; | ||
2192 | |||
2193 | return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE); | ||
2194 | } | ||
2195 | |||
2196 | static void perf_mmap_unmark_page(void *addr) | ||
2197 | { | ||
2198 | struct page *page = vmalloc_to_page(addr); | ||
2199 | |||
2200 | page->mapping = NULL; | ||
2201 | } | ||
2202 | |||
2203 | static void perf_mmap_data_free_work(struct work_struct *work) | ||
2204 | { | ||
2205 | struct perf_mmap_data *data; | ||
2206 | void *base; | ||
2207 | int i, nr; | ||
2208 | |||
2209 | data = container_of(work, struct perf_mmap_data, work); | ||
2210 | nr = 1 << data->data_order; | ||
2211 | |||
2212 | base = data->user_page; | ||
2213 | for (i = 0; i < nr + 1; i++) | ||
2214 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); | ||
2215 | |||
2216 | vfree(base); | ||
2217 | } | ||
2218 | |||
2219 | static void perf_mmap_data_free(struct perf_mmap_data *data) | ||
2220 | { | ||
2221 | schedule_work(&data->work); | ||
2222 | } | ||
2223 | |||
2224 | static struct perf_mmap_data * | ||
2225 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | ||
2226 | { | ||
2227 | struct perf_mmap_data *data; | ||
2228 | unsigned long size; | ||
2229 | void *all_buf; | ||
2230 | |||
2231 | WARN_ON(atomic_read(&event->mmap_count)); | ||
2232 | |||
2233 | size = sizeof(struct perf_mmap_data); | ||
2234 | size += sizeof(void *); | ||
2235 | |||
2236 | data = kzalloc(size, GFP_KERNEL); | ||
2237 | if (!data) | ||
2238 | goto fail; | ||
2239 | |||
2240 | INIT_WORK(&data->work, perf_mmap_data_free_work); | ||
2241 | |||
2242 | all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); | ||
2243 | if (!all_buf) | ||
2244 | goto fail_all_buf; | ||
2245 | |||
2246 | data->user_page = all_buf; | ||
2247 | data->data_pages[0] = all_buf + PAGE_SIZE; | ||
2248 | data->data_order = ilog2(nr_pages); | ||
2249 | data->nr_pages = 1; | ||
2250 | |||
2251 | return data; | ||
2252 | |||
2253 | fail_all_buf: | ||
2254 | kfree(data); | ||
2255 | |||
2256 | fail: | ||
2257 | return NULL; | ||
2258 | } | ||
2259 | |||
2260 | #endif | ||
2261 | |||
2262 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
2263 | { | ||
2264 | struct perf_event *event = vma->vm_file->private_data; | ||
2265 | struct perf_mmap_data *data; | ||
2266 | int ret = VM_FAULT_SIGBUS; | ||
2267 | |||
2268 | if (vmf->flags & FAULT_FLAG_MKWRITE) { | ||
2269 | if (vmf->pgoff == 0) | ||
2270 | ret = 0; | ||
2271 | return ret; | ||
2272 | } | ||
2273 | |||
2274 | rcu_read_lock(); | ||
2275 | data = rcu_dereference(event->data); | ||
2276 | if (!data) | ||
2277 | goto unlock; | ||
2278 | |||
2279 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) | ||
2280 | goto unlock; | ||
2281 | |||
2282 | vmf->page = perf_mmap_to_page(data, vmf->pgoff); | ||
2283 | if (!vmf->page) | ||
2284 | goto unlock; | ||
2285 | |||
2286 | get_page(vmf->page); | ||
2287 | vmf->page->mapping = vma->vm_file->f_mapping; | ||
2288 | vmf->page->index = vmf->pgoff; | ||
2289 | |||
2290 | ret = 0; | ||
2291 | unlock: | ||
2292 | rcu_read_unlock(); | ||
2293 | |||
2294 | return ret; | ||
2295 | } | ||
2296 | |||
2297 | static void | ||
2298 | perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) | ||
2299 | { | ||
2300 | long max_size = perf_data_size(data); | ||
2301 | |||
2302 | atomic_set(&data->lock, -1); | ||
2303 | |||
2304 | if (event->attr.watermark) { | ||
2305 | data->watermark = min_t(long, max_size, | ||
2306 | event->attr.wakeup_watermark); | ||
2307 | } | ||
2308 | |||
2309 | if (!data->watermark) | ||
2310 | data->watermark = max_t(long, PAGE_SIZE, max_size / 2); | ||
2311 | |||
2312 | |||
2313 | rcu_assign_pointer(event->data, data); | ||
2314 | } | ||
2315 | |||
2316 | static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) | ||
2317 | { | ||
2318 | struct perf_mmap_data *data; | ||
2319 | |||
2320 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | ||
2321 | perf_mmap_data_free(data); | ||
2322 | kfree(data); | ||
2323 | } | ||
2324 | |||
2325 | static void perf_mmap_data_release(struct perf_event *event) | ||
2326 | { | ||
2327 | struct perf_mmap_data *data = event->data; | ||
2328 | |||
2329 | WARN_ON(atomic_read(&event->mmap_count)); | ||
2330 | |||
2331 | rcu_assign_pointer(event->data, NULL); | ||
2332 | call_rcu(&data->rcu_head, perf_mmap_data_free_rcu); | ||
2333 | } | ||
2334 | |||
2335 | static void perf_mmap_open(struct vm_area_struct *vma) | ||
2336 | { | ||
2337 | struct perf_event *event = vma->vm_file->private_data; | ||
2338 | |||
2339 | atomic_inc(&event->mmap_count); | ||
2340 | } | ||
2341 | |||
2342 | static void perf_mmap_close(struct vm_area_struct *vma) | ||
2343 | { | ||
2344 | struct perf_event *event = vma->vm_file->private_data; | ||
2345 | |||
2346 | WARN_ON_ONCE(event->ctx->parent_ctx); | ||
2347 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { | ||
2348 | unsigned long size = perf_data_size(event->data); | ||
2349 | struct user_struct *user = current_user(); | ||
2350 | |||
2351 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); | ||
2352 | vma->vm_mm->locked_vm -= event->data->nr_locked; | ||
2353 | perf_mmap_data_release(event); | ||
2354 | mutex_unlock(&event->mmap_mutex); | ||
2355 | } | ||
2356 | } | ||
2357 | |||
2358 | static const struct vm_operations_struct perf_mmap_vmops = { | ||
2359 | .open = perf_mmap_open, | ||
2360 | .close = perf_mmap_close, | ||
2361 | .fault = perf_mmap_fault, | ||
2362 | .page_mkwrite = perf_mmap_fault, | ||
2363 | }; | ||
2364 | |||
2365 | static int perf_mmap(struct file *file, struct vm_area_struct *vma) | ||
2366 | { | ||
2367 | struct perf_event *event = file->private_data; | ||
2368 | unsigned long user_locked, user_lock_limit; | ||
2369 | struct user_struct *user = current_user(); | ||
2370 | unsigned long locked, lock_limit; | ||
2371 | struct perf_mmap_data *data; | ||
2372 | unsigned long vma_size; | ||
2373 | unsigned long nr_pages; | ||
2374 | long user_extra, extra; | ||
2375 | int ret = 0; | ||
2376 | |||
2377 | if (!(vma->vm_flags & VM_SHARED)) | ||
2378 | return -EINVAL; | ||
2379 | |||
2380 | vma_size = vma->vm_end - vma->vm_start; | ||
2381 | nr_pages = (vma_size / PAGE_SIZE) - 1; | ||
2382 | |||
2383 | /* | ||
2384 | * If we have data pages ensure they're a power-of-two number, so we | ||
2385 | * can do bitmasks instead of modulo. | ||
2386 | */ | ||
2387 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) | ||
2388 | return -EINVAL; | ||
2389 | |||
2390 | if (vma_size != PAGE_SIZE * (1 + nr_pages)) | ||
2391 | return -EINVAL; | ||
2392 | |||
2393 | if (vma->vm_pgoff != 0) | ||
2394 | return -EINVAL; | ||
2395 | |||
2396 | WARN_ON_ONCE(event->ctx->parent_ctx); | ||
2397 | mutex_lock(&event->mmap_mutex); | ||
2398 | if (event->output) { | ||
2399 | ret = -EINVAL; | ||
2400 | goto unlock; | ||
2401 | } | ||
2402 | |||
2403 | if (atomic_inc_not_zero(&event->mmap_count)) { | ||
2404 | if (nr_pages != event->data->nr_pages) | ||
2405 | ret = -EINVAL; | ||
2406 | goto unlock; | ||
2407 | } | ||
2408 | |||
2409 | user_extra = nr_pages + 1; | ||
2410 | user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); | ||
2411 | |||
2412 | /* | ||
2413 | * Increase the limit linearly with more CPUs: | ||
2414 | */ | ||
2415 | user_lock_limit *= num_online_cpus(); | ||
2416 | |||
2417 | user_locked = atomic_long_read(&user->locked_vm) + user_extra; | ||
2418 | |||
2419 | extra = 0; | ||
2420 | if (user_locked > user_lock_limit) | ||
2421 | extra = user_locked - user_lock_limit; | ||
2422 | |||
2423 | lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; | ||
2424 | lock_limit >>= PAGE_SHIFT; | ||
2425 | locked = vma->vm_mm->locked_vm + extra; | ||
2426 | |||
2427 | if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && | ||
2428 | !capable(CAP_IPC_LOCK)) { | ||
2429 | ret = -EPERM; | ||
2430 | goto unlock; | ||
2431 | } | ||
2432 | |||
2433 | WARN_ON(event->data); | ||
2434 | |||
2435 | data = perf_mmap_data_alloc(event, nr_pages); | ||
2436 | ret = -ENOMEM; | ||
2437 | if (!data) | ||
2438 | goto unlock; | ||
2439 | |||
2440 | ret = 0; | ||
2441 | perf_mmap_data_init(event, data); | ||
2442 | |||
2443 | atomic_set(&event->mmap_count, 1); | ||
2444 | atomic_long_add(user_extra, &user->locked_vm); | ||
2445 | vma->vm_mm->locked_vm += extra; | ||
2446 | event->data->nr_locked = extra; | ||
2447 | if (vma->vm_flags & VM_WRITE) | ||
2448 | event->data->writable = 1; | ||
2449 | |||
2450 | unlock: | ||
2451 | mutex_unlock(&event->mmap_mutex); | ||
2452 | |||
2453 | vma->vm_flags |= VM_RESERVED; | ||
2454 | vma->vm_ops = &perf_mmap_vmops; | ||
2455 | |||
2456 | return ret; | ||
2457 | } | ||
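On the user side the mapping is one metadata page followed by a power-of-two number of data pages, which is exactly what the nr_pages and vma_size checks above enforce. A minimal sketch, assuming fd came from perf_event_open() and picking eight data pages arbitrarily:

```c
#include <sys/mman.h>
#include <unistd.h>
#include <stddef.h>
#include <stdio.h>

/* Map the ring buffer validated by perf_mmap(): user page + 2^3 data pages. */
void *map_perf_buffer(int fd, size_t *len_out)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = (1 + 8) * page;
	void *base;

	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}

	*len_out = len;
	return base;	/* base points at the perf_event_mmap_page */
}
```

Mapping with PROT_WRITE on a shared mapping is what sets data->writable above and allows user space to advance data_tail.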
2458 | |||
2459 | static int perf_fasync(int fd, struct file *filp, int on) | ||
2460 | { | ||
2461 | struct inode *inode = filp->f_path.dentry->d_inode; | ||
2462 | struct perf_event *event = filp->private_data; | ||
2463 | int retval; | ||
2464 | |||
2465 | mutex_lock(&inode->i_mutex); | ||
2466 | retval = fasync_helper(fd, filp, on, &event->fasync); | ||
2467 | mutex_unlock(&inode->i_mutex); | ||
2468 | |||
2469 | if (retval < 0) | ||
2470 | return retval; | ||
2471 | |||
2472 | return 0; | ||
2473 | } | ||
2474 | |||
2475 | static const struct file_operations perf_fops = { | ||
2476 | .release = perf_release, | ||
2477 | .read = perf_read, | ||
2478 | .poll = perf_poll, | ||
2479 | .unlocked_ioctl = perf_ioctl, | ||
2480 | .compat_ioctl = perf_ioctl, | ||
2481 | .mmap = perf_mmap, | ||
2482 | .fasync = perf_fasync, | ||
2483 | }; | ||
2484 | |||
2485 | /* | ||
2486 | * Perf event wakeup | ||
2487 | * | ||
2488 | * If there's data, ensure we set the poll() state and publish everything | ||
2489 | * to user-space before waking everybody up. | ||
2490 | */ | ||
2491 | |||
2492 | void perf_event_wakeup(struct perf_event *event) | ||
2493 | { | ||
2494 | wake_up_all(&event->waitq); | ||
2495 | |||
2496 | if (event->pending_kill) { | ||
2497 | kill_fasync(&event->fasync, SIGIO, event->pending_kill); | ||
2498 | event->pending_kill = 0; | ||
2499 | } | ||
2500 | } | ||
2501 | |||
2502 | /* | ||
2503 | * Pending wakeups | ||
2504 | * | ||
2505 | * Handle the case where we need to wake up from NMI (or rq->lock) context. | ||
2506 | * | ||
2507 | * The NMI bit means we cannot possibly take locks. Therefore, maintain a | ||
2508 | * single linked list and use cmpxchg() to add entries lockless. | ||
2509 | */ | ||
2510 | |||
2511 | static void perf_pending_event(struct perf_pending_entry *entry) | ||
2512 | { | ||
2513 | struct perf_event *event = container_of(entry, | ||
2514 | struct perf_event, pending); | ||
2515 | |||
2516 | if (event->pending_disable) { | ||
2517 | event->pending_disable = 0; | ||
2518 | __perf_event_disable(event); | ||
2519 | } | ||
2520 | |||
2521 | if (event->pending_wakeup) { | ||
2522 | event->pending_wakeup = 0; | ||
2523 | perf_event_wakeup(event); | ||
2524 | } | ||
2525 | } | ||
2526 | |||
2527 | #define PENDING_TAIL ((struct perf_pending_entry *)-1UL) | ||
2528 | |||
2529 | static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { | ||
2530 | PENDING_TAIL, | ||
2531 | }; | ||
2532 | |||
2533 | static void perf_pending_queue(struct perf_pending_entry *entry, | ||
2534 | void (*func)(struct perf_pending_entry *)) | ||
2535 | { | ||
2536 | struct perf_pending_entry **head; | ||
2537 | |||
2538 | if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) | ||
2539 | return; | ||
2540 | |||
2541 | entry->func = func; | ||
2542 | |||
2543 | head = &get_cpu_var(perf_pending_head); | ||
2544 | |||
2545 | do { | ||
2546 | entry->next = *head; | ||
2547 | } while (cmpxchg(head, entry->next, entry) != entry->next); | ||
2548 | |||
2549 | set_perf_event_pending(); | ||
2550 | |||
2551 | put_cpu_var(perf_pending_head); | ||
2552 | } | ||
2553 | |||
2554 | static int __perf_pending_run(void) | ||
2555 | { | ||
2556 | struct perf_pending_entry *list; | ||
2557 | int nr = 0; | ||
2558 | |||
2559 | list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); | ||
2560 | while (list != PENDING_TAIL) { | ||
2561 | void (*func)(struct perf_pending_entry *); | ||
2562 | struct perf_pending_entry *entry = list; | ||
2563 | |||
2564 | list = list->next; | ||
2565 | |||
2566 | func = entry->func; | ||
2567 | entry->next = NULL; | ||
2568 | /* | ||
2569 | * Ensure we observe the unqueue before we issue the wakeup, | ||
2570 | * so that we won't be waiting forever. | ||
2571 | * -- see perf_not_pending(). | ||
2572 | */ | ||
2573 | smp_wmb(); | ||
2574 | |||
2575 | func(entry); | ||
2576 | nr++; | ||
2577 | } | ||
2578 | |||
2579 | return nr; | ||
2580 | } | ||
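Queueing uses one cmpxchg() on entry->next so an entry can be enqueued at most once, a second cmpxchg() to push it onto the per-CPU stack, and the drain side detaches the whole list with a single xchg(). The same shape in stand-alone C11 atomics (struct pending, pending_queue() and pending_run() are illustrative names, not the kernel API):

```c
#include <stdatomic.h>
#include <stddef.h>

struct pending {
	_Atomic(struct pending *) next;
	void (*func)(struct pending *);
};

#define TAIL ((struct pending *)-1UL)

static _Atomic(struct pending *) pending_head = TAIL;

static void pending_queue(struct pending *entry, void (*func)(struct pending *))
{
	struct pending *old = NULL;

	/* next == NULL means "not queued"; claim the entry exactly once */
	if (!atomic_compare_exchange_strong(&entry->next, &old, TAIL))
		return;

	entry->func = func;
	do {
		old = atomic_load(&pending_head);
		atomic_store(&entry->next, old);
	} while (!atomic_compare_exchange_weak(&pending_head, &old, entry));
}

static int pending_run(void)
{
	struct pending *list = atomic_exchange(&pending_head, TAIL);
	int nr = 0;

	while (list != TAIL) {
		struct pending *entry = list;

		list = atomic_load(&entry->next);
		atomic_store(&entry->next, NULL);	/* may be queued again */
		entry->func(entry);
		nr++;
	}
	return nr;
}

static void noop(struct pending *p) { (void)p; }

int main(void)
{
	static struct pending e;

	pending_queue(&e, noop);
	return pending_run() == 1 ? 0 : 1;
}
```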
2581 | |||
2582 | static inline int perf_not_pending(struct perf_event *event) | ||
2583 | { | ||
2584 | /* | ||
2585 | * If we flush on whatever cpu we run, there is a chance we don't | ||
2586 | * need to wait. | ||
2587 | */ | ||
2588 | get_cpu(); | ||
2589 | __perf_pending_run(); | ||
2590 | put_cpu(); | ||
2591 | |||
2592 | /* | ||
2593 | * Ensure we see the proper queue state before going to sleep | ||
2594 | * so that we do not miss the wakeup. -- see perf_pending_queue() | ||
2595 | */ | ||
2596 | smp_rmb(); | ||
2597 | return event->pending.next == NULL; | ||
2598 | } | ||
2599 | |||
2600 | static void perf_pending_sync(struct perf_event *event) | ||
2601 | { | ||
2602 | wait_event(event->waitq, perf_not_pending(event)); | ||
2603 | } | ||
2604 | |||
2605 | void perf_event_do_pending(void) | ||
2606 | { | ||
2607 | __perf_pending_run(); | ||
2608 | } | ||
2609 | |||
2610 | /* | ||
2611 | * Callchain support -- arch specific | ||
2612 | */ | ||
2613 | |||
2614 | __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
2615 | { | ||
2616 | return NULL; | ||
2617 | } | ||
2618 | |||
2619 | /* | ||
2620 | * Output | ||
2621 | */ | ||
2622 | static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, | ||
2623 | unsigned long offset, unsigned long head) | ||
2624 | { | ||
2625 | unsigned long mask; | ||
2626 | |||
2627 | if (!data->writable) | ||
2628 | return true; | ||
2629 | |||
2630 | mask = perf_data_size(data) - 1; | ||
2631 | |||
2632 | offset = (offset - tail) & mask; | ||
2633 | head = (head - tail) & mask; | ||
2634 | |||
2635 | if ((int)(head - offset) < 0) | ||
2636 | return false; | ||
2637 | |||
2638 | return true; | ||
2639 | } | ||
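For example, with a 16 KiB writable buffer (mask = 0x3fff), tail = 0x4100, offset = 0x7f00 and a head that would advance to 0x8180: offset - tail masks to 0x3e00, head - tail masks to 0x80, so head - offset is negative and the reservation is refused instead of overwriting records user space has not consumed yet.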
2640 | |||
2641 | static void perf_output_wakeup(struct perf_output_handle *handle) | ||
2642 | { | ||
2643 | atomic_set(&handle->data->poll, POLLIN); | ||
2644 | |||
2645 | if (handle->nmi) { | ||
2646 | handle->event->pending_wakeup = 1; | ||
2647 | perf_pending_queue(&handle->event->pending, | ||
2648 | perf_pending_event); | ||
2649 | } else | ||
2650 | perf_event_wakeup(handle->event); | ||
2651 | } | ||
2652 | |||
2653 | /* | ||
2654 | * Curious locking construct. | ||
2655 | * | ||
2656 | * We need to ensure a later event doesn't publish a head when a former | ||
2657 | * event isn't done writing. However since we need to deal with NMIs we | ||
2658 | * cannot fully serialize things. | ||
2659 | * | ||
2660 | * What we do is serialize between CPUs so we only have to deal with NMI | ||
2661 | * nesting on a single CPU. | ||
2662 | * | ||
2663 | * We only publish the head (and generate a wakeup) when the outer-most | ||
2664 | * event completes. | ||
2665 | */ | ||
2666 | static void perf_output_lock(struct perf_output_handle *handle) | ||
2667 | { | ||
2668 | struct perf_mmap_data *data = handle->data; | ||
2669 | int cpu; | ||
2670 | |||
2671 | handle->locked = 0; | ||
2672 | |||
2673 | local_irq_save(handle->flags); | ||
2674 | cpu = smp_processor_id(); | ||
2675 | |||
2676 | if (in_nmi() && atomic_read(&data->lock) == cpu) | ||
2677 | return; | ||
2678 | |||
2679 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | ||
2680 | cpu_relax(); | ||
2681 | |||
2682 | handle->locked = 1; | ||
2683 | } | ||
2684 | |||
2685 | static void perf_output_unlock(struct perf_output_handle *handle) | ||
2686 | { | ||
2687 | struct perf_mmap_data *data = handle->data; | ||
2688 | unsigned long head; | ||
2689 | int cpu; | ||
2690 | |||
2691 | data->done_head = data->head; | ||
2692 | |||
2693 | if (!handle->locked) | ||
2694 | goto out; | ||
2695 | |||
2696 | again: | ||
2697 | /* | ||
2698 | * The xchg implies a full barrier that ensures all writes are done | ||
2699 | * before we publish the new head, matched by a rmb() in userspace when | ||
2700 | * reading this position. | ||
2701 | */ | ||
2702 | while ((head = atomic_long_xchg(&data->done_head, 0))) | ||
2703 | data->user_page->data_head = head; | ||
2704 | |||
2705 | /* | ||
2706 | * NMI can happen here, which means we can miss a done_head update. | ||
2707 | */ | ||
2708 | |||
2709 | cpu = atomic_xchg(&data->lock, -1); | ||
2710 | WARN_ON_ONCE(cpu != smp_processor_id()); | ||
2711 | |||
2712 | /* | ||
2713 | * Therefore we have to validate we did not indeed do so. | ||
2714 | */ | ||
2715 | if (unlikely(atomic_long_read(&data->done_head))) { | ||
2716 | /* | ||
2717 | * Since we had it locked, we can lock it again. | ||
2718 | */ | ||
2719 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | ||
2720 | cpu_relax(); | ||
2721 | |||
2722 | goto again; | ||
2723 | } | ||
2724 | |||
2725 | if (atomic_xchg(&data->wakeup, 0)) | ||
2726 | perf_output_wakeup(handle); | ||
2727 | out: | ||
2728 | local_irq_restore(handle->flags); | ||
2729 | } | ||
2730 | |||
2731 | void perf_output_copy(struct perf_output_handle *handle, | ||
2732 | const void *buf, unsigned int len) | ||
2733 | { | ||
2734 | unsigned int pages_mask; | ||
2735 | unsigned long offset; | ||
2736 | unsigned int size; | ||
2737 | void **pages; | ||
2738 | |||
2739 | offset = handle->offset; | ||
2740 | pages_mask = handle->data->nr_pages - 1; | ||
2741 | pages = handle->data->data_pages; | ||
2742 | |||
2743 | do { | ||
2744 | unsigned long page_offset; | ||
2745 | unsigned long page_size; | ||
2746 | int nr; | ||
2747 | |||
2748 | nr = (offset >> PAGE_SHIFT) & pages_mask; | ||
2749 | page_size = 1UL << (handle->data->data_order + PAGE_SHIFT); | ||
2750 | page_offset = offset & (page_size - 1); | ||
2751 | size = min_t(unsigned int, page_size - page_offset, len); | ||
2752 | |||
2753 | memcpy(pages[nr] + page_offset, buf, size); | ||
2754 | |||
2755 | len -= size; | ||
2756 | buf += size; | ||
2757 | offset += size; | ||
2758 | } while (len); | ||
2759 | |||
2760 | handle->offset = offset; | ||
2761 | |||
2762 | /* | ||
2763 | * Check we didn't copy past our reservation window, taking the | ||
2764 | * possible unsigned int wrap into account. | ||
2765 | */ | ||
2766 | WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); | ||
2767 | } | ||
2768 | |||
2769 | int perf_output_begin(struct perf_output_handle *handle, | ||
2770 | struct perf_event *event, unsigned int size, | ||
2771 | int nmi, int sample) | ||
2772 | { | ||
2773 | struct perf_event *output_event; | ||
2774 | struct perf_mmap_data *data; | ||
2775 | unsigned long tail, offset, head; | ||
2776 | int have_lost; | ||
2777 | struct { | ||
2778 | struct perf_event_header header; | ||
2779 | u64 id; | ||
2780 | u64 lost; | ||
2781 | } lost_event; | ||
2782 | |||
2783 | rcu_read_lock(); | ||
2784 | /* | ||
2785 | * For inherited events we send all the output towards the parent. | ||
2786 | */ | ||
2787 | if (event->parent) | ||
2788 | event = event->parent; | ||
2789 | |||
2790 | output_event = rcu_dereference(event->output); | ||
2791 | if (output_event) | ||
2792 | event = output_event; | ||
2793 | |||
2794 | data = rcu_dereference(event->data); | ||
2795 | if (!data) | ||
2796 | goto out; | ||
2797 | |||
2798 | handle->data = data; | ||
2799 | handle->event = event; | ||
2800 | handle->nmi = nmi; | ||
2801 | handle->sample = sample; | ||
2802 | |||
2803 | if (!data->nr_pages) | ||
2804 | goto fail; | ||
2805 | |||
2806 | have_lost = atomic_read(&data->lost); | ||
2807 | if (have_lost) | ||
2808 | size += sizeof(lost_event); | ||
2809 | |||
2810 | perf_output_lock(handle); | ||
2811 | |||
2812 | do { | ||
2813 | /* | ||
2814 | * Userspace could choose to issue an mb() before updating the | ||
2815 | * tail pointer, so that all reads are completed before the | ||
2816 | * write is issued. | ||
2817 | */ | ||
2818 | tail = ACCESS_ONCE(data->user_page->data_tail); | ||
2819 | smp_rmb(); | ||
2820 | offset = head = atomic_long_read(&data->head); | ||
2821 | head += size; | ||
2822 | if (unlikely(!perf_output_space(data, tail, offset, head))) | ||
2823 | goto fail; | ||
2824 | } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); | ||
2825 | |||
2826 | handle->offset = offset; | ||
2827 | handle->head = head; | ||
2828 | |||
2829 | if (head - tail > data->watermark) | ||
2830 | atomic_set(&data->wakeup, 1); | ||
2831 | |||
2832 | if (have_lost) { | ||
2833 | lost_event.header.type = PERF_RECORD_LOST; | ||
2834 | lost_event.header.misc = 0; | ||
2835 | lost_event.header.size = sizeof(lost_event); | ||
2836 | lost_event.id = event->id; | ||
2837 | lost_event.lost = atomic_xchg(&data->lost, 0); | ||
2838 | |||
2839 | perf_output_put(handle, lost_event); | ||
2840 | } | ||
2841 | |||
2842 | return 0; | ||
2843 | |||
2844 | fail: | ||
2845 | atomic_inc(&data->lost); | ||
2846 | perf_output_unlock(handle); | ||
2847 | out: | ||
2848 | rcu_read_unlock(); | ||
2849 | |||
2850 | return -ENOSPC; | ||
2851 | } | ||
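The reservation itself is a read-tail, check-space, cmpxchg-head loop: several writers may race, and only the cmpxchg winner owns the bytes between offset and head. A stand-alone model of just that step with C11 atomics (struct ring and ring_reserve() are illustrative; the real code also folds in the lost-record bookkeeping above):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct ring {
	_Atomic uint64_t head;	/* producer position  */
	_Atomic uint64_t tail;	/* consumer position  */
	uint64_t size;		/* power of two bytes */
};

bool ring_reserve(struct ring *r, uint64_t len, uint64_t *offset)
{
	uint64_t head, tail;

	do {
		tail = atomic_load(&r->tail);
		head = atomic_load(&r->head);
		if (head + len - tail > r->size)
			return false;		/* no space: record would be lost */
	} while (!atomic_compare_exchange_weak(&r->head, &head, head + len));

	*offset = head;		/* caller owns [offset, offset + len) */
	return true;
}
```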
2852 | |||
2853 | void perf_output_end(struct perf_output_handle *handle) | ||
2854 | { | ||
2855 | struct perf_event *event = handle->event; | ||
2856 | struct perf_mmap_data *data = handle->data; | ||
2857 | |||
2858 | int wakeup_events = event->attr.wakeup_events; | ||
2859 | |||
2860 | if (handle->sample && wakeup_events) { | ||
2861 | int events = atomic_inc_return(&data->events); | ||
2862 | if (events >= wakeup_events) { | ||
2863 | atomic_sub(wakeup_events, &data->events); | ||
2864 | atomic_set(&data->wakeup, 1); | ||
2865 | } | ||
2866 | } | ||
2867 | |||
2868 | perf_output_unlock(handle); | ||
2869 | rcu_read_unlock(); | ||
2870 | } | ||
2871 | |||
2872 | static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) | ||
2873 | { | ||
2874 | /* | ||
2875 | * only top level events have the pid namespace they were created in | ||
2876 | */ | ||
2877 | if (event->parent) | ||
2878 | event = event->parent; | ||
2879 | |||
2880 | return task_tgid_nr_ns(p, event->ns); | ||
2881 | } | ||
2882 | |||
2883 | static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) | ||
2884 | { | ||
2885 | /* | ||
2886 | * only top level events have the pid namespace they were created in | ||
2887 | */ | ||
2888 | if (event->parent) | ||
2889 | event = event->parent; | ||
2890 | |||
2891 | return task_pid_nr_ns(p, event->ns); | ||
2892 | } | ||
2893 | |||
2894 | static void perf_output_read_one(struct perf_output_handle *handle, | ||
2895 | struct perf_event *event) | ||
2896 | { | ||
2897 | u64 read_format = event->attr.read_format; | ||
2898 | u64 values[4]; | ||
2899 | int n = 0; | ||
2900 | |||
2901 | values[n++] = atomic64_read(&event->count); | ||
2902 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
2903 | values[n++] = event->total_time_enabled + | ||
2904 | atomic64_read(&event->child_total_time_enabled); | ||
2905 | } | ||
2906 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
2907 | values[n++] = event->total_time_running + | ||
2908 | atomic64_read(&event->child_total_time_running); | ||
2909 | } | ||
2910 | if (read_format & PERF_FORMAT_ID) | ||
2911 | values[n++] = primary_event_id(event); | ||
2912 | |||
2913 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2914 | } | ||
2915 | |||
2916 | /* | ||
2917 | * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. | ||
2918 | */ | ||
2919 | static void perf_output_read_group(struct perf_output_handle *handle, | ||
2920 | struct perf_event *event) | ||
2921 | { | ||
2922 | struct perf_event *leader = event->group_leader, *sub; | ||
2923 | u64 read_format = event->attr.read_format; | ||
2924 | u64 values[5]; | ||
2925 | int n = 0; | ||
2926 | |||
2927 | values[n++] = 1 + leader->nr_siblings; | ||
2928 | |||
2929 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
2930 | values[n++] = leader->total_time_enabled; | ||
2931 | |||
2932 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
2933 | values[n++] = leader->total_time_running; | ||
2934 | |||
2935 | if (leader != event) | ||
2936 | leader->pmu->read(leader); | ||
2937 | |||
2938 | values[n++] = atomic64_read(&leader->count); | ||
2939 | if (read_format & PERF_FORMAT_ID) | ||
2940 | values[n++] = primary_event_id(leader); | ||
2941 | |||
2942 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2943 | |||
2944 | list_for_each_entry(sub, &leader->sibling_list, group_entry) { | ||
2945 | n = 0; | ||
2946 | |||
2947 | if (sub != event) | ||
2948 | sub->pmu->read(sub); | ||
2949 | |||
2950 | values[n++] = atomic64_read(&sub->count); | ||
2951 | if (read_format & PERF_FORMAT_ID) | ||
2952 | values[n++] = primary_event_id(sub); | ||
2953 | |||
2954 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2955 | } | ||
2956 | } | ||
2957 | |||
2958 | static void perf_output_read(struct perf_output_handle *handle, | ||
2959 | struct perf_event *event) | ||
2960 | { | ||
2961 | if (event->attr.read_format & PERF_FORMAT_GROUP) | ||
2962 | perf_output_read_group(handle, event); | ||
2963 | else | ||
2964 | perf_output_read_one(handle, event); | ||
2965 | } | ||
2966 | |||
2967 | void perf_output_sample(struct perf_output_handle *handle, | ||
2968 | struct perf_event_header *header, | ||
2969 | struct perf_sample_data *data, | ||
2970 | struct perf_event *event) | ||
2971 | { | ||
2972 | u64 sample_type = data->type; | ||
2973 | |||
2974 | perf_output_put(handle, *header); | ||
2975 | |||
2976 | if (sample_type & PERF_SAMPLE_IP) | ||
2977 | perf_output_put(handle, data->ip); | ||
2978 | |||
2979 | if (sample_type & PERF_SAMPLE_TID) | ||
2980 | perf_output_put(handle, data->tid_entry); | ||
2981 | |||
2982 | if (sample_type & PERF_SAMPLE_TIME) | ||
2983 | perf_output_put(handle, data->time); | ||
2984 | |||
2985 | if (sample_type & PERF_SAMPLE_ADDR) | ||
2986 | perf_output_put(handle, data->addr); | ||
2987 | |||
2988 | if (sample_type & PERF_SAMPLE_ID) | ||
2989 | perf_output_put(handle, data->id); | ||
2990 | |||
2991 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
2992 | perf_output_put(handle, data->stream_id); | ||
2993 | |||
2994 | if (sample_type & PERF_SAMPLE_CPU) | ||
2995 | perf_output_put(handle, data->cpu_entry); | ||
2996 | |||
2997 | if (sample_type & PERF_SAMPLE_PERIOD) | ||
2998 | perf_output_put(handle, data->period); | ||
2999 | |||
3000 | if (sample_type & PERF_SAMPLE_READ) | ||
3001 | perf_output_read(handle, event); | ||
3002 | |||
3003 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | ||
3004 | if (data->callchain) { | ||
3005 | int size = 1; | ||
3006 | |||
3007 | if (data->callchain) | ||
3008 | size += data->callchain->nr; | ||
3009 | |||
3010 | size *= sizeof(u64); | ||
3011 | |||
3012 | perf_output_copy(handle, data->callchain, size); | ||
3013 | } else { | ||
3014 | u64 nr = 0; | ||
3015 | perf_output_put(handle, nr); | ||
3016 | } | ||
3017 | } | ||
3018 | |||
3019 | if (sample_type & PERF_SAMPLE_RAW) { | ||
3020 | if (data->raw) { | ||
3021 | perf_output_put(handle, data->raw->size); | ||
3022 | perf_output_copy(handle, data->raw->data, | ||
3023 | data->raw->size); | ||
3024 | } else { | ||
3025 | struct { | ||
3026 | u32 size; | ||
3027 | u32 data; | ||
3028 | } raw = { | ||
3029 | .size = sizeof(u32), | ||
3030 | .data = 0, | ||
3031 | }; | ||
3032 | perf_output_put(handle, raw); | ||
3033 | } | ||
3034 | } | ||
3035 | } | ||
3036 | |||
3037 | void perf_prepare_sample(struct perf_event_header *header, | ||
3038 | struct perf_sample_data *data, | ||
3039 | struct perf_event *event, | ||
3040 | struct pt_regs *regs) | ||
3041 | { | ||
3042 | u64 sample_type = event->attr.sample_type; | ||
3043 | |||
3044 | data->type = sample_type; | ||
3045 | |||
3046 | header->type = PERF_RECORD_SAMPLE; | ||
3047 | header->size = sizeof(*header); | ||
3048 | |||
3049 | header->misc = 0; | ||
3050 | header->misc |= perf_misc_flags(regs); | ||
3051 | |||
3052 | if (sample_type & PERF_SAMPLE_IP) { | ||
3053 | data->ip = perf_instruction_pointer(regs); | ||
3054 | |||
3055 | header->size += sizeof(data->ip); | ||
3056 | } | ||
3057 | |||
3058 | if (sample_type & PERF_SAMPLE_TID) { | ||
3059 | /* namespace issues */ | ||
3060 | data->tid_entry.pid = perf_event_pid(event, current); | ||
3061 | data->tid_entry.tid = perf_event_tid(event, current); | ||
3062 | |||
3063 | header->size += sizeof(data->tid_entry); | ||
3064 | } | ||
3065 | |||
3066 | if (sample_type & PERF_SAMPLE_TIME) { | ||
3067 | data->time = perf_clock(); | ||
3068 | |||
3069 | header->size += sizeof(data->time); | ||
3070 | } | ||
3071 | |||
3072 | if (sample_type & PERF_SAMPLE_ADDR) | ||
3073 | header->size += sizeof(data->addr); | ||
3074 | |||
3075 | if (sample_type & PERF_SAMPLE_ID) { | ||
3076 | data->id = primary_event_id(event); | ||
3077 | |||
3078 | header->size += sizeof(data->id); | ||
3079 | } | ||
3080 | |||
3081 | if (sample_type & PERF_SAMPLE_STREAM_ID) { | ||
3082 | data->stream_id = event->id; | ||
3083 | |||
3084 | header->size += sizeof(data->stream_id); | ||
3085 | } | ||
3086 | |||
3087 | if (sample_type & PERF_SAMPLE_CPU) { | ||
3088 | data->cpu_entry.cpu = raw_smp_processor_id(); | ||
3089 | data->cpu_entry.reserved = 0; | ||
3090 | |||
3091 | header->size += sizeof(data->cpu_entry); | ||
3092 | } | ||
3093 | |||
3094 | if (sample_type & PERF_SAMPLE_PERIOD) | ||
3095 | header->size += sizeof(data->period); | ||
3096 | |||
3097 | if (sample_type & PERF_SAMPLE_READ) | ||
3098 | header->size += perf_event_read_size(event); | ||
3099 | |||
3100 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | ||
3101 | int size = 1; | ||
3102 | |||
3103 | data->callchain = perf_callchain(regs); | ||
3104 | |||
3105 | if (data->callchain) | ||
3106 | size += data->callchain->nr; | ||
3107 | |||
3108 | header->size += size * sizeof(u64); | ||
3109 | } | ||
3110 | |||
3111 | if (sample_type & PERF_SAMPLE_RAW) { | ||
3112 | int size = sizeof(u32); | ||
3113 | |||
3114 | if (data->raw) | ||
3115 | size += data->raw->size; | ||
3116 | else | ||
3117 | size += sizeof(u32); | ||
3118 | |||
3119 | WARN_ON_ONCE(size & (sizeof(u64)-1)); | ||
3120 | header->size += size; | ||
3121 | } | ||
3122 | } | ||
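The size accumulated into the header describes a record whose fields follow the header in exactly the order the flags are tested. For one hypothetical combination the on-buffer layout works out to:

```c
#include <stdint.h>

/* Illustration only: layout for sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD. */
struct sample_record {
	/* struct perf_event_header */
	uint32_t type;		/* PERF_RECORD_SAMPLE              */
	uint16_t misc;		/* perf_misc_flags() of the regs   */
	uint16_t size;		/* == sizeof(struct sample_record) */

	uint64_t ip;		/* PERF_SAMPLE_IP     */
	uint32_t pid, tid;	/* PERF_SAMPLE_TID    */
	uint64_t time;		/* PERF_SAMPLE_TIME   */
	uint64_t period;	/* PERF_SAMPLE_PERIOD */
};
```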
3123 | |||
3124 | static void perf_event_output(struct perf_event *event, int nmi, | ||
3125 | struct perf_sample_data *data, | ||
3126 | struct pt_regs *regs) | ||
3127 | { | ||
3128 | struct perf_output_handle handle; | ||
3129 | struct perf_event_header header; | ||
3130 | |||
3131 | perf_prepare_sample(&header, data, event, regs); | ||
3132 | |||
3133 | if (perf_output_begin(&handle, event, header.size, nmi, 1)) | ||
3134 | return; | ||
3135 | |||
3136 | perf_output_sample(&handle, &header, data, event); | ||
3137 | |||
3138 | perf_output_end(&handle); | ||
3139 | } | ||
3140 | |||
3141 | /* | ||
3142 | * read event_id | ||
3143 | */ | ||
3144 | |||
3145 | struct perf_read_event { | ||
3146 | struct perf_event_header header; | ||
3147 | |||
3148 | u32 pid; | ||
3149 | u32 tid; | ||
3150 | }; | ||
3151 | |||
3152 | static void | ||
3153 | perf_event_read_event(struct perf_event *event, | ||
3154 | struct task_struct *task) | ||
3155 | { | ||
3156 | struct perf_output_handle handle; | ||
3157 | struct perf_read_event read_event = { | ||
3158 | .header = { | ||
3159 | .type = PERF_RECORD_READ, | ||
3160 | .misc = 0, | ||
3161 | .size = sizeof(read_event) + perf_event_read_size(event), | ||
3162 | }, | ||
3163 | .pid = perf_event_pid(event, task), | ||
3164 | .tid = perf_event_tid(event, task), | ||
3165 | }; | ||
3166 | int ret; | ||
3167 | |||
3168 | ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); | ||
3169 | if (ret) | ||
3170 | return; | ||
3171 | |||
3172 | perf_output_put(&handle, read_event); | ||
3173 | perf_output_read(&handle, event); | ||
3174 | |||
3175 | perf_output_end(&handle); | ||
3176 | } | ||
3177 | |||
3178 | /* | ||
3179 | * task tracking -- fork/exit | ||
3180 | * | ||
3181 | * enabled by: attr.comm | attr.mmap | attr.task | ||
3182 | */ | ||
3183 | |||
3184 | struct perf_task_event { | ||
3185 | struct task_struct *task; | ||
3186 | struct perf_event_context *task_ctx; | ||
3187 | |||
3188 | struct { | ||
3189 | struct perf_event_header header; | ||
3190 | |||
3191 | u32 pid; | ||
3192 | u32 ppid; | ||
3193 | u32 tid; | ||
3194 | u32 ptid; | ||
3195 | u64 time; | ||
3196 | } event_id; | ||
3197 | }; | ||
3198 | |||
3199 | static void perf_event_task_output(struct perf_event *event, | ||
3200 | struct perf_task_event *task_event) | ||
3201 | { | ||
3202 | struct perf_output_handle handle; | ||
3203 | int size; | ||
3204 | struct task_struct *task = task_event->task; | ||
3205 | int ret; | ||
3206 | |||
3207 | size = task_event->event_id.header.size; | ||
3208 | ret = perf_output_begin(&handle, event, size, 0, 0); | ||
3209 | |||
3210 | if (ret) | ||
3211 | return; | ||
3212 | |||
3213 | task_event->event_id.pid = perf_event_pid(event, task); | ||
3214 | task_event->event_id.ppid = perf_event_pid(event, current); | ||
3215 | |||
3216 | task_event->event_id.tid = perf_event_tid(event, task); | ||
3217 | task_event->event_id.ptid = perf_event_tid(event, current); | ||
3218 | |||
3219 | task_event->event_id.time = perf_clock(); | ||
3220 | |||
3221 | perf_output_put(&handle, task_event->event_id); | ||
3222 | |||
3223 | perf_output_end(&handle); | ||
3224 | } | ||
3225 | |||
3226 | static int perf_event_task_match(struct perf_event *event) | ||
3227 | { | ||
3228 | if (event->attr.comm || event->attr.mmap || event->attr.task) | ||
3229 | return 1; | ||
3230 | |||
3231 | return 0; | ||
3232 | } | ||
3233 | |||
3234 | static void perf_event_task_ctx(struct perf_event_context *ctx, | ||
3235 | struct perf_task_event *task_event) | ||
3236 | { | ||
3237 | struct perf_event *event; | ||
3238 | |||
3239 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3240 | return; | ||
3241 | |||
3242 | rcu_read_lock(); | ||
3243 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
3244 | if (perf_event_task_match(event)) | ||
3245 | perf_event_task_output(event, task_event); | ||
3246 | } | ||
3247 | rcu_read_unlock(); | ||
3248 | } | ||
3249 | |||
3250 | static void perf_event_task_event(struct perf_task_event *task_event) | ||
3251 | { | ||
3252 | struct perf_cpu_context *cpuctx; | ||
3253 | struct perf_event_context *ctx = task_event->task_ctx; | ||
3254 | |||
3255 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
3256 | perf_event_task_ctx(&cpuctx->ctx, task_event); | ||
3257 | put_cpu_var(perf_cpu_context); | ||
3258 | |||
3259 | rcu_read_lock(); | ||
3260 | if (!ctx) | ||
3261 | ctx = rcu_dereference(task_event->task->perf_event_ctxp); | ||
3262 | if (ctx) | ||
3263 | perf_event_task_ctx(ctx, task_event); | ||
3264 | rcu_read_unlock(); | ||
3265 | } | ||
3266 | |||
3267 | static void perf_event_task(struct task_struct *task, | ||
3268 | struct perf_event_context *task_ctx, | ||
3269 | int new) | ||
3270 | { | ||
3271 | struct perf_task_event task_event; | ||
3272 | |||
3273 | if (!atomic_read(&nr_comm_events) && | ||
3274 | !atomic_read(&nr_mmap_events) && | ||
3275 | !atomic_read(&nr_task_events)) | ||
3276 | return; | ||
3277 | |||
3278 | task_event = (struct perf_task_event){ | ||
3279 | .task = task, | ||
3280 | .task_ctx = task_ctx, | ||
3281 | .event_id = { | ||
3282 | .header = { | ||
3283 | .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, | ||
3284 | .misc = 0, | ||
3285 | .size = sizeof(task_event.event_id), | ||
3286 | }, | ||
3287 | /* .pid */ | ||
3288 | /* .ppid */ | ||
3289 | /* .tid */ | ||
3290 | /* .ptid */ | ||
3291 | }, | ||
3292 | }; | ||
3293 | |||
3294 | perf_event_task_event(&task_event); | ||
3295 | } | ||
3296 | |||
3297 | void perf_event_fork(struct task_struct *task) | ||
3298 | { | ||
3299 | perf_event_task(task, NULL, 1); | ||
3300 | } | ||
3301 | |||
3302 | /* | ||
3303 | * comm tracking | ||
3304 | */ | ||
3305 | |||
3306 | struct perf_comm_event { | ||
3307 | struct task_struct *task; | ||
3308 | char *comm; | ||
3309 | int comm_size; | ||
3310 | |||
3311 | struct { | ||
3312 | struct perf_event_header header; | ||
3313 | |||
3314 | u32 pid; | ||
3315 | u32 tid; | ||
3316 | } event_id; | ||
3317 | }; | ||
3318 | |||
3319 | static void perf_event_comm_output(struct perf_event *event, | ||
3320 | struct perf_comm_event *comm_event) | ||
3321 | { | ||
3322 | struct perf_output_handle handle; | ||
3323 | int size = comm_event->event_id.header.size; | ||
3324 | int ret = perf_output_begin(&handle, event, size, 0, 0); | ||
3325 | |||
3326 | if (ret) | ||
3327 | return; | ||
3328 | |||
3329 | comm_event->event_id.pid = perf_event_pid(event, comm_event->task); | ||
3330 | comm_event->event_id.tid = perf_event_tid(event, comm_event->task); | ||
3331 | |||
3332 | perf_output_put(&handle, comm_event->event_id); | ||
3333 | perf_output_copy(&handle, comm_event->comm, | ||
3334 | comm_event->comm_size); | ||
3335 | perf_output_end(&handle); | ||
3336 | } | ||
3337 | |||
3338 | static int perf_event_comm_match(struct perf_event *event) | ||
3339 | { | ||
3340 | if (event->attr.comm) | ||
3341 | return 1; | ||
3342 | |||
3343 | return 0; | ||
3344 | } | ||
3345 | |||
3346 | static void perf_event_comm_ctx(struct perf_event_context *ctx, | ||
3347 | struct perf_comm_event *comm_event) | ||
3348 | { | ||
3349 | struct perf_event *event; | ||
3350 | |||
3351 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3352 | return; | ||
3353 | |||
3354 | rcu_read_lock(); | ||
3355 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
3356 | if (perf_event_comm_match(event)) | ||
3357 | perf_event_comm_output(event, comm_event); | ||
3358 | } | ||
3359 | rcu_read_unlock(); | ||
3360 | } | ||
3361 | |||
3362 | static void perf_event_comm_event(struct perf_comm_event *comm_event) | ||
3363 | { | ||
3364 | struct perf_cpu_context *cpuctx; | ||
3365 | struct perf_event_context *ctx; | ||
3366 | unsigned int size; | ||
3367 | char comm[TASK_COMM_LEN]; | ||
3368 | |||
3369 | memset(comm, 0, sizeof(comm)); | ||
3370 | strncpy(comm, comm_event->task->comm, sizeof(comm)); | ||
3371 | size = ALIGN(strlen(comm)+1, sizeof(u64)); | ||
3372 | |||
3373 | comm_event->comm = comm; | ||
3374 | comm_event->comm_size = size; | ||
3375 | |||
3376 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; | ||
3377 | |||
3378 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
3379 | perf_event_comm_ctx(&cpuctx->ctx, comm_event); | ||
3380 | put_cpu_var(perf_cpu_context); | ||
3381 | |||
3382 | rcu_read_lock(); | ||
3383 | /* | ||
3384 | * doesn't really matter which of the child contexts the | ||
3385 | * event ends up in. | ||
3386 | */ | ||
3387 | ctx = rcu_dereference(current->perf_event_ctxp); | ||
3388 | if (ctx) | ||
3389 | perf_event_comm_ctx(ctx, comm_event); | ||
3390 | rcu_read_unlock(); | ||
3391 | } | ||
3392 | |||
3393 | void perf_event_comm(struct task_struct *task) | ||
3394 | { | ||
3395 | struct perf_comm_event comm_event; | ||
3396 | |||
3397 | if (task->perf_event_ctxp) | ||
3398 | perf_event_enable_on_exec(task); | ||
3399 | |||
3400 | if (!atomic_read(&nr_comm_events)) | ||
3401 | return; | ||
3402 | |||
3403 | comm_event = (struct perf_comm_event){ | ||
3404 | .task = task, | ||
3405 | /* .comm */ | ||
3406 | /* .comm_size */ | ||
3407 | .event_id = { | ||
3408 | .header = { | ||
3409 | .type = PERF_RECORD_COMM, | ||
3410 | .misc = 0, | ||
3411 | /* .size */ | ||
3412 | }, | ||
3413 | /* .pid */ | ||
3414 | /* .tid */ | ||
3415 | }, | ||
3416 | }; | ||
3417 | |||
3418 | perf_event_comm_event(&comm_event); | ||
3419 | } | ||
3420 | |||
3421 | /* | ||
3422 | * mmap tracking | ||
3423 | */ | ||
3424 | |||
3425 | struct perf_mmap_event { | ||
3426 | struct vm_area_struct *vma; | ||
3427 | |||
3428 | const char *file_name; | ||
3429 | int file_size; | ||
3430 | |||
3431 | struct { | ||
3432 | struct perf_event_header header; | ||
3433 | |||
3434 | u32 pid; | ||
3435 | u32 tid; | ||
3436 | u64 start; | ||
3437 | u64 len; | ||
3438 | u64 pgoff; | ||
3439 | } event_id; | ||
3440 | }; | ||
3441 | |||
3442 | static void perf_event_mmap_output(struct perf_event *event, | ||
3443 | struct perf_mmap_event *mmap_event) | ||
3444 | { | ||
3445 | struct perf_output_handle handle; | ||
3446 | int size = mmap_event->event_id.header.size; | ||
3447 | int ret = perf_output_begin(&handle, event, size, 0, 0); | ||
3448 | |||
3449 | if (ret) | ||
3450 | return; | ||
3451 | |||
3452 | mmap_event->event_id.pid = perf_event_pid(event, current); | ||
3453 | mmap_event->event_id.tid = perf_event_tid(event, current); | ||
3454 | |||
3455 | perf_output_put(&handle, mmap_event->event_id); | ||
3456 | perf_output_copy(&handle, mmap_event->file_name, | ||
3457 | mmap_event->file_size); | ||
3458 | perf_output_end(&handle); | ||
3459 | } | ||
3460 | |||
3461 | static int perf_event_mmap_match(struct perf_event *event, | ||
3462 | struct perf_mmap_event *mmap_event) | ||
3463 | { | ||
3464 | if (event->attr.mmap) | ||
3465 | return 1; | ||
3466 | |||
3467 | return 0; | ||
3468 | } | ||
3469 | |||
3470 | static void perf_event_mmap_ctx(struct perf_event_context *ctx, | ||
3471 | struct perf_mmap_event *mmap_event) | ||
3472 | { | ||
3473 | struct perf_event *event; | ||
3474 | |||
3475 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3476 | return; | ||
3477 | |||
3478 | rcu_read_lock(); | ||
3479 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
3480 | if (perf_event_mmap_match(event, mmap_event)) | ||
3481 | perf_event_mmap_output(event, mmap_event); | ||
3482 | } | ||
3483 | rcu_read_unlock(); | ||
3484 | } | ||
3485 | |||
3486 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | ||
3487 | { | ||
3488 | struct perf_cpu_context *cpuctx; | ||
3489 | struct perf_event_context *ctx; | ||
3490 | struct vm_area_struct *vma = mmap_event->vma; | ||
3491 | struct file *file = vma->vm_file; | ||
3492 | unsigned int size; | ||
3493 | char tmp[16]; | ||
3494 | char *buf = NULL; | ||
3495 | const char *name; | ||
3496 | |||
3497 | memset(tmp, 0, sizeof(tmp)); | ||
3498 | |||
3499 | if (file) { | ||
3500 | /* | ||
3501 | * d_path works from the end of the buffer backwards, so we | ||
3502 | * need to add enough zero bytes after the string to handle | ||
3503 | * the 64-bit alignment we do later. | ||
3504 | */ | ||
3505 | buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); | ||
3506 | if (!buf) { | ||
3507 | name = strncpy(tmp, "//enomem", sizeof(tmp)); | ||
3508 | goto got_name; | ||
3509 | } | ||
3510 | name = d_path(&file->f_path, buf, PATH_MAX); | ||
3511 | if (IS_ERR(name)) { | ||
3512 | name = strncpy(tmp, "//toolong", sizeof(tmp)); | ||
3513 | goto got_name; | ||
3514 | } | ||
3515 | } else { | ||
3516 | if (arch_vma_name(mmap_event->vma)) { | ||
3517 | name = strncpy(tmp, arch_vma_name(mmap_event->vma), | ||
3518 | sizeof(tmp)); | ||
3519 | goto got_name; | ||
3520 | } | ||
3521 | |||
3522 | if (!vma->vm_mm) { | ||
3523 | name = strncpy(tmp, "[vdso]", sizeof(tmp)); | ||
3524 | goto got_name; | ||
3525 | } | ||
3526 | |||
3527 | name = strncpy(tmp, "//anon", sizeof(tmp)); | ||
3528 | goto got_name; | ||
3529 | } | ||
3530 | |||
3531 | got_name: | ||
3532 | size = ALIGN(strlen(name)+1, sizeof(u64)); | ||
3533 | |||
3534 | mmap_event->file_name = name; | ||
3535 | mmap_event->file_size = size; | ||
3536 | |||
3537 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; | ||
3538 | |||
3539 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
3540 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); | ||
3541 | put_cpu_var(perf_cpu_context); | ||
3542 | |||
3543 | rcu_read_lock(); | ||
3544 | /* | ||
3545 | * doesn't really matter which of the child contexts the | ||
3546 | * event ends up in. | ||
3547 | */ | ||
3548 | ctx = rcu_dereference(current->perf_event_ctxp); | ||
3549 | if (ctx) | ||
3550 | perf_event_mmap_ctx(ctx, mmap_event); | ||
3551 | rcu_read_unlock(); | ||
3552 | |||
3553 | kfree(buf); | ||
3554 | } | ||
3555 | |||
3556 | void __perf_event_mmap(struct vm_area_struct *vma) | ||
3557 | { | ||
3558 | struct perf_mmap_event mmap_event; | ||
3559 | |||
3560 | if (!atomic_read(&nr_mmap_events)) | ||
3561 | return; | ||
3562 | |||
3563 | mmap_event = (struct perf_mmap_event){ | ||
3564 | .vma = vma, | ||
3565 | /* .file_name */ | ||
3566 | /* .file_size */ | ||
3567 | .event_id = { | ||
3568 | .header = { | ||
3569 | .type = PERF_RECORD_MMAP, | ||
3570 | .misc = 0, | ||
3571 | /* .size */ | ||
3572 | }, | ||
3573 | /* .pid */ | ||
3574 | /* .tid */ | ||
3575 | .start = vma->vm_start, | ||
3576 | .len = vma->vm_end - vma->vm_start, | ||
3577 | .pgoff = vma->vm_pgoff, | ||
3578 | }, | ||
3579 | }; | ||
3580 | |||
3581 | perf_event_mmap_event(&mmap_event); | ||
3582 | } | ||
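
The PERF_RECORD_MMAP payload built above is the event_id fields followed by the file name, NUL-terminated and zero-padded up to a u64 boundary (the ALIGN(strlen()+1, sizeof(u64)) above); that padding is also why the d_path() buffer is allocated as PATH_MAX + sizeof(u64). A hedged consumer-side sketch of that layout; struct mmap_record and print_mmap_record() are illustrative names, not kernel API, and the perf_event_header is assumed to have been consumed already:

#include <stdint.h>
#include <stdio.h>

/* Illustrative mirror of the event_id layout emitted by __perf_event_mmap(). */
struct mmap_record {
	uint32_t pid, tid;
	uint64_t start, len, pgoff;
	char     filename[];		/* NUL-terminated, padded to an 8-byte multiple */
};

static void print_mmap_record(const struct mmap_record *r)
{
	printf("%u/%u: [%#llx+%#llx) @ %#llx  %s\n",
	       r->pid, r->tid,
	       (unsigned long long)r->start,
	       (unsigned long long)r->len,
	       (unsigned long long)r->pgoff,
	       r->filename);
}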
3583 | |||
3584 | /* | ||
3585 | * IRQ throttle logging | ||
3586 | */ | ||
3587 | |||
3588 | static void perf_log_throttle(struct perf_event *event, int enable) | ||
3589 | { | ||
3590 | struct perf_output_handle handle; | ||
3591 | int ret; | ||
3592 | |||
3593 | struct { | ||
3594 | struct perf_event_header header; | ||
3595 | u64 time; | ||
3596 | u64 id; | ||
3597 | u64 stream_id; | ||
3598 | } throttle_event = { | ||
3599 | .header = { | ||
3600 | .type = PERF_RECORD_THROTTLE, | ||
3601 | .misc = 0, | ||
3602 | .size = sizeof(throttle_event), | ||
3603 | }, | ||
3604 | .time = perf_clock(), | ||
3605 | .id = primary_event_id(event), | ||
3606 | .stream_id = event->id, | ||
3607 | }; | ||
3608 | |||
3609 | if (enable) | ||
3610 | throttle_event.header.type = PERF_RECORD_UNTHROTTLE; | ||
3611 | |||
3612 | ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0); | ||
3613 | if (ret) | ||
3614 | return; | ||
3615 | |||
3616 | perf_output_put(&handle, throttle_event); | ||
3617 | perf_output_end(&handle); | ||
3618 | } | ||
3619 | |||
3620 | /* | ||
3621 | * Generic event overflow handling, sampling. | ||
3622 | */ | ||
3623 | |||
3624 | static int __perf_event_overflow(struct perf_event *event, int nmi, | ||
3625 | int throttle, struct perf_sample_data *data, | ||
3626 | struct pt_regs *regs) | ||
3627 | { | ||
3628 | int events = atomic_read(&event->event_limit); | ||
3629 | struct hw_perf_event *hwc = &event->hw; | ||
3630 | int ret = 0; | ||
3631 | |||
3632 | throttle = (throttle && event->pmu->unthrottle != NULL); | ||
3633 | |||
3634 | if (!throttle) { | ||
3635 | hwc->interrupts++; | ||
3636 | } else { | ||
3637 | if (hwc->interrupts != MAX_INTERRUPTS) { | ||
3638 | hwc->interrupts++; | ||
3639 | if (HZ * hwc->interrupts > | ||
3640 | (u64)sysctl_perf_event_sample_rate) { | ||
3641 | hwc->interrupts = MAX_INTERRUPTS; | ||
3642 | perf_log_throttle(event, 0); | ||
3643 | ret = 1; | ||
3644 | } | ||
3645 | } else { | ||
3646 | /* | ||
3647 | * Keep re-disabling the event even though we disabled it on | ||
3648 | * the previous pass - just in case we raced with a | ||
3649 | * sched-in and the event got enabled again: | ||
3650 | */ | ||
3651 | ret = 1; | ||
3652 | } | ||
3653 | } | ||
3654 | |||
3655 | if (event->attr.freq) { | ||
3656 | u64 now = perf_clock(); | ||
3657 | s64 delta = now - hwc->freq_stamp; | ||
3658 | |||
3659 | hwc->freq_stamp = now; | ||
3660 | |||
3661 | if (delta > 0 && delta < TICK_NSEC) | ||
3662 | perf_adjust_period(event, NSEC_PER_SEC / (int)delta); | ||
3663 | } | ||
3664 | |||
3665 | /* | ||
3666 | * XXX event_limit might not quite work as expected on inherited | ||
3667 | * events | ||
3668 | */ | ||
3669 | |||
3670 | event->pending_kill = POLL_IN; | ||
3671 | if (events && atomic_dec_and_test(&event->event_limit)) { | ||
3672 | ret = 1; | ||
3673 | event->pending_kill = POLL_HUP; | ||
3674 | if (nmi) { | ||
3675 | event->pending_disable = 1; | ||
3676 | perf_pending_queue(&event->pending, | ||
3677 | perf_pending_event); | ||
3678 | } else | ||
3679 | perf_event_disable(event); | ||
3680 | } | ||
3681 | |||
3682 | perf_event_output(event, nmi, data, regs); | ||
3683 | return ret; | ||
3684 | } | ||
3685 | |||
3686 | int perf_event_overflow(struct perf_event *event, int nmi, | ||
3687 | struct perf_sample_data *data, | ||
3688 | struct pt_regs *regs) | ||
3689 | { | ||
3690 | return __perf_event_overflow(event, nmi, 1, data, regs); | ||
3691 | } | ||
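
The throttle test above compares the overflows seen so far against the sysctl'd sample rate scaled to one tick: HZ * interrupts > sample_rate is equivalent to interrupts > sample_rate / HZ, so the event is throttled once it exceeds its per-tick budget and stays throttled (interrupts = MAX_INTERRUPTS) until unthrottled. A toy sketch of that budget with hypothetical numbers:

#include <stdio.h>

/* Toy version of the throttle condition in __perf_event_overflow():
 * HZ * interrupts > sample_rate  <=>  interrupts > sample_rate / HZ. */
int main(void)
{
	unsigned long hz = 1000;		/* hypothetical CONFIG_HZ */
	unsigned long sample_rate = 100000;	/* hypothetical sysctl_perf_event_sample_rate */

	printf("allowed overflows per tick before throttling: %lu\n",
	       sample_rate / hz);
	return 0;
}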
3692 | |||
3693 | /* | ||
3694 | * Generic software event infrastructure | ||
3695 | */ | ||
3696 | |||
3697 | /* | ||
3698 | * We directly increment event->count and keep a second value in | ||
3699 | * event->hw.period_left to count intervals. This period value | ||
3700 | * is kept in the range [-sample_period, 0] so that we can use the | ||
3701 | * sign as the overflow trigger. | ||
3702 | */ | ||
3703 | |||
3704 | static u64 perf_swevent_set_period(struct perf_event *event) | ||
3705 | { | ||
3706 | struct hw_perf_event *hwc = &event->hw; | ||
3707 | u64 period = hwc->last_period; | ||
3708 | u64 nr, offset; | ||
3709 | s64 old, val; | ||
3710 | |||
3711 | hwc->last_period = hwc->sample_period; | ||
3712 | |||
3713 | again: | ||
3714 | old = val = atomic64_read(&hwc->period_left); | ||
3715 | if (val < 0) | ||
3716 | return 0; | ||
3717 | |||
3718 | nr = div64_u64(period + val, period); | ||
3719 | offset = nr * period; | ||
3720 | val -= offset; | ||
3721 | if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) | ||
3722 | goto again; | ||
3723 | |||
3724 | return nr; | ||
3725 | } | ||
3726 | |||
3727 | static void perf_swevent_overflow(struct perf_event *event, | ||
3728 | int nmi, struct perf_sample_data *data, | ||
3729 | struct pt_regs *regs) | ||
3730 | { | ||
3731 | struct hw_perf_event *hwc = &event->hw; | ||
3732 | int throttle = 0; | ||
3733 | u64 overflow; | ||
3734 | |||
3735 | data->period = event->hw.last_period; | ||
3736 | overflow = perf_swevent_set_period(event); | ||
3737 | |||
3738 | if (hwc->interrupts == MAX_INTERRUPTS) | ||
3739 | return; | ||
3740 | |||
3741 | for (; overflow; overflow--) { | ||
3742 | if (__perf_event_overflow(event, nmi, throttle, | ||
3743 | data, regs)) { | ||
3744 | /* | ||
3745 | * We inhibit the overflow from happening when | ||
3746 | * hwc->interrupts == MAX_INTERRUPTS. | ||
3747 | */ | ||
3748 | break; | ||
3749 | } | ||
3750 | throttle = 1; | ||
3751 | } | ||
3752 | } | ||
3753 | |||
3754 | static void perf_swevent_unthrottle(struct perf_event *event) | ||
3755 | { | ||
3756 | /* | ||
3757 | * Nothing to do, we already reset hwc->interrupts. | ||
3758 | */ | ||
3759 | } | ||
3760 | |||
3761 | static void perf_swevent_add(struct perf_event *event, u64 nr, | ||
3762 | int nmi, struct perf_sample_data *data, | ||
3763 | struct pt_regs *regs) | ||
3764 | { | ||
3765 | struct hw_perf_event *hwc = &event->hw; | ||
3766 | |||
3767 | atomic64_add(nr, &event->count); | ||
3768 | |||
3769 | if (!hwc->sample_period) | ||
3770 | return; | ||
3771 | |||
3772 | if (!regs) | ||
3773 | return; | ||
3774 | |||
3775 | if (!atomic64_add_negative(nr, &hwc->period_left)) | ||
3776 | perf_swevent_overflow(event, nmi, data, regs); | ||
3777 | } | ||
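
The [-sample_period, 0] bookkeeping described above can be seen with concrete numbers: perf_swevent_add() accumulates into period_left, and once the value turns non-negative, perf_swevent_set_period() counts how many whole periods elapsed and rewinds the remainder. A standalone toy sketch of just that arithmetic, with no atomics and hypothetical burst sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t sample_period = 100;
	int64_t period_left = -sample_period;	/* start of a fresh period */
	uint64_t counts[] = { 40, 30, 50, 250 };/* hypothetical event bursts */
	int64_t nr;
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		period_left += counts[i];	/* what perf_swevent_add() does */
		if (period_left < 0)
			continue;		/* still inside the current period */

		/* mirror of perf_swevent_set_period(): whole periods elapsed,
		 * then rewind period_left back below zero */
		nr = (sample_period + period_left) / sample_period;
		period_left -= nr * sample_period;
		printf("burst %u -> %lld overflow(s), period_left = %lld\n",
		       i, (long long)nr, (long long)period_left);
	}
	return 0;
}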
3778 | |||
3779 | static int perf_swevent_is_counting(struct perf_event *event) | ||
3780 | { | ||
3781 | /* | ||
3782 | * The event is active, we're good! | ||
3783 | */ | ||
3784 | if (event->state == PERF_EVENT_STATE_ACTIVE) | ||
3785 | return 1; | ||
3786 | |||
3787 | /* | ||
3788 | * The event is off/error, not counting. | ||
3789 | */ | ||
3790 | if (event->state != PERF_EVENT_STATE_INACTIVE) | ||
3791 | return 0; | ||
3792 | |||
3793 | /* | ||
3794 | * The event is inactive; if the context is active, | ||
3795 | * we're part of a group that didn't make it onto the 'pmu', | ||
3796 | * so we're not counting. | ||
3797 | */ | ||
3798 | if (event->ctx->is_active) | ||
3799 | return 0; | ||
3800 | |||
3801 | /* | ||
3802 | * We're inactive and the context is too, this means the | ||
3803 | * task is scheduled out, we're counting events that happen | ||
3804 | * to us, like migration events. | ||
3805 | */ | ||
3806 | return 1; | ||
3807 | } | ||
3808 | |||
3809 | static int perf_swevent_match(struct perf_event *event, | ||
3810 | enum perf_type_id type, | ||
3811 | u32 event_id, struct pt_regs *regs) | ||
3812 | { | ||
3813 | if (!perf_swevent_is_counting(event)) | ||
3814 | return 0; | ||
3815 | |||
3816 | if (event->attr.type != type) | ||
3817 | return 0; | ||
3818 | if (event->attr.config != event_id) | ||
3819 | return 0; | ||
3820 | |||
3821 | if (regs) { | ||
3822 | if (event->attr.exclude_user && user_mode(regs)) | ||
3823 | return 0; | ||
3824 | |||
3825 | if (event->attr.exclude_kernel && !user_mode(regs)) | ||
3826 | return 0; | ||
3827 | } | ||
3828 | |||
3829 | return 1; | ||
3830 | } | ||
3831 | |||
3832 | static void perf_swevent_ctx_event(struct perf_event_context *ctx, | ||
3833 | enum perf_type_id type, | ||
3834 | u32 event_id, u64 nr, int nmi, | ||
3835 | struct perf_sample_data *data, | ||
3836 | struct pt_regs *regs) | ||
3837 | { | ||
3838 | struct perf_event *event; | ||
3839 | |||
3840 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3841 | return; | ||
3842 | |||
3843 | rcu_read_lock(); | ||
3844 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | ||
3845 | if (perf_swevent_match(event, type, event_id, regs)) | ||
3846 | perf_swevent_add(event, nr, nmi, data, regs); | ||
3847 | } | ||
3848 | rcu_read_unlock(); | ||
3849 | } | ||
3850 | |||
3851 | static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx) | ||
3852 | { | ||
3853 | if (in_nmi()) | ||
3854 | return &cpuctx->recursion[3]; | ||
3855 | |||
3856 | if (in_irq()) | ||
3857 | return &cpuctx->recursion[2]; | ||
3858 | |||
3859 | if (in_softirq()) | ||
3860 | return &cpuctx->recursion[1]; | ||
3861 | |||
3862 | return &cpuctx->recursion[0]; | ||
3863 | } | ||
3864 | |||
3865 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | ||
3866 | u64 nr, int nmi, | ||
3867 | struct perf_sample_data *data, | ||
3868 | struct pt_regs *regs) | ||
3869 | { | ||
3870 | struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); | ||
3871 | int *recursion = perf_swevent_recursion_context(cpuctx); | ||
3872 | struct perf_event_context *ctx; | ||
3873 | |||
3874 | if (*recursion) | ||
3875 | goto out; | ||
3876 | |||
3877 | (*recursion)++; | ||
3878 | barrier(); | ||
3879 | |||
3880 | perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, | ||
3881 | nr, nmi, data, regs); | ||
3882 | rcu_read_lock(); | ||
3883 | /* | ||
3884 | * doesn't really matter which of the child contexts the | ||
3885 | * event ends up in. | ||
3886 | */ | ||
3887 | ctx = rcu_dereference(current->perf_event_ctxp); | ||
3888 | if (ctx) | ||
3889 | perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); | ||
3890 | rcu_read_unlock(); | ||
3891 | |||
3892 | barrier(); | ||
3893 | (*recursion)--; | ||
3894 | |||
3895 | out: | ||
3896 | put_cpu_var(perf_cpu_context); | ||
3897 | } | ||
3898 | |||
3899 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, | ||
3900 | struct pt_regs *regs, u64 addr) | ||
3901 | { | ||
3902 | struct perf_sample_data data = { | ||
3903 | .addr = addr, | ||
3904 | }; | ||
3905 | |||
3906 | do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, | ||
3907 | &data, regs); | ||
3908 | } | ||
3909 | |||
3910 | static void perf_swevent_read(struct perf_event *event) | ||
3911 | { | ||
3912 | } | ||
3913 | |||
3914 | static int perf_swevent_enable(struct perf_event *event) | ||
3915 | { | ||
3916 | struct hw_perf_event *hwc = &event->hw; | ||
3917 | |||
3918 | if (hwc->sample_period) { | ||
3919 | hwc->last_period = hwc->sample_period; | ||
3920 | perf_swevent_set_period(event); | ||
3921 | } | ||
3922 | return 0; | ||
3923 | } | ||
3924 | |||
3925 | static void perf_swevent_disable(struct perf_event *event) | ||
3926 | { | ||
3927 | } | ||
3928 | |||
3929 | static const struct pmu perf_ops_generic = { | ||
3930 | .enable = perf_swevent_enable, | ||
3931 | .disable = perf_swevent_disable, | ||
3932 | .read = perf_swevent_read, | ||
3933 | .unthrottle = perf_swevent_unthrottle, | ||
3934 | }; | ||
3935 | |||
3936 | /* | ||
3937 | * hrtimer based swevent callback | ||
3938 | */ | ||
3939 | |||
3940 | static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | ||
3941 | { | ||
3942 | enum hrtimer_restart ret = HRTIMER_RESTART; | ||
3943 | struct perf_sample_data data; | ||
3944 | struct pt_regs *regs; | ||
3945 | struct perf_event *event; | ||
3946 | u64 period; | ||
3947 | |||
3948 | event = container_of(hrtimer, struct perf_event, hw.hrtimer); | ||
3949 | event->pmu->read(event); | ||
3950 | |||
3951 | data.addr = 0; | ||
3952 | regs = get_irq_regs(); | ||
3953 | /* | ||
3954 | * In case we exclude kernel IPs or are somehow not in interrupt | ||
3955 | * context, provide the next best thing, the user IP. | ||
3956 | */ | ||
3957 | if ((event->attr.exclude_kernel || !regs) && | ||
3958 | !event->attr.exclude_user) | ||
3959 | regs = task_pt_regs(current); | ||
3960 | |||
3961 | if (regs) { | ||
3962 | if (!(event->attr.exclude_idle && current->pid == 0)) | ||
3963 | if (perf_event_overflow(event, 0, &data, regs)) | ||
3964 | ret = HRTIMER_NORESTART; | ||
3965 | } | ||
3966 | |||
3967 | period = max_t(u64, 10000, event->hw.sample_period); | ||
3968 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | ||
3969 | |||
3970 | return ret; | ||
3971 | } | ||
3972 | |||
3973 | static void perf_swevent_start_hrtimer(struct perf_event *event) | ||
3974 | { | ||
3975 | struct hw_perf_event *hwc = &event->hw; | ||
3976 | |||
3977 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
3978 | hwc->hrtimer.function = perf_swevent_hrtimer; | ||
3979 | if (hwc->sample_period) { | ||
3980 | u64 period; | ||
3981 | |||
3982 | if (hwc->remaining) { | ||
3983 | if (hwc->remaining < 0) | ||
3984 | period = 10000; | ||
3985 | else | ||
3986 | period = hwc->remaining; | ||
3987 | hwc->remaining = 0; | ||
3988 | } else { | ||
3989 | period = max_t(u64, 10000, hwc->sample_period); | ||
3990 | } | ||
3991 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
3992 | ns_to_ktime(period), 0, | ||
3993 | HRTIMER_MODE_REL, 0); | ||
3994 | } | ||
3995 | } | ||
3996 | |||
3997 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) | ||
3998 | { | ||
3999 | struct hw_perf_event *hwc = &event->hw; | ||
4000 | |||
4001 | if (hwc->sample_period) { | ||
4002 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); | ||
4003 | hwc->remaining = ktime_to_ns(remaining); | ||
4004 | |||
4005 | hrtimer_cancel(&hwc->hrtimer); | ||
4006 | } | ||
4007 | } | ||
4008 | |||
4009 | /* | ||
4010 | * Software event: cpu wall time clock | ||
4011 | */ | ||
4012 | |||
4013 | static void cpu_clock_perf_event_update(struct perf_event *event) | ||
4014 | { | ||
4015 | int cpu = raw_smp_processor_id(); | ||
4016 | s64 prev; | ||
4017 | u64 now; | ||
4018 | |||
4019 | now = cpu_clock(cpu); | ||
4020 | prev = atomic64_read(&event->hw.prev_count); | ||
4021 | atomic64_set(&event->hw.prev_count, now); | ||
4022 | atomic64_add(now - prev, &event->count); | ||
4023 | } | ||
4024 | |||
4025 | static int cpu_clock_perf_event_enable(struct perf_event *event) | ||
4026 | { | ||
4027 | struct hw_perf_event *hwc = &event->hw; | ||
4028 | int cpu = raw_smp_processor_id(); | ||
4029 | |||
4030 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); | ||
4031 | perf_swevent_start_hrtimer(event); | ||
4032 | |||
4033 | return 0; | ||
4034 | } | ||
4035 | |||
4036 | static void cpu_clock_perf_event_disable(struct perf_event *event) | ||
4037 | { | ||
4038 | perf_swevent_cancel_hrtimer(event); | ||
4039 | cpu_clock_perf_event_update(event); | ||
4040 | } | ||
4041 | |||
4042 | static void cpu_clock_perf_event_read(struct perf_event *event) | ||
4043 | { | ||
4044 | cpu_clock_perf_event_update(event); | ||
4045 | } | ||
4046 | |||
4047 | static const struct pmu perf_ops_cpu_clock = { | ||
4048 | .enable = cpu_clock_perf_event_enable, | ||
4049 | .disable = cpu_clock_perf_event_disable, | ||
4050 | .read = cpu_clock_perf_event_read, | ||
4051 | }; | ||
4052 | |||
4053 | /* | ||
4054 | * Software event: task time clock | ||
4055 | */ | ||
4056 | |||
4057 | static void task_clock_perf_event_update(struct perf_event *event, u64 now) | ||
4058 | { | ||
4059 | u64 prev; | ||
4060 | s64 delta; | ||
4061 | |||
4062 | prev = atomic64_xchg(&event->hw.prev_count, now); | ||
4063 | delta = now - prev; | ||
4064 | atomic64_add(delta, &event->count); | ||
4065 | } | ||
4066 | |||
4067 | static int task_clock_perf_event_enable(struct perf_event *event) | ||
4068 | { | ||
4069 | struct hw_perf_event *hwc = &event->hw; | ||
4070 | u64 now; | ||
4071 | |||
4072 | now = event->ctx->time; | ||
4073 | |||
4074 | atomic64_set(&hwc->prev_count, now); | ||
4075 | |||
4076 | perf_swevent_start_hrtimer(event); | ||
4077 | |||
4078 | return 0; | ||
4079 | } | ||
4080 | |||
4081 | static void task_clock_perf_event_disable(struct perf_event *event) | ||
4082 | { | ||
4083 | perf_swevent_cancel_hrtimer(event); | ||
4084 | task_clock_perf_event_update(event, event->ctx->time); | ||
4085 | |||
4086 | } | ||
4087 | |||
4088 | static void task_clock_perf_event_read(struct perf_event *event) | ||
4089 | { | ||
4090 | u64 time; | ||
4091 | |||
4092 | if (!in_nmi()) { | ||
4093 | update_context_time(event->ctx); | ||
4094 | time = event->ctx->time; | ||
4095 | } else { | ||
4096 | u64 now = perf_clock(); | ||
4097 | u64 delta = now - event->ctx->timestamp; | ||
4098 | time = event->ctx->time + delta; | ||
4099 | } | ||
4100 | |||
4101 | task_clock_perf_event_update(event, time); | ||
4102 | } | ||
4103 | |||
4104 | static const struct pmu perf_ops_task_clock = { | ||
4105 | .enable = task_clock_perf_event_enable, | ||
4106 | .disable = task_clock_perf_event_disable, | ||
4107 | .read = task_clock_perf_event_read, | ||
4108 | }; | ||
4109 | |||
4110 | #ifdef CONFIG_EVENT_PROFILE | ||
4111 | void perf_tp_event(int event_id, u64 addr, u64 count, void *record, | ||
4112 | int entry_size) | ||
4113 | { | ||
4114 | struct perf_raw_record raw = { | ||
4115 | .size = entry_size, | ||
4116 | .data = record, | ||
4117 | }; | ||
4118 | |||
4119 | struct perf_sample_data data = { | ||
4120 | .addr = addr, | ||
4121 | .raw = &raw, | ||
4122 | }; | ||
4123 | |||
4124 | struct pt_regs *regs = get_irq_regs(); | ||
4125 | |||
4126 | if (!regs) | ||
4127 | regs = task_pt_regs(current); | ||
4128 | |||
4129 | do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, | ||
4130 | &data, regs); | ||
4131 | } | ||
4132 | EXPORT_SYMBOL_GPL(perf_tp_event); | ||
4133 | |||
4134 | extern int ftrace_profile_enable(int); | ||
4135 | extern void ftrace_profile_disable(int); | ||
4136 | |||
4137 | static void tp_perf_event_destroy(struct perf_event *event) | ||
4138 | { | ||
4139 | ftrace_profile_disable(event->attr.config); | ||
4140 | } | ||
4141 | |||
4142 | static const struct pmu *tp_perf_event_init(struct perf_event *event) | ||
4143 | { | ||
4144 | /* | ||
4145 | * Raw tracepoint data is a severe data leak; only allow root to | ||
4146 | * have these. | ||
4147 | */ | ||
4148 | if ((event->attr.sample_type & PERF_SAMPLE_RAW) && | ||
4149 | perf_paranoid_tracepoint_raw() && | ||
4150 | !capable(CAP_SYS_ADMIN)) | ||
4151 | return ERR_PTR(-EPERM); | ||
4152 | |||
4153 | if (ftrace_profile_enable(event->attr.config)) | ||
4154 | return NULL; | ||
4155 | |||
4156 | event->destroy = tp_perf_event_destroy; | ||
4157 | |||
4158 | return &perf_ops_generic; | ||
4159 | } | ||
4160 | #else | ||
4161 | static const struct pmu *tp_perf_event_init(struct perf_event *event) | ||
4162 | { | ||
4163 | return NULL; | ||
4164 | } | ||
4165 | #endif | ||
4166 | |||
4167 | atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | ||
4168 | |||
4169 | static void sw_perf_event_destroy(struct perf_event *event) | ||
4170 | { | ||
4171 | u64 event_id = event->attr.config; | ||
4172 | |||
4173 | WARN_ON(event->parent); | ||
4174 | |||
4175 | atomic_dec(&perf_swevent_enabled[event_id]); | ||
4176 | } | ||
4177 | |||
4178 | static const struct pmu *sw_perf_event_init(struct perf_event *event) | ||
4179 | { | ||
4180 | const struct pmu *pmu = NULL; | ||
4181 | u64 event_id = event->attr.config; | ||
4182 | |||
4183 | /* | ||
4184 | * Software events (currently) can't in general distinguish | ||
4185 | * between user, kernel and hypervisor events. | ||
4186 | * However, context switches and cpu migrations are considered | ||
4187 | * to be kernel events, and page faults are never hypervisor | ||
4188 | * events. | ||
4189 | */ | ||
4190 | switch (event_id) { | ||
4191 | case PERF_COUNT_SW_CPU_CLOCK: | ||
4192 | pmu = &perf_ops_cpu_clock; | ||
4193 | |||
4194 | break; | ||
4195 | case PERF_COUNT_SW_TASK_CLOCK: | ||
4196 | /* | ||
4197 | * If the user instantiates this as a per-cpu event, | ||
4198 | * use the cpu_clock event instead. | ||
4199 | */ | ||
4200 | if (event->ctx->task) | ||
4201 | pmu = &perf_ops_task_clock; | ||
4202 | else | ||
4203 | pmu = &perf_ops_cpu_clock; | ||
4204 | |||
4205 | break; | ||
4206 | case PERF_COUNT_SW_PAGE_FAULTS: | ||
4207 | case PERF_COUNT_SW_PAGE_FAULTS_MIN: | ||
4208 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: | ||
4209 | case PERF_COUNT_SW_CONTEXT_SWITCHES: | ||
4210 | case PERF_COUNT_SW_CPU_MIGRATIONS: | ||
4211 | if (!event->parent) { | ||
4212 | atomic_inc(&perf_swevent_enabled[event_id]); | ||
4213 | event->destroy = sw_perf_event_destroy; | ||
4214 | } | ||
4215 | pmu = &perf_ops_generic; | ||
4216 | break; | ||
4217 | } | ||
4218 | |||
4219 | return pmu; | ||
4220 | } | ||
4221 | |||
4222 | /* | ||
4223 | * Allocate and initialize an event structure | ||
4224 | */ | ||
4225 | static struct perf_event * | ||
4226 | perf_event_alloc(struct perf_event_attr *attr, | ||
4227 | int cpu, | ||
4228 | struct perf_event_context *ctx, | ||
4229 | struct perf_event *group_leader, | ||
4230 | struct perf_event *parent_event, | ||
4231 | gfp_t gfpflags) | ||
4232 | { | ||
4233 | const struct pmu *pmu; | ||
4234 | struct perf_event *event; | ||
4235 | struct hw_perf_event *hwc; | ||
4236 | long err; | ||
4237 | |||
4238 | event = kzalloc(sizeof(*event), gfpflags); | ||
4239 | if (!event) | ||
4240 | return ERR_PTR(-ENOMEM); | ||
4241 | |||
4242 | /* | ||
4243 | * Single events are their own group leaders, with an | ||
4244 | * empty sibling list: | ||
4245 | */ | ||
4246 | if (!group_leader) | ||
4247 | group_leader = event; | ||
4248 | |||
4249 | mutex_init(&event->child_mutex); | ||
4250 | INIT_LIST_HEAD(&event->child_list); | ||
4251 | |||
4252 | INIT_LIST_HEAD(&event->group_entry); | ||
4253 | INIT_LIST_HEAD(&event->event_entry); | ||
4254 | INIT_LIST_HEAD(&event->sibling_list); | ||
4255 | init_waitqueue_head(&event->waitq); | ||
4256 | |||
4257 | mutex_init(&event->mmap_mutex); | ||
4258 | |||
4259 | event->cpu = cpu; | ||
4260 | event->attr = *attr; | ||
4261 | event->group_leader = group_leader; | ||
4262 | event->pmu = NULL; | ||
4263 | event->ctx = ctx; | ||
4264 | event->oncpu = -1; | ||
4265 | |||
4266 | event->parent = parent_event; | ||
4267 | |||
4268 | event->ns = get_pid_ns(current->nsproxy->pid_ns); | ||
4269 | event->id = atomic64_inc_return(&perf_event_id); | ||
4270 | |||
4271 | event->state = PERF_EVENT_STATE_INACTIVE; | ||
4272 | |||
4273 | if (attr->disabled) | ||
4274 | event->state = PERF_EVENT_STATE_OFF; | ||
4275 | |||
4276 | pmu = NULL; | ||
4277 | |||
4278 | hwc = &event->hw; | ||
4279 | hwc->sample_period = attr->sample_period; | ||
4280 | if (attr->freq && attr->sample_freq) | ||
4281 | hwc->sample_period = 1; | ||
4282 | hwc->last_period = hwc->sample_period; | ||
4283 | |||
4284 | atomic64_set(&hwc->period_left, hwc->sample_period); | ||
4285 | |||
4286 | /* | ||
4287 | * we currently do not support PERF_FORMAT_GROUP on inherited events | ||
4288 | */ | ||
4289 | if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) | ||
4290 | goto done; | ||
4291 | |||
4292 | switch (attr->type) { | ||
4293 | case PERF_TYPE_RAW: | ||
4294 | case PERF_TYPE_HARDWARE: | ||
4295 | case PERF_TYPE_HW_CACHE: | ||
4296 | pmu = hw_perf_event_init(event); | ||
4297 | break; | ||
4298 | |||
4299 | case PERF_TYPE_SOFTWARE: | ||
4300 | pmu = sw_perf_event_init(event); | ||
4301 | break; | ||
4302 | |||
4303 | case PERF_TYPE_TRACEPOINT: | ||
4304 | pmu = tp_perf_event_init(event); | ||
4305 | break; | ||
4306 | |||
4307 | default: | ||
4308 | break; | ||
4309 | } | ||
4310 | done: | ||
4311 | err = 0; | ||
4312 | if (!pmu) | ||
4313 | err = -EINVAL; | ||
4314 | else if (IS_ERR(pmu)) | ||
4315 | err = PTR_ERR(pmu); | ||
4316 | |||
4317 | if (err) { | ||
4318 | if (event->ns) | ||
4319 | put_pid_ns(event->ns); | ||
4320 | kfree(event); | ||
4321 | return ERR_PTR(err); | ||
4322 | } | ||
4323 | |||
4324 | event->pmu = pmu; | ||
4325 | |||
4326 | if (!event->parent) { | ||
4327 | atomic_inc(&nr_events); | ||
4328 | if (event->attr.mmap) | ||
4329 | atomic_inc(&nr_mmap_events); | ||
4330 | if (event->attr.comm) | ||
4331 | atomic_inc(&nr_comm_events); | ||
4332 | if (event->attr.task) | ||
4333 | atomic_inc(&nr_task_events); | ||
4334 | } | ||
4335 | |||
4336 | return event; | ||
4337 | } | ||
4338 | |||
4339 | static int perf_copy_attr(struct perf_event_attr __user *uattr, | ||
4340 | struct perf_event_attr *attr) | ||
4341 | { | ||
4342 | u32 size; | ||
4343 | int ret; | ||
4344 | |||
4345 | if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) | ||
4346 | return -EFAULT; | ||
4347 | |||
4348 | /* | ||
4349 | * zero the full structure, so that a short copy leaves the rest zeroed. | ||
4350 | */ | ||
4351 | memset(attr, 0, sizeof(*attr)); | ||
4352 | |||
4353 | ret = get_user(size, &uattr->size); | ||
4354 | if (ret) | ||
4355 | return ret; | ||
4356 | |||
4357 | if (size > PAGE_SIZE) /* silly large */ | ||
4358 | goto err_size; | ||
4359 | |||
4360 | if (!size) /* abi compat */ | ||
4361 | size = PERF_ATTR_SIZE_VER0; | ||
4362 | |||
4363 | if (size < PERF_ATTR_SIZE_VER0) | ||
4364 | goto err_size; | ||
4365 | |||
4366 | /* | ||
4367 | * If we're handed a bigger struct than we know of, | ||
4368 | * ensure all the unknown bits are 0 - i.e. new | ||
4369 | * user-space does not rely on any kernel feature | ||
4370 | * extensions we don't know about yet. | ||
4371 | */ | ||
4372 | if (size > sizeof(*attr)) { | ||
4373 | unsigned char __user *addr; | ||
4374 | unsigned char __user *end; | ||
4375 | unsigned char val; | ||
4376 | |||
4377 | addr = (void __user *)uattr + sizeof(*attr); | ||
4378 | end = (void __user *)uattr + size; | ||
4379 | |||
4380 | for (; addr < end; addr++) { | ||
4381 | ret = get_user(val, addr); | ||
4382 | if (ret) | ||
4383 | return ret; | ||
4384 | if (val) | ||
4385 | goto err_size; | ||
4386 | } | ||
4387 | size = sizeof(*attr); | ||
4388 | } | ||
4389 | |||
4390 | ret = copy_from_user(attr, uattr, size); | ||
4391 | if (ret) | ||
4392 | return -EFAULT; | ||
4393 | |||
4394 | /* | ||
4395 | * If the type exists, the corresponding creation will verify | ||
4396 | * the attr->config. | ||
4397 | */ | ||
4398 | if (attr->type >= PERF_TYPE_MAX) | ||
4399 | return -EINVAL; | ||
4400 | |||
4401 | if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) | ||
4402 | return -EINVAL; | ||
4403 | |||
4404 | if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) | ||
4405 | return -EINVAL; | ||
4406 | |||
4407 | if (attr->read_format & ~(PERF_FORMAT_MAX-1)) | ||
4408 | return -EINVAL; | ||
4409 | |||
4410 | out: | ||
4411 | return ret; | ||
4412 | |||
4413 | err_size: | ||
4414 | put_user(sizeof(*attr), &uattr->size); | ||
4415 | ret = -E2BIG; | ||
4416 | goto out; | ||
4417 | } | ||
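
perf_copy_attr() makes the attr ABI extensible in both directions: a smaller attr.size is padded with zeroes (an old binary on a newer kernel), while a larger one is accepted only if every byte beyond the kernel's struct is zero, otherwise the call fails with E2BIG and the kernel writes back the size it understands. A sketch of the cooperating userspace side, assuming the usual pattern of zeroing the struct and setting .size; init_attr() is an illustrative helper, not a library function:

#include <string.h>
#include <linux/perf_event.h>

/* Sketch: userspace side of the attr->size handshake checked by
 * perf_copy_attr() above.  Zeroing the whole structure keeps any fields
 * this binary doesn't know about at 0, and .size tells the kernel which
 * ABI revision the caller was built against. */
static void init_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));		/* unknown tail bytes must be 0 */
	attr->size = sizeof(*attr);		/* our view of the ABI */
	attr->type = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_TASK_CLOCK;
	attr->disabled = 1;			/* enable explicitly later */
}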
4418 | |||
4419 | int perf_event_set_output(struct perf_event *event, int output_fd) | ||
4420 | { | ||
4421 | struct perf_event *output_event = NULL; | ||
4422 | struct file *output_file = NULL; | ||
4423 | struct perf_event *old_output; | ||
4424 | int fput_needed = 0; | ||
4425 | int ret = -EINVAL; | ||
4426 | |||
4427 | if (!output_fd) | ||
4428 | goto set; | ||
4429 | |||
4430 | output_file = fget_light(output_fd, &fput_needed); | ||
4431 | if (!output_file) | ||
4432 | return -EBADF; | ||
4433 | |||
4434 | if (output_file->f_op != &perf_fops) | ||
4435 | goto out; | ||
4436 | |||
4437 | output_event = output_file->private_data; | ||
4438 | |||
4439 | /* Don't chain output fds */ | ||
4440 | if (output_event->output) | ||
4441 | goto out; | ||
4442 | |||
4443 | /* Don't set an output fd when we already have an output channel */ | ||
4444 | if (event->data) | ||
4445 | goto out; | ||
4446 | |||
4447 | atomic_long_inc(&output_file->f_count); | ||
4448 | |||
4449 | set: | ||
4450 | mutex_lock(&event->mmap_mutex); | ||
4451 | old_output = event->output; | ||
4452 | rcu_assign_pointer(event->output, output_event); | ||
4453 | mutex_unlock(&event->mmap_mutex); | ||
4454 | |||
4455 | if (old_output) { | ||
4456 | /* | ||
4457 | * we need to make sure no existing perf_output_*() | ||
4458 | * is still referencing this event. | ||
4459 | */ | ||
4460 | synchronize_rcu(); | ||
4461 | fput(old_output->filp); | ||
4462 | } | ||
4463 | |||
4464 | ret = 0; | ||
4465 | out: | ||
4466 | fput_light(output_file, fput_needed); | ||
4467 | return ret; | ||
4468 | } | ||
4469 | |||
4470 | /** | ||
4471 | * sys_perf_event_open - open a performance event, associate it to a task/cpu | ||
4472 | * | ||
4473 | * @attr_uptr: event_id type attributes for monitoring/sampling | ||
4474 | * @pid: target pid | ||
4475 | * @cpu: target cpu | ||
4476 | * @group_fd: group leader event fd | ||
4477 | */ | ||
4478 | SYSCALL_DEFINE5(perf_event_open, | ||
4479 | struct perf_event_attr __user *, attr_uptr, | ||
4480 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) | ||
4481 | { | ||
4482 | struct perf_event *event, *group_leader; | ||
4483 | struct perf_event_attr attr; | ||
4484 | struct perf_event_context *ctx; | ||
4485 | struct file *event_file = NULL; | ||
4486 | struct file *group_file = NULL; | ||
4487 | int fput_needed = 0; | ||
4488 | int fput_needed2 = 0; | ||
4489 | int err; | ||
4490 | |||
4491 | /* for future expandability... */ | ||
4492 | if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT)) | ||
4493 | return -EINVAL; | ||
4494 | |||
4495 | err = perf_copy_attr(attr_uptr, &attr); | ||
4496 | if (err) | ||
4497 | return err; | ||
4498 | |||
4499 | if (!attr.exclude_kernel) { | ||
4500 | if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) | ||
4501 | return -EACCES; | ||
4502 | } | ||
4503 | |||
4504 | if (attr.freq) { | ||
4505 | if (attr.sample_freq > sysctl_perf_event_sample_rate) | ||
4506 | return -EINVAL; | ||
4507 | } | ||
4508 | |||
4509 | /* | ||
4510 | * Get the target context (task or percpu): | ||
4511 | */ | ||
4512 | ctx = find_get_context(pid, cpu); | ||
4513 | if (IS_ERR(ctx)) | ||
4514 | return PTR_ERR(ctx); | ||
4515 | |||
4516 | /* | ||
4517 | * Look up the group leader (we will attach this event to it): | ||
4518 | */ | ||
4519 | group_leader = NULL; | ||
4520 | if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { | ||
4521 | err = -EINVAL; | ||
4522 | group_file = fget_light(group_fd, &fput_needed); | ||
4523 | if (!group_file) | ||
4524 | goto err_put_context; | ||
4525 | if (group_file->f_op != &perf_fops) | ||
4526 | goto err_put_context; | ||
4527 | |||
4528 | group_leader = group_file->private_data; | ||
4529 | /* | ||
4530 | * Do not allow a recursive hierarchy (this new sibling | ||
4531 | * becoming part of another group-sibling): | ||
4532 | */ | ||
4533 | if (group_leader->group_leader != group_leader) | ||
4534 | goto err_put_context; | ||
4535 | /* | ||
4536 | * Do not allow to attach to a group in a different | ||
4537 | * task or CPU context: | ||
4538 | */ | ||
4539 | if (group_leader->ctx != ctx) | ||
4540 | goto err_put_context; | ||
4541 | /* | ||
4542 | * Only a group leader can be exclusive or pinned | ||
4543 | */ | ||
4544 | if (attr.exclusive || attr.pinned) | ||
4545 | goto err_put_context; | ||
4546 | } | ||
4547 | |||
4548 | event = perf_event_alloc(&attr, cpu, ctx, group_leader, | ||
4549 | NULL, GFP_KERNEL); | ||
4550 | err = PTR_ERR(event); | ||
4551 | if (IS_ERR(event)) | ||
4552 | goto err_put_context; | ||
4553 | |||
4554 | err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0); | ||
4555 | if (err < 0) | ||
4556 | goto err_free_put_context; | ||
4557 | |||
4558 | event_file = fget_light(err, &fput_needed2); | ||
4559 | if (!event_file) | ||
4560 | goto err_free_put_context; | ||
4561 | |||
4562 | if (flags & PERF_FLAG_FD_OUTPUT) { | ||
4563 | err = perf_event_set_output(event, group_fd); | ||
4564 | if (err) | ||
4565 | goto err_fput_free_put_context; | ||
4566 | } | ||
4567 | |||
4568 | event->filp = event_file; | ||
4569 | WARN_ON_ONCE(ctx->parent_ctx); | ||
4570 | mutex_lock(&ctx->mutex); | ||
4571 | perf_install_in_context(ctx, event, cpu); | ||
4572 | ++ctx->generation; | ||
4573 | mutex_unlock(&ctx->mutex); | ||
4574 | |||
4575 | event->owner = current; | ||
4576 | get_task_struct(current); | ||
4577 | mutex_lock(¤t->perf_event_mutex); | ||
4578 | list_add_tail(&event->owner_entry, ¤t->perf_event_list); | ||
4579 | mutex_unlock(¤t->perf_event_mutex); | ||
4580 | |||
4581 | err_fput_free_put_context: | ||
4582 | fput_light(event_file, fput_needed2); | ||
4583 | |||
4584 | err_free_put_context: | ||
4585 | if (err < 0) | ||
4586 | kfree(event); | ||
4587 | |||
4588 | err_put_context: | ||
4589 | if (err < 0) | ||
4590 | put_ctx(ctx); | ||
4591 | |||
4592 | fput_light(group_file, fput_needed); | ||
4593 | |||
4594 | return err; | ||
4595 | } | ||
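
A hedged usage sketch of the syscall defined above: open a software task-clock event for the calling thread, run a workload, and read back the count. glibc provides no wrapper, so syscall(2) with __NR_perf_event_open is assumed to be available from the kernel headers; read_format is left at 0 so read() returns a single u64, and the workload is a placeholder:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.disabled = 1;

	/* pid = 0 (self), cpu = -1 (any), group_fd = -1, flags = 0 */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("task clock: %llu ns\n", (unsigned long long)count);

	close(fd);
	return 0;
}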
4596 | |||
4597 | /* | ||
4598 | * inherit an event from parent task to child task: | ||
4599 | */ | ||
4600 | static struct perf_event * | ||
4601 | inherit_event(struct perf_event *parent_event, | ||
4602 | struct task_struct *parent, | ||
4603 | struct perf_event_context *parent_ctx, | ||
4604 | struct task_struct *child, | ||
4605 | struct perf_event *group_leader, | ||
4606 | struct perf_event_context *child_ctx) | ||
4607 | { | ||
4608 | struct perf_event *child_event; | ||
4609 | |||
4610 | /* | ||
4611 | * Instead of creating recursive hierarchies of events, | ||
4612 | * we link inherited events back to the original parent, | ||
4613 | * which has a filp for sure, which we use as the reference | ||
4614 | * count: | ||
4615 | */ | ||
4616 | if (parent_event->parent) | ||
4617 | parent_event = parent_event->parent; | ||
4618 | |||
4619 | child_event = perf_event_alloc(&parent_event->attr, | ||
4620 | parent_event->cpu, child_ctx, | ||
4621 | group_leader, parent_event, | ||
4622 | GFP_KERNEL); | ||
4623 | if (IS_ERR(child_event)) | ||
4624 | return child_event; | ||
4625 | get_ctx(child_ctx); | ||
4626 | |||
4627 | /* | ||
4628 | * Make the child state follow the state of the parent event, | ||
4629 | * not its attr.disabled bit. We hold the parent's mutex, | ||
4630 | * so we won't race with perf_event_{en, dis}able_family. | ||
4631 | */ | ||
4632 | if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) | ||
4633 | child_event->state = PERF_EVENT_STATE_INACTIVE; | ||
4634 | else | ||
4635 | child_event->state = PERF_EVENT_STATE_OFF; | ||
4636 | |||
4637 | if (parent_event->attr.freq) | ||
4638 | child_event->hw.sample_period = parent_event->hw.sample_period; | ||
4639 | |||
4640 | /* | ||
4641 | * Link it up in the child's context: | ||
4642 | */ | ||
4643 | add_event_to_ctx(child_event, child_ctx); | ||
4644 | |||
4645 | /* | ||
4646 | * Get a reference to the parent filp - we will fput it | ||
4647 | * when the child event exits. This is safe to do because | ||
4648 | * we are in the parent and we know that the filp still | ||
4649 | * exists and has a nonzero count: | ||
4650 | */ | ||
4651 | atomic_long_inc(&parent_event->filp->f_count); | ||
4652 | |||
4653 | /* | ||
4654 | * Link this into the parent event's child list | ||
4655 | */ | ||
4656 | WARN_ON_ONCE(parent_event->ctx->parent_ctx); | ||
4657 | mutex_lock(&parent_event->child_mutex); | ||
4658 | list_add_tail(&child_event->child_list, &parent_event->child_list); | ||
4659 | mutex_unlock(&parent_event->child_mutex); | ||
4660 | |||
4661 | return child_event; | ||
4662 | } | ||
4663 | |||
4664 | static int inherit_group(struct perf_event *parent_event, | ||
4665 | struct task_struct *parent, | ||
4666 | struct perf_event_context *parent_ctx, | ||
4667 | struct task_struct *child, | ||
4668 | struct perf_event_context *child_ctx) | ||
4669 | { | ||
4670 | struct perf_event *leader; | ||
4671 | struct perf_event *sub; | ||
4672 | struct perf_event *child_ctr; | ||
4673 | |||
4674 | leader = inherit_event(parent_event, parent, parent_ctx, | ||
4675 | child, NULL, child_ctx); | ||
4676 | if (IS_ERR(leader)) | ||
4677 | return PTR_ERR(leader); | ||
4678 | list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { | ||
4679 | child_ctr = inherit_event(sub, parent, parent_ctx, | ||
4680 | child, leader, child_ctx); | ||
4681 | if (IS_ERR(child_ctr)) | ||
4682 | return PTR_ERR(child_ctr); | ||
4683 | } | ||
4684 | return 0; | ||
4685 | } | ||
4686 | |||
4687 | static void sync_child_event(struct perf_event *child_event, | ||
4688 | struct task_struct *child) | ||
4689 | { | ||
4690 | struct perf_event *parent_event = child_event->parent; | ||
4691 | u64 child_val; | ||
4692 | |||
4693 | if (child_event->attr.inherit_stat) | ||
4694 | perf_event_read_event(child_event, child); | ||
4695 | |||
4696 | child_val = atomic64_read(&child_event->count); | ||
4697 | |||
4698 | /* | ||
4699 | * Add back the child's count to the parent's count: | ||
4700 | */ | ||
4701 | atomic64_add(child_val, &parent_event->count); | ||
4702 | atomic64_add(child_event->total_time_enabled, | ||
4703 | &parent_event->child_total_time_enabled); | ||
4704 | atomic64_add(child_event->total_time_running, | ||
4705 | &parent_event->child_total_time_running); | ||
4706 | |||
4707 | /* | ||
4708 | * Remove this event from the parent's list | ||
4709 | */ | ||
4710 | WARN_ON_ONCE(parent_event->ctx->parent_ctx); | ||
4711 | mutex_lock(&parent_event->child_mutex); | ||
4712 | list_del_init(&child_event->child_list); | ||
4713 | mutex_unlock(&parent_event->child_mutex); | ||
4714 | |||
4715 | /* | ||
4716 | * Release the parent event, if this was the last | ||
4717 | * reference to it. | ||
4718 | */ | ||
4719 | fput(parent_event->filp); | ||
4720 | } | ||
4721 | |||
4722 | static void | ||
4723 | __perf_event_exit_task(struct perf_event *child_event, | ||
4724 | struct perf_event_context *child_ctx, | ||
4725 | struct task_struct *child) | ||
4726 | { | ||
4727 | struct perf_event *parent_event; | ||
4728 | |||
4729 | update_event_times(child_event); | ||
4730 | perf_event_remove_from_context(child_event); | ||
4731 | |||
4732 | parent_event = child_event->parent; | ||
4733 | /* | ||
4734 | * It can happen that the parent exits first and has events | ||
4735 | * that are still around due to the child reference. These | ||
4736 | * events need to be zapped - but otherwise linger. | ||
4737 | */ | ||
4738 | if (parent_event) { | ||
4739 | sync_child_event(child_event, child); | ||
4740 | free_event(child_event); | ||
4741 | } | ||
4742 | } | ||
4743 | |||
4744 | /* | ||
4745 | * When a child task exits, feed back event values to parent events. | ||
4746 | */ | ||
4747 | void perf_event_exit_task(struct task_struct *child) | ||
4748 | { | ||
4749 | struct perf_event *child_event, *tmp; | ||
4750 | struct perf_event_context *child_ctx; | ||
4751 | unsigned long flags; | ||
4752 | |||
4753 | if (likely(!child->perf_event_ctxp)) { | ||
4754 | perf_event_task(child, NULL, 0); | ||
4755 | return; | ||
4756 | } | ||
4757 | |||
4758 | local_irq_save(flags); | ||
4759 | /* | ||
4760 | * We can't reschedule here because interrupts are disabled, | ||
4761 | * and either child is current or it is a task that can't be | ||
4762 | * scheduled, so we are now safe from rescheduling changing | ||
4763 | * our context. | ||
4764 | */ | ||
4765 | child_ctx = child->perf_event_ctxp; | ||
4766 | __perf_event_task_sched_out(child_ctx); | ||
4767 | |||
4768 | /* | ||
4769 | * Take the context lock here so that if find_get_context is | ||
4770 | * reading child->perf_event_ctxp, we wait until it has | ||
4771 | * incremented the context's refcount before we do put_ctx below. | ||
4772 | */ | ||
4773 | spin_lock(&child_ctx->lock); | ||
4774 | child->perf_event_ctxp = NULL; | ||
4775 | /* | ||
4776 | * If this context is a clone; unclone it so it can't get | ||
4777 | * swapped to another process while we're removing all | ||
4778 | * the events from it. | ||
4779 | */ | ||
4780 | unclone_ctx(child_ctx); | ||
4781 | spin_unlock_irqrestore(&child_ctx->lock, flags); | ||
4782 | |||
4783 | /* | ||
4784 | * Report the task dead after unscheduling the events so that we | ||
4785 | * won't get any samples after PERF_RECORD_EXIT. We can however still | ||
4786 | * get a few PERF_RECORD_READ events. | ||
4787 | */ | ||
4788 | perf_event_task(child, child_ctx, 0); | ||
4789 | |||
4790 | /* | ||
4791 | * We can recurse on the same lock type through: | ||
4792 | * | ||
4793 | * __perf_event_exit_task() | ||
4794 | * sync_child_event() | ||
4795 | * fput(parent_event->filp) | ||
4796 | * perf_release() | ||
4797 | * mutex_lock(&ctx->mutex) | ||
4798 | * | ||
4799 | * But since it's the parent context it won't be the same instance. | ||
4800 | */ | ||
4801 | mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); | ||
4802 | |||
4803 | again: | ||
4804 | list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list, | ||
4805 | group_entry) | ||
4806 | __perf_event_exit_task(child_event, child_ctx, child); | ||
4807 | |||
4808 | /* | ||
4809 | * If the last event was a group event, it will have appended all | ||
4810 | * its siblings to the list, but we obtained 'tmp' before that, so it | ||
4811 | * will still point to the list head terminating the iteration. | ||
4812 | */ | ||
4813 | if (!list_empty(&child_ctx->group_list)) | ||
4814 | goto again; | ||
4815 | |||
4816 | mutex_unlock(&child_ctx->mutex); | ||
4817 | |||
4818 | put_ctx(child_ctx); | ||
4819 | } | ||
4820 | |||
4821 | /* | ||
4822 | * free an unexposed, unused context as created by inheritance in | ||
4823 | * perf_event_init_task() below; used by fork() in case of failure. | ||
4824 | */ | ||
4825 | void perf_event_free_task(struct task_struct *task) | ||
4826 | { | ||
4827 | struct perf_event_context *ctx = task->perf_event_ctxp; | ||
4828 | struct perf_event *event, *tmp; | ||
4829 | |||
4830 | if (!ctx) | ||
4831 | return; | ||
4832 | |||
4833 | mutex_lock(&ctx->mutex); | ||
4834 | again: | ||
4835 | list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) { | ||
4836 | struct perf_event *parent = event->parent; | ||
4837 | |||
4838 | if (WARN_ON_ONCE(!parent)) | ||
4839 | continue; | ||
4840 | |||
4841 | mutex_lock(&parent->child_mutex); | ||
4842 | list_del_init(&event->child_list); | ||
4843 | mutex_unlock(&parent->child_mutex); | ||
4844 | |||
4845 | fput(parent->filp); | ||
4846 | |||
4847 | list_del_event(event, ctx); | ||
4848 | free_event(event); | ||
4849 | } | ||
4850 | |||
4851 | if (!list_empty(&ctx->group_list)) | ||
4852 | goto again; | ||
4853 | |||
4854 | mutex_unlock(&ctx->mutex); | ||
4855 | |||
4856 | put_ctx(ctx); | ||
4857 | } | ||
4858 | |||
4859 | /* | ||
4860 | * Initialize the perf_event context in task_struct | ||
4861 | */ | ||
4862 | int perf_event_init_task(struct task_struct *child) | ||
4863 | { | ||
4864 | struct perf_event_context *child_ctx, *parent_ctx; | ||
4865 | struct perf_event_context *cloned_ctx; | ||
4866 | struct perf_event *event; | ||
4867 | struct task_struct *parent = current; | ||
4868 | int inherited_all = 1; | ||
4869 | int ret = 0; | ||
4870 | |||
4871 | child->perf_event_ctxp = NULL; | ||
4872 | |||
4873 | mutex_init(&child->perf_event_mutex); | ||
4874 | INIT_LIST_HEAD(&child->perf_event_list); | ||
4875 | |||
4876 | if (likely(!parent->perf_event_ctxp)) | ||
4877 | return 0; | ||
4878 | |||
4879 | /* | ||
4880 | * This is executed from the parent task context, so inherit | ||
4881 | * events that have been marked for cloning. | ||
4882 | * First allocate and initialize a context for the child. | ||
4883 | */ | ||
4884 | |||
4885 | child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL); | ||
4886 | if (!child_ctx) | ||
4887 | return -ENOMEM; | ||
4888 | |||
4889 | __perf_event_init_context(child_ctx, child); | ||
4890 | child->perf_event_ctxp = child_ctx; | ||
4891 | get_task_struct(child); | ||
4892 | |||
4893 | /* | ||
4894 | * If the parent's context is a clone, pin it so it won't get | ||
4895 | * swapped under us. | ||
4896 | */ | ||
4897 | parent_ctx = perf_pin_task_context(parent); | ||
4898 | |||
4899 | /* | ||
4900 | * No need to check if parent_ctx != NULL here; since we saw | ||
4901 | * it non-NULL earlier, the only reason for it to become NULL | ||
4902 | * is if we exit, and since we're currently in the middle of | ||
4903 | * a fork we can't be exiting at the same time. | ||
4904 | */ | ||
4905 | |||
4906 | /* | ||
4907 | * Lock the parent list. No need to lock the child - not PID | ||
4908 | * hashed yet and not running, so nobody can access it. | ||
4909 | */ | ||
4910 | mutex_lock(&parent_ctx->mutex); | ||
4911 | |||
4912 | /* | ||
4913 | * We don't have to disable NMIs - we are only looking at | ||
4914 | * the list, not manipulating it: | ||
4915 | */ | ||
4916 | list_for_each_entry(event, &parent_ctx->group_list, group_entry) { | ||
4917 | |||
4918 | if (!event->attr.inherit) { | ||
4919 | inherited_all = 0; | ||
4920 | continue; | ||
4921 | } | ||
4922 | |||
4923 | ret = inherit_group(event, parent, parent_ctx, | ||
4924 | child, child_ctx); | ||
4925 | if (ret) { | ||
4926 | inherited_all = 0; | ||
4927 | break; | ||
4928 | } | ||
4929 | } | ||
4930 | |||
4931 | if (inherited_all) { | ||
4932 | /* | ||
4933 | * Mark the child context as a clone of the parent | ||
4934 | * context, or of whatever the parent is a clone of. | ||
4935 | * Note that if the parent is a clone, it could get | ||
4936 | * uncloned at any point, but that doesn't matter | ||
4937 | * because the list of events and the generation | ||
4938 | * count can't have changed since we took the mutex. | ||
4939 | */ | ||
4940 | cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); | ||
4941 | if (cloned_ctx) { | ||
4942 | child_ctx->parent_ctx = cloned_ctx; | ||
4943 | child_ctx->parent_gen = parent_ctx->parent_gen; | ||
4944 | } else { | ||
4945 | child_ctx->parent_ctx = parent_ctx; | ||
4946 | child_ctx->parent_gen = parent_ctx->generation; | ||
4947 | } | ||
4948 | get_ctx(child_ctx->parent_ctx); | ||
4949 | } | ||
4950 | |||
4951 | mutex_unlock(&parent_ctx->mutex); | ||
4952 | |||
4953 | perf_unpin_context(parent_ctx); | ||
4954 | |||
4955 | return ret; | ||
4956 | } | ||
4957 | |||
4958 | static void __cpuinit perf_event_init_cpu(int cpu) | ||
4959 | { | ||
4960 | struct perf_cpu_context *cpuctx; | ||
4961 | |||
4962 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
4963 | __perf_event_init_context(&cpuctx->ctx, NULL); | ||
4964 | |||
4965 | spin_lock(&perf_resource_lock); | ||
4966 | cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; | ||
4967 | spin_unlock(&perf_resource_lock); | ||
4968 | |||
4969 | hw_perf_event_setup(cpu); | ||
4970 | } | ||
4971 | |||
4972 | #ifdef CONFIG_HOTPLUG_CPU | ||
4973 | static void __perf_event_exit_cpu(void *info) | ||
4974 | { | ||
4975 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
4976 | struct perf_event_context *ctx = &cpuctx->ctx; | ||
4977 | struct perf_event *event, *tmp; | ||
4978 | |||
4979 | list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) | ||
4980 | __perf_event_remove_from_context(event); | ||
4981 | } | ||
4982 | static void perf_event_exit_cpu(int cpu) | ||
4983 | { | ||
4984 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
4985 | struct perf_event_context *ctx = &cpuctx->ctx; | ||
4986 | |||
4987 | mutex_lock(&ctx->mutex); | ||
4988 | smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1); | ||
4989 | mutex_unlock(&ctx->mutex); | ||
4990 | } | ||
4991 | #else | ||
4992 | static inline void perf_event_exit_cpu(int cpu) { } | ||
4993 | #endif | ||
4994 | |||
4995 | static int __cpuinit | ||
4996 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | ||
4997 | { | ||
4998 | unsigned int cpu = (long)hcpu; | ||
4999 | |||
5000 | switch (action) { | ||
5001 | |||
5002 | case CPU_UP_PREPARE: | ||
5003 | case CPU_UP_PREPARE_FROZEN: | ||
5004 | perf_event_init_cpu(cpu); | ||
5005 | break; | ||
5006 | |||
5007 | case CPU_ONLINE: | ||
5008 | case CPU_ONLINE_FROZEN: | ||
5009 | hw_perf_event_setup_online(cpu); | ||
5010 | break; | ||
5011 | |||
5012 | case CPU_DOWN_PREPARE: | ||
5013 | case CPU_DOWN_PREPARE_FROZEN: | ||
5014 | perf_event_exit_cpu(cpu); | ||
5015 | break; | ||
5016 | |||
5017 | default: | ||
5018 | break; | ||
5019 | } | ||
5020 | |||
5021 | return NOTIFY_OK; | ||
5022 | } | ||
5023 | |||
5024 | /* | ||
5025 | * This has to have a higher priority than migration_notifier in sched.c. | ||
5026 | */ | ||
5027 | static struct notifier_block __cpuinitdata perf_cpu_nb = { | ||
5028 | .notifier_call = perf_cpu_notify, | ||
5029 | .priority = 20, | ||
5030 | }; | ||
5031 | |||
5032 | void __init perf_event_init(void) | ||
5033 | { | ||
5034 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, | ||
5035 | (void *)(long)smp_processor_id()); | ||
5036 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, | ||
5037 | (void *)(long)smp_processor_id()); | ||
5038 | register_cpu_notifier(&perf_cpu_nb); | ||
5039 | } | ||
5040 | |||
5041 | static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) | ||
5042 | { | ||
5043 | return sprintf(buf, "%d\n", perf_reserved_percpu); | ||
5044 | } | ||
5045 | |||
5046 | static ssize_t | ||
5047 | perf_set_reserve_percpu(struct sysdev_class *class, | ||
5048 | const char *buf, | ||
5049 | size_t count) | ||
5050 | { | ||
5051 | struct perf_cpu_context *cpuctx; | ||
5052 | unsigned long val; | ||
5053 | int err, cpu, mpt; | ||
5054 | |||
5055 | err = strict_strtoul(buf, 10, &val); | ||
5056 | if (err) | ||
5057 | return err; | ||
5058 | if (val > perf_max_events) | ||
5059 | return -EINVAL; | ||
5060 | |||
5061 | spin_lock(&perf_resource_lock); | ||
5062 | perf_reserved_percpu = val; | ||
5063 | for_each_online_cpu(cpu) { | ||
5064 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
5065 | spin_lock_irq(&cpuctx->ctx.lock); | ||
5066 | mpt = min(perf_max_events - cpuctx->ctx.nr_events, | ||
5067 | perf_max_events - perf_reserved_percpu); | ||
5068 | cpuctx->max_pertask = mpt; | ||
5069 | spin_unlock_irq(&cpuctx->ctx.lock); | ||
5070 | } | ||
5071 | spin_unlock(&perf_resource_lock); | ||
5072 | |||
5073 | return count; | ||
5074 | } | ||
5075 | |||
5076 | static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf) | ||
5077 | { | ||
5078 | return sprintf(buf, "%d\n", perf_overcommit); | ||
5079 | } | ||
5080 | |||
5081 | static ssize_t | ||
5082 | perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) | ||
5083 | { | ||
5084 | unsigned long val; | ||
5085 | int err; | ||
5086 | |||
5087 | err = strict_strtoul(buf, 10, &val); | ||
5088 | if (err) | ||
5089 | return err; | ||
5090 | if (val > 1) | ||
5091 | return -EINVAL; | ||
5092 | |||
5093 | spin_lock(&perf_resource_lock); | ||
5094 | perf_overcommit = val; | ||
5095 | spin_unlock(&perf_resource_lock); | ||
5096 | |||
5097 | return count; | ||
5098 | } | ||
5099 | |||
5100 | static SYSDEV_CLASS_ATTR( | ||
5101 | reserve_percpu, | ||
5102 | 0644, | ||
5103 | perf_show_reserve_percpu, | ||
5104 | perf_set_reserve_percpu | ||
5105 | ); | ||
5106 | |||
5107 | static SYSDEV_CLASS_ATTR( | ||
5108 | overcommit, | ||
5109 | 0644, | ||
5110 | perf_show_overcommit, | ||
5111 | perf_set_overcommit | ||
5112 | ); | ||
5113 | |||
5114 | static struct attribute *perfclass_attrs[] = { | ||
5115 | &attr_reserve_percpu.attr, | ||
5116 | &attr_overcommit.attr, | ||
5117 | NULL | ||
5118 | }; | ||
5119 | |||
5120 | static struct attribute_group perfclass_attr_group = { | ||
5121 | .attrs = perfclass_attrs, | ||
5122 | .name = "perf_events", | ||
5123 | }; | ||
5124 | |||
5125 | static int __init perf_event_sysfs_init(void) | ||
5126 | { | ||
5127 | return sysfs_create_group(&cpu_sysdev_class.kset.kobj, | ||
5128 | &perfclass_attr_group); | ||
5129 | } | ||
5130 | device_initcall(perf_event_sysfs_init); | ||
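Editor's note: the perf_event_init() path above follows the usual CPU-hotplug notifier idiom of that era: handle the boot CPU by hand, then register a notifier block whose priority beats the scheduler's migration notifier. Below is a minimal, hedged sketch of that idiom against the 2.6.3x API (register_cpu_notifier/NOTIFY_OK); the example_* names are illustrative and are not part of the perf code.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit example_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		pr_debug("example: setting up per-cpu state for CPU %u\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		pr_debug("example: tearing down per-cpu state for CPU %u\n", cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata example_cpu_nb = {
	.notifier_call	= example_cpu_notify,
	.priority	= 20,	/* run before the scheduler's migration notifier */
};

static int __init example_hotplug_init(void)
{
	/*
	 * Notifiers only fire for CPUs that change state after registration;
	 * that is why perf_event_init() above also invokes its handler by
	 * hand for the boot CPU before calling register_cpu_notifier().
	 */
	register_cpu_notifier(&example_cpu_nb);
	return 0;
}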
diff --git a/kernel/pid.c b/kernel/pid.c index 31310b5d3f50..d3f722d20f9c 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #define pid_hashfn(nr, ns) \ | 40 | #define pid_hashfn(nr, ns) \ |
41 | hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) | 41 | hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) |
42 | static struct hlist_head *pid_hash; | 42 | static struct hlist_head *pid_hash; |
43 | static int pidhash_shift; | 43 | static unsigned int pidhash_shift = 4; |
44 | struct pid init_struct_pid = INIT_STRUCT_PID; | 44 | struct pid init_struct_pid = INIT_STRUCT_PID; |
45 | 45 | ||
46 | int pid_max = PID_MAX_DEFAULT; | 46 | int pid_max = PID_MAX_DEFAULT; |
@@ -499,19 +499,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns) | |||
499 | void __init pidhash_init(void) | 499 | void __init pidhash_init(void) |
500 | { | 500 | { |
501 | int i, pidhash_size; | 501 | int i, pidhash_size; |
502 | unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT); | ||
503 | 502 | ||
504 | pidhash_shift = max(4, fls(megabytes * 4)); | 503 | pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18, |
505 | pidhash_shift = min(12, pidhash_shift); | 504 | HASH_EARLY | HASH_SMALL, |
505 | &pidhash_shift, NULL, 4096); | ||
506 | pidhash_size = 1 << pidhash_shift; | 506 | pidhash_size = 1 << pidhash_shift; |
507 | 507 | ||
508 | printk("PID hash table entries: %d (order: %d, %Zd bytes)\n", | ||
509 | pidhash_size, pidhash_shift, | ||
510 | pidhash_size * sizeof(struct hlist_head)); | ||
511 | |||
512 | pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash))); | ||
513 | if (!pid_hash) | ||
514 | panic("Could not alloc pidhash!\n"); | ||
515 | for (i = 0; i < pidhash_size; i++) | 508 | for (i = 0; i < pidhash_size; i++) |
516 | INIT_HLIST_HEAD(&pid_hash[i]); | 509 | INIT_HLIST_HEAD(&pid_hash[i]); |
517 | } | 510 | } |
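Editor's note: the new pidhash_init() hands the sizing decision to alloc_large_system_hash(), which may raise pidhash_shift above the static default of 4 depending on available memory; the table always holds 1 << pidhash_shift buckets, each a single pointer-sized hlist head. A throwaway userspace sketch of that arithmetic (the shift value is the patch's default, not a measured boot-time result):

#include <stdio.h>

int main(void)
{
	unsigned int pidhash_shift = 4;               /* static default from the patch */
	unsigned long buckets = 1UL << pidhash_shift; /* pidhash_size */
	unsigned long bytes = buckets * sizeof(void *);

	printf("PID hash: %lu buckets, %lu bytes\n", buckets, bytes);
	return 0;
}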
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 821722ae58a7..86b3796b0436 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
@@ -118,7 +118,7 @@ struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old | |||
118 | { | 118 | { |
119 | if (!(flags & CLONE_NEWPID)) | 119 | if (!(flags & CLONE_NEWPID)) |
120 | return get_pid_ns(old_ns); | 120 | return get_pid_ns(old_ns); |
121 | if (flags & CLONE_THREAD) | 121 | if (flags & (CLONE_THREAD|CLONE_PARENT)) |
122 | return ERR_PTR(-EINVAL); | 122 | return ERR_PTR(-EINVAL); |
123 | return create_pid_namespace(old_ns); | 123 | return create_pid_namespace(old_ns); |
124 | } | 124 | } |
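Editor's note: with the copy_pid_ns() change, a fork that requests a new PID namespace is rejected when it is also marked CLONE_PARENT, just as CLONE_THREAD already was. A hedged userspace demonstration follows; it needs CAP_SYS_ADMIN for CLONE_NEWPID and is expected to fail with EINVAL on kernels carrying this change (on older kernels the clone() may simply succeed and the child exits immediately).

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int child_fn(void *arg)
{
	return 0;
}

int main(void)
{
	const size_t stack_sz = 64 * 1024;
	char *stack = malloc(stack_sz);

	if (!stack)
		return 1;

	/* CLONE_NEWPID combined with CLONE_PARENT (or CLONE_THREAD) is now
	 * refused by copy_pid_ns(), so this clone() should report EINVAL. */
	if (clone(child_fn, stack + stack_sz,
		  CLONE_NEWPID | CLONE_PARENT | SIGCHLD, NULL) == -1)
		printf("clone: %s\n", strerror(errno));

	free(stack);
	return 0;
}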
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index e33a21cb9407..5c9dc228747b 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -8,17 +8,18 @@ | |||
8 | #include <linux/math64.h> | 8 | #include <linux/math64.h> |
9 | #include <asm/uaccess.h> | 9 | #include <asm/uaccess.h> |
10 | #include <linux/kernel_stat.h> | 10 | #include <linux/kernel_stat.h> |
11 | #include <trace/events/timer.h> | ||
11 | 12 | ||
12 | /* | 13 | /* |
13 | * Called after updating RLIMIT_CPU to set timer expiration if necessary. | 14 | * Called after updating RLIMIT_CPU to set timer expiration if necessary. |
14 | */ | 15 | */ |
15 | void update_rlimit_cpu(unsigned long rlim_new) | 16 | void update_rlimit_cpu(unsigned long rlim_new) |
16 | { | 17 | { |
17 | cputime_t cputime; | 18 | cputime_t cputime = secs_to_cputime(rlim_new); |
19 | struct signal_struct *const sig = current->signal; | ||
18 | 20 | ||
19 | cputime = secs_to_cputime(rlim_new); | 21 | if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) || |
20 | if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || | 22 | cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) { |
21 | cputime_gt(current->signal->it_prof_expires, cputime)) { | ||
22 | spin_lock_irq(¤t->sighand->siglock); | 23 | spin_lock_irq(¤t->sighand->siglock); |
23 | set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); | 24 | set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); |
24 | spin_unlock_irq(¤t->sighand->siglock); | 25 | spin_unlock_irq(¤t->sighand->siglock); |
@@ -542,6 +543,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) | |||
542 | now); | 543 | now); |
543 | } | 544 | } |
544 | 545 | ||
546 | static inline int expires_gt(cputime_t expires, cputime_t new_exp) | ||
547 | { | ||
548 | return cputime_eq(expires, cputime_zero) || | ||
549 | cputime_gt(expires, new_exp); | ||
550 | } | ||
551 | |||
552 | static inline int expires_le(cputime_t expires, cputime_t new_exp) | ||
553 | { | ||
554 | return !cputime_eq(expires, cputime_zero) && | ||
555 | cputime_le(expires, new_exp); | ||
556 | } | ||
545 | /* | 557 | /* |
546 | * Insert the timer on the appropriate list before any timers that | 558 | * Insert the timer on the appropriate list before any timers that |
547 | * expire later. This must be called with the tasklist_lock held | 559 | * expire later. This must be called with the tasklist_lock held |
@@ -586,34 +598,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) | |||
586 | */ | 598 | */ |
587 | 599 | ||
588 | if (CPUCLOCK_PERTHREAD(timer->it_clock)) { | 600 | if (CPUCLOCK_PERTHREAD(timer->it_clock)) { |
601 | union cpu_time_count *exp = &nt->expires; | ||
602 | |||
589 | switch (CPUCLOCK_WHICH(timer->it_clock)) { | 603 | switch (CPUCLOCK_WHICH(timer->it_clock)) { |
590 | default: | 604 | default: |
591 | BUG(); | 605 | BUG(); |
592 | case CPUCLOCK_PROF: | 606 | case CPUCLOCK_PROF: |
593 | if (cputime_eq(p->cputime_expires.prof_exp, | 607 | if (expires_gt(p->cputime_expires.prof_exp, |
594 | cputime_zero) || | 608 | exp->cpu)) |
595 | cputime_gt(p->cputime_expires.prof_exp, | 609 | p->cputime_expires.prof_exp = exp->cpu; |
596 | nt->expires.cpu)) | ||
597 | p->cputime_expires.prof_exp = | ||
598 | nt->expires.cpu; | ||
599 | break; | 610 | break; |
600 | case CPUCLOCK_VIRT: | 611 | case CPUCLOCK_VIRT: |
601 | if (cputime_eq(p->cputime_expires.virt_exp, | 612 | if (expires_gt(p->cputime_expires.virt_exp, |
602 | cputime_zero) || | 613 | exp->cpu)) |
603 | cputime_gt(p->cputime_expires.virt_exp, | 614 | p->cputime_expires.virt_exp = exp->cpu; |
604 | nt->expires.cpu)) | ||
605 | p->cputime_expires.virt_exp = | ||
606 | nt->expires.cpu; | ||
607 | break; | 615 | break; |
608 | case CPUCLOCK_SCHED: | 616 | case CPUCLOCK_SCHED: |
609 | if (p->cputime_expires.sched_exp == 0 || | 617 | if (p->cputime_expires.sched_exp == 0 || |
610 | p->cputime_expires.sched_exp > | 618 | p->cputime_expires.sched_exp > exp->sched) |
611 | nt->expires.sched) | ||
612 | p->cputime_expires.sched_exp = | 619 | p->cputime_expires.sched_exp = |
613 | nt->expires.sched; | 620 | exp->sched; |
614 | break; | 621 | break; |
615 | } | 622 | } |
616 | } else { | 623 | } else { |
624 | struct signal_struct *const sig = p->signal; | ||
625 | union cpu_time_count *exp = &timer->it.cpu.expires; | ||
626 | |||
617 | /* | 627 | /* |
618 | * For a process timer, set the cached expiration time. | 628 | * For a process timer, set the cached expiration time. |
619 | */ | 629 | */ |
@@ -621,30 +631,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) | |||
621 | default: | 631 | default: |
622 | BUG(); | 632 | BUG(); |
623 | case CPUCLOCK_VIRT: | 633 | case CPUCLOCK_VIRT: |
624 | if (!cputime_eq(p->signal->it_virt_expires, | 634 | if (expires_le(sig->it[CPUCLOCK_VIRT].expires, |
625 | cputime_zero) && | 635 | exp->cpu)) |
626 | cputime_lt(p->signal->it_virt_expires, | ||
627 | timer->it.cpu.expires.cpu)) | ||
628 | break; | 636 | break; |
629 | p->signal->cputime_expires.virt_exp = | 637 | sig->cputime_expires.virt_exp = exp->cpu; |
630 | timer->it.cpu.expires.cpu; | ||
631 | break; | 638 | break; |
632 | case CPUCLOCK_PROF: | 639 | case CPUCLOCK_PROF: |
633 | if (!cputime_eq(p->signal->it_prof_expires, | 640 | if (expires_le(sig->it[CPUCLOCK_PROF].expires, |
634 | cputime_zero) && | 641 | exp->cpu)) |
635 | cputime_lt(p->signal->it_prof_expires, | ||
636 | timer->it.cpu.expires.cpu)) | ||
637 | break; | 642 | break; |
638 | i = p->signal->rlim[RLIMIT_CPU].rlim_cur; | 643 | i = sig->rlim[RLIMIT_CPU].rlim_cur; |
639 | if (i != RLIM_INFINITY && | 644 | if (i != RLIM_INFINITY && |
640 | i <= cputime_to_secs(timer->it.cpu.expires.cpu)) | 645 | i <= cputime_to_secs(exp->cpu)) |
641 | break; | 646 | break; |
642 | p->signal->cputime_expires.prof_exp = | 647 | sig->cputime_expires.prof_exp = exp->cpu; |
643 | timer->it.cpu.expires.cpu; | ||
644 | break; | 648 | break; |
645 | case CPUCLOCK_SCHED: | 649 | case CPUCLOCK_SCHED: |
646 | p->signal->cputime_expires.sched_exp = | 650 | sig->cputime_expires.sched_exp = exp->sched; |
647 | timer->it.cpu.expires.sched; | ||
648 | break; | 651 | break; |
649 | } | 652 | } |
650 | } | 653 | } |
@@ -1071,6 +1074,40 @@ static void stop_process_timers(struct task_struct *tsk) | |||
1071 | spin_unlock_irqrestore(&cputimer->lock, flags); | 1074 | spin_unlock_irqrestore(&cputimer->lock, flags); |
1072 | } | 1075 | } |
1073 | 1076 | ||
1077 | static u32 onecputick; | ||
1078 | |||
1079 | static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, | ||
1080 | cputime_t *expires, cputime_t cur_time, int signo) | ||
1081 | { | ||
1082 | if (cputime_eq(it->expires, cputime_zero)) | ||
1083 | return; | ||
1084 | |||
1085 | if (cputime_ge(cur_time, it->expires)) { | ||
1086 | if (!cputime_eq(it->incr, cputime_zero)) { | ||
1087 | it->expires = cputime_add(it->expires, it->incr); | ||
1088 | it->error += it->incr_error; | ||
1089 | if (it->error >= onecputick) { | ||
1090 | it->expires = cputime_sub(it->expires, | ||
1091 | cputime_one_jiffy); | ||
1092 | it->error -= onecputick; | ||
1093 | } | ||
1094 | } else { | ||
1095 | it->expires = cputime_zero; | ||
1096 | } | ||
1097 | |||
1098 | trace_itimer_expire(signo == SIGPROF ? | ||
1099 | ITIMER_PROF : ITIMER_VIRTUAL, | ||
1100 | tsk->signal->leader_pid, cur_time); | ||
1101 | __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); | ||
1102 | } | ||
1103 | |||
1104 | if (!cputime_eq(it->expires, cputime_zero) && | ||
1105 | (cputime_eq(*expires, cputime_zero) || | ||
1106 | cputime_lt(it->expires, *expires))) { | ||
1107 | *expires = it->expires; | ||
1108 | } | ||
1109 | } | ||
1110 | |||
1074 | /* | 1111 | /* |
1075 | * Check for any per-thread CPU timers that have fired and move them | 1112 | * Check for any per-thread CPU timers that have fired and move them |
1076 | * off the tsk->*_timers list onto the firing list. Per-thread timers | 1113 | * off the tsk->*_timers list onto the firing list. Per-thread timers |
@@ -1090,10 +1127,10 @@ static void check_process_timers(struct task_struct *tsk, | |||
1090 | * Don't sample the current process CPU clocks if there are no timers. | 1127 | * Don't sample the current process CPU clocks if there are no timers. |
1091 | */ | 1128 | */ |
1092 | if (list_empty(&timers[CPUCLOCK_PROF]) && | 1129 | if (list_empty(&timers[CPUCLOCK_PROF]) && |
1093 | cputime_eq(sig->it_prof_expires, cputime_zero) && | 1130 | cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) && |
1094 | sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY && | 1131 | sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY && |
1095 | list_empty(&timers[CPUCLOCK_VIRT]) && | 1132 | list_empty(&timers[CPUCLOCK_VIRT]) && |
1096 | cputime_eq(sig->it_virt_expires, cputime_zero) && | 1133 | cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) && |
1097 | list_empty(&timers[CPUCLOCK_SCHED])) { | 1134 | list_empty(&timers[CPUCLOCK_SCHED])) { |
1098 | stop_process_timers(tsk); | 1135 | stop_process_timers(tsk); |
1099 | return; | 1136 | return; |
@@ -1153,38 +1190,11 @@ static void check_process_timers(struct task_struct *tsk, | |||
1153 | /* | 1190 | /* |
1154 | * Check for the special case process timers. | 1191 | * Check for the special case process timers. |
1155 | */ | 1192 | */ |
1156 | if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { | 1193 | check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime, |
1157 | if (cputime_ge(ptime, sig->it_prof_expires)) { | 1194 | SIGPROF); |
1158 | /* ITIMER_PROF fires and reloads. */ | 1195 | check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime, |
1159 | sig->it_prof_expires = sig->it_prof_incr; | 1196 | SIGVTALRM); |
1160 | if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { | 1197 | |
1161 | sig->it_prof_expires = cputime_add( | ||
1162 | sig->it_prof_expires, ptime); | ||
1163 | } | ||
1164 | __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk); | ||
1165 | } | ||
1166 | if (!cputime_eq(sig->it_prof_expires, cputime_zero) && | ||
1167 | (cputime_eq(prof_expires, cputime_zero) || | ||
1168 | cputime_lt(sig->it_prof_expires, prof_expires))) { | ||
1169 | prof_expires = sig->it_prof_expires; | ||
1170 | } | ||
1171 | } | ||
1172 | if (!cputime_eq(sig->it_virt_expires, cputime_zero)) { | ||
1173 | if (cputime_ge(utime, sig->it_virt_expires)) { | ||
1174 | /* ITIMER_VIRTUAL fires and reloads. */ | ||
1175 | sig->it_virt_expires = sig->it_virt_incr; | ||
1176 | if (!cputime_eq(sig->it_virt_expires, cputime_zero)) { | ||
1177 | sig->it_virt_expires = cputime_add( | ||
1178 | sig->it_virt_expires, utime); | ||
1179 | } | ||
1180 | __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk); | ||
1181 | } | ||
1182 | if (!cputime_eq(sig->it_virt_expires, cputime_zero) && | ||
1183 | (cputime_eq(virt_expires, cputime_zero) || | ||
1184 | cputime_lt(sig->it_virt_expires, virt_expires))) { | ||
1185 | virt_expires = sig->it_virt_expires; | ||
1186 | } | ||
1187 | } | ||
1188 | if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { | 1198 | if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { |
1189 | unsigned long psecs = cputime_to_secs(ptime); | 1199 | unsigned long psecs = cputime_to_secs(ptime); |
1190 | cputime_t x; | 1200 | cputime_t x; |
@@ -1457,7 +1467,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, | |||
1457 | if (!cputime_eq(*oldval, cputime_zero)) { | 1467 | if (!cputime_eq(*oldval, cputime_zero)) { |
1458 | if (cputime_le(*oldval, now.cpu)) { | 1468 | if (cputime_le(*oldval, now.cpu)) { |
1459 | /* Just about to fire. */ | 1469 | /* Just about to fire. */ |
1460 | *oldval = jiffies_to_cputime(1); | 1470 | *oldval = cputime_one_jiffy; |
1461 | } else { | 1471 | } else { |
1462 | *oldval = cputime_sub(*oldval, now.cpu); | 1472 | *oldval = cputime_sub(*oldval, now.cpu); |
1463 | } | 1473 | } |
@@ -1703,10 +1713,15 @@ static __init int init_posix_cpu_timers(void) | |||
1703 | .nsleep = thread_cpu_nsleep, | 1713 | .nsleep = thread_cpu_nsleep, |
1704 | .nsleep_restart = thread_cpu_nsleep_restart, | 1714 | .nsleep_restart = thread_cpu_nsleep_restart, |
1705 | }; | 1715 | }; |
1716 | struct timespec ts; | ||
1706 | 1717 | ||
1707 | register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); | 1718 | register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); |
1708 | register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); | 1719 | register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); |
1709 | 1720 | ||
1721 | cputime_to_timespec(cputime_one_jiffy, &ts); | ||
1722 | onecputick = ts.tv_nsec; | ||
1723 | WARN_ON(ts.tv_sec != 0); | ||
1724 | |||
1710 | return 0; | 1725 | return 0; |
1711 | } | 1726 | } |
1712 | __initcall(init_posix_cpu_timers); | 1727 | __initcall(init_posix_cpu_timers); |
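Editor's note: check_cpu_itimer() re-arms the interval in whole-jiffy cputime units and parks the sub-tick remainder in it->error; once the accumulated error reaches one tick (onecputick), a jiffy is subtracted so the long-run firing rate matches what the user asked for. A plain-integer userspace sketch of that compensation, with made-up numbers rather than real cputime values:

#include <stdio.h>

int main(void)
{
	const long tick_ns = 10 * 1000 * 1000;       /* 10 ms tick, i.e. HZ=100 */
	const long want_ns = 25 * 1000 * 1000;       /* user asked for 25 ms    */
	const long incr_ticks = (want_ns + tick_ns - 1) / tick_ns;  /* 3 ticks  */
	const long incr_error = incr_ticks * tick_ns - want_ns;     /* 5 ms     */
	long expires = 0, error = 0;

	for (int i = 0; i < 8; i++) {
		expires += incr_ticks;          /* reload, rounded up to ticks  */
		error += incr_error;            /* remember how much we overshot */
		if (error >= tick_ns) {         /* a whole tick of error: give it back */
			expires -= 1;
			error -= tick_ns;
		}
		printf("firing %d at tick %ld (carried error %ld ns)\n",
		       i + 1, expires, error);
	}
	return 0;
}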
diff --git a/kernel/power/console.c b/kernel/power/console.c index a3961b205de7..5187136fe1de 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c | |||
@@ -14,56 +14,13 @@ | |||
14 | #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) | 14 | #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) |
15 | 15 | ||
16 | static int orig_fgconsole, orig_kmsg; | 16 | static int orig_fgconsole, orig_kmsg; |
17 | static int disable_vt_switch; | ||
18 | |||
19 | /* | ||
20 | * Normally during a suspend, we allocate a new console and switch to it. | ||
21 | * When we resume, we switch back to the original console. This switch | ||
22 | * can be slow, so on systems where the framebuffer can handle restoration | ||
23 | * of video registers anyways, there's little point in doing the console | ||
24 | * switch. This function allows you to disable it by passing it '0'. | ||
25 | */ | ||
26 | void pm_set_vt_switch(int do_switch) | ||
27 | { | ||
28 | acquire_console_sem(); | ||
29 | disable_vt_switch = !do_switch; | ||
30 | release_console_sem(); | ||
31 | } | ||
32 | EXPORT_SYMBOL(pm_set_vt_switch); | ||
33 | 17 | ||
34 | int pm_prepare_console(void) | 18 | int pm_prepare_console(void) |
35 | { | 19 | { |
36 | acquire_console_sem(); | 20 | orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1); |
37 | 21 | if (orig_fgconsole < 0) | |
38 | if (disable_vt_switch) { | ||
39 | release_console_sem(); | ||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | orig_fgconsole = fg_console; | ||
44 | |||
45 | if (vc_allocate(SUSPEND_CONSOLE)) { | ||
46 | /* we can't have a free VC for now. Too bad, | ||
47 | * we don't want to mess the screen for now. */ | ||
48 | release_console_sem(); | ||
49 | return 1; | 22 | return 1; |
50 | } | ||
51 | 23 | ||
52 | if (set_console(SUSPEND_CONSOLE)) { | ||
53 | /* | ||
54 | * We're unable to switch to the SUSPEND_CONSOLE. | ||
55 | * Let the calling function know so it can decide | ||
56 | * what to do. | ||
57 | */ | ||
58 | release_console_sem(); | ||
59 | return 1; | ||
60 | } | ||
61 | release_console_sem(); | ||
62 | |||
63 | if (vt_waitactive(SUSPEND_CONSOLE)) { | ||
64 | pr_debug("Suspend: Can't switch VCs."); | ||
65 | return 1; | ||
66 | } | ||
67 | orig_kmsg = kmsg_redirect; | 24 | orig_kmsg = kmsg_redirect; |
68 | kmsg_redirect = SUSPEND_CONSOLE; | 25 | kmsg_redirect = SUSPEND_CONSOLE; |
69 | return 0; | 26 | return 0; |
@@ -71,19 +28,9 @@ int pm_prepare_console(void) | |||
71 | 28 | ||
72 | void pm_restore_console(void) | 29 | void pm_restore_console(void) |
73 | { | 30 | { |
74 | acquire_console_sem(); | 31 | if (orig_fgconsole >= 0) { |
75 | if (disable_vt_switch) { | 32 | vt_move_to_console(orig_fgconsole, 0); |
76 | release_console_sem(); | 33 | kmsg_redirect = orig_kmsg; |
77 | return; | ||
78 | } | ||
79 | set_console(orig_fgconsole); | ||
80 | release_console_sem(); | ||
81 | |||
82 | if (vt_waitactive(orig_fgconsole)) { | ||
83 | pr_debug("Resume: Can't switch VCs."); | ||
84 | return; | ||
85 | } | 34 | } |
86 | |||
87 | kmsg_redirect = orig_kmsg; | ||
88 | } | 35 | } |
89 | #endif | 36 | #endif |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 04b3a83d686f..04a9e90d248f 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -693,21 +693,22 @@ static int software_resume(void) | |||
693 | /* The snapshot device should not be opened while we're running */ | 693 | /* The snapshot device should not be opened while we're running */ |
694 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { | 694 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { |
695 | error = -EBUSY; | 695 | error = -EBUSY; |
696 | swsusp_close(FMODE_READ); | ||
696 | goto Unlock; | 697 | goto Unlock; |
697 | } | 698 | } |
698 | 699 | ||
699 | pm_prepare_console(); | 700 | pm_prepare_console(); |
700 | error = pm_notifier_call_chain(PM_RESTORE_PREPARE); | 701 | error = pm_notifier_call_chain(PM_RESTORE_PREPARE); |
701 | if (error) | 702 | if (error) |
702 | goto Finish; | 703 | goto close_finish; |
703 | 704 | ||
704 | error = usermodehelper_disable(); | 705 | error = usermodehelper_disable(); |
705 | if (error) | 706 | if (error) |
706 | goto Finish; | 707 | goto close_finish; |
707 | 708 | ||
708 | error = create_basic_memory_bitmaps(); | 709 | error = create_basic_memory_bitmaps(); |
709 | if (error) | 710 | if (error) |
710 | goto Finish; | 711 | goto close_finish; |
711 | 712 | ||
712 | pr_debug("PM: Preparing processes for restore.\n"); | 713 | pr_debug("PM: Preparing processes for restore.\n"); |
713 | error = prepare_processes(); | 714 | error = prepare_processes(); |
@@ -719,6 +720,7 @@ static int software_resume(void) | |||
719 | pr_debug("PM: Reading hibernation image.\n"); | 720 | pr_debug("PM: Reading hibernation image.\n"); |
720 | 721 | ||
721 | error = swsusp_read(&flags); | 722 | error = swsusp_read(&flags); |
723 | swsusp_close(FMODE_READ); | ||
722 | if (!error) | 724 | if (!error) |
723 | hibernation_restore(flags & SF_PLATFORM_MODE); | 725 | hibernation_restore(flags & SF_PLATFORM_MODE); |
724 | 726 | ||
@@ -737,6 +739,9 @@ static int software_resume(void) | |||
737 | mutex_unlock(&pm_mutex); | 739 | mutex_unlock(&pm_mutex); |
738 | pr_debug("PM: Resume from disk failed.\n"); | 740 | pr_debug("PM: Resume from disk failed.\n"); |
739 | return error; | 741 | return error; |
742 | close_finish: | ||
743 | swsusp_close(FMODE_READ); | ||
744 | goto Finish; | ||
740 | } | 745 | } |
741 | 746 | ||
742 | late_initcall(software_resume); | 747 | late_initcall(software_resume); |
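Editor's note: the software_resume() fix adds a close_finish label so swsusp_close() runs on every early-error path, not just on success. A generic, self-contained sketch of that unwind style is below; the example_* helpers are stand-ins, not the real swsusp API, and one step is forced to fail just to exercise the error path.

#include <stdio.h>

static int example_open_device(void)   { puts("open device");  return 0; }
static void example_close_device(void) { puts("close device"); }
static int example_prepare(void)       { puts("prepare");      return -1; /* force an error */ }
static int example_read_image(void)    { puts("read image");   return 0; }
static void example_restore(void)      { puts("restore"); }

static int example_resume(void)
{
	int error;

	error = example_open_device();
	if (error)
		return error;		/* nothing to undo yet */

	error = example_prepare();
	if (error)
		goto close_finish;	/* device is open: it must be closed */

	error = example_read_image();
	example_close_device();		/* image is in memory, device no longer needed */
	if (!error)
		example_restore();
	return error;

close_finish:
	example_close_device();
	return error;
}

int main(void)
{
	return example_resume() ? 1 : 0;
}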
diff --git a/kernel/power/process.c b/kernel/power/process.c index da2072d73811..cc2e55373b68 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #undef DEBUG | 9 | #undef DEBUG |
10 | 10 | ||
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <linux/oom.h> | ||
12 | #include <linux/suspend.h> | 13 | #include <linux/suspend.h> |
13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
14 | #include <linux/syscalls.h> | 15 | #include <linux/syscalls.h> |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 97955b0e44f4..36cb168e4330 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -619,7 +619,7 @@ __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn, | |||
619 | BUG_ON(!region); | 619 | BUG_ON(!region); |
620 | } else | 620 | } else |
621 | /* This allocation cannot fail */ | 621 | /* This allocation cannot fail */ |
622 | region = alloc_bootmem_low(sizeof(struct nosave_region)); | 622 | region = alloc_bootmem(sizeof(struct nosave_region)); |
623 | region->start_pfn = start_pfn; | 623 | region->start_pfn = start_pfn; |
624 | region->end_pfn = end_pfn; | 624 | region->end_pfn = end_pfn; |
625 | list_add_tail(®ion->list, &nosave_regions); | 625 | list_add_tail(®ion->list, &nosave_regions); |
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c index 17d8bb1acf9c..25596e450ac7 100644 --- a/kernel/power/suspend_test.c +++ b/kernel/power/suspend_test.c | |||
@@ -19,7 +19,7 @@ | |||
19 | * The time it takes is system-specific though, so when we test this | 19 | * The time it takes is system-specific though, so when we test this |
20 | * during system bootup we allow a LOT of time. | 20 | * during system bootup we allow a LOT of time. |
21 | */ | 21 | */ |
22 | #define TEST_SUSPEND_SECONDS 5 | 22 | #define TEST_SUSPEND_SECONDS 10 |
23 | 23 | ||
24 | static unsigned long suspend_test_start_time; | 24 | static unsigned long suspend_test_start_time; |
25 | 25 | ||
@@ -49,7 +49,8 @@ void suspend_test_finish(const char *label) | |||
49 | * has some performance issues. The stack dump of a WARN_ON | 49 | * has some performance issues. The stack dump of a WARN_ON |
50 | * is more likely to get the right attention than a printk... | 50 | * is more likely to get the right attention than a printk... |
51 | */ | 51 | */ |
52 | WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label); | 52 | WARN(msec > (TEST_SUSPEND_SECONDS * 1000), |
53 | "Component: %s, time: %u\n", label, msec); | ||
53 | } | 54 | } |
54 | 55 | ||
55 | /* | 56 | /* |
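Editor's note: the suspend_test change leans on WARN() taking a printf-style message after the condition, so the slow component and its measured time land in the same stack-dump line. A minimal sketch of that usage (the helper name and limit are illustrative, not part of the suspend-test code):

#include <linux/kernel.h>

static void example_check_duration(const char *label, unsigned int msec,
				   unsigned int limit_ms)
{
	/* Emits a one-line warning plus backtrace only when the limit is exceeded. */
	WARN(msec > limit_ms, "Component: %s, time: %u\n", label, msec);
}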
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 8ba052c86d48..890f6b11b1d3 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -13,7 +13,6 @@ | |||
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/file.h> | 15 | #include <linux/file.h> |
16 | #include <linux/utsname.h> | ||
17 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
18 | #include <linux/bitops.h> | 17 | #include <linux/bitops.h> |
19 | #include <linux/genhd.h> | 18 | #include <linux/genhd.h> |
@@ -315,7 +314,6 @@ static int save_image(struct swap_map_handle *handle, | |||
315 | { | 314 | { |
316 | unsigned int m; | 315 | unsigned int m; |
317 | int ret; | 316 | int ret; |
318 | int error = 0; | ||
319 | int nr_pages; | 317 | int nr_pages; |
320 | int err2; | 318 | int err2; |
321 | struct bio *bio; | 319 | struct bio *bio; |
@@ -330,26 +328,27 @@ static int save_image(struct swap_map_handle *handle, | |||
330 | nr_pages = 0; | 328 | nr_pages = 0; |
331 | bio = NULL; | 329 | bio = NULL; |
332 | do_gettimeofday(&start); | 330 | do_gettimeofday(&start); |
333 | do { | 331 | while (1) { |
334 | ret = snapshot_read_next(snapshot, PAGE_SIZE); | 332 | ret = snapshot_read_next(snapshot, PAGE_SIZE); |
335 | if (ret > 0) { | 333 | if (ret <= 0) |
336 | error = swap_write_page(handle, data_of(*snapshot), | 334 | break; |
337 | &bio); | 335 | ret = swap_write_page(handle, data_of(*snapshot), &bio); |
338 | if (error) | 336 | if (ret) |
339 | break; | 337 | break; |
340 | if (!(nr_pages % m)) | 338 | if (!(nr_pages % m)) |
341 | printk("\b\b\b\b%3d%%", nr_pages / m); | 339 | printk("\b\b\b\b%3d%%", nr_pages / m); |
342 | nr_pages++; | 340 | nr_pages++; |
343 | } | 341 | } |
344 | } while (ret > 0); | ||
345 | err2 = wait_on_bio_chain(&bio); | 342 | err2 = wait_on_bio_chain(&bio); |
346 | do_gettimeofday(&stop); | 343 | do_gettimeofday(&stop); |
347 | if (!error) | 344 | if (!ret) |
348 | error = err2; | 345 | ret = err2; |
349 | if (!error) | 346 | if (!ret) |
350 | printk("\b\b\b\bdone\n"); | 347 | printk("\b\b\b\bdone\n"); |
348 | else | ||
349 | printk("\n"); | ||
351 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); | 350 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); |
352 | return error; | 351 | return ret; |
353 | } | 352 | } |
354 | 353 | ||
355 | /** | 354 | /** |
@@ -537,7 +536,8 @@ static int load_image(struct swap_map_handle *handle, | |||
537 | snapshot_write_finalize(snapshot); | 536 | snapshot_write_finalize(snapshot); |
538 | if (!snapshot_image_loaded(snapshot)) | 537 | if (!snapshot_image_loaded(snapshot)) |
539 | error = -ENODATA; | 538 | error = -ENODATA; |
540 | } | 539 | } else |
540 | printk("\n"); | ||
541 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); | 541 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); |
542 | return error; | 542 | return error; |
543 | } | 543 | } |
@@ -573,8 +573,6 @@ int swsusp_read(unsigned int *flags_p) | |||
573 | error = load_image(&handle, &snapshot, header->pages - 1); | 573 | error = load_image(&handle, &snapshot, header->pages - 1); |
574 | release_swap_reader(&handle); | 574 | release_swap_reader(&handle); |
575 | 575 | ||
576 | blkdev_put(resume_bdev, FMODE_READ); | ||
577 | |||
578 | if (!error) | 576 | if (!error) |
579 | pr_debug("PM: Image successfully loaded\n"); | 577 | pr_debug("PM: Image successfully loaded\n"); |
580 | else | 578 | else |
@@ -597,7 +595,7 @@ int swsusp_check(void) | |||
597 | error = bio_read_page(swsusp_resume_block, | 595 | error = bio_read_page(swsusp_resume_block, |
598 | swsusp_header, NULL); | 596 | swsusp_header, NULL); |
599 | if (error) | 597 | if (error) |
600 | return error; | 598 | goto put; |
601 | 599 | ||
602 | if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) { | 600 | if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) { |
603 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); | 601 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); |
@@ -605,8 +603,10 @@ int swsusp_check(void) | |||
605 | error = bio_write_page(swsusp_resume_block, | 603 | error = bio_write_page(swsusp_resume_block, |
606 | swsusp_header, NULL); | 604 | swsusp_header, NULL); |
607 | } else { | 605 | } else { |
608 | return -EINVAL; | 606 | error = -EINVAL; |
609 | } | 607 | } |
608 | |||
609 | put: | ||
610 | if (error) | 610 | if (error) |
611 | blkdev_put(resume_bdev, FMODE_READ); | 611 | blkdev_put(resume_bdev, FMODE_READ); |
612 | else | 612 | else |
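Editor's note: save_image() now keeps a single status variable and an open-coded while (1) loop: the snapshot read result doubles as the termination condition, and either a read or a write failure breaks out early. A tiny userspace sketch of that loop shape; read_next()/write_page() are stand-ins, not the swsusp functions.

#include <stdio.h>

static int read_next(void)
{
	static int pages_left = 3;	/* returns 3, 2, 1, then 0 meaning "done" */
	return pages_left--;
}

static int write_page(int page)
{
	printf("write page %d\n", page);
	return 0;			/* non-zero would mean a write error */
}

int main(void)
{
	int ret;

	while (1) {
		ret = read_next();
		if (ret <= 0)		/* 0: finished normally, <0: read error */
			break;
		ret = write_page(ret);
		if (ret)		/* write error: stop immediately */
			break;
	}
	return ret ? 1 : 0;
}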
diff --git a/kernel/printk.c b/kernel/printk.c index 602033acd6c7..f38b07f78a4e 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -206,12 +206,11 @@ __setup("log_buf_len=", log_buf_len_setup); | |||
206 | #ifdef CONFIG_BOOT_PRINTK_DELAY | 206 | #ifdef CONFIG_BOOT_PRINTK_DELAY |
207 | 207 | ||
208 | static unsigned int boot_delay; /* msecs delay after each printk during bootup */ | 208 | static unsigned int boot_delay; /* msecs delay after each printk during bootup */ |
209 | static unsigned long long printk_delay_msec; /* per msec, based on boot_delay */ | 209 | static unsigned long long loops_per_msec; /* based on boot_delay */ |
210 | 210 | ||
211 | static int __init boot_delay_setup(char *str) | 211 | static int __init boot_delay_setup(char *str) |
212 | { | 212 | { |
213 | unsigned long lpj; | 213 | unsigned long lpj; |
214 | unsigned long long loops_per_msec; | ||
215 | 214 | ||
216 | lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */ | 215 | lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */ |
217 | loops_per_msec = (unsigned long long)lpj / 1000 * HZ; | 216 | loops_per_msec = (unsigned long long)lpj / 1000 * HZ; |
@@ -220,10 +219,9 @@ static int __init boot_delay_setup(char *str) | |||
220 | if (boot_delay > 10 * 1000) | 219 | if (boot_delay > 10 * 1000) |
221 | boot_delay = 0; | 220 | boot_delay = 0; |
222 | 221 | ||
223 | printk_delay_msec = loops_per_msec; | 222 | pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, " |
224 | printk(KERN_DEBUG "boot_delay: %u, preset_lpj: %ld, lpj: %lu, " | 223 | "HZ: %d, loops_per_msec: %llu\n", |
225 | "HZ: %d, printk_delay_msec: %llu\n", | 224 | boot_delay, preset_lpj, lpj, HZ, loops_per_msec); |
226 | boot_delay, preset_lpj, lpj, HZ, printk_delay_msec); | ||
227 | return 1; | 225 | return 1; |
228 | } | 226 | } |
229 | __setup("boot_delay=", boot_delay_setup); | 227 | __setup("boot_delay=", boot_delay_setup); |
@@ -236,7 +234,7 @@ static void boot_delay_msec(void) | |||
236 | if (boot_delay == 0 || system_state != SYSTEM_BOOTING) | 234 | if (boot_delay == 0 || system_state != SYSTEM_BOOTING) |
237 | return; | 235 | return; |
238 | 236 | ||
239 | k = (unsigned long long)printk_delay_msec * boot_delay; | 237 | k = (unsigned long long)loops_per_msec * boot_delay; |
240 | 238 | ||
241 | timeout = jiffies + msecs_to_jiffies(boot_delay); | 239 | timeout = jiffies + msecs_to_jiffies(boot_delay); |
242 | while (k) { | 240 | while (k) { |
@@ -655,6 +653,20 @@ static int recursion_bug; | |||
655 | static int new_text_line = 1; | 653 | static int new_text_line = 1; |
656 | static char printk_buf[1024]; | 654 | static char printk_buf[1024]; |
657 | 655 | ||
656 | int printk_delay_msec __read_mostly; | ||
657 | |||
658 | static inline void printk_delay(void) | ||
659 | { | ||
660 | if (unlikely(printk_delay_msec)) { | ||
661 | int m = printk_delay_msec; | ||
662 | |||
663 | while (m--) { | ||
664 | mdelay(1); | ||
665 | touch_nmi_watchdog(); | ||
666 | } | ||
667 | } | ||
668 | } | ||
669 | |||
658 | asmlinkage int vprintk(const char *fmt, va_list args) | 670 | asmlinkage int vprintk(const char *fmt, va_list args) |
659 | { | 671 | { |
660 | int printed_len = 0; | 672 | int printed_len = 0; |
@@ -664,6 +676,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) | |||
664 | char *p; | 676 | char *p; |
665 | 677 | ||
666 | boot_delay_msec(); | 678 | boot_delay_msec(); |
679 | printk_delay(); | ||
667 | 680 | ||
668 | preempt_disable(); | 681 | preempt_disable(); |
669 | /* This stops the holder of console_sem just where we want him */ | 682 | /* This stops the holder of console_sem just where we want him */ |
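Editor's note: printk_delay_msec is a runtime knob: when non-zero, every printk() busy-waits that many milliseconds while touching the NMI watchdog, which helps when messages scroll past a slow console. On mainline it is exposed through sysctl; the /proc/sys/kernel/printk_delay path used in this userspace sketch is an assumption on my part, not something shown in the hunk above.

#include <stdio.h>

static int set_printk_delay(int msec)
{
	/* Path assumed to be the sysctl backing printk_delay_msec. */
	FILE *f = fopen("/proc/sys/kernel/printk_delay", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", msec);
	return fclose(f);
}

int main(void)
{
	if (set_printk_delay(10))	/* 10 ms between consecutive messages */
		perror("printk_delay");
	return 0;
}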
diff --git a/kernel/profile.c b/kernel/profile.c index 419250ebec4d..a55d3a367ae8 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -442,48 +442,51 @@ void profile_tick(int type) | |||
442 | 442 | ||
443 | #ifdef CONFIG_PROC_FS | 443 | #ifdef CONFIG_PROC_FS |
444 | #include <linux/proc_fs.h> | 444 | #include <linux/proc_fs.h> |
445 | #include <linux/seq_file.h> | ||
445 | #include <asm/uaccess.h> | 446 | #include <asm/uaccess.h> |
446 | 447 | ||
447 | static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, | 448 | static int prof_cpu_mask_proc_show(struct seq_file *m, void *v) |
448 | int count, int *eof, void *data) | ||
449 | { | 449 | { |
450 | int len = cpumask_scnprintf(page, count, data); | 450 | seq_cpumask(m, prof_cpu_mask); |
451 | if (count - len < 2) | 451 | seq_putc(m, '\n'); |
452 | return -EINVAL; | 452 | return 0; |
453 | len += sprintf(page + len, "\n"); | ||
454 | return len; | ||
455 | } | 453 | } |
456 | 454 | ||
457 | static int prof_cpu_mask_write_proc(struct file *file, | 455 | static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file) |
458 | const char __user *buffer, unsigned long count, void *data) | 456 | { |
457 | return single_open(file, prof_cpu_mask_proc_show, NULL); | ||
458 | } | ||
459 | |||
460 | static ssize_t prof_cpu_mask_proc_write(struct file *file, | ||
461 | const char __user *buffer, size_t count, loff_t *pos) | ||
459 | { | 462 | { |
460 | struct cpumask *mask = data; | ||
461 | unsigned long full_count = count, err; | ||
462 | cpumask_var_t new_value; | 463 | cpumask_var_t new_value; |
464 | int err; | ||
463 | 465 | ||
464 | if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) | 466 | if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) |
465 | return -ENOMEM; | 467 | return -ENOMEM; |
466 | 468 | ||
467 | err = cpumask_parse_user(buffer, count, new_value); | 469 | err = cpumask_parse_user(buffer, count, new_value); |
468 | if (!err) { | 470 | if (!err) { |
469 | cpumask_copy(mask, new_value); | 471 | cpumask_copy(prof_cpu_mask, new_value); |
470 | err = full_count; | 472 | err = count; |
471 | } | 473 | } |
472 | free_cpumask_var(new_value); | 474 | free_cpumask_var(new_value); |
473 | return err; | 475 | return err; |
474 | } | 476 | } |
475 | 477 | ||
478 | static const struct file_operations prof_cpu_mask_proc_fops = { | ||
479 | .open = prof_cpu_mask_proc_open, | ||
480 | .read = seq_read, | ||
481 | .llseek = seq_lseek, | ||
482 | .release = single_release, | ||
483 | .write = prof_cpu_mask_proc_write, | ||
484 | }; | ||
485 | |||
476 | void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) | 486 | void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) |
477 | { | 487 | { |
478 | struct proc_dir_entry *entry; | ||
479 | |||
480 | /* create /proc/irq/prof_cpu_mask */ | 488 | /* create /proc/irq/prof_cpu_mask */ |
481 | entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); | 489 | proc_create("prof_cpu_mask", 0600, root_irq_dir, &prof_cpu_mask_proc_fops); |
482 | if (!entry) | ||
483 | return; | ||
484 | entry->data = prof_cpu_mask; | ||
485 | entry->read_proc = prof_cpu_mask_read_proc; | ||
486 | entry->write_proc = prof_cpu_mask_write_proc; | ||
487 | } | 490 | } |
488 | 491 | ||
489 | /* | 492 | /* |
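Editor's note: the profile.c conversion is the standard move from the old read_proc/write_proc hooks to a seq_file backend registered with proc_create(). Below is a minimal read-only sketch of that pattern against the 2.6.3x procfs API; the example names and the constant it prints are illustrative only.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", 42);	/* render the whole file in one go */
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_proc_init(void)
{
	proc_create("example", 0444, NULL, &example_proc_fops);
	return 0;
}

static void __exit example_proc_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_proc_init);
module_exit(example_proc_exit);
MODULE_LICENSE("GPL");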
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 307c285af59e..23bd09cd042e 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -266,9 +266,10 @@ static int ignoring_children(struct sighand_struct *sigh) | |||
266 | * or self-reaping. Do notification now if it would have happened earlier. | 266 | * or self-reaping. Do notification now if it would have happened earlier. |
267 | * If it should reap itself, return true. | 267 | * If it should reap itself, return true. |
268 | * | 268 | * |
269 | * If it's our own child, there is no notification to do. | 269 | * If it's our own child, there is no notification to do. But if our normal |
270 | * But if our normal children self-reap, then this child | 270 | * children self-reap, then this child was prevented by ptrace and we must |
271 | * was prevented by ptrace and we must reap it now. | 271 | * reap it now, in that case we must also wake up sub-threads sleeping in |
272 | * do_wait(). | ||
272 | */ | 273 | */ |
273 | static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) | 274 | static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) |
274 | { | 275 | { |
@@ -278,8 +279,10 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) | |||
278 | if (!task_detached(p) && thread_group_empty(p)) { | 279 | if (!task_detached(p) && thread_group_empty(p)) { |
279 | if (!same_thread_group(p->real_parent, tracer)) | 280 | if (!same_thread_group(p->real_parent, tracer)) |
280 | do_notify_parent(p, p->exit_signal); | 281 | do_notify_parent(p, p->exit_signal); |
281 | else if (ignoring_children(tracer->sighand)) | 282 | else if (ignoring_children(tracer->sighand)) { |
283 | __wake_up_parent(p, tracer); | ||
282 | p->exit_signal = -1; | 284 | p->exit_signal = -1; |
285 | } | ||
283 | } | 286 | } |
284 | if (task_detached(p)) { | 287 | if (task_detached(p)) { |
285 | /* Mark it as in the process of being reaped. */ | 288 | /* Mark it as in the process of being reaped. */ |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index bd5d5c8e5140..400183346ad2 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -19,7 +19,7 @@ | |||
19 | * | 19 | * |
20 | * Authors: Dipankar Sarma <dipankar@in.ibm.com> | 20 | * Authors: Dipankar Sarma <dipankar@in.ibm.com> |
21 | * Manfred Spraul <manfred@colorfullife.com> | 21 | * Manfred Spraul <manfred@colorfullife.com> |
22 | * | 22 | * |
23 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> | 23 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> |
24 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | 24 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. |
25 | * Papers: | 25 | * Papers: |
@@ -27,7 +27,7 @@ | |||
27 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | 27 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) |
28 | * | 28 | * |
29 | * For detailed explanation of Read-Copy Update mechanism see - | 29 | * For detailed explanation of Read-Copy Update mechanism see - |
30 | * http://lse.sourceforge.net/locking/rcupdate.html | 30 | * http://lse.sourceforge.net/locking/rcupdate.html |
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/types.h> | 33 | #include <linux/types.h> |
@@ -46,22 +46,15 @@ | |||
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/kernel_stat.h> | 47 | #include <linux/kernel_stat.h> |
48 | 48 | ||
49 | enum rcu_barrier { | 49 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
50 | RCU_BARRIER_STD, | 50 | static struct lock_class_key rcu_lock_key; |
51 | RCU_BARRIER_BH, | 51 | struct lockdep_map rcu_lock_map = |
52 | RCU_BARRIER_SCHED, | 52 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key); |
53 | }; | 53 | EXPORT_SYMBOL_GPL(rcu_lock_map); |
54 | #endif | ||
54 | 55 | ||
55 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | ||
56 | static atomic_t rcu_barrier_cpu_count; | ||
57 | static DEFINE_MUTEX(rcu_barrier_mutex); | ||
58 | static struct completion rcu_barrier_completion; | ||
59 | int rcu_scheduler_active __read_mostly; | 56 | int rcu_scheduler_active __read_mostly; |
60 | 57 | ||
61 | static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0); | ||
62 | static struct rcu_head rcu_migrate_head[3]; | ||
63 | static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq); | ||
64 | |||
65 | /* | 58 | /* |
66 | * Awaken the corresponding synchronize_rcu() instance now that a | 59 | * Awaken the corresponding synchronize_rcu() instance now that a |
67 | * grace period has elapsed. | 60 | * grace period has elapsed. |
@@ -74,6 +67,8 @@ void wakeme_after_rcu(struct rcu_head *head) | |||
74 | complete(&rcu->completion); | 67 | complete(&rcu->completion); |
75 | } | 68 | } |
76 | 69 | ||
70 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
71 | |||
77 | /** | 72 | /** |
78 | * synchronize_rcu - wait until a grace period has elapsed. | 73 | * synchronize_rcu - wait until a grace period has elapsed. |
79 | * | 74 | * |
@@ -87,7 +82,7 @@ void synchronize_rcu(void) | |||
87 | { | 82 | { |
88 | struct rcu_synchronize rcu; | 83 | struct rcu_synchronize rcu; |
89 | 84 | ||
90 | if (rcu_blocking_is_gp()) | 85 | if (!rcu_scheduler_active) |
91 | return; | 86 | return; |
92 | 87 | ||
93 | init_completion(&rcu.completion); | 88 | init_completion(&rcu.completion); |
@@ -98,6 +93,46 @@ void synchronize_rcu(void) | |||
98 | } | 93 | } |
99 | EXPORT_SYMBOL_GPL(synchronize_rcu); | 94 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
100 | 95 | ||
96 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
97 | |||
98 | /** | ||
99 | * synchronize_sched - wait until an rcu-sched grace period has elapsed. | ||
100 | * | ||
101 | * Control will return to the caller some time after a full rcu-sched | ||
102 | * grace period has elapsed, in other words after all currently executing | ||
103 | * rcu-sched read-side critical sections have completed. These read-side | ||
104 | * critical sections are delimited by rcu_read_lock_sched() and | ||
105 | * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), | ||
106 | * local_irq_disable(), and so on may be used in place of | ||
107 | * rcu_read_lock_sched(). | ||
108 | * | ||
109 | * This means that all preempt_disable code sequences, including NMI and | ||
110 | * hardware-interrupt handlers, in progress on entry will have completed | ||
111 | * before this primitive returns. However, this does not guarantee that | ||
112 | * softirq handlers will have completed, since in some kernels, these | ||
113 | * handlers can run in process context, and can block. | ||
114 | * | ||
115 | * This primitive provides the guarantees made by the (now removed) | ||
116 | * synchronize_kernel() API. In contrast, synchronize_rcu() only | ||
117 | * guarantees that rcu_read_lock() sections will have completed. | ||
118 | * In "classic RCU", these two guarantees happen to be one and | ||
119 | * the same, but can differ in realtime RCU implementations. | ||
120 | */ | ||
121 | void synchronize_sched(void) | ||
122 | { | ||
123 | struct rcu_synchronize rcu; | ||
124 | |||
125 | if (rcu_blocking_is_gp()) | ||
126 | return; | ||
127 | |||
128 | init_completion(&rcu.completion); | ||
129 | /* Will wake me after RCU finished. */ | ||
130 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
131 | /* Wait for it. */ | ||
132 | wait_for_completion(&rcu.completion); | ||
133 | } | ||
134 | EXPORT_SYMBOL_GPL(synchronize_sched); | ||
135 | |||
101 | /** | 136 | /** |
102 | * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. | 137 | * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. |
103 | * | 138 | * |
@@ -122,129 +157,10 @@ void synchronize_rcu_bh(void) | |||
122 | } | 157 | } |
123 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | 158 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); |
124 | 159 | ||
125 | static void rcu_barrier_callback(struct rcu_head *notused) | ||
126 | { | ||
127 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | ||
128 | complete(&rcu_barrier_completion); | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Called with preemption disabled, and from cross-cpu IRQ context. | ||
133 | */ | ||
134 | static void rcu_barrier_func(void *type) | ||
135 | { | ||
136 | int cpu = smp_processor_id(); | ||
137 | struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); | ||
138 | |||
139 | atomic_inc(&rcu_barrier_cpu_count); | ||
140 | switch ((enum rcu_barrier)type) { | ||
141 | case RCU_BARRIER_STD: | ||
142 | call_rcu(head, rcu_barrier_callback); | ||
143 | break; | ||
144 | case RCU_BARRIER_BH: | ||
145 | call_rcu_bh(head, rcu_barrier_callback); | ||
146 | break; | ||
147 | case RCU_BARRIER_SCHED: | ||
148 | call_rcu_sched(head, rcu_barrier_callback); | ||
149 | break; | ||
150 | } | ||
151 | } | ||
152 | |||
153 | static inline void wait_migrated_callbacks(void) | ||
154 | { | ||
155 | wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); | ||
156 | smp_mb(); /* In case we didn't sleep. */ | ||
157 | } | ||
158 | |||
159 | /* | ||
160 | * Orchestrate the specified type of RCU barrier, waiting for all | ||
161 | * RCU callbacks of the specified type to complete. | ||
162 | */ | ||
163 | static void _rcu_barrier(enum rcu_barrier type) | ||
164 | { | ||
165 | BUG_ON(in_interrupt()); | ||
166 | /* Take cpucontrol mutex to protect against CPU hotplug */ | ||
167 | mutex_lock(&rcu_barrier_mutex); | ||
168 | init_completion(&rcu_barrier_completion); | ||
169 | /* | ||
170 | * Initialize rcu_barrier_cpu_count to 1, then invoke | ||
171 | * rcu_barrier_func() on each CPU, so that each CPU also has | ||
172 | * incremented rcu_barrier_cpu_count. Only then is it safe to | ||
173 | * decrement rcu_barrier_cpu_count -- otherwise the first CPU | ||
174 | * might complete its grace period before all of the other CPUs | ||
175 | * did their increment, causing this function to return too | ||
176 | * early. | ||
177 | */ | ||
178 | atomic_set(&rcu_barrier_cpu_count, 1); | ||
179 | on_each_cpu(rcu_barrier_func, (void *)type, 1); | ||
180 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | ||
181 | complete(&rcu_barrier_completion); | ||
182 | wait_for_completion(&rcu_barrier_completion); | ||
183 | mutex_unlock(&rcu_barrier_mutex); | ||
184 | wait_migrated_callbacks(); | ||
185 | } | ||
186 | |||
187 | /** | ||
188 | * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. | ||
189 | */ | ||
190 | void rcu_barrier(void) | ||
191 | { | ||
192 | _rcu_barrier(RCU_BARRIER_STD); | ||
193 | } | ||
194 | EXPORT_SYMBOL_GPL(rcu_barrier); | ||
195 | |||
196 | /** | ||
197 | * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. | ||
198 | */ | ||
199 | void rcu_barrier_bh(void) | ||
200 | { | ||
201 | _rcu_barrier(RCU_BARRIER_BH); | ||
202 | } | ||
203 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | ||
204 | |||
205 | /** | ||
206 | * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. | ||
207 | */ | ||
208 | void rcu_barrier_sched(void) | ||
209 | { | ||
210 | _rcu_barrier(RCU_BARRIER_SCHED); | ||
211 | } | ||
212 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | ||
213 | |||
214 | static void rcu_migrate_callback(struct rcu_head *notused) | ||
215 | { | ||
216 | if (atomic_dec_and_test(&rcu_migrate_type_count)) | ||
217 | wake_up(&rcu_migrate_wq); | ||
218 | } | ||
219 | |||
220 | extern int rcu_cpu_notify(struct notifier_block *self, | ||
221 | unsigned long action, void *hcpu); | ||
222 | |||
223 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, | 160 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, |
224 | unsigned long action, void *hcpu) | 161 | unsigned long action, void *hcpu) |
225 | { | 162 | { |
226 | rcu_cpu_notify(self, action, hcpu); | 163 | return rcu_cpu_notify(self, action, hcpu); |
227 | if (action == CPU_DYING) { | ||
228 | /* | ||
229 | * preempt_disable() in on_each_cpu() prevents stop_machine(), | ||
230 | * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);" | ||
231 | * returns, all online cpus have queued rcu_barrier_func(), | ||
232 | * and the dead cpu(if it exist) queues rcu_migrate_callback()s. | ||
233 | * | ||
234 | * These callbacks ensure _rcu_barrier() waits for all | ||
235 | * RCU callbacks of the specified type to complete. | ||
236 | */ | ||
237 | atomic_set(&rcu_migrate_type_count, 3); | ||
238 | call_rcu_bh(rcu_migrate_head, rcu_migrate_callback); | ||
239 | call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback); | ||
240 | call_rcu(rcu_migrate_head + 2, rcu_migrate_callback); | ||
241 | } else if (action == CPU_DOWN_PREPARE) { | ||
242 | /* Don't need to wait until next removal operation. */ | ||
243 | /* rcu_migrate_head is protected by cpu_add_remove_lock */ | ||
244 | wait_migrated_callbacks(); | ||
245 | } | ||
246 | |||
247 | return NOTIFY_OK; | ||
248 | } | 164 | } |
249 | 165 | ||
250 | void __init rcu_init(void) | 166 | void __init rcu_init(void) |
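Editor's note: synchronize_rcu() and the synchronize_sched() variant added above are update-side primitives: publish a new version of a structure, wait one grace period so every reader that might still hold the old pointer has finished, then free the old copy. A minimal sketch of that pattern follows; the example_* names are illustrative and not taken from the patch (and the __rcu annotation of later kernels is deliberately not used).

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_cfg {
	int a, b;
};

static struct example_cfg *example_cfg;
static DEFINE_SPINLOCK(example_lock);

/* Reader side: no locks, just an RCU read-side critical section. */
int example_read_a(void)
{
	struct example_cfg *p;
	int a = 0;

	rcu_read_lock();
	p = rcu_dereference(example_cfg);
	if (p)
		a = p->a;
	rcu_read_unlock();
	return a;
}

/* Update side: publish, wait a grace period, then free the old version. */
int example_update(int a, int b)
{
	struct example_cfg *new, *old;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->a = a;
	new->b = b;

	spin_lock(&example_lock);
	old = example_cfg;
	rcu_assign_pointer(example_cfg, new);
	spin_unlock(&example_lock);

	synchronize_rcu();	/* wait out readers that may still see 'old' */
	kfree(old);
	return 0;
}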
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index b33db539a8ad..697c0a0229d4 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -18,7 +18,7 @@ | |||
18 | * Copyright (C) IBM Corporation, 2005, 2006 | 18 | * Copyright (C) IBM Corporation, 2005, 2006 |
19 | * | 19 | * |
20 | * Authors: Paul E. McKenney <paulmck@us.ibm.com> | 20 | * Authors: Paul E. McKenney <paulmck@us.ibm.com> |
21 | * Josh Triplett <josh@freedesktop.org> | 21 | * Josh Triplett <josh@freedesktop.org> |
22 | * | 22 | * |
23 | * See also: Documentation/RCU/torture.txt | 23 | * See also: Documentation/RCU/torture.txt |
24 | */ | 24 | */ |
@@ -50,7 +50,7 @@ | |||
50 | 50 | ||
51 | MODULE_LICENSE("GPL"); | 51 | MODULE_LICENSE("GPL"); |
52 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and " | 52 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and " |
53 | "Josh Triplett <josh@freedesktop.org>"); | 53 | "Josh Triplett <josh@freedesktop.org>"); |
54 | 54 | ||
55 | static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */ | 55 | static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */ |
56 | static int nfakewriters = 4; /* # fake writer threads */ | 56 | static int nfakewriters = 4; /* # fake writer threads */ |
@@ -110,8 +110,8 @@ struct rcu_torture { | |||
110 | }; | 110 | }; |
111 | 111 | ||
112 | static LIST_HEAD(rcu_torture_freelist); | 112 | static LIST_HEAD(rcu_torture_freelist); |
113 | static struct rcu_torture *rcu_torture_current = NULL; | 113 | static struct rcu_torture *rcu_torture_current; |
114 | static long rcu_torture_current_version = 0; | 114 | static long rcu_torture_current_version; |
115 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; | 115 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; |
116 | static DEFINE_SPINLOCK(rcu_torture_lock); | 116 | static DEFINE_SPINLOCK(rcu_torture_lock); |
117 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = | 117 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = |
@@ -124,11 +124,11 @@ static atomic_t n_rcu_torture_alloc_fail; | |||
124 | static atomic_t n_rcu_torture_free; | 124 | static atomic_t n_rcu_torture_free; |
125 | static atomic_t n_rcu_torture_mberror; | 125 | static atomic_t n_rcu_torture_mberror; |
126 | static atomic_t n_rcu_torture_error; | 126 | static atomic_t n_rcu_torture_error; |
127 | static long n_rcu_torture_timers = 0; | 127 | static long n_rcu_torture_timers; |
128 | static struct list_head rcu_torture_removed; | 128 | static struct list_head rcu_torture_removed; |
129 | static cpumask_var_t shuffle_tmp_mask; | 129 | static cpumask_var_t shuffle_tmp_mask; |
130 | 130 | ||
131 | static int stutter_pause_test = 0; | 131 | static int stutter_pause_test; |
132 | 132 | ||
133 | #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) | 133 | #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) |
134 | #define RCUTORTURE_RUNNABLE_INIT 1 | 134 | #define RCUTORTURE_RUNNABLE_INIT 1 |
@@ -267,7 +267,8 @@ struct rcu_torture_ops { | |||
267 | int irq_capable; | 267 | int irq_capable; |
268 | char *name; | 268 | char *name; |
269 | }; | 269 | }; |
270 | static struct rcu_torture_ops *cur_ops = NULL; | 270 | |
271 | static struct rcu_torture_ops *cur_ops; | ||
271 | 272 | ||
272 | /* | 273 | /* |
273 | * Definitions for rcu torture testing. | 274 | * Definitions for rcu torture testing. |
@@ -281,14 +282,17 @@ static int rcu_torture_read_lock(void) __acquires(RCU) | |||
281 | 282 | ||
282 | static void rcu_read_delay(struct rcu_random_state *rrsp) | 283 | static void rcu_read_delay(struct rcu_random_state *rrsp) |
283 | { | 284 | { |
284 | long delay; | 285 | const unsigned long shortdelay_us = 200; |
285 | const long longdelay = 200; | 286 | const unsigned long longdelay_ms = 50; |
286 | 287 | ||
287 | /* We want there to be long-running readers, but not all the time. */ | 288 | /* We want a short delay sometimes to make a reader delay the grace |
289 | * period, and we want a long delay occasionally to trigger | ||
290 | * force_quiescent_state. */ | ||
288 | 291 | ||
289 | delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay); | 292 | if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) |
290 | if (!delay) | 293 | mdelay(longdelay_ms); |
291 | udelay(longdelay); | 294 | if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) |
295 | udelay(shortdelay_us); | ||
292 | } | 296 | } |
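For a sense of scale of the reworked delays: the two modulo tests make long delays rare enough not to dominate throughput while still exercising both the grace-period-extension and force_quiescent_state() paths. A throwaway program to print the implied per-read probabilities, assuming eight readers (for example the 2*ncpus default on a 4-CPU box); the nrealreaders value is an assumption here, in the module it is derived from the nreaders parameter:

#include <stdio.h>

int main(void)
{
        const unsigned long shortdelay_us = 200;
        const unsigned long longdelay_ms  = 50;
        unsigned long nrealreaders = 8;         /* assumed reader count */

        /* one long (50 ms) delay roughly every N read-side critical sections */
        printf("P(50ms delay per read)  = 1/%lu\n",
               nrealreaders * 2000 * longdelay_ms);
        /* one short (200 us) delay roughly every N read-side critical sections */
        printf("P(200us delay per read) = 1/%lu\n",
               nrealreaders * 2 * shortdelay_us);
        return 0;
}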
293 | 297 | ||
294 | static void rcu_torture_read_unlock(int idx) __releases(RCU) | 298 | static void rcu_torture_read_unlock(int idx) __releases(RCU) |
@@ -339,8 +343,8 @@ static struct rcu_torture_ops rcu_ops = { | |||
339 | .sync = synchronize_rcu, | 343 | .sync = synchronize_rcu, |
340 | .cb_barrier = rcu_barrier, | 344 | .cb_barrier = rcu_barrier, |
341 | .stats = NULL, | 345 | .stats = NULL, |
342 | .irq_capable = 1, | 346 | .irq_capable = 1, |
343 | .name = "rcu" | 347 | .name = "rcu" |
344 | }; | 348 | }; |
345 | 349 | ||
346 | static void rcu_sync_torture_deferred_free(struct rcu_torture *p) | 350 | static void rcu_sync_torture_deferred_free(struct rcu_torture *p) |
@@ -602,8 +606,6 @@ static struct rcu_torture_ops sched_ops_sync = { | |||
602 | .name = "sched_sync" | 606 | .name = "sched_sync" |
603 | }; | 607 | }; |
604 | 608 | ||
605 | extern int rcu_expedited_torture_stats(char *page); | ||
606 | |||
607 | static struct rcu_torture_ops sched_expedited_ops = { | 609 | static struct rcu_torture_ops sched_expedited_ops = { |
608 | .init = rcu_sync_torture_init, | 610 | .init = rcu_sync_torture_init, |
609 | .cleanup = NULL, | 611 | .cleanup = NULL, |
@@ -638,14 +640,15 @@ rcu_torture_writer(void *arg) | |||
638 | 640 | ||
639 | do { | 641 | do { |
640 | schedule_timeout_uninterruptible(1); | 642 | schedule_timeout_uninterruptible(1); |
641 | if ((rp = rcu_torture_alloc()) == NULL) | 643 | rp = rcu_torture_alloc(); |
644 | if (rp == NULL) | ||
642 | continue; | 645 | continue; |
643 | rp->rtort_pipe_count = 0; | 646 | rp->rtort_pipe_count = 0; |
644 | udelay(rcu_random(&rand) & 0x3ff); | 647 | udelay(rcu_random(&rand) & 0x3ff); |
645 | old_rp = rcu_torture_current; | 648 | old_rp = rcu_torture_current; |
646 | rp->rtort_mbtest = 1; | 649 | rp->rtort_mbtest = 1; |
647 | rcu_assign_pointer(rcu_torture_current, rp); | 650 | rcu_assign_pointer(rcu_torture_current, rp); |
648 | smp_wmb(); | 651 | smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ |
649 | if (old_rp) { | 652 | if (old_rp) { |
650 | i = old_rp->rtort_pipe_count; | 653 | i = old_rp->rtort_pipe_count; |
651 | if (i > RCU_TORTURE_PIPE_LEN) | 654 | if (i > RCU_TORTURE_PIPE_LEN) |
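The new smp_wmb() comment above is about store ordering on the update side: the replacement element must be reachable through the shared pointer before the writer starts retiring the element it displaced, so a reader that still sees the old pointer does not also see the old element being torn down. A rough standalone model of that publish-then-retire step, using sequentially consistent C11 atomics where the kernel relies on rcu_assign_pointer() plus smp_wmb(); the struct and function names are invented for the sketch:

#include <stdatomic.h>

struct elem {
        atomic_int pipe_count;
        int payload;
};

static _Atomic(struct elem *) current_elem;     /* models rcu_torture_current */

void publish_and_retire(struct elem *newp)
{
        struct elem *oldp = atomic_load(&current_elem);

        atomic_store(&newp->pipe_count, 0);
        atomic_store(&current_elem, newp);              /* publish first... */
        if (oldp)
                atomic_fetch_add(&oldp->pipe_count, 1); /* ...then touch the old one */
}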
@@ -1110,7 +1113,7 @@ rcu_torture_init(void) | |||
1110 | printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", | 1113 | printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", |
1111 | torture_type); | 1114 | torture_type); |
1112 | mutex_unlock(&fullstop_mutex); | 1115 | mutex_unlock(&fullstop_mutex); |
1113 | return (-EINVAL); | 1116 | return -EINVAL; |
1114 | } | 1117 | } |
1115 | if (cur_ops->init) | 1118 | if (cur_ops->init) |
1116 | cur_ops->init(); /* no "goto unwind" prior to this point!!! */ | 1119 | cur_ops->init(); /* no "goto unwind" prior to this point!!! */ |
@@ -1161,7 +1164,7 @@ rcu_torture_init(void) | |||
1161 | goto unwind; | 1164 | goto unwind; |
1162 | } | 1165 | } |
1163 | fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), | 1166 | fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), |
1164 | GFP_KERNEL); | 1167 | GFP_KERNEL); |
1165 | if (fakewriter_tasks == NULL) { | 1168 | if (fakewriter_tasks == NULL) { |
1166 | VERBOSE_PRINTK_ERRSTRING("out of memory"); | 1169 | VERBOSE_PRINTK_ERRSTRING("out of memory"); |
1167 | firsterr = -ENOMEM; | 1170 | firsterr = -ENOMEM; |
@@ -1170,7 +1173,7 @@ rcu_torture_init(void) | |||
1170 | for (i = 0; i < nfakewriters; i++) { | 1173 | for (i = 0; i < nfakewriters; i++) { |
1171 | VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task"); | 1174 | VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task"); |
1172 | fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL, | 1175 | fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL, |
1173 | "rcu_torture_fakewriter"); | 1176 | "rcu_torture_fakewriter"); |
1174 | if (IS_ERR(fakewriter_tasks[i])) { | 1177 | if (IS_ERR(fakewriter_tasks[i])) { |
1175 | firsterr = PTR_ERR(fakewriter_tasks[i]); | 1178 | firsterr = PTR_ERR(fakewriter_tasks[i]); |
1176 | VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter"); | 1179 | VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter"); |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 6b11b07cfe7f..f3077c0ab181 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -25,7 +25,7 @@ | |||
25 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | 25 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. |
26 | * | 26 | * |
27 | * For detailed explanation of Read-Copy Update mechanism see - | 27 | * For detailed explanation of Read-Copy Update mechanism see - |
28 | * Documentation/RCU | 28 | * Documentation/RCU |
29 | */ | 29 | */ |
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
@@ -49,13 +49,6 @@ | |||
49 | 49 | ||
50 | #include "rcutree.h" | 50 | #include "rcutree.h" |
51 | 51 | ||
52 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
53 | static struct lock_class_key rcu_lock_key; | ||
54 | struct lockdep_map rcu_lock_map = | ||
55 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key); | ||
56 | EXPORT_SYMBOL_GPL(rcu_lock_map); | ||
57 | #endif | ||
58 | |||
59 | /* Data structures. */ | 52 | /* Data structures. */ |
60 | 53 | ||
61 | #define RCU_STATE_INITIALIZER(name) { \ | 54 | #define RCU_STATE_INITIALIZER(name) { \ |
@@ -66,10 +59,13 @@ EXPORT_SYMBOL_GPL(rcu_lock_map); | |||
66 | NUM_RCU_LVL_2, \ | 59 | NUM_RCU_LVL_2, \ |
67 | NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ | 60 | NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ |
68 | }, \ | 61 | }, \ |
69 | .signaled = RCU_SIGNAL_INIT, \ | 62 | .signaled = RCU_GP_IDLE, \ |
70 | .gpnum = -300, \ | 63 | .gpnum = -300, \ |
71 | .completed = -300, \ | 64 | .completed = -300, \ |
72 | .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ | 65 | .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ |
66 | .orphan_cbs_list = NULL, \ | ||
67 | .orphan_cbs_tail = &name.orphan_cbs_list, \ | ||
68 | .orphan_qlen = 0, \ | ||
73 | .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \ | 69 | .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \ |
74 | .n_force_qs = 0, \ | 70 | .n_force_qs = 0, \ |
75 | .n_force_qs_ngp = 0, \ | 71 | .n_force_qs_ngp = 0, \ |
@@ -81,24 +77,16 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); | |||
81 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 77 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
82 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 78 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
83 | 79 | ||
84 | extern long rcu_batches_completed_sched(void); | ||
85 | static struct rcu_node *rcu_get_root(struct rcu_state *rsp); | ||
86 | static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, | ||
87 | struct rcu_node *rnp, unsigned long flags); | ||
88 | static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags); | ||
89 | #ifdef CONFIG_HOTPLUG_CPU | ||
90 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp); | ||
91 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
92 | static void __rcu_process_callbacks(struct rcu_state *rsp, | ||
93 | struct rcu_data *rdp); | ||
94 | static void __call_rcu(struct rcu_head *head, | ||
95 | void (*func)(struct rcu_head *rcu), | ||
96 | struct rcu_state *rsp); | ||
97 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp); | ||
98 | static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp, | ||
99 | int preemptable); | ||
100 | 80 | ||
101 | #include "rcutree_plugin.h" | 81 | /* |
82 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | ||
83 | * permit this function to be invoked without holding the root rcu_node | ||
84 | * structure's ->lock, but of course results can be subject to change. | ||
85 | */ | ||
86 | static int rcu_gp_in_progress(struct rcu_state *rsp) | ||
87 | { | ||
88 | return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum); | ||
89 | } | ||
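rcu_gp_in_progress() is a deliberately lockless check: two one-time volatile reads compared against each other, usable as a hint without holding the root rcu_node lock, with the understanding that the answer can go stale immediately. A self-contained sketch of the same idiom; READ_ONCE_LONG here is a stand-in for the kernel's ACCESS_ONCE(), and the structure is hypothetical:

/* read the field exactly once, through a volatile lvalue */
#define READ_ONCE_LONG(x)  (*(volatile long *)&(x))

struct gp_state {
        long gpnum;     /* most recently started grace period */
        long completed; /* most recently completed grace period */
};

static int gp_in_progress(struct gp_state *s)
{
        /* a grace period is in flight while the two counters differ */
        return READ_ONCE_LONG(s->completed) != READ_ONCE_LONG(s->gpnum);
}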
102 | 90 | ||
103 | /* | 91 | /* |
104 | * Note a quiescent state. Because we do not need to know | 92 | * Note a quiescent state. Because we do not need to know |
@@ -107,27 +95,23 @@ static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp, | |||
107 | */ | 95 | */ |
108 | void rcu_sched_qs(int cpu) | 96 | void rcu_sched_qs(int cpu) |
109 | { | 97 | { |
110 | unsigned long flags; | ||
111 | struct rcu_data *rdp; | 98 | struct rcu_data *rdp; |
112 | 99 | ||
113 | local_irq_save(flags); | ||
114 | rdp = &per_cpu(rcu_sched_data, cpu); | 100 | rdp = &per_cpu(rcu_sched_data, cpu); |
115 | rdp->passed_quiesc = 1; | ||
116 | rdp->passed_quiesc_completed = rdp->completed; | 101 | rdp->passed_quiesc_completed = rdp->completed; |
117 | rcu_preempt_qs(cpu); | 102 | barrier(); |
118 | local_irq_restore(flags); | 103 | rdp->passed_quiesc = 1; |
104 | rcu_preempt_note_context_switch(cpu); | ||
119 | } | 105 | } |
120 | 106 | ||
121 | void rcu_bh_qs(int cpu) | 107 | void rcu_bh_qs(int cpu) |
122 | { | 108 | { |
123 | unsigned long flags; | ||
124 | struct rcu_data *rdp; | 109 | struct rcu_data *rdp; |
125 | 110 | ||
126 | local_irq_save(flags); | ||
127 | rdp = &per_cpu(rcu_bh_data, cpu); | 111 | rdp = &per_cpu(rcu_bh_data, cpu); |
128 | rdp->passed_quiesc = 1; | ||
129 | rdp->passed_quiesc_completed = rdp->completed; | 112 | rdp->passed_quiesc_completed = rdp->completed; |
130 | local_irq_restore(flags); | 113 | barrier(); |
114 | rdp->passed_quiesc = 1; | ||
131 | } | 115 | } |
132 | 116 | ||
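The reordered stores above (snapshot ->completed first, then set ->passed_quiesc, with barrier() in between) matter because these paths no longer disable interrupts: an interrupt or softirq on the same CPU that finds the flag set must be guaranteed to find a snapshot that was taken no later than the flag. A minimal sketch of the pattern with a GCC-style compiler barrier; the structure is hypothetical, not the kernel's rcu_data:

#define compiler_barrier()  __asm__ __volatile__("" : : : "memory")

struct qs_record {
        long completed;                 /* updated by grace-period machinery */
        long passed_quiesc_completed;   /* snapshot consumed later */
        int  passed_quiesc;             /* "snapshot above is valid" flag */
};

static void note_quiescent_state(struct qs_record *r)
{
        r->passed_quiesc_completed = r->completed;
        compiler_barrier();     /* keep the flag store from floating above the snapshot */
        r->passed_quiesc = 1;
}

A compiler barrier is sufficient here because producer and consumer run on the same CPU; no cross-CPU memory ordering is being claimed.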
133 | #ifdef CONFIG_NO_HZ | 117 | #ifdef CONFIG_NO_HZ |
@@ -141,6 +125,10 @@ static int blimit = 10; /* Maximum callbacks per softirq. */ | |||
141 | static int qhimark = 10000; /* If this many pending, ignore blimit. */ | 125 | static int qhimark = 10000; /* If this many pending, ignore blimit. */ |
142 | static int qlowmark = 100; /* Once only this many pending, use blimit. */ | 126 | static int qlowmark = 100; /* Once only this many pending, use blimit. */ |
143 | 127 | ||
128 | module_param(blimit, int, 0); | ||
129 | module_param(qhimark, int, 0); | ||
130 | module_param(qlowmark, int, 0); | ||
131 | |||
144 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); | 132 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); |
145 | static int rcu_pending(int cpu); | 133 | static int rcu_pending(int cpu); |
146 | 134 | ||
@@ -177,9 +165,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp) | |||
177 | static int | 165 | static int |
178 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) | 166 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) |
179 | { | 167 | { |
180 | /* ACCESS_ONCE() because we are accessing outside of lock. */ | 168 | return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp); |
181 | return *rdp->nxttail[RCU_DONE_TAIL] && | ||
182 | ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum); | ||
183 | } | 169 | } |
184 | 170 | ||
185 | /* | 171 | /* |
@@ -373,7 +359,7 @@ static long dyntick_recall_completed(struct rcu_state *rsp) | |||
373 | /* | 359 | /* |
374 | * Snapshot the specified CPU's dynticks counter so that we can later | 360 | * Snapshot the specified CPU's dynticks counter so that we can later |
375 | * credit them with an implicit quiescent state. Return 1 if this CPU | 361 | * credit them with an implicit quiescent state. Return 1 if this CPU |
376 | * is already in a quiescent state courtesy of dynticks idle mode. | 362 | * is in dynticks idle mode, which is an extended quiescent state. |
377 | */ | 363 | */ |
378 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | 364 | static int dyntick_save_progress_counter(struct rcu_data *rdp) |
379 | { | 365 | { |
@@ -479,30 +465,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
479 | long delta; | 465 | long delta; |
480 | unsigned long flags; | 466 | unsigned long flags; |
481 | struct rcu_node *rnp = rcu_get_root(rsp); | 467 | struct rcu_node *rnp = rcu_get_root(rsp); |
482 | struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; | ||
483 | struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES]; | ||
484 | 468 | ||
485 | /* Only let one CPU complain about others per time interval. */ | 469 | /* Only let one CPU complain about others per time interval. */ |
486 | 470 | ||
487 | spin_lock_irqsave(&rnp->lock, flags); | 471 | spin_lock_irqsave(&rnp->lock, flags); |
488 | delta = jiffies - rsp->jiffies_stall; | 472 | delta = jiffies - rsp->jiffies_stall; |
489 | if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) { | 473 | if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) { |
490 | spin_unlock_irqrestore(&rnp->lock, flags); | 474 | spin_unlock_irqrestore(&rnp->lock, flags); |
491 | return; | 475 | return; |
492 | } | 476 | } |
493 | rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | 477 | rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; |
478 | |||
479 | /* | ||
480 | * Now rat on any tasks that got kicked up to the root rcu_node | ||
481 | * due to CPU offlining. | ||
482 | */ | ||
483 | rcu_print_task_stall(rnp); | ||
494 | spin_unlock_irqrestore(&rnp->lock, flags); | 484 | spin_unlock_irqrestore(&rnp->lock, flags); |
495 | 485 | ||
496 | /* OK, time to rat on our buddy... */ | 486 | /* OK, time to rat on our buddy... */ |
497 | 487 | ||
498 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); | 488 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); |
499 | for (; rnp_cur < rnp_end; rnp_cur++) { | 489 | rcu_for_each_leaf_node(rsp, rnp) { |
500 | rcu_print_task_stall(rnp); | 490 | rcu_print_task_stall(rnp); |
501 | if (rnp_cur->qsmask == 0) | 491 | if (rnp->qsmask == 0) |
502 | continue; | 492 | continue; |
503 | for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++) | 493 | for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) |
504 | if (rnp_cur->qsmask & (1UL << cpu)) | 494 | if (rnp->qsmask & (1UL << cpu)) |
505 | printk(" %d", rnp_cur->grplo + cpu); | 495 | printk(" %d", rnp->grplo + cpu); |
506 | } | 496 | } |
507 | printk(" (detected by %d, t=%ld jiffies)\n", | 497 | printk(" (detected by %d, t=%ld jiffies)\n", |
508 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); | 498 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); |
@@ -541,8 +531,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
541 | /* We haven't checked in, so go dump stack. */ | 531 | /* We haven't checked in, so go dump stack. */ |
542 | print_cpu_stall(rsp); | 532 | print_cpu_stall(rsp); |
543 | 533 | ||
544 | } else if (rsp->gpnum != rsp->completed && | 534 | } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) { |
545 | delta >= RCU_STALL_RAT_DELAY) { | ||
546 | 535 | ||
547 | /* They had two time units to dump stack, so complain. */ | 536 | /* They had two time units to dump stack, so complain. */ |
548 | print_other_cpu_stall(rsp); | 537 | print_other_cpu_stall(rsp); |
@@ -605,8 +594,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
605 | { | 594 | { |
606 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | 595 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; |
607 | struct rcu_node *rnp = rcu_get_root(rsp); | 596 | struct rcu_node *rnp = rcu_get_root(rsp); |
608 | struct rcu_node *rnp_cur; | ||
609 | struct rcu_node *rnp_end; | ||
610 | 597 | ||
611 | if (!cpu_needs_another_gp(rsp, rdp)) { | 598 | if (!cpu_needs_another_gp(rsp, rdp)) { |
612 | spin_unlock_irqrestore(&rnp->lock, flags); | 599 | spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -615,6 +602,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
615 | 602 | ||
616 | /* Advance to a new grace period and initialize state. */ | 603 | /* Advance to a new grace period and initialize state. */ |
617 | rsp->gpnum++; | 604 | rsp->gpnum++; |
605 | WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT); | ||
618 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ | 606 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ |
619 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 607 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
620 | record_gp_stall_check_time(rsp); | 608 | record_gp_stall_check_time(rsp); |
@@ -622,16 +610,24 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
622 | note_new_gpnum(rsp, rdp); | 610 | note_new_gpnum(rsp, rdp); |
623 | 611 | ||
624 | /* | 612 | /* |
625 | * Because we are first, we know that all our callbacks will | 613 | * Because this CPU just now started the new grace period, we know |
626 | * be covered by this upcoming grace period, even the ones | 614 | * that all of its callbacks will be covered by this upcoming grace |
627 | * that were registered arbitrarily recently. | 615 | * period, even the ones that were registered arbitrarily recently. |
616 | * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
617 | * | ||
618 | * Other CPUs cannot be sure exactly when the grace period started. | ||
619 | * Therefore, their recently registered callbacks must pass through | ||
620 | * an additional RCU_NEXT_READY stage, so that they will be handled | ||
621 | * by the next RCU grace period. | ||
628 | */ | 622 | */ |
629 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 623 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; |
630 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 624 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; |
631 | 625 | ||
632 | /* Special-case the common single-level case. */ | 626 | /* Special-case the common single-level case. */ |
633 | if (NUM_RCU_NODES == 1) { | 627 | if (NUM_RCU_NODES == 1) { |
628 | rcu_preempt_check_blocked_tasks(rnp); | ||
634 | rnp->qsmask = rnp->qsmaskinit; | 629 | rnp->qsmask = rnp->qsmaskinit; |
630 | rnp->gpnum = rsp->gpnum; | ||
635 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ | 631 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ |
636 | spin_unlock_irqrestore(&rnp->lock, flags); | 632 | spin_unlock_irqrestore(&rnp->lock, flags); |
637 | return; | 633 | return; |
@@ -644,45 +640,34 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
644 | spin_lock(&rsp->onofflock); /* irqs already disabled. */ | 640 | spin_lock(&rsp->onofflock); /* irqs already disabled. */ |
645 | 641 | ||
646 | /* | 642 | /* |
647 | * Set the quiescent-state-needed bits in all the non-leaf RCU | 643 | * Set the quiescent-state-needed bits in all the rcu_node |
648 | * nodes for all currently online CPUs. This operation relies | 644 | * structures for all currently online CPUs in breadth-first |
649 | * on the layout of the hierarchy within the rsp->node[] array. | 645 | * order, starting from the root rcu_node structure. This |
650 | * Note that other CPUs will access only the leaves of the | 646 | * operation relies on the layout of the hierarchy within the |
651 | * hierarchy, which still indicate that no grace period is in | 647 | * rsp->node[] array. Note that other CPUs will access only |
652 | * progress. In addition, we have excluded CPU-hotplug operations. | 648 | * the leaves of the hierarchy, which still indicate that no |
653 | * | 649 | * grace period is in progress, at least until the corresponding |
654 | * We therefore do not need to hold any locks. Any required | 650 | * leaf node has been initialized. In addition, we have excluded |
655 | * memory barriers will be supplied by the locks guarding the | 651 | * CPU-hotplug operations. |
656 | * leaf rcu_nodes in the hierarchy. | ||
657 | */ | ||
658 | |||
659 | rnp_end = rsp->level[NUM_RCU_LVLS - 1]; | ||
660 | for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) | ||
661 | rnp_cur->qsmask = rnp_cur->qsmaskinit; | ||
662 | |||
663 | /* | ||
664 | * Now set up the leaf nodes. Here we must be careful. First, | ||
665 | * we need to hold the lock in order to exclude other CPUs, which | ||
666 | * might be contending for the leaf nodes' locks. Second, as | ||
667 | * soon as we initialize a given leaf node, its CPUs might run | ||
668 | * up the rest of the hierarchy. We must therefore acquire locks | ||
669 | * for each node that we touch during this stage. (But we still | ||
670 | * are excluding CPU-hotplug operations.) | ||
671 | * | 652 | * |
672 | * Note that the grace period cannot complete until we finish | 653 | * Note that the grace period cannot complete until we finish |
673 | * the initialization process, as there will be at least one | 654 | * the initialization process, as there will be at least one |
674 | * qsmask bit set in the root node until that time, namely the | 655 | * qsmask bit set in the root node until that time, namely the |
675 | * one corresponding to this CPU. | 656 | * one corresponding to this CPU, due to the fact that we have |
657 | * irqs disabled. | ||
676 | */ | 658 | */ |
677 | rnp_end = &rsp->node[NUM_RCU_NODES]; | 659 | rcu_for_each_node_breadth_first(rsp, rnp) { |
678 | rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; | 660 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
679 | for (; rnp_cur < rnp_end; rnp_cur++) { | 661 | rcu_preempt_check_blocked_tasks(rnp); |
680 | spin_lock(&rnp_cur->lock); /* irqs already disabled. */ | 662 | rnp->qsmask = rnp->qsmaskinit; |
681 | rnp_cur->qsmask = rnp_cur->qsmaskinit; | 663 | rnp->gpnum = rsp->gpnum; |
682 | spin_unlock(&rnp_cur->lock); /* irqs already disabled. */ | 664 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
683 | } | 665 | } |
684 | 666 | ||
667 | rnp = rcu_get_root(rsp); | ||
668 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
685 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ | 669 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ |
670 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
686 | spin_unlock_irqrestore(&rsp->onofflock, flags); | 671 | spin_unlock_irqrestore(&rsp->onofflock, flags); |
687 | } | 672 | } |
688 | 673 | ||
@@ -720,9 +705,11 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | |||
720 | * hold rnp->lock, as required by rcu_start_gp(), which will release it. | 705 | * hold rnp->lock, as required by rcu_start_gp(), which will release it. |
721 | */ | 706 | */ |
722 | static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) | 707 | static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) |
723 | __releases(rnp->lock) | 708 | __releases(rcu_get_root(rsp)->lock) |
724 | { | 709 | { |
710 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); | ||
725 | rsp->completed = rsp->gpnum; | 711 | rsp->completed = rsp->gpnum; |
712 | rsp->signaled = RCU_GP_IDLE; | ||
726 | rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); | 713 | rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); |
727 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ | 714 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ |
728 | } | 715 | } |
@@ -739,6 +726,8 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | |||
739 | unsigned long flags) | 726 | unsigned long flags) |
740 | __releases(rnp->lock) | 727 | __releases(rnp->lock) |
741 | { | 728 | { |
729 | struct rcu_node *rnp_c; | ||
730 | |||
742 | /* Walk up the rcu_node hierarchy. */ | 731 | /* Walk up the rcu_node hierarchy. */ |
743 | for (;;) { | 732 | for (;;) { |
744 | if (!(rnp->qsmask & mask)) { | 733 | if (!(rnp->qsmask & mask)) { |
@@ -762,8 +751,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | |||
762 | break; | 751 | break; |
763 | } | 752 | } |
764 | spin_unlock_irqrestore(&rnp->lock, flags); | 753 | spin_unlock_irqrestore(&rnp->lock, flags); |
754 | rnp_c = rnp; | ||
765 | rnp = rnp->parent; | 755 | rnp = rnp->parent; |
766 | spin_lock_irqsave(&rnp->lock, flags); | 756 | spin_lock_irqsave(&rnp->lock, flags); |
757 | WARN_ON_ONCE(rnp_c->qsmask); | ||
767 | } | 758 | } |
768 | 759 | ||
769 | /* | 760 | /* |
@@ -776,10 +767,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | |||
776 | 767 | ||
777 | /* | 768 | /* |
778 | * Record a quiescent state for the specified CPU, which must either be | 769 | * Record a quiescent state for the specified CPU, which must either be |
779 | * the current CPU or an offline CPU. The lastcomp argument is used to | 770 | * the current CPU. The lastcomp argument is used to make sure we are |
780 | * make sure we are still in the grace period of interest. We don't want | 771 | * still in the grace period of interest. We don't want to end the current |
781 | * to end the current grace period based on quiescent states detected in | 772 | * grace period based on quiescent states detected in an earlier grace |
782 | * an earlier grace period! | 773 | * period! |
783 | */ | 774 | */ |
784 | static void | 775 | static void |
785 | cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | 776 | cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) |
@@ -814,7 +805,6 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | |||
814 | * This GP can't end until cpu checks in, so all of our | 805 | * This GP can't end until cpu checks in, so all of our |
815 | * callbacks can be processed during the next GP. | 806 | * callbacks can be processed during the next GP. |
816 | */ | 807 | */ |
817 | rdp = rsp->rda[smp_processor_id()]; | ||
818 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 808 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; |
819 | 809 | ||
820 | cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ | 810 | cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ |
@@ -855,24 +845,70 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) | |||
855 | #ifdef CONFIG_HOTPLUG_CPU | 845 | #ifdef CONFIG_HOTPLUG_CPU |
856 | 846 | ||
857 | /* | 847 | /* |
848 | * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the | ||
849 | * specified flavor of RCU. The callbacks will be adopted by the next | ||
850 | * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever | ||
851 | * comes first. Because this is invoked from the CPU_DYING notifier, | ||
852 | * irqs are already disabled. | ||
853 | */ | ||
854 | static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) | ||
855 | { | ||
856 | int i; | ||
857 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | ||
858 | |||
859 | if (rdp->nxtlist == NULL) | ||
860 | return; /* irqs disabled, so comparison is stable. */ | ||
861 | spin_lock(&rsp->onofflock); /* irqs already disabled. */ | ||
862 | *rsp->orphan_cbs_tail = rdp->nxtlist; | ||
863 | rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL]; | ||
864 | rdp->nxtlist = NULL; | ||
865 | for (i = 0; i < RCU_NEXT_SIZE; i++) | ||
866 | rdp->nxttail[i] = &rdp->nxtlist; | ||
867 | rsp->orphan_qlen += rdp->qlen; | ||
868 | rdp->qlen = 0; | ||
869 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | ||
870 | } | ||
871 | |||
872 | /* | ||
873 | * Adopt previously orphaned RCU callbacks. | ||
874 | */ | ||
875 | static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | ||
876 | { | ||
877 | unsigned long flags; | ||
878 | struct rcu_data *rdp; | ||
879 | |||
880 | spin_lock_irqsave(&rsp->onofflock, flags); | ||
881 | rdp = rsp->rda[smp_processor_id()]; | ||
882 | if (rsp->orphan_cbs_list == NULL) { | ||
883 | spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
884 | return; | ||
885 | } | ||
886 | *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list; | ||
887 | rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail; | ||
888 | rdp->qlen += rsp->orphan_qlen; | ||
889 | rsp->orphan_cbs_list = NULL; | ||
890 | rsp->orphan_cbs_tail = &rsp->orphan_cbs_list; | ||
891 | rsp->orphan_qlen = 0; | ||
892 | spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
893 | } | ||
894 | |||
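Both helpers lean on the usual head plus indirect-tail representation of the callback lists: because ->orphan_cbs_tail always points at the terminating NULL slot, an entire per-CPU list can be appended and the donor emptied with a handful of pointer assignments and no list walk. A small self-contained sketch of that splice; the types and names are illustrative only:

#include <stddef.h>

struct cb {
        struct cb *next;
};

struct cb_list {
        struct cb  *head;
        struct cb **tail;       /* points at head, or at the last element's ->next */
};

static void cb_list_init(struct cb_list *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

static void cb_list_splice(struct cb_list *dst, struct cb_list *src)
{
        if (src->head == NULL)
                return;                 /* nothing to move */
        *dst->tail = src->head;         /* hook the donor list onto the tail */
        dst->tail  = src->tail;         /* destination now ends where the donor ended */
        cb_list_init(src);              /* donor is empty again */
}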
895 | /* | ||
858 | * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy | 896 | * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy |
859 | * and move all callbacks from the outgoing CPU to the current one. | 897 | * and move all callbacks from the outgoing CPU to the current one. |
860 | */ | 898 | */ |
861 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | 899 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) |
862 | { | 900 | { |
863 | int i; | ||
864 | unsigned long flags; | 901 | unsigned long flags; |
865 | long lastcomp; | 902 | long lastcomp; |
866 | unsigned long mask; | 903 | unsigned long mask; |
867 | struct rcu_data *rdp = rsp->rda[cpu]; | 904 | struct rcu_data *rdp = rsp->rda[cpu]; |
868 | struct rcu_data *rdp_me; | ||
869 | struct rcu_node *rnp; | 905 | struct rcu_node *rnp; |
870 | 906 | ||
871 | /* Exclude any attempts to start a new grace period. */ | 907 | /* Exclude any attempts to start a new grace period. */ |
872 | spin_lock_irqsave(&rsp->onofflock, flags); | 908 | spin_lock_irqsave(&rsp->onofflock, flags); |
873 | 909 | ||
874 | /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ | 910 | /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ |
875 | rnp = rdp->mynode; | 911 | rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */ |
876 | mask = rdp->grpmask; /* rnp->grplo is constant. */ | 912 | mask = rdp->grpmask; /* rnp->grplo is constant. */ |
877 | do { | 913 | do { |
878 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 914 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
@@ -881,42 +917,29 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
881 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 917 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
882 | break; | 918 | break; |
883 | } | 919 | } |
884 | rcu_preempt_offline_tasks(rsp, rnp); | 920 | |
921 | /* | ||
922 | * If there was a task blocking the current grace period, | ||
923 | * and if all CPUs have checked in, we need to propagate | ||
924 | * the quiescent state up the rcu_node hierarchy. But that | ||
925 | * is inconvenient at the moment due to deadlock issues if | ||
926 | * this should end the current grace period. So set the | ||
927 | * offlined CPU's bit in ->qsmask in order to force the | ||
928 | * next force_quiescent_state() invocation to clean up this | ||
929 | * mess in a deadlock-free manner. | ||
930 | */ | ||
931 | if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask) | ||
932 | rnp->qsmask |= mask; | ||
933 | |||
885 | mask = rnp->grpmask; | 934 | mask = rnp->grpmask; |
886 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 935 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
887 | rnp = rnp->parent; | 936 | rnp = rnp->parent; |
888 | } while (rnp != NULL); | 937 | } while (rnp != NULL); |
889 | lastcomp = rsp->completed; | 938 | lastcomp = rsp->completed; |
890 | 939 | ||
891 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | 940 | spin_unlock_irqrestore(&rsp->onofflock, flags); |
892 | |||
893 | /* Being offline is a quiescent state, so go record it. */ | ||
894 | cpu_quiet(cpu, rsp, rdp, lastcomp); | ||
895 | 941 | ||
896 | /* | 942 | rcu_adopt_orphan_cbs(rsp); |
897 | * Move callbacks from the outgoing CPU to the running CPU. | ||
898 | * Note that the outgoing CPU is now quiscent, so it is now | ||
899 | * (uncharacteristically) safe to access its rcu_data structure. | ||
900 | * Note also that we must carefully retain the order of the | ||
901 | * outgoing CPU's callbacks in order for rcu_barrier() to work | ||
902 | * correctly. Finally, note that we start all the callbacks | ||
903 | * afresh, even those that have passed through a grace period | ||
904 | * and are therefore ready to invoke. The theory is that hotplug | ||
905 | * events are rare, and that if they are frequent enough to | ||
906 | * indefinitely delay callbacks, you have far worse things to | ||
907 | * be worrying about. | ||
908 | */ | ||
909 | rdp_me = rsp->rda[smp_processor_id()]; | ||
910 | if (rdp->nxtlist != NULL) { | ||
911 | *rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist; | ||
912 | rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
913 | rdp->nxtlist = NULL; | ||
914 | for (i = 0; i < RCU_NEXT_SIZE; i++) | ||
915 | rdp->nxttail[i] = &rdp->nxtlist; | ||
916 | rdp_me->qlen += rdp->qlen; | ||
917 | rdp->qlen = 0; | ||
918 | } | ||
919 | local_irq_restore(flags); | ||
920 | } | 943 | } |
921 | 944 | ||
922 | /* | 945 | /* |
@@ -934,6 +957,14 @@ static void rcu_offline_cpu(int cpu) | |||
934 | 957 | ||
935 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | 958 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ |
936 | 959 | ||
960 | static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) | ||
961 | { | ||
962 | } | ||
963 | |||
964 | static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | ||
965 | { | ||
966 | } | ||
967 | |||
937 | static void rcu_offline_cpu(int cpu) | 968 | static void rcu_offline_cpu(int cpu) |
938 | { | 969 | { |
939 | } | 970 | } |
@@ -944,7 +975,7 @@ static void rcu_offline_cpu(int cpu) | |||
944 | * Invoke any RCU callbacks that have made it to the end of their grace | 975 | * Invoke any RCU callbacks that have made it to the end of their grace |
945 | * period. Throttle as specified by rdp->blimit. | 976 | * period. Throttle as specified by rdp->blimit. |
946 | */ | 977 | */ |
947 | static void rcu_do_batch(struct rcu_data *rdp) | 978 | static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) |
948 | { | 979 | { |
949 | unsigned long flags; | 980 | unsigned long flags; |
950 | struct rcu_head *next, *list, **tail; | 981 | struct rcu_head *next, *list, **tail; |
@@ -997,6 +1028,13 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
997 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) | 1028 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) |
998 | rdp->blimit = blimit; | 1029 | rdp->blimit = blimit; |
999 | 1030 | ||
1031 | /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ | ||
1032 | if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) { | ||
1033 | rdp->qlen_last_fqs_check = 0; | ||
1034 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1035 | } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) | ||
1036 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1037 | |||
1000 | local_irq_restore(flags); | 1038 | local_irq_restore(flags); |
1001 | 1039 | ||
1002 | /* Re-raise the RCU softirq if there are callbacks remaining. */ | 1040 | /* Re-raise the RCU softirq if there are callbacks remaining. */ |
@@ -1066,33 +1104,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp, | |||
1066 | int cpu; | 1104 | int cpu; |
1067 | unsigned long flags; | 1105 | unsigned long flags; |
1068 | unsigned long mask; | 1106 | unsigned long mask; |
1069 | struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; | 1107 | struct rcu_node *rnp; |
1070 | struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES]; | ||
1071 | 1108 | ||
1072 | for (; rnp_cur < rnp_end; rnp_cur++) { | 1109 | rcu_for_each_leaf_node(rsp, rnp) { |
1073 | mask = 0; | 1110 | mask = 0; |
1074 | spin_lock_irqsave(&rnp_cur->lock, flags); | 1111 | spin_lock_irqsave(&rnp->lock, flags); |
1075 | if (rsp->completed != lastcomp) { | 1112 | if (rsp->completed != lastcomp) { |
1076 | spin_unlock_irqrestore(&rnp_cur->lock, flags); | 1113 | spin_unlock_irqrestore(&rnp->lock, flags); |
1077 | return 1; | 1114 | return 1; |
1078 | } | 1115 | } |
1079 | if (rnp_cur->qsmask == 0) { | 1116 | if (rnp->qsmask == 0) { |
1080 | spin_unlock_irqrestore(&rnp_cur->lock, flags); | 1117 | spin_unlock_irqrestore(&rnp->lock, flags); |
1081 | continue; | 1118 | continue; |
1082 | } | 1119 | } |
1083 | cpu = rnp_cur->grplo; | 1120 | cpu = rnp->grplo; |
1084 | bit = 1; | 1121 | bit = 1; |
1085 | for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) { | 1122 | for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { |
1086 | if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu])) | 1123 | if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) |
1087 | mask |= bit; | 1124 | mask |= bit; |
1088 | } | 1125 | } |
1089 | if (mask != 0 && rsp->completed == lastcomp) { | 1126 | if (mask != 0 && rsp->completed == lastcomp) { |
1090 | 1127 | ||
1091 | /* cpu_quiet_msk() releases rnp_cur->lock. */ | 1128 | /* cpu_quiet_msk() releases rnp->lock. */ |
1092 | cpu_quiet_msk(mask, rsp, rnp_cur, flags); | 1129 | cpu_quiet_msk(mask, rsp, rnp, flags); |
1093 | continue; | 1130 | continue; |
1094 | } | 1131 | } |
1095 | spin_unlock_irqrestore(&rnp_cur->lock, flags); | 1132 | spin_unlock_irqrestore(&rnp->lock, flags); |
1096 | } | 1133 | } |
1097 | return 0; | 1134 | return 0; |
1098 | } | 1135 | } |
@@ -1108,7 +1145,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1108 | struct rcu_node *rnp = rcu_get_root(rsp); | 1145 | struct rcu_node *rnp = rcu_get_root(rsp); |
1109 | u8 signaled; | 1146 | u8 signaled; |
1110 | 1147 | ||
1111 | if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) | 1148 | if (!rcu_gp_in_progress(rsp)) |
1112 | return; /* No grace period in progress, nothing to force. */ | 1149 | return; /* No grace period in progress, nothing to force. */ |
1113 | if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { | 1150 | if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { |
1114 | rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ | 1151 | rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ |
@@ -1129,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1129 | } | 1166 | } |
1130 | spin_unlock(&rnp->lock); | 1167 | spin_unlock(&rnp->lock); |
1131 | switch (signaled) { | 1168 | switch (signaled) { |
1169 | case RCU_GP_IDLE: | ||
1132 | case RCU_GP_INIT: | 1170 | case RCU_GP_INIT: |
1133 | 1171 | ||
1134 | break; /* grace period still initializing, ignore. */ | 1172 | break; /* grace period idle or initializing, ignore. */ |
1135 | 1173 | ||
1136 | case RCU_SAVE_DYNTICK: | 1174 | case RCU_SAVE_DYNTICK: |
1137 | 1175 | ||
@@ -1145,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1145 | 1183 | ||
1146 | /* Update state, record completion counter. */ | 1184 | /* Update state, record completion counter. */ |
1147 | spin_lock(&rnp->lock); | 1185 | spin_lock(&rnp->lock); |
1148 | if (lastcomp == rsp->completed) { | 1186 | if (lastcomp == rsp->completed && |
1187 | rsp->signaled == RCU_SAVE_DYNTICK) { | ||
1149 | rsp->signaled = RCU_FORCE_QS; | 1188 | rsp->signaled = RCU_FORCE_QS; |
1150 | dyntick_record_completed(rsp, lastcomp); | 1189 | dyntick_record_completed(rsp, lastcomp); |
1151 | } | 1190 | } |
@@ -1211,7 +1250,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1211 | } | 1250 | } |
1212 | 1251 | ||
1213 | /* If there are callbacks ready, invoke them. */ | 1252 | /* If there are callbacks ready, invoke them. */ |
1214 | rcu_do_batch(rdp); | 1253 | rcu_do_batch(rsp, rdp); |
1215 | } | 1254 | } |
1216 | 1255 | ||
1217 | /* | 1256 | /* |
@@ -1267,7 +1306,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1267 | rdp->nxttail[RCU_NEXT_TAIL] = &head->next; | 1306 | rdp->nxttail[RCU_NEXT_TAIL] = &head->next; |
1268 | 1307 | ||
1269 | /* Start a new grace period if one not already started. */ | 1308 | /* Start a new grace period if one not already started. */ |
1270 | if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) { | 1309 | if (!rcu_gp_in_progress(rsp)) { |
1271 | unsigned long nestflag; | 1310 | unsigned long nestflag; |
1272 | struct rcu_node *rnp_root = rcu_get_root(rsp); | 1311 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
1273 | 1312 | ||
@@ -1275,10 +1314,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1275 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ | 1314 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ |
1276 | } | 1315 | } |
1277 | 1316 | ||
1278 | /* Force the grace period if too many callbacks or too long waiting. */ | 1317 | /* |
1279 | if (unlikely(++rdp->qlen > qhimark)) { | 1318 | * Force the grace period if too many callbacks or too long waiting. |
1319 | * Enforce hysteresis, and don't invoke force_quiescent_state() | ||
1320 | * if some other CPU has recently done so. Also, don't bother | ||
1321 | * invoking force_quiescent_state() if the newly enqueued callback | ||
1322 | * is the only one waiting for a grace period to complete. | ||
1323 | */ | ||
1324 | if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { | ||
1280 | rdp->blimit = LONG_MAX; | 1325 | rdp->blimit = LONG_MAX; |
1281 | force_quiescent_state(rsp, 0); | 1326 | if (rsp->n_force_qs == rdp->n_force_qs_snap && |
1327 | *rdp->nxttail[RCU_DONE_TAIL] != head) | ||
1328 | force_quiescent_state(rsp, 0); | ||
1329 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1330 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1282 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) | 1331 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) |
1283 | force_quiescent_state(rsp, 1); | 1332 | force_quiescent_state(rsp, 1); |
1284 | local_irq_restore(flags); | 1333 | local_irq_restore(flags); |
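The hysteresis described in the comment above boils down to: remember how long the queue was the last time forcing was considered, escalate only once it has grown by another qhimark, and even then defer to any force_quiescent_state() some other CPU has issued since our snapshot. A compact standalone rendering of that decision; the field names are borrowed for readability, but this function is a sketch, not kernel code:

#include <stdbool.h>
#include <limits.h>

struct cb_counters {
        long qlen;                      /* callbacks currently queued here */
        long qlen_last_fqs_check;       /* qlen when forcing was last considered */
        unsigned long n_force_qs_snap;  /* global force count at that time */
        long blimit;                    /* per-batch invocation limit */
};

static bool should_force_qs(struct cb_counters *c,
                            unsigned long global_n_force_qs,
                            long qhimark, bool only_cb_is_ours)
{
        bool force = false;

        if (++c->qlen > c->qlen_last_fqs_check + qhimark) {
                c->blimit = LONG_MAX;   /* drain aggressively on the next batch */
                /* skip forcing if someone else forced since our snapshot,
                 * or if the only pending callback is the one just queued */
                if (c->n_force_qs_snap == global_n_force_qs && !only_cb_is_ours)
                        force = true;
                c->n_force_qs_snap = global_n_force_qs;
                c->qlen_last_fqs_check = c->qlen;
        }
        return force;
}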
@@ -1347,7 +1396,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1347 | } | 1396 | } |
1348 | 1397 | ||
1349 | /* Has an RCU GP gone long enough to send resched IPIs &c? */ | 1398 | /* Has an RCU GP gone long enough to send resched IPIs &c? */ |
1350 | if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && | 1399 | if (rcu_gp_in_progress(rsp) && |
1351 | ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) { | 1400 | ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) { |
1352 | rdp->n_rp_need_fqs++; | 1401 | rdp->n_rp_need_fqs++; |
1353 | return 1; | 1402 | return 1; |
@@ -1384,6 +1433,82 @@ int rcu_needs_cpu(int cpu) | |||
1384 | rcu_preempt_needs_cpu(cpu); | 1433 | rcu_preempt_needs_cpu(cpu); |
1385 | } | 1434 | } |
1386 | 1435 | ||
1436 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | ||
1437 | static atomic_t rcu_barrier_cpu_count; | ||
1438 | static DEFINE_MUTEX(rcu_barrier_mutex); | ||
1439 | static struct completion rcu_barrier_completion; | ||
1440 | |||
1441 | static void rcu_barrier_callback(struct rcu_head *notused) | ||
1442 | { | ||
1443 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | ||
1444 | complete(&rcu_barrier_completion); | ||
1445 | } | ||
1446 | |||
1447 | /* | ||
1448 | * Called with preemption disabled, and from cross-cpu IRQ context. | ||
1449 | */ | ||
1450 | static void rcu_barrier_func(void *type) | ||
1451 | { | ||
1452 | int cpu = smp_processor_id(); | ||
1453 | struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); | ||
1454 | void (*call_rcu_func)(struct rcu_head *head, | ||
1455 | void (*func)(struct rcu_head *head)); | ||
1456 | |||
1457 | atomic_inc(&rcu_barrier_cpu_count); | ||
1458 | call_rcu_func = type; | ||
1459 | call_rcu_func(head, rcu_barrier_callback); | ||
1460 | } | ||
1461 | |||
1462 | /* | ||
1463 | * Orchestrate the specified type of RCU barrier, waiting for all | ||
1464 | * RCU callbacks of the specified type to complete. | ||
1465 | */ | ||
1466 | static void _rcu_barrier(struct rcu_state *rsp, | ||
1467 | void (*call_rcu_func)(struct rcu_head *head, | ||
1468 | void (*func)(struct rcu_head *head))) | ||
1469 | { | ||
1470 | BUG_ON(in_interrupt()); | ||
1471 | /* Take mutex to serialize concurrent rcu_barrier() requests. */ | ||
1472 | mutex_lock(&rcu_barrier_mutex); | ||
1473 | init_completion(&rcu_barrier_completion); | ||
1474 | /* | ||
1475 | * Initialize rcu_barrier_cpu_count to 1, then invoke | ||
1476 | * rcu_barrier_func() on each CPU, so that each CPU also has | ||
1477 | * incremented rcu_barrier_cpu_count. Only then is it safe to | ||
1478 | * decrement rcu_barrier_cpu_count -- otherwise the first CPU | ||
1479 | * might complete its grace period before all of the other CPUs | ||
1480 | * did their increment, causing this function to return too | ||
1481 | * early. | ||
1482 | */ | ||
1483 | atomic_set(&rcu_barrier_cpu_count, 1); | ||
1484 | preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */ | ||
1485 | rcu_adopt_orphan_cbs(rsp); | ||
1486 | on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1); | ||
1487 | preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */ | ||
1488 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | ||
1489 | complete(&rcu_barrier_completion); | ||
1490 | wait_for_completion(&rcu_barrier_completion); | ||
1491 | mutex_unlock(&rcu_barrier_mutex); | ||
1492 | } | ||
1493 | |||
1494 | /** | ||
1495 | * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. | ||
1496 | */ | ||
1497 | void rcu_barrier_bh(void) | ||
1498 | { | ||
1499 | _rcu_barrier(&rcu_bh_state, call_rcu_bh); | ||
1500 | } | ||
1501 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | ||
1502 | |||
1503 | /** | ||
1504 | * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. | ||
1505 | */ | ||
1506 | void rcu_barrier_sched(void) | ||
1507 | { | ||
1508 | _rcu_barrier(&rcu_sched_state, call_rcu_sched); | ||
1509 | } | ||
1510 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | ||
1511 | |||
1387 | /* | 1512 | /* |
1388 | * Do boot-time initialization of a CPU's per-CPU RCU data. | 1513 | * Do boot-time initialization of a CPU's per-CPU RCU data. |
1389 | */ | 1514 | */ |
@@ -1434,6 +1559,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | |||
1434 | rdp->beenonline = 1; /* We have now been online. */ | 1559 | rdp->beenonline = 1; /* We have now been online. */ |
1435 | rdp->preemptable = preemptable; | 1560 | rdp->preemptable = preemptable; |
1436 | rdp->passed_quiesc_completed = lastcomp - 1; | 1561 | rdp->passed_quiesc_completed = lastcomp - 1; |
1562 | rdp->qlen_last_fqs_check = 0; | ||
1563 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1437 | rdp->blimit = blimit; | 1564 | rdp->blimit = blimit; |
1438 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 1565 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
1439 | 1566 | ||
@@ -1457,20 +1584,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | |||
1457 | rnp = rnp->parent; | 1584 | rnp = rnp->parent; |
1458 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); | 1585 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); |
1459 | 1586 | ||
1460 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | 1587 | spin_unlock_irqrestore(&rsp->onofflock, flags); |
1461 | |||
1462 | /* | ||
1463 | * A new grace period might start here. If so, we will be part of | ||
1464 | * it, and its gpnum will be greater than ours, so we will | ||
1465 | * participate. It is also possible for the gpnum to have been | ||
1466 | * incremented before this function was called, and the bitmasks | ||
1467 | * to not be filled out until now, in which case we will also | ||
1468 | * participate due to our gpnum being behind. | ||
1469 | */ | ||
1470 | |||
1471 | /* Since it is coming online, the CPU is in a quiescent state. */ | ||
1472 | cpu_quiet(cpu, rsp, rdp, lastcomp); | ||
1473 | local_irq_restore(flags); | ||
1474 | } | 1588 | } |
1475 | 1589 | ||
1476 | static void __cpuinit rcu_online_cpu(int cpu) | 1590 | static void __cpuinit rcu_online_cpu(int cpu) |
@@ -1493,6 +1607,22 @@ int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
1493 | case CPU_UP_PREPARE_FROZEN: | 1607 | case CPU_UP_PREPARE_FROZEN: |
1494 | rcu_online_cpu(cpu); | 1608 | rcu_online_cpu(cpu); |
1495 | break; | 1609 | break; |
1610 | case CPU_DYING: | ||
1611 | case CPU_DYING_FROZEN: | ||
1612 | /* | ||
1613 | * preempt_disable() in _rcu_barrier() prevents stop_machine(), | ||
1614 | * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);" | ||
1615 | * returns, all online cpus have queued rcu_barrier_func(). | ||
1616 | * The dying CPU clears its cpu_online_mask bit and | ||
1617 | * moves all of its RCU callbacks to ->orphan_cbs_list | ||
1618 | * in the context of stop_machine(), so subsequent calls | ||
1619 | * to _rcu_barrier() will adopt these callbacks and only | ||
1620 | * then queue rcu_barrier_func() on all remaining CPUs. | ||
1621 | */ | ||
1622 | rcu_send_cbs_to_orphanage(&rcu_bh_state); | ||
1623 | rcu_send_cbs_to_orphanage(&rcu_sched_state); | ||
1624 | rcu_preempt_send_cbs_to_orphanage(); | ||
1625 | break; | ||
1496 | case CPU_DEAD: | 1626 | case CPU_DEAD: |
1497 | case CPU_DEAD_FROZEN: | 1627 | case CPU_DEAD_FROZEN: |
1498 | case CPU_UP_CANCELED: | 1628 | case CPU_UP_CANCELED: |
@@ -1555,7 +1685,8 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1555 | cpustride *= rsp->levelspread[i]; | 1685 | cpustride *= rsp->levelspread[i]; |
1556 | rnp = rsp->level[i]; | 1686 | rnp = rsp->level[i]; |
1557 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { | 1687 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { |
1558 | spin_lock_init(&rnp->lock); | 1688 | if (rnp != rcu_get_root(rsp)) |
1689 | spin_lock_init(&rnp->lock); | ||
1559 | rnp->gpnum = 0; | 1690 | rnp->gpnum = 0; |
1560 | rnp->qsmask = 0; | 1691 | rnp->qsmask = 0; |
1561 | rnp->qsmaskinit = 0; | 1692 | rnp->qsmaskinit = 0; |
@@ -1578,6 +1709,7 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1578 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); | 1709 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); |
1579 | } | 1710 | } |
1580 | } | 1711 | } |
1712 | spin_lock_init(&rcu_get_root(rsp)->lock); | ||
1581 | } | 1713 | } |
1582 | 1714 | ||
1583 | /* | 1715 | /* |
@@ -1587,6 +1719,10 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1587 | */ | 1719 | */ |
1588 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ | 1720 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ |
1589 | do { \ | 1721 | do { \ |
1722 | int i; \ | ||
1723 | int j; \ | ||
1724 | struct rcu_node *rnp; \ | ||
1725 | \ | ||
1590 | rcu_init_one(rsp); \ | 1726 | rcu_init_one(rsp); \ |
1591 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ | 1727 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ |
1592 | j = 0; \ | 1728 | j = 0; \ |
@@ -1599,31 +1735,8 @@ do { \ | |||
1599 | } \ | 1735 | } \ |
1600 | } while (0) | 1736 | } while (0) |
1601 | 1737 | ||
1602 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
1603 | |||
1604 | void __init __rcu_init_preempt(void) | ||
1605 | { | ||
1606 | int i; /* All used by RCU_INIT_FLAVOR(). */ | ||
1607 | int j; | ||
1608 | struct rcu_node *rnp; | ||
1609 | |||
1610 | RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data); | ||
1611 | } | ||
1612 | |||
1613 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
1614 | |||
1615 | void __init __rcu_init_preempt(void) | ||
1616 | { | ||
1617 | } | ||
1618 | |||
1619 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
1620 | |||
1621 | void __init __rcu_init(void) | 1738 | void __init __rcu_init(void) |
1622 | { | 1739 | { |
1623 | int i; /* All used by RCU_INIT_FLAVOR(). */ | ||
1624 | int j; | ||
1625 | struct rcu_node *rnp; | ||
1626 | |||
1627 | rcu_bootup_announce(); | 1740 | rcu_bootup_announce(); |
1628 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 1741 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
1629 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | 1742 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); |
@@ -1634,6 +1747,4 @@ void __init __rcu_init(void) | |||
1634 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 1747 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
1635 | } | 1748 | } |
1636 | 1749 | ||
1637 | module_param(blimit, int, 0); | 1750 | #include "rcutree_plugin.h" |
1638 | module_param(qhimark, int, 0); | ||
1639 | module_param(qlowmark, int, 0); | ||
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index bf8a6f9f134d..1899023b0962 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -48,14 +48,14 @@ | |||
48 | #elif NR_CPUS <= RCU_FANOUT_SQ | 48 | #elif NR_CPUS <= RCU_FANOUT_SQ |
49 | # define NUM_RCU_LVLS 2 | 49 | # define NUM_RCU_LVLS 2 |
50 | # define NUM_RCU_LVL_0 1 | 50 | # define NUM_RCU_LVL_0 1 |
51 | # define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) | 51 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) |
52 | # define NUM_RCU_LVL_2 (NR_CPUS) | 52 | # define NUM_RCU_LVL_2 (NR_CPUS) |
53 | # define NUM_RCU_LVL_3 0 | 53 | # define NUM_RCU_LVL_3 0 |
54 | #elif NR_CPUS <= RCU_FANOUT_CUBE | 54 | #elif NR_CPUS <= RCU_FANOUT_CUBE |
55 | # define NUM_RCU_LVLS 3 | 55 | # define NUM_RCU_LVLS 3 |
56 | # define NUM_RCU_LVL_0 1 | 56 | # define NUM_RCU_LVL_0 1 |
57 | # define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ) | 57 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) |
58 | # define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT)) | 58 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) |
59 | # define NUM_RCU_LVL_3 NR_CPUS | 59 | # define NUM_RCU_LVL_3 NR_CPUS |
60 | #else | 60 | #else |
61 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" | 61 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" |
@@ -79,15 +79,21 @@ struct rcu_dynticks { | |||
79 | * Definition for node within the RCU grace-period-detection hierarchy. | 79 | * Definition for node within the RCU grace-period-detection hierarchy. |
80 | */ | 80 | */ |
81 | struct rcu_node { | 81 | struct rcu_node { |
82 | spinlock_t lock; | 82 | spinlock_t lock; /* Root rcu_node's lock protects some */ |
83 | /* rcu_state fields as well as following. */ | ||
83 | long gpnum; /* Current grace period for this node. */ | 84 | long gpnum; /* Current grace period for this node. */ |
84 | /* This will either be equal to or one */ | 85 | /* This will either be equal to or one */ |
85 | /* behind the root rcu_node's gpnum. */ | 86 | /* behind the root rcu_node's gpnum. */ |
86 | unsigned long qsmask; /* CPUs or groups that need to switch in */ | 87 | unsigned long qsmask; /* CPUs or groups that need to switch in */ |
87 | /* order for current grace period to proceed.*/ | 88 | /* order for current grace period to proceed.*/ |
89 | /* In leaf rcu_node, each bit corresponds to */ | ||
90 | /* an rcu_data structure, otherwise, each */ | ||
91 | /* bit corresponds to a child rcu_node */ | ||
92 | /* structure. */ | ||
88 | unsigned long qsmaskinit; | 93 | unsigned long qsmaskinit; |
89 | /* Per-GP initialization for qsmask. */ | 94 | /* Per-GP initialization for qsmask. */ |
90 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ | 95 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ |
96 | /* Only one bit will be set in this mask. */ | ||
91 | int grplo; /* lowest-numbered CPU or group here. */ | 97 | int grplo; /* lowest-numbered CPU or group here. */ |
92 | int grphi; /* highest-numbered CPU or group here. */ | 98 | int grphi; /* highest-numbered CPU or group here. */ |
93 | u8 grpnum; /* CPU/group number for next level up. */ | 99 | u8 grpnum; /* CPU/group number for next level up. */ |
@@ -95,8 +101,23 @@ struct rcu_node { | |||
95 | struct rcu_node *parent; | 101 | struct rcu_node *parent; |
96 | struct list_head blocked_tasks[2]; | 102 | struct list_head blocked_tasks[2]; |
97 | /* Tasks blocked in RCU read-side critsect. */ | 103 | /* Tasks blocked in RCU read-side critsect. */ |
104 | /* Grace period number (->gpnum) x blocked */ | ||
105 | /* by tasks on the (x & 0x1) element of the */ | ||
106 | /* blocked_tasks[] array. */ | ||
98 | } ____cacheline_internodealigned_in_smp; | 107 | } ____cacheline_internodealigned_in_smp; |
99 | 108 | ||
109 | /* | ||
110 | * Do a full breadth-first scan of the rcu_node structures for the | ||
111 | * specified rcu_state structure. | ||
112 | */ | ||
113 | #define rcu_for_each_node_breadth_first(rsp, rnp) \ | ||
114 | for ((rnp) = &(rsp)->node[0]; \ | ||
115 | (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) | ||
116 | |||
117 | #define rcu_for_each_leaf_node(rsp, rnp) \ | ||
118 | for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \ | ||
119 | (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) | ||
120 | |||
100 | /* Index values for nxttail array in struct rcu_data. */ | 121 | /* Index values for nxttail array in struct rcu_data. */ |
101 | #define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ | 122 | #define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ |
102 | #define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ | 123 | #define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ |
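The two iterators added above work because the ->node[] array is filled in breadth-first order at init time, so a linear pointer walk visits each level before the next, and the leaves are simply the tail of the array starting at ->level[NUM_RCU_LVLS - 1]. A hedged usage sketch (the loop bodies are hypothetical helpers, not kernel code):

        struct rcu_node *rnp;

        /* Visit every rcu_node, parents before children: */
        rcu_for_each_node_breadth_first(rsp, rnp)
                audit_one_node(rnp);            /* hypothetical helper */

        /* Visit only the leaves, i.e. the nodes that own per-CPU rcu_data: */
        rcu_for_each_leaf_node(rsp, rnp)
                audit_one_leaf(rnp);            /* hypothetical helper */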
@@ -126,23 +147,30 @@ struct rcu_data { | |||
126 | * Any of the partitions might be empty, in which case the | 147 | * Any of the partitions might be empty, in which case the |
127 | * pointer to that partition will be equal to the pointer for | 148 | * pointer to that partition will be equal to the pointer for |
128 | * the following partition. When the list is empty, all of | 149 | * the following partition. When the list is empty, all of |
129 | * the nxttail elements point to nxtlist, which is NULL. | 150 | * the nxttail elements point to the ->nxtlist pointer itself, |
151 | * which in that case is NULL. | ||
130 | * | 152 | * |
131 | * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]): | ||
132 | * Entries that might have arrived after current GP ended | ||
133 | * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]): | ||
134 | * Entries known to have arrived before current GP ended | ||
135 | * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]): | ||
136 | * Entries that batch # <= ->completed - 1: waiting for current GP | ||
137 | * [nxtlist, *nxttail[RCU_DONE_TAIL]): | 153 | * [nxtlist, *nxttail[RCU_DONE_TAIL]): |
138 | * Entries that batch # <= ->completed | 154 | * Entries that batch # <= ->completed |
139 | * The grace period for these entries has completed, and | 155 | * The grace period for these entries has completed, and |
140 | * the other grace-period-completed entries may be moved | 156 | * the other grace-period-completed entries may be moved |
141 | * here temporarily in rcu_process_callbacks(). | 157 | * here temporarily in rcu_process_callbacks(). |
158 | * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]): | ||
159 | * Entries that batch # <= ->completed - 1: waiting for current GP | ||
160 | * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]): | ||
161 | * Entries known to have arrived before current GP ended | ||
162 | * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]): | ||
163 | * Entries that might have arrived after current GP ended | ||
164 | * Note that the value of *nxttail[RCU_NEXT_TAIL] will | ||
165 | * always be NULL, as this is the end of the list. | ||
142 | */ | 166 | */ |
143 | struct rcu_head *nxtlist; | 167 | struct rcu_head *nxtlist; |
144 | struct rcu_head **nxttail[RCU_NEXT_SIZE]; | 168 | struct rcu_head **nxttail[RCU_NEXT_SIZE]; |
145 | long qlen; /* # of queued callbacks */ | 169 | long qlen; /* # of queued callbacks */ |
170 | long qlen_last_fqs_check; | ||
171 | /* qlen at last check for QS forcing */ | ||
172 | unsigned long n_force_qs_snap; | ||
173 | /* did other CPU force QS recently? */ | ||
146 | long blimit; /* Upper limit on a processed batch */ | 174 | long blimit; /* Upper limit on a processed batch */ |
147 | 175 | ||
148 | #ifdef CONFIG_NO_HZ | 176 | #ifdef CONFIG_NO_HZ |
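The reordered block comment above now documents the four callback-list segments from oldest ("done") to newest ("next"), matching the order in which callbacks age through the list. A minimal sketch of the tail-pointer mechanics the comment describes, in the style of __call_rcu() (illustrative only; the helper name is hypothetical):

        /*
         * Append a new callback to the NEXT segment: it goes where the NEXT
         * tail points, and the tail then advances to the new element's
         * ->next, preserving the invariant *rdp->nxttail[RCU_NEXT_TAIL] == NULL.
         */
        static void cblist_enqueue(struct rcu_data *rdp, struct rcu_head *head)
        {
                head->next = NULL;
                *rdp->nxttail[RCU_NEXT_TAIL] = head;
                rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
        }

An empty segment is simply one whose tail pointer equals the next segment's tail pointer, which is why the comment notes that all four tails point at the ->nxtlist pointer itself when the list is empty.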
@@ -173,9 +201,10 @@ struct rcu_data { | |||
173 | }; | 201 | }; |
174 | 202 | ||
175 | /* Values for signaled field in struct rcu_state. */ | 203 | /* Values for signaled field in struct rcu_state. */ |
176 | #define RCU_GP_INIT 0 /* Grace period being initialized. */ | 204 | #define RCU_GP_IDLE 0 /* No grace period in progress. */ |
177 | #define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */ | 205 | #define RCU_GP_INIT 1 /* Grace period being initialized. */ |
178 | #define RCU_FORCE_QS 2 /* Need to force quiescent state. */ | 206 | #define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */ |
207 | #define RCU_FORCE_QS 3 /* Need to force quiescent state. */ | ||
179 | #ifdef CONFIG_NO_HZ | 208 | #ifdef CONFIG_NO_HZ |
180 | #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK | 209 | #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK |
181 | #else /* #ifdef CONFIG_NO_HZ */ | 210 | #else /* #ifdef CONFIG_NO_HZ */ |
@@ -216,8 +245,19 @@ struct rcu_state { | |||
216 | /* Force QS state. */ | 245 | /* Force QS state. */ |
217 | long gpnum; /* Current gp number. */ | 246 | long gpnum; /* Current gp number. */ |
218 | long completed; /* # of last completed gp. */ | 247 | long completed; /* # of last completed gp. */ |
248 | |||
249 | /* End of fields guarded by root rcu_node's lock. */ | ||
250 | |||
219 | spinlock_t onofflock; /* exclude on/offline and */ | 251 | spinlock_t onofflock; /* exclude on/offline and */ |
220 | /* starting new GP. */ | 252 | /* starting new GP. Also */ |
253 | /* protects the following */ | ||
254 | /* orphan_cbs fields. */ | ||
255 | struct rcu_head *orphan_cbs_list; /* list of rcu_head structs */ | ||
256 | /* orphaned by all CPUs in */ | ||
257 | /* a given leaf rcu_node */ | ||
258 | /* going offline. */ | ||
259 | struct rcu_head **orphan_cbs_tail; /* And tail pointer. */ | ||
260 | long orphan_qlen; /* Number of orphaned cbs. */ | ||
221 | spinlock_t fqslock; /* Only one task forcing */ | 261 | spinlock_t fqslock; /* Only one task forcing */ |
222 | /* quiescent states. */ | 262 | /* quiescent states. */ |
223 | unsigned long jiffies_force_qs; /* Time at which to invoke */ | 263 | unsigned long jiffies_force_qs; /* Time at which to invoke */ |
@@ -255,5 +295,30 @@ extern struct rcu_state rcu_preempt_state; | |||
255 | DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); | 295 | DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); |
256 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 296 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
257 | 297 | ||
258 | #endif /* #ifdef RCU_TREE_NONCORE */ | 298 | #else /* #ifdef RCU_TREE_NONCORE */ |
299 | |||
300 | /* Forward declarations for rcutree_plugin.h */ | ||
301 | static inline void rcu_bootup_announce(void); | ||
302 | long rcu_batches_completed(void); | ||
303 | static void rcu_preempt_note_context_switch(int cpu); | ||
304 | static int rcu_preempted_readers(struct rcu_node *rnp); | ||
305 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
306 | static void rcu_print_task_stall(struct rcu_node *rnp); | ||
307 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
308 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); | ||
309 | #ifdef CONFIG_HOTPLUG_CPU | ||
310 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | ||
311 | struct rcu_node *rnp, | ||
312 | struct rcu_data *rdp); | ||
313 | static void rcu_preempt_offline_cpu(int cpu); | ||
314 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
315 | static void rcu_preempt_check_callbacks(int cpu); | ||
316 | static void rcu_preempt_process_callbacks(void); | ||
317 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); | ||
318 | static int rcu_preempt_pending(int cpu); | ||
319 | static int rcu_preempt_needs_cpu(int cpu); | ||
320 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu); | ||
321 | static void rcu_preempt_send_cbs_to_orphanage(void); | ||
322 | static void __init __rcu_init_preempt(void); | ||
259 | 323 | ||
324 | #endif /* #else #ifdef RCU_TREE_NONCORE */ | ||
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 47789369ea59..ef2a58c2b9d5 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -64,22 +64,31 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); | |||
64 | * not in a quiescent state. There might be any number of tasks blocked | 64 | * not in a quiescent state. There might be any number of tasks blocked |
65 | * while in an RCU read-side critical section. | 65 | * while in an RCU read-side critical section. |
66 | */ | 66 | */ |
67 | static void rcu_preempt_qs_record(int cpu) | 67 | static void rcu_preempt_qs(int cpu) |
68 | { | 68 | { |
69 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | 69 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); |
70 | rdp->passed_quiesc = 1; | ||
71 | rdp->passed_quiesc_completed = rdp->completed; | 70 | rdp->passed_quiesc_completed = rdp->completed; |
71 | barrier(); | ||
72 | rdp->passed_quiesc = 1; | ||
72 | } | 73 | } |
73 | 74 | ||
74 | /* | 75 | /* |
75 | * We have entered the scheduler or are between softirqs in ksoftirqd. | 76 | * We have entered the scheduler, and the current task might soon be |
76 | * If we are in an RCU read-side critical section, we need to reflect | 77 | * context-switched away from. If this task is in an RCU read-side |
77 | * that in the state of the rcu_node structure corresponding to this CPU. | 78 | * critical section, we will no longer be able to rely on the CPU to |
78 | * Caller must disable hardirqs. | 79 | * record that fact, so we enqueue the task on the appropriate entry |
80 | * of the blocked_tasks[] array. The task will dequeue itself when | ||
81 | * it exits the outermost enclosing RCU read-side critical section. | ||
82 | * Therefore, the current grace period cannot be permitted to complete | ||
83 | * until the blocked_tasks[] entry indexed by the low-order bit of | ||
84 | * rnp->gpnum empties. | ||
85 | * | ||
86 | * Caller must disable preemption. | ||
79 | */ | 87 | */ |
80 | static void rcu_preempt_qs(int cpu) | 88 | static void rcu_preempt_note_context_switch(int cpu) |
81 | { | 89 | { |
82 | struct task_struct *t = current; | 90 | struct task_struct *t = current; |
91 | unsigned long flags; | ||
83 | int phase; | 92 | int phase; |
84 | struct rcu_data *rdp; | 93 | struct rcu_data *rdp; |
85 | struct rcu_node *rnp; | 94 | struct rcu_node *rnp; |
@@ -90,7 +99,7 @@ static void rcu_preempt_qs(int cpu) | |||
90 | /* Possibly blocking in an RCU read-side critical section. */ | 99 | /* Possibly blocking in an RCU read-side critical section. */ |
91 | rdp = rcu_preempt_state.rda[cpu]; | 100 | rdp = rcu_preempt_state.rda[cpu]; |
92 | rnp = rdp->mynode; | 101 | rnp = rdp->mynode; |
93 | spin_lock(&rnp->lock); | 102 | spin_lock_irqsave(&rnp->lock, flags); |
94 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; | 103 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; |
95 | t->rcu_blocked_node = rnp; | 104 | t->rcu_blocked_node = rnp; |
96 | 105 | ||
@@ -103,11 +112,15 @@ static void rcu_preempt_qs(int cpu) | |||
103 | * state for the current grace period), then as long | 112 | * state for the current grace period), then as long |
104 | * as that task remains queued, the current grace period | 113 | * as that task remains queued, the current grace period |
105 | * cannot end. | 114 | * cannot end. |
115 | * | ||
116 | * But first, note that the current CPU must still be | ||
117 | * on line! | ||
106 | */ | 118 | */ |
107 | phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1); | 119 | WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0); |
120 | WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); | ||
121 | phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1; | ||
108 | list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]); | 122 | list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]); |
109 | smp_mb(); /* Ensure later ctxt swtch seen after above. */ | 123 | spin_unlock_irqrestore(&rnp->lock, flags); |
110 | spin_unlock(&rnp->lock); | ||
111 | } | 124 | } |
112 | 125 | ||
113 | /* | 126 | /* |
@@ -119,9 +132,10 @@ static void rcu_preempt_qs(int cpu) | |||
119 | * grace period, then the fact that the task has been enqueued | 132 | * grace period, then the fact that the task has been enqueued |
120 | * means that we continue to block the current grace period. | 133 | * means that we continue to block the current grace period. |
121 | */ | 134 | */ |
122 | rcu_preempt_qs_record(cpu); | 135 | rcu_preempt_qs(cpu); |
123 | t->rcu_read_unlock_special &= ~(RCU_READ_UNLOCK_NEED_QS | | 136 | local_irq_save(flags); |
124 | RCU_READ_UNLOCK_GOT_QS); | 137 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; |
138 | local_irq_restore(flags); | ||
125 | } | 139 | } |
126 | 140 | ||
127 | /* | 141 | /* |
@@ -136,6 +150,16 @@ void __rcu_read_lock(void) | |||
136 | } | 150 | } |
137 | EXPORT_SYMBOL_GPL(__rcu_read_lock); | 151 | EXPORT_SYMBOL_GPL(__rcu_read_lock); |
138 | 152 | ||
153 | /* | ||
154 | * Check for preempted RCU readers blocking the current grace period | ||
155 | * for the specified rcu_node structure. If the caller needs a reliable | ||
156 | * answer, it must hold the rcu_node's ->lock. | ||
157 | */ | ||
158 | static int rcu_preempted_readers(struct rcu_node *rnp) | ||
159 | { | ||
160 | return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); | ||
161 | } | ||
162 | |||
139 | static void rcu_read_unlock_special(struct task_struct *t) | 163 | static void rcu_read_unlock_special(struct task_struct *t) |
140 | { | 164 | { |
141 | int empty; | 165 | int empty; |
@@ -157,7 +181,7 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
157 | special = t->rcu_read_unlock_special; | 181 | special = t->rcu_read_unlock_special; |
158 | if (special & RCU_READ_UNLOCK_NEED_QS) { | 182 | if (special & RCU_READ_UNLOCK_NEED_QS) { |
159 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | 183 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; |
160 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_GOT_QS; | 184 | rcu_preempt_qs(smp_processor_id()); |
161 | } | 185 | } |
162 | 186 | ||
163 | /* Hardware IRQ handlers cannot block. */ | 187 | /* Hardware IRQ handlers cannot block. */ |
@@ -177,12 +201,12 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
177 | */ | 201 | */ |
178 | for (;;) { | 202 | for (;;) { |
179 | rnp = t->rcu_blocked_node; | 203 | rnp = t->rcu_blocked_node; |
180 | spin_lock(&rnp->lock); | 204 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
181 | if (rnp == t->rcu_blocked_node) | 205 | if (rnp == t->rcu_blocked_node) |
182 | break; | 206 | break; |
183 | spin_unlock(&rnp->lock); | 207 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
184 | } | 208 | } |
185 | empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); | 209 | empty = !rcu_preempted_readers(rnp); |
186 | list_del_init(&t->rcu_node_entry); | 210 | list_del_init(&t->rcu_node_entry); |
187 | t->rcu_blocked_node = NULL; | 211 | t->rcu_blocked_node = NULL; |
188 | 212 | ||
@@ -193,10 +217,9 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
193 | * drop rnp->lock and restore irq. | 217 | * drop rnp->lock and restore irq. |
194 | */ | 218 | */ |
195 | if (!empty && rnp->qsmask == 0 && | 219 | if (!empty && rnp->qsmask == 0 && |
196 | list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) { | 220 | !rcu_preempted_readers(rnp)) { |
197 | t->rcu_read_unlock_special &= | 221 | struct rcu_node *rnp_p; |
198 | ~(RCU_READ_UNLOCK_NEED_QS | | 222 | |
199 | RCU_READ_UNLOCK_GOT_QS); | ||
200 | if (rnp->parent == NULL) { | 223 | if (rnp->parent == NULL) { |
201 | /* Only one rcu_node in the tree. */ | 224 | /* Only one rcu_node in the tree. */ |
202 | cpu_quiet_msk_finish(&rcu_preempt_state, flags); | 225 | cpu_quiet_msk_finish(&rcu_preempt_state, flags); |
@@ -205,9 +228,10 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
205 | /* Report up the rest of the hierarchy. */ | 228 | /* Report up the rest of the hierarchy. */ |
206 | mask = rnp->grpmask; | 229 | mask = rnp->grpmask; |
207 | spin_unlock_irqrestore(&rnp->lock, flags); | 230 | spin_unlock_irqrestore(&rnp->lock, flags); |
208 | rnp = rnp->parent; | 231 | rnp_p = rnp->parent; |
209 | spin_lock_irqsave(&rnp->lock, flags); | 232 | spin_lock_irqsave(&rnp_p->lock, flags); |
210 | cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags); | 233 | WARN_ON_ONCE(rnp->qsmask); |
234 | cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags); | ||
211 | return; | 235 | return; |
212 | } | 236 | } |
213 | spin_unlock(&rnp->lock); | 237 | spin_unlock(&rnp->lock); |
@@ -243,12 +267,12 @@ static void rcu_print_task_stall(struct rcu_node *rnp) | |||
243 | { | 267 | { |
244 | unsigned long flags; | 268 | unsigned long flags; |
245 | struct list_head *lp; | 269 | struct list_head *lp; |
246 | int phase = rnp->gpnum & 0x1; | 270 | int phase; |
247 | struct task_struct *t; | 271 | struct task_struct *t; |
248 | 272 | ||
249 | if (!list_empty(&rnp->blocked_tasks[phase])) { | 273 | if (rcu_preempted_readers(rnp)) { |
250 | spin_lock_irqsave(&rnp->lock, flags); | 274 | spin_lock_irqsave(&rnp->lock, flags); |
251 | phase = rnp->gpnum & 0x1; /* re-read under lock. */ | 275 | phase = rnp->gpnum & 0x1; |
252 | lp = &rnp->blocked_tasks[phase]; | 276 | lp = &rnp->blocked_tasks[phase]; |
253 | list_for_each_entry(t, lp, rcu_node_entry) | 277 | list_for_each_entry(t, lp, rcu_node_entry) |
254 | printk(" P%d", t->pid); | 278 | printk(" P%d", t->pid); |
@@ -259,13 +283,16 @@ static void rcu_print_task_stall(struct rcu_node *rnp) | |||
259 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 283 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
260 | 284 | ||
261 | /* | 285 | /* |
262 | * Check for preempted RCU readers for the specified rcu_node structure. | 286 | * Check that the list of blocked tasks for the newly completed grace |
263 | * If the caller needs a reliable answer, it must hold the rcu_node's | 287 | * period is in fact empty. It is a serious bug to complete a grace |
264 | * >lock. | 288 | * period that still has RCU readers blocked! This function must be |
289 | * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock | ||
290 | * must be held by the caller. | ||
265 | */ | 291 | */ |
266 | static int rcu_preempted_readers(struct rcu_node *rnp) | 292 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) |
267 | { | 293 | { |
268 | return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); | 294 | WARN_ON_ONCE(rcu_preempted_readers(rnp)); |
295 | WARN_ON_ONCE(rnp->qsmask); | ||
269 | } | 296 | } |
270 | 297 | ||
271 | #ifdef CONFIG_HOTPLUG_CPU | 298 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -277,21 +304,29 @@ static int rcu_preempted_readers(struct rcu_node *rnp) | |||
277 | * parent is to remove the need for rcu_read_unlock_special() to | 304 | * parent is to remove the need for rcu_read_unlock_special() to |
278 | * make more than two attempts to acquire the target rcu_node's lock. | 305 | * make more than two attempts to acquire the target rcu_node's lock. |
279 | * | 306 | * |
307 | * Returns 1 if there was previously a task blocking the current grace | ||
308 | * period on the specified rcu_node structure. | ||
309 | * | ||
280 | * The caller must hold rnp->lock with irqs disabled. | 310 | * The caller must hold rnp->lock with irqs disabled. |
281 | */ | 311 | */ |
282 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | 312 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
283 | struct rcu_node *rnp) | 313 | struct rcu_node *rnp, |
314 | struct rcu_data *rdp) | ||
284 | { | 315 | { |
285 | int i; | 316 | int i; |
286 | struct list_head *lp; | 317 | struct list_head *lp; |
287 | struct list_head *lp_root; | 318 | struct list_head *lp_root; |
319 | int retval = rcu_preempted_readers(rnp); | ||
288 | struct rcu_node *rnp_root = rcu_get_root(rsp); | 320 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
289 | struct task_struct *tp; | 321 | struct task_struct *tp; |
290 | 322 | ||
291 | if (rnp == rnp_root) { | 323 | if (rnp == rnp_root) { |
292 | WARN_ONCE(1, "Last CPU thought to be offlined?"); | 324 | WARN_ONCE(1, "Last CPU thought to be offlined?"); |
293 | return; /* Shouldn't happen: at least one CPU online. */ | 325 | return 0; /* Shouldn't happen: at least one CPU online. */ |
294 | } | 326 | } |
327 | WARN_ON_ONCE(rnp != rdp->mynode && | ||
328 | (!list_empty(&rnp->blocked_tasks[0]) || | ||
329 | !list_empty(&rnp->blocked_tasks[1]))); | ||
295 | 330 | ||
296 | /* | 331 | /* |
297 | * Move tasks up to root rcu_node. Rely on the fact that the | 332 | * Move tasks up to root rcu_node. Rely on the fact that the |
@@ -311,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
311 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ | 346 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ |
312 | } | 347 | } |
313 | } | 348 | } |
349 | |||
350 | return retval; | ||
314 | } | 351 | } |
315 | 352 | ||
316 | /* | 353 | /* |
@@ -335,20 +372,12 @@ static void rcu_preempt_check_callbacks(int cpu) | |||
335 | struct task_struct *t = current; | 372 | struct task_struct *t = current; |
336 | 373 | ||
337 | if (t->rcu_read_lock_nesting == 0) { | 374 | if (t->rcu_read_lock_nesting == 0) { |
338 | t->rcu_read_unlock_special &= | 375 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; |
339 | ~(RCU_READ_UNLOCK_NEED_QS | RCU_READ_UNLOCK_GOT_QS); | 376 | rcu_preempt_qs(cpu); |
340 | rcu_preempt_qs_record(cpu); | ||
341 | return; | 377 | return; |
342 | } | 378 | } |
343 | if (per_cpu(rcu_preempt_data, cpu).qs_pending) { | 379 | if (per_cpu(rcu_preempt_data, cpu).qs_pending) |
344 | if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_GOT_QS) { | 380 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; |
345 | rcu_preempt_qs_record(cpu); | ||
346 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_GOT_QS; | ||
347 | } else if (!(t->rcu_read_unlock_special & | ||
348 | RCU_READ_UNLOCK_NEED_QS)) { | ||
349 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; | ||
350 | } | ||
351 | } | ||
352 | } | 381 | } |
353 | 382 | ||
354 | /* | 383 | /* |
@@ -370,6 +399,17 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
370 | EXPORT_SYMBOL_GPL(call_rcu); | 399 | EXPORT_SYMBOL_GPL(call_rcu); |
371 | 400 | ||
372 | /* | 401 | /* |
402 | * Wait for an rcu-preempt grace period. We are supposed to expedite the | ||
403 | * grace period, but this is the crude slow compatibility hack, so just | ||
404 | * invoke synchronize_rcu(). | ||
405 | */ | ||
406 | void synchronize_rcu_expedited(void) | ||
407 | { | ||
408 | synchronize_rcu(); | ||
409 | } | ||
410 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | ||
411 | |||
412 | /* | ||
373 | * Check to see if there is any immediate preemptable-RCU-related work | 413 | * Check to see if there is any immediate preemptable-RCU-related work |
374 | * to be done. | 414 | * to be done. |
375 | */ | 415 | */ |
@@ -387,6 +427,15 @@ static int rcu_preempt_needs_cpu(int cpu) | |||
387 | return !!per_cpu(rcu_preempt_data, cpu).nxtlist; | 427 | return !!per_cpu(rcu_preempt_data, cpu).nxtlist; |
388 | } | 428 | } |
389 | 429 | ||
430 | /** | ||
431 | * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. | ||
432 | */ | ||
433 | void rcu_barrier(void) | ||
434 | { | ||
435 | _rcu_barrier(&rcu_preempt_state, call_rcu); | ||
436 | } | ||
437 | EXPORT_SYMBOL_GPL(rcu_barrier); | ||
438 | |||
390 | /* | 439 | /* |
391 | * Initialize preemptable RCU's per-CPU data. | 440 | * Initialize preemptable RCU's per-CPU data. |
392 | */ | 441 | */ |
@@ -396,6 +445,22 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |||
396 | } | 445 | } |
397 | 446 | ||
398 | /* | 447 | /* |
448 | * Move preemptable RCU's callbacks to ->orphan_cbs_list. | ||
449 | */ | ||
450 | static void rcu_preempt_send_cbs_to_orphanage(void) | ||
451 | { | ||
452 | rcu_send_cbs_to_orphanage(&rcu_preempt_state); | ||
453 | } | ||
454 | |||
455 | /* | ||
456 | * Initialize preemptable RCU's state structures. | ||
457 | */ | ||
458 | static void __init __rcu_init_preempt(void) | ||
459 | { | ||
460 | RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data); | ||
461 | } | ||
462 | |||
463 | /* | ||
399 | * Check for a task exiting while in a preemptable-RCU read-side | 464 | * Check for a task exiting while in a preemptable-RCU read-side |
400 | * critical section, clean up if so. No need to issue warnings, | 465 | * critical section, clean up if so. No need to issue warnings, |
401 | * as debug_check_no_locks_held() already does this if lockdep | 466 | * as debug_check_no_locks_held() already does this if lockdep |
@@ -434,10 +499,19 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); | |||
434 | * Because preemptable RCU does not exist, we never have to check for | 499 | * Because preemptable RCU does not exist, we never have to check for |
435 | * CPUs being in quiescent states. | 500 | * CPUs being in quiescent states. |
436 | */ | 501 | */ |
437 | static void rcu_preempt_qs(int cpu) | 502 | static void rcu_preempt_note_context_switch(int cpu) |
438 | { | 503 | { |
439 | } | 504 | } |
440 | 505 | ||
506 | /* | ||
507 | * Because preemptable RCU does not exist, there are never any preempted | ||
508 | * RCU readers. | ||
509 | */ | ||
510 | static int rcu_preempted_readers(struct rcu_node *rnp) | ||
511 | { | ||
512 | return 0; | ||
513 | } | ||
514 | |||
441 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 515 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
442 | 516 | ||
443 | /* | 517 | /* |
@@ -451,23 +525,28 @@ static void rcu_print_task_stall(struct rcu_node *rnp) | |||
451 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 525 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
452 | 526 | ||
453 | /* | 527 | /* |
454 | * Because preemptable RCU does not exist, there are never any preempted | 528 | * Because there is no preemptable RCU, there can be no readers blocked, |
455 | * RCU readers. | 529 | * so there is no need to check for blocked tasks. So check only for |
530 | * bogus qsmask values. | ||
456 | */ | 531 | */ |
457 | static int rcu_preempted_readers(struct rcu_node *rnp) | 532 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) |
458 | { | 533 | { |
459 | return 0; | 534 | WARN_ON_ONCE(rnp->qsmask); |
460 | } | 535 | } |
461 | 536 | ||
462 | #ifdef CONFIG_HOTPLUG_CPU | 537 | #ifdef CONFIG_HOTPLUG_CPU |
463 | 538 | ||
464 | /* | 539 | /* |
465 | * Because preemptable RCU does not exist, it never needs to migrate | 540 | * Because preemptable RCU does not exist, it never needs to migrate |
466 | * tasks that were blocked within RCU read-side critical sections. | 541 | * tasks that were blocked within RCU read-side critical sections, and |
542 | * such non-existent tasks cannot possibly have been blocking the current | ||
543 | * grace period. | ||
467 | */ | 544 | */ |
468 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | 545 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
469 | struct rcu_node *rnp) | 546 | struct rcu_node *rnp, |
547 | struct rcu_data *rdp) | ||
470 | { | 548 | { |
549 | return 0; | ||
471 | } | 550 | } |
472 | 551 | ||
473 | /* | 552 | /* |
@@ -484,7 +563,7 @@ static void rcu_preempt_offline_cpu(int cpu) | |||
484 | * Because preemptable RCU does not exist, it never has any callbacks | 563 | * Because preemptable RCU does not exist, it never has any callbacks |
485 | * to check. | 564 | * to check. |
486 | */ | 565 | */ |
487 | void rcu_preempt_check_callbacks(int cpu) | 566 | static void rcu_preempt_check_callbacks(int cpu) |
488 | { | 567 | { |
489 | } | 568 | } |
490 | 569 | ||
@@ -492,7 +571,7 @@ void rcu_preempt_check_callbacks(int cpu) | |||
492 | * Because preemptable RCU does not exist, it never has any callbacks | 571 | * Because preemptable RCU does not exist, it never has any callbacks |
493 | * to process. | 572 | * to process. |
494 | */ | 573 | */ |
495 | void rcu_preempt_process_callbacks(void) | 574 | static void rcu_preempt_process_callbacks(void) |
496 | { | 575 | { |
497 | } | 576 | } |
498 | 577 | ||
@@ -506,6 +585,16 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
506 | EXPORT_SYMBOL_GPL(call_rcu); | 585 | EXPORT_SYMBOL_GPL(call_rcu); |
507 | 586 | ||
508 | /* | 587 | /* |
588 | * Wait for an rcu-preempt grace period, but make it happen quickly. | ||
589 | * But because preemptable RCU does not exist, map to rcu-sched. | ||
590 | */ | ||
591 | void synchronize_rcu_expedited(void) | ||
592 | { | ||
593 | synchronize_sched_expedited(); | ||
594 | } | ||
595 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | ||
596 | |||
597 | /* | ||
509 | * Because preemptable RCU does not exist, it never has any work to do. | 598 | * Because preemptable RCU does not exist, it never has any work to do. |
510 | */ | 599 | */ |
511 | static int rcu_preempt_pending(int cpu) | 600 | static int rcu_preempt_pending(int cpu) |
@@ -522,6 +611,16 @@ static int rcu_preempt_needs_cpu(int cpu) | |||
522 | } | 611 | } |
523 | 612 | ||
524 | /* | 613 | /* |
614 | * Because preemptable RCU does not exist, rcu_barrier() is just | ||
615 | * another name for rcu_barrier_sched(). | ||
616 | */ | ||
617 | void rcu_barrier(void) | ||
618 | { | ||
619 | rcu_barrier_sched(); | ||
620 | } | ||
621 | EXPORT_SYMBOL_GPL(rcu_barrier); | ||
622 | |||
623 | /* | ||
525 | * Because preemptable RCU does not exist, there is no per-CPU | 624 | * Because preemptable RCU does not exist, there is no per-CPU |
526 | * data to initialize. | 625 | * data to initialize. |
527 | */ | 626 | */ |
@@ -529,4 +628,18 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |||
529 | { | 628 | { |
530 | } | 629 | } |
531 | 630 | ||
631 | /* | ||
632 | * Because there is no preemptable RCU, there are no callbacks to move. | ||
633 | */ | ||
634 | static void rcu_preempt_send_cbs_to_orphanage(void) | ||
635 | { | ||
636 | } | ||
637 | |||
638 | /* | ||
639 | * Because preemptable RCU does not exist, it need not be initialized. | ||
640 | */ | ||
641 | static void __init __rcu_init_preempt(void) | ||
642 | { | ||
643 | } | ||
644 | |||
532 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | 645 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
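Taken together, the plugin changes above index blocked readers by grace-period parity: a task preempted while its CPU still owes a quiescent state blocks the current grace period, otherwise it can only block the next one. A hedged sketch of what the two blocked_tasks[] lists mean at any instant (illustrative, not kernel source):

        /* Readers blocking the *current* grace period on this rcu_node: */
        struct list_head *curr = &rnp->blocked_tasks[rnp->gpnum & 0x1];

        /* Readers that arrived too late and can only block the *next* one: */
        struct list_head *next = &rnp->blocked_tasks[!(rnp->gpnum & 0x1)];

        /* The current grace period cannot complete while 'curr' is non-empty: */
        int gp_blocked = !list_empty(curr);     /* same test as rcu_preempted_readers() */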
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 0ea1bff69727..4b31c779e62e 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
@@ -20,7 +20,7 @@ | |||
20 | * Papers: http://www.rdrop.com/users/paulmck/RCU | 20 | * Papers: http://www.rdrop.com/users/paulmck/RCU |
21 | * | 21 | * |
22 | * For detailed explanation of Read-Copy Update mechanism see - | 22 | * For detailed explanation of Read-Copy Update mechanism see - |
23 | * Documentation/RCU | 23 | * Documentation/RCU |
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
@@ -93,7 +93,7 @@ static int rcudata_open(struct inode *inode, struct file *file) | |||
93 | return single_open(file, show_rcudata, NULL); | 93 | return single_open(file, show_rcudata, NULL); |
94 | } | 94 | } |
95 | 95 | ||
96 | static struct file_operations rcudata_fops = { | 96 | static const struct file_operations rcudata_fops = { |
97 | .owner = THIS_MODULE, | 97 | .owner = THIS_MODULE, |
98 | .open = rcudata_open, | 98 | .open = rcudata_open, |
99 | .read = seq_read, | 99 | .read = seq_read, |
@@ -145,7 +145,7 @@ static int rcudata_csv_open(struct inode *inode, struct file *file) | |||
145 | return single_open(file, show_rcudata_csv, NULL); | 145 | return single_open(file, show_rcudata_csv, NULL); |
146 | } | 146 | } |
147 | 147 | ||
148 | static struct file_operations rcudata_csv_fops = { | 148 | static const struct file_operations rcudata_csv_fops = { |
149 | .owner = THIS_MODULE, | 149 | .owner = THIS_MODULE, |
150 | .open = rcudata_csv_open, | 150 | .open = rcudata_csv_open, |
151 | .read = seq_read, | 151 | .read = seq_read, |
@@ -159,13 +159,13 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | |||
159 | struct rcu_node *rnp; | 159 | struct rcu_node *rnp; |
160 | 160 | ||
161 | seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " | 161 | seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " |
162 | "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n", | 162 | "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n", |
163 | rsp->completed, rsp->gpnum, rsp->signaled, | 163 | rsp->completed, rsp->gpnum, rsp->signaled, |
164 | (long)(rsp->jiffies_force_qs - jiffies), | 164 | (long)(rsp->jiffies_force_qs - jiffies), |
165 | (int)(jiffies & 0xffff), | 165 | (int)(jiffies & 0xffff), |
166 | rsp->n_force_qs, rsp->n_force_qs_ngp, | 166 | rsp->n_force_qs, rsp->n_force_qs_ngp, |
167 | rsp->n_force_qs - rsp->n_force_qs_ngp, | 167 | rsp->n_force_qs - rsp->n_force_qs_ngp, |
168 | rsp->n_force_qs_lh); | 168 | rsp->n_force_qs_lh, rsp->orphan_qlen); |
169 | for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) { | 169 | for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) { |
170 | if (rnp->level != level) { | 170 | if (rnp->level != level) { |
171 | seq_puts(m, "\n"); | 171 | seq_puts(m, "\n"); |
@@ -196,7 +196,7 @@ static int rcuhier_open(struct inode *inode, struct file *file) | |||
196 | return single_open(file, show_rcuhier, NULL); | 196 | return single_open(file, show_rcuhier, NULL); |
197 | } | 197 | } |
198 | 198 | ||
199 | static struct file_operations rcuhier_fops = { | 199 | static const struct file_operations rcuhier_fops = { |
200 | .owner = THIS_MODULE, | 200 | .owner = THIS_MODULE, |
201 | .open = rcuhier_open, | 201 | .open = rcuhier_open, |
202 | .read = seq_read, | 202 | .read = seq_read, |
@@ -222,7 +222,7 @@ static int rcugp_open(struct inode *inode, struct file *file) | |||
222 | return single_open(file, show_rcugp, NULL); | 222 | return single_open(file, show_rcugp, NULL); |
223 | } | 223 | } |
224 | 224 | ||
225 | static struct file_operations rcugp_fops = { | 225 | static const struct file_operations rcugp_fops = { |
226 | .owner = THIS_MODULE, | 226 | .owner = THIS_MODULE, |
227 | .open = rcugp_open, | 227 | .open = rcugp_open, |
228 | .read = seq_read, | 228 | .read = seq_read, |
@@ -276,7 +276,7 @@ static int rcu_pending_open(struct inode *inode, struct file *file) | |||
276 | return single_open(file, show_rcu_pending, NULL); | 276 | return single_open(file, show_rcu_pending, NULL); |
277 | } | 277 | } |
278 | 278 | ||
279 | static struct file_operations rcu_pending_fops = { | 279 | static const struct file_operations rcu_pending_fops = { |
280 | .owner = THIS_MODULE, | 280 | .owner = THIS_MODULE, |
281 | .open = rcu_pending_open, | 281 | .open = rcu_pending_open, |
282 | .read = seq_read, | 282 | .read = seq_read, |
diff --git a/kernel/relay.c b/kernel/relay.c index bc188549788f..760c26209a3c 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -60,7 +60,7 @@ static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
60 | /* | 60 | /* |
61 | * vm_ops for relay file mappings. | 61 | * vm_ops for relay file mappings. |
62 | */ | 62 | */ |
63 | static struct vm_operations_struct relay_file_mmap_ops = { | 63 | static const struct vm_operations_struct relay_file_mmap_ops = { |
64 | .fault = relay_buf_fault, | 64 | .fault = relay_buf_fault, |
65 | .close = relay_file_mmap_close, | 65 | .close = relay_file_mmap_close, |
66 | }; | 66 | }; |
diff --git a/kernel/res_counter.c b/kernel/res_counter.c index e1338f074314..bcdabf37c40b 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c | |||
@@ -19,6 +19,7 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent) | |||
19 | { | 19 | { |
20 | spin_lock_init(&counter->lock); | 20 | spin_lock_init(&counter->lock); |
21 | counter->limit = RESOURCE_MAX; | 21 | counter->limit = RESOURCE_MAX; |
22 | counter->soft_limit = RESOURCE_MAX; | ||
22 | counter->parent = parent; | 23 | counter->parent = parent; |
23 | } | 24 | } |
24 | 25 | ||
@@ -101,6 +102,8 @@ res_counter_member(struct res_counter *counter, int member) | |||
101 | return &counter->limit; | 102 | return &counter->limit; |
102 | case RES_FAILCNT: | 103 | case RES_FAILCNT: |
103 | return &counter->failcnt; | 104 | return &counter->failcnt; |
105 | case RES_SOFT_LIMIT: | ||
106 | return &counter->soft_limit; | ||
104 | }; | 107 | }; |
105 | 108 | ||
106 | BUG(); | 109 | BUG(); |
diff --git a/kernel/resource.c b/kernel/resource.c index 78b087221c15..fb11a58b9594 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -223,13 +223,13 @@ int release_resource(struct resource *old) | |||
223 | 223 | ||
224 | EXPORT_SYMBOL(release_resource); | 224 | EXPORT_SYMBOL(release_resource); |
225 | 225 | ||
226 | #if defined(CONFIG_MEMORY_HOTPLUG) && !defined(CONFIG_ARCH_HAS_WALK_MEMORY) | 226 | #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY) |
227 | /* | 227 | /* |
228 | * Finds the lowest memory resource that exists within [res->start, res->end) | 228 | * Finds the lowest memory resource that exists within [res->start, res->end) |
229 | * the caller must specify res->start, res->end, res->flags. | 229 | * the caller must specify res->start, res->end, res->flags and "name". |
230 | * If found, returns 0, res is overwritten, if not found, returns -1. | 230 | * If found, returns 0, res is overwritten, if not found, returns -1. |
231 | */ | 231 | */ |
232 | static int find_next_system_ram(struct resource *res) | 232 | static int find_next_system_ram(struct resource *res, char *name) |
233 | { | 233 | { |
234 | resource_size_t start, end; | 234 | resource_size_t start, end; |
235 | struct resource *p; | 235 | struct resource *p; |
@@ -245,6 +245,8 @@ static int find_next_system_ram(struct resource *res) | |||
245 | /* system ram is just marked as IORESOURCE_MEM */ | 245 | /* system ram is just marked as IORESOURCE_MEM */ |
246 | if (p->flags != res->flags) | 246 | if (p->flags != res->flags) |
247 | continue; | 247 | continue; |
248 | if (name && strcmp(p->name, name)) | ||
249 | continue; | ||
248 | if (p->start > end) { | 250 | if (p->start > end) { |
249 | p = NULL; | 251 | p = NULL; |
250 | break; | 252 | break; |
@@ -262,19 +264,26 @@ static int find_next_system_ram(struct resource *res) | |||
262 | res->end = p->end; | 264 | res->end = p->end; |
263 | return 0; | 265 | return 0; |
264 | } | 266 | } |
265 | int | 267 | |
266 | walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, | 268 | /* |
267 | int (*func)(unsigned long, unsigned long, void *)) | 269 | * This function calls the callback against all memory ranges of "System RAM" |
270 | * which are marked as IORESOURCE_MEM and IORESOURCE_BUSY. | ||
271 | * Now, this function is only for "System RAM". | ||
272 | */ | ||
273 | int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, | ||
274 | void *arg, int (*func)(unsigned long, unsigned long, void *)) | ||
268 | { | 275 | { |
269 | struct resource res; | 276 | struct resource res; |
270 | unsigned long pfn, len; | 277 | unsigned long pfn, len; |
271 | u64 orig_end; | 278 | u64 orig_end; |
272 | int ret = -1; | 279 | int ret = -1; |
280 | |||
273 | res.start = (u64) start_pfn << PAGE_SHIFT; | 281 | res.start = (u64) start_pfn << PAGE_SHIFT; |
274 | res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; | 282 | res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; |
275 | res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 283 | res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
276 | orig_end = res.end; | 284 | orig_end = res.end; |
277 | while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) { | 285 | while ((res.start < res.end) && |
286 | (find_next_system_ram(&res, "System RAM") >= 0)) { | ||
278 | pfn = (unsigned long)(res.start >> PAGE_SHIFT); | 287 | pfn = (unsigned long)(res.start >> PAGE_SHIFT); |
279 | len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT); | 288 | len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT); |
280 | ret = (*func)(pfn, len, arg); | 289 | ret = (*func)(pfn, len, arg); |
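The rename from walk_memory_resource() to walk_system_ram_range() also threads a resource name through find_next_system_ram(), so only ranges literally named "System RAM" are visited. A hedged caller sketch (hypothetical helper names, not part of the patch):

        /* Count how many "System RAM" pages fall inside a pfn range. */
        static int count_ram_cb(unsigned long start_pfn, unsigned long nr_pages,
                                void *arg)
        {
                *(unsigned long *)arg += nr_pages;
                return 0;               /* a nonzero return stops the walk */
        }

        static unsigned long count_ram_pages(unsigned long start_pfn,
                                             unsigned long nr_pages)
        {
                unsigned long count = 0;

                walk_system_ram_range(start_pfn, nr_pages, &count, count_ram_cb);
                return count;
        }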
diff --git a/kernel/sched.c b/kernel/sched.c index faf4d463bbff..3c11ae0a948d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #include <linux/completion.h> | 39 | #include <linux/completion.h> |
40 | #include <linux/kernel_stat.h> | 40 | #include <linux/kernel_stat.h> |
41 | #include <linux/debug_locks.h> | 41 | #include <linux/debug_locks.h> |
42 | #include <linux/perf_counter.h> | 42 | #include <linux/perf_event.h> |
43 | #include <linux/security.h> | 43 | #include <linux/security.h> |
44 | #include <linux/notifier.h> | 44 | #include <linux/notifier.h> |
45 | #include <linux/profile.h> | 45 | #include <linux/profile.h> |
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq); | |||
309 | */ | 309 | */ |
310 | static DEFINE_SPINLOCK(task_group_lock); | 310 | static DEFINE_SPINLOCK(task_group_lock); |
311 | 311 | ||
312 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
313 | |||
312 | #ifdef CONFIG_SMP | 314 | #ifdef CONFIG_SMP |
313 | static int root_task_group_empty(void) | 315 | static int root_task_group_empty(void) |
314 | { | 316 | { |
@@ -316,7 +318,6 @@ static int root_task_group_empty(void) | |||
316 | } | 318 | } |
317 | #endif | 319 | #endif |
318 | 320 | ||
319 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
320 | #ifdef CONFIG_USER_SCHED | 321 | #ifdef CONFIG_USER_SCHED |
321 | # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) | 322 | # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) |
322 | #else /* !CONFIG_USER_SCHED */ | 323 | #else /* !CONFIG_USER_SCHED */ |
@@ -676,20 +677,15 @@ inline void update_rq_clock(struct rq *rq) | |||
676 | 677 | ||
677 | /** | 678 | /** |
678 | * runqueue_is_locked | 679 | * runqueue_is_locked |
680 | * @cpu: the processor in question. | ||
679 | * | 681 | * |
680 | * Returns true if the current cpu runqueue is locked. | 682 | * Returns true if the current cpu runqueue is locked. |
681 | * This interface allows printk to be called with the runqueue lock | 683 | * This interface allows printk to be called with the runqueue lock |
682 | * held and know whether or not it is OK to wake up the klogd. | 684 | * held and know whether or not it is OK to wake up the klogd. |
683 | */ | 685 | */ |
684 | int runqueue_is_locked(void) | 686 | int runqueue_is_locked(int cpu) |
685 | { | 687 | { |
686 | int cpu = get_cpu(); | 688 | return spin_is_locked(&cpu_rq(cpu)->lock); |
687 | struct rq *rq = cpu_rq(cpu); | ||
688 | int ret; | ||
689 | |||
690 | ret = spin_is_locked(&rq->lock); | ||
691 | put_cpu(); | ||
692 | return ret; | ||
693 | } | 689 | } |
694 | 690 | ||
695 | /* | 691 | /* |
@@ -786,7 +782,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp) | |||
786 | return single_open(filp, sched_feat_show, NULL); | 782 | return single_open(filp, sched_feat_show, NULL); |
787 | } | 783 | } |
788 | 784 | ||
789 | static struct file_operations sched_feat_fops = { | 785 | static const struct file_operations sched_feat_fops = { |
790 | .open = sched_feat_open, | 786 | .open = sched_feat_open, |
791 | .write = sched_feat_write, | 787 | .write = sched_feat_write, |
792 | .read = seq_read, | 788 | .read = seq_read, |
@@ -1569,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu) | |||
1569 | 1565 | ||
1570 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1566 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1571 | 1567 | ||
1572 | struct update_shares_data { | 1568 | static __read_mostly unsigned long *update_shares_data; |
1573 | unsigned long rq_weight[NR_CPUS]; | ||
1574 | }; | ||
1575 | |||
1576 | static DEFINE_PER_CPU(struct update_shares_data, update_shares_data); | ||
1577 | 1569 | ||
1578 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); | 1570 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); |
1579 | 1571 | ||
@@ -1583,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares); | |||
1583 | static void update_group_shares_cpu(struct task_group *tg, int cpu, | 1575 | static void update_group_shares_cpu(struct task_group *tg, int cpu, |
1584 | unsigned long sd_shares, | 1576 | unsigned long sd_shares, |
1585 | unsigned long sd_rq_weight, | 1577 | unsigned long sd_rq_weight, |
1586 | struct update_shares_data *usd) | 1578 | unsigned long *usd_rq_weight) |
1587 | { | 1579 | { |
1588 | unsigned long shares, rq_weight; | 1580 | unsigned long shares, rq_weight; |
1589 | int boost = 0; | 1581 | int boost = 0; |
1590 | 1582 | ||
1591 | rq_weight = usd->rq_weight[cpu]; | 1583 | rq_weight = usd_rq_weight[cpu]; |
1592 | if (!rq_weight) { | 1584 | if (!rq_weight) { |
1593 | boost = 1; | 1585 | boost = 1; |
1594 | rq_weight = NICE_0_LOAD; | 1586 | rq_weight = NICE_0_LOAD; |
@@ -1623,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1623 | static int tg_shares_up(struct task_group *tg, void *data) | 1615 | static int tg_shares_up(struct task_group *tg, void *data) |
1624 | { | 1616 | { |
1625 | unsigned long weight, rq_weight = 0, shares = 0; | 1617 | unsigned long weight, rq_weight = 0, shares = 0; |
1626 | struct update_shares_data *usd; | 1618 | unsigned long *usd_rq_weight; |
1627 | struct sched_domain *sd = data; | 1619 | struct sched_domain *sd = data; |
1628 | unsigned long flags; | 1620 | unsigned long flags; |
1629 | int i; | 1621 | int i; |
@@ -1632,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1632 | return 0; | 1624 | return 0; |
1633 | 1625 | ||
1634 | local_irq_save(flags); | 1626 | local_irq_save(flags); |
1635 | usd = &__get_cpu_var(update_shares_data); | 1627 | usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id()); |
1636 | 1628 | ||
1637 | for_each_cpu(i, sched_domain_span(sd)) { | 1629 | for_each_cpu(i, sched_domain_span(sd)) { |
1638 | weight = tg->cfs_rq[i]->load.weight; | 1630 | weight = tg->cfs_rq[i]->load.weight; |
1639 | usd->rq_weight[i] = weight; | 1631 | usd_rq_weight[i] = weight; |
1640 | 1632 | ||
1641 | /* | 1633 | /* |
1642 | * If there are currently no tasks on the cpu pretend there | 1634 | * If there are currently no tasks on the cpu pretend there |
@@ -1657,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1657 | shares = tg->shares; | 1649 | shares = tg->shares; |
1658 | 1650 | ||
1659 | for_each_cpu(i, sched_domain_span(sd)) | 1651 | for_each_cpu(i, sched_domain_span(sd)) |
1660 | update_group_shares_cpu(tg, i, shares, rq_weight, usd); | 1652 | update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight); |
1661 | 1653 | ||
1662 | local_irq_restore(flags); | 1654 | local_irq_restore(flags); |
1663 | 1655 | ||
@@ -2001,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p, | |||
2001 | p->sched_class->prio_changed(rq, p, oldprio, running); | 1993 | p->sched_class->prio_changed(rq, p, oldprio, running); |
2002 | } | 1994 | } |
2003 | 1995 | ||
1996 | /** | ||
1997 | * kthread_bind - bind a just-created kthread to a cpu. | ||
1998 | * @p: thread created by kthread_create(). | ||
1999 | * @cpu: cpu (might not be online, must be possible) for @k to run on. | ||
2000 | * | ||
2001 | * Description: This function is equivalent to set_cpus_allowed(), | ||
2002 | * except that @cpu doesn't need to be online, and the thread must be | ||
2003 | * stopped (i.e., just returned from kthread_create()). | ||
2004 | * | ||
2005 | * Function lives here instead of kthread.c because it messes with | ||
2006 | * scheduler internals which require locking. | ||
2007 | */ | ||
2008 | void kthread_bind(struct task_struct *p, unsigned int cpu) | ||
2009 | { | ||
2010 | struct rq *rq = cpu_rq(cpu); | ||
2011 | unsigned long flags; | ||
2012 | |||
2013 | /* Must have done schedule() in kthread() before we set_task_cpu */ | ||
2014 | if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) { | ||
2015 | WARN_ON(1); | ||
2016 | return; | ||
2017 | } | ||
2018 | |||
2019 | spin_lock_irqsave(&rq->lock, flags); | ||
2020 | set_task_cpu(p, cpu); | ||
2021 | p->cpus_allowed = cpumask_of_cpu(cpu); | ||
2022 | p->rt.nr_cpus_allowed = 1; | ||
2023 | p->flags |= PF_THREAD_BOUND; | ||
2024 | spin_unlock_irqrestore(&rq->lock, flags); | ||
2025 | } | ||
2026 | EXPORT_SYMBOL(kthread_bind); | ||
2027 | |||
2004 | #ifdef CONFIG_SMP | 2028 | #ifdef CONFIG_SMP |
2005 | /* | 2029 | /* |
2006 | * Is this task likely cache-hot: | 2030 | * Is this task likely cache-hot: |
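kthread_bind() is added here rather than in kthread.c because pinning the thread touches scheduler internals under the runqueue lock; it may only be used on a kthread that is still stopped, i.e. freshly returned from kthread_create() and not yet woken. A hedged usage sketch (my_thread_fn is a hypothetical thread function; kthread_create() and wake_up_process() are the existing kthread APIs):

        struct task_struct *tsk;

        tsk = kthread_create(my_thread_fn, NULL, "mythread/%u", cpu);
        if (!IS_ERR(tsk)) {
                kthread_bind(tsk, cpu);         /* legal only while still stopped */
                wake_up_process(tsk);           /* now it runs, pinned to 'cpu' */
        }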
@@ -2013,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | |||
2013 | /* | 2037 | /* |
2014 | * Buddy candidates are cache hot: | 2038 | * Buddy candidates are cache hot: |
2015 | */ | 2039 | */ |
2016 | if (sched_feat(CACHE_HOT_BUDDY) && | 2040 | if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && |
2017 | (&p->se == cfs_rq_of(&p->se)->next || | 2041 | (&p->se == cfs_rq_of(&p->se)->next || |
2018 | &p->se == cfs_rq_of(&p->se)->last)) | 2042 | &p->se == cfs_rq_of(&p->se)->last)) |
2019 | return 1; | 2043 | return 1; |
@@ -2059,7 +2083,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
2059 | if (task_hot(p, old_rq->clock, NULL)) | 2083 | if (task_hot(p, old_rq->clock, NULL)) |
2060 | schedstat_inc(p, se.nr_forced2_migrations); | 2084 | schedstat_inc(p, se.nr_forced2_migrations); |
2061 | #endif | 2085 | #endif |
2062 | perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, | 2086 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, |
2063 | 1, 1, NULL, 0); | 2087 | 1, 1, NULL, 0); |
2064 | } | 2088 | } |
2065 | p->se.vruntime -= old_cfsrq->min_vruntime - | 2089 | p->se.vruntime -= old_cfsrq->min_vruntime - |
@@ -2317,7 +2341,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
2317 | { | 2341 | { |
2318 | int cpu, orig_cpu, this_cpu, success = 0; | 2342 | int cpu, orig_cpu, this_cpu, success = 0; |
2319 | unsigned long flags; | 2343 | unsigned long flags; |
2320 | struct rq *rq; | 2344 | struct rq *rq, *orig_rq; |
2321 | 2345 | ||
2322 | if (!sched_feat(SYNC_WAKEUPS)) | 2346 | if (!sched_feat(SYNC_WAKEUPS)) |
2323 | wake_flags &= ~WF_SYNC; | 2347 | wake_flags &= ~WF_SYNC; |
@@ -2325,7 +2349,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
2325 | this_cpu = get_cpu(); | 2349 | this_cpu = get_cpu(); |
2326 | 2350 | ||
2327 | smp_wmb(); | 2351 | smp_wmb(); |
2328 | rq = task_rq_lock(p, &flags); | 2352 | rq = orig_rq = task_rq_lock(p, &flags); |
2329 | update_rq_clock(rq); | 2353 | update_rq_clock(rq); |
2330 | if (!(p->state & state)) | 2354 | if (!(p->state & state)) |
2331 | goto out; | 2355 | goto out; |
@@ -2356,6 +2380,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
2356 | set_task_cpu(p, cpu); | 2380 | set_task_cpu(p, cpu); |
2357 | 2381 | ||
2358 | rq = task_rq_lock(p, &flags); | 2382 | rq = task_rq_lock(p, &flags); |
2383 | |||
2384 | if (rq != orig_rq) | ||
2385 | update_rq_clock(rq); | ||
2386 | |||
2359 | WARN_ON(p->state != TASK_WAKING); | 2387 | WARN_ON(p->state != TASK_WAKING); |
2360 | cpu = task_cpu(p); | 2388 | cpu = task_cpu(p); |
2361 | 2389 | ||
@@ -2521,22 +2549,17 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2521 | __sched_fork(p); | 2549 | __sched_fork(p); |
2522 | 2550 | ||
2523 | /* | 2551 | /* |
2524 | * Make sure we do not leak PI boosting priority to the child. | ||
2525 | */ | ||
2526 | p->prio = current->normal_prio; | ||
2527 | |||
2528 | /* | ||
2529 | * Revert to default priority/policy on fork if requested. | 2552 | * Revert to default priority/policy on fork if requested. |
2530 | */ | 2553 | */ |
2531 | if (unlikely(p->sched_reset_on_fork)) { | 2554 | if (unlikely(p->sched_reset_on_fork)) { |
2532 | if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) | 2555 | if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) { |
2533 | p->policy = SCHED_NORMAL; | 2556 | p->policy = SCHED_NORMAL; |
2534 | 2557 | p->normal_prio = p->static_prio; | |
2535 | if (p->normal_prio < DEFAULT_PRIO) | 2558 | } |
2536 | p->prio = DEFAULT_PRIO; | ||
2537 | 2559 | ||
2538 | if (PRIO_TO_NICE(p->static_prio) < 0) { | 2560 | if (PRIO_TO_NICE(p->static_prio) < 0) { |
2539 | p->static_prio = NICE_TO_PRIO(0); | 2561 | p->static_prio = NICE_TO_PRIO(0); |
2562 | p->normal_prio = p->static_prio; | ||
2540 | set_load_weight(p); | 2563 | set_load_weight(p); |
2541 | } | 2564 | } |
2542 | 2565 | ||
@@ -2547,6 +2570,11 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2547 | p->sched_reset_on_fork = 0; | 2570 | p->sched_reset_on_fork = 0; |
2548 | } | 2571 | } |
2549 | 2572 | ||
2573 | /* | ||
2574 | * Make sure we do not leak PI boosting priority to the child. | ||
2575 | */ | ||
2576 | p->prio = current->normal_prio; | ||
2577 | |||
2550 | if (!rt_prio(p->prio)) | 2578 | if (!rt_prio(p->prio)) |
2551 | p->sched_class = &fair_sched_class; | 2579 | p->sched_class = &fair_sched_class; |
2552 | 2580 | ||
@@ -2587,8 +2615,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2587 | BUG_ON(p->state != TASK_RUNNING); | 2615 | BUG_ON(p->state != TASK_RUNNING); |
2588 | update_rq_clock(rq); | 2616 | update_rq_clock(rq); |
2589 | 2617 | ||
2590 | p->prio = effective_prio(p); | ||
2591 | |||
2592 | if (!p->sched_class->task_new || !current->se.on_rq) { | 2618 | if (!p->sched_class->task_new || !current->se.on_rq) { |
2593 | activate_task(rq, p, 0); | 2619 | activate_task(rq, p, 0); |
2594 | } else { | 2620 | } else { |
@@ -2724,7 +2750,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
2724 | */ | 2750 | */ |
2725 | prev_state = prev->state; | 2751 | prev_state = prev->state; |
2726 | finish_arch_switch(prev); | 2752 | finish_arch_switch(prev); |
2727 | perf_counter_task_sched_in(current, cpu_of(rq)); | 2753 | perf_event_task_sched_in(current, cpu_of(rq)); |
2728 | finish_lock_switch(rq, prev); | 2754 | finish_lock_switch(rq, prev); |
2729 | 2755 | ||
2730 | fire_sched_in_preempt_notifiers(current); | 2756 | fire_sched_in_preempt_notifiers(current); |
@@ -2910,6 +2936,19 @@ unsigned long nr_iowait(void) | |||
2910 | return sum; | 2936 | return sum; |
2911 | } | 2937 | } |
2912 | 2938 | ||
2939 | unsigned long nr_iowait_cpu(void) | ||
2940 | { | ||
2941 | struct rq *this = this_rq(); | ||
2942 | return atomic_read(&this->nr_iowait); | ||
2943 | } | ||
2944 | |||
2945 | unsigned long this_cpu_load(void) | ||
2946 | { | ||
2947 | struct rq *this = this_rq(); | ||
2948 | return this->cpu_load[0]; | ||
2949 | } | ||
2950 | |||
2951 | |||
2913 | /* Variables and functions for calc_load */ | 2952 | /* Variables and functions for calc_load */ |
2914 | static atomic_long_t calc_load_tasks; | 2953 | static atomic_long_t calc_load_tasks; |
2915 | static unsigned long calc_load_update; | 2954 | static unsigned long calc_load_update; |
@@ -3651,6 +3690,7 @@ static void update_group_power(struct sched_domain *sd, int cpu) | |||
3651 | 3690 | ||
3652 | /** | 3691 | /** |
3653 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. | 3692 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. |
3693 | * @sd: The sched_domain whose statistics are to be updated. | ||
3654 | * @group: sched_group whose statistics are to be updated. | 3694 | * @group: sched_group whose statistics are to be updated. |
3655 | * @this_cpu: Cpu for which load balance is currently performed. | 3695 | * @this_cpu: Cpu for which load balance is currently performed. |
3656 | * @idle: Idle status of this_cpu | 3696 | * @idle: Idle status of this_cpu |
@@ -5085,17 +5125,16 @@ void account_idle_time(cputime_t cputime) | |||
5085 | */ | 5125 | */ |
5086 | void account_process_tick(struct task_struct *p, int user_tick) | 5126 | void account_process_tick(struct task_struct *p, int user_tick) |
5087 | { | 5127 | { |
5088 | cputime_t one_jiffy = jiffies_to_cputime(1); | 5128 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); |
5089 | cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy); | ||
5090 | struct rq *rq = this_rq(); | 5129 | struct rq *rq = this_rq(); |
5091 | 5130 | ||
5092 | if (user_tick) | 5131 | if (user_tick) |
5093 | account_user_time(p, one_jiffy, one_jiffy_scaled); | 5132 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); |
5094 | else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) | 5133 | else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) |
5095 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy, | 5134 | account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, |
5096 | one_jiffy_scaled); | 5135 | one_jiffy_scaled); |
5097 | else | 5136 | else |
5098 | account_idle_time(one_jiffy); | 5137 | account_idle_time(cputime_one_jiffy); |
5099 | } | 5138 | } |
5100 | 5139 | ||
5101 | /* | 5140 | /* |
@@ -5199,7 +5238,7 @@ void scheduler_tick(void) | |||
5199 | curr->sched_class->task_tick(rq, curr, 0); | 5238 | curr->sched_class->task_tick(rq, curr, 0); |
5200 | spin_unlock(&rq->lock); | 5239 | spin_unlock(&rq->lock); |
5201 | 5240 | ||
5202 | perf_counter_task_tick(curr, cpu); | 5241 | perf_event_task_tick(curr, cpu); |
5203 | 5242 | ||
5204 | #ifdef CONFIG_SMP | 5243 | #ifdef CONFIG_SMP |
5205 | rq->idle_at_tick = idle_cpu(cpu); | 5244 | rq->idle_at_tick = idle_cpu(cpu); |
@@ -5415,7 +5454,7 @@ need_resched_nonpreemptible: | |||
5415 | 5454 | ||
5416 | if (likely(prev != next)) { | 5455 | if (likely(prev != next)) { |
5417 | sched_info_switch(prev, next); | 5456 | sched_info_switch(prev, next); |
5418 | perf_counter_task_sched_out(prev, next, cpu); | 5457 | perf_event_task_sched_out(prev, next, cpu); |
5419 | 5458 | ||
5420 | rq->nr_switches++; | 5459 | rq->nr_switches++; |
5421 | rq->curr = next; | 5460 | rq->curr = next; |
@@ -6714,9 +6753,6 @@ EXPORT_SYMBOL(yield); | |||
6714 | /* | 6753 | /* |
6715 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so | 6754 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
6716 | * that process accounting knows that this is a task in IO wait state. | 6755 | * that process accounting knows that this is a task in IO wait state. |
6717 | * | ||
6718 | * But don't do that if it is a deliberate, throttling IO wait (this task | ||
6719 | * has set its backing_dev_info: the queue against which it should throttle) | ||
6720 | */ | 6756 | */ |
6721 | void __sched io_schedule(void) | 6757 | void __sched io_schedule(void) |
6722 | { | 6758 | { |
@@ -6825,23 +6861,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | |||
6825 | if (retval) | 6861 | if (retval) |
6826 | goto out_unlock; | 6862 | goto out_unlock; |
6827 | 6863 | ||
6828 | /* | 6864 | time_slice = p->sched_class->get_rr_interval(p); |
6829 | * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER | ||
6830 | * tasks that are on an otherwise idle runqueue: | ||
6831 | */ | ||
6832 | time_slice = 0; | ||
6833 | if (p->policy == SCHED_RR) { | ||
6834 | time_slice = DEF_TIMESLICE; | ||
6835 | } else if (p->policy != SCHED_FIFO) { | ||
6836 | struct sched_entity *se = &p->se; | ||
6837 | unsigned long flags; | ||
6838 | struct rq *rq; | ||
6839 | 6865 | ||
6840 | rq = task_rq_lock(p, &flags); | ||
6841 | if (rq->cfs.load.weight) | ||
6842 | time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); | ||
6843 | task_rq_unlock(rq, &flags); | ||
6844 | } | ||
6845 | read_unlock(&tasklist_lock); | 6866 | read_unlock(&tasklist_lock); |
6846 | jiffies_to_timespec(time_slice, &t); | 6867 | jiffies_to_timespec(time_slice, &t); |
6847 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; | 6868 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; |
@@ -7692,7 +7713,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7692 | /* | 7713 | /* |
7693 | * Register at high priority so that task migration (migrate_all_tasks) | 7714 | * Register at high priority so that task migration (migrate_all_tasks) |
7694 | * happens before everything else. This has to be lower priority than | 7715 | * happens before everything else. This has to be lower priority than |
7695 | * the notifier in the perf_counter subsystem, though. | 7716 | * the notifier in the perf_event subsystem, though. |
7696 | */ | 7717 | */ |
7697 | static struct notifier_block __cpuinitdata migration_notifier = { | 7718 | static struct notifier_block __cpuinitdata migration_notifier = { |
7698 | .notifier_call = migration_call, | 7719 | .notifier_call = migration_call, |
@@ -9171,6 +9192,7 @@ void __init sched_init_smp(void) | |||
9171 | cpumask_var_t non_isolated_cpus; | 9192 | cpumask_var_t non_isolated_cpus; |
9172 | 9193 | ||
9173 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); | 9194 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); |
9195 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); | ||
9174 | 9196 | ||
9175 | #if defined(CONFIG_NUMA) | 9197 | #if defined(CONFIG_NUMA) |
9176 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), | 9198 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), |
@@ -9202,7 +9224,6 @@ void __init sched_init_smp(void) | |||
9202 | sched_init_granularity(); | 9224 | sched_init_granularity(); |
9203 | free_cpumask_var(non_isolated_cpus); | 9225 | free_cpumask_var(non_isolated_cpus); |
9204 | 9226 | ||
9205 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); | ||
9206 | init_sched_rt_class(); | 9227 | init_sched_rt_class(); |
9207 | } | 9228 | } |
9208 | #else | 9229 | #else |
@@ -9415,6 +9436,10 @@ void __init sched_init(void) | |||
9415 | #endif /* CONFIG_USER_SCHED */ | 9436 | #endif /* CONFIG_USER_SCHED */ |
9416 | #endif /* CONFIG_GROUP_SCHED */ | 9437 | #endif /* CONFIG_GROUP_SCHED */ |
9417 | 9438 | ||
9439 | #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP | ||
9440 | update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), | ||
9441 | __alignof__(unsigned long)); | ||
9442 | #endif | ||
9418 | for_each_possible_cpu(i) { | 9443 | for_each_possible_cpu(i) { |
9419 | struct rq *rq; | 9444 | struct rq *rq; |
9420 | 9445 | ||
@@ -9540,16 +9565,16 @@ void __init sched_init(void) | |||
9540 | current->sched_class = &fair_sched_class; | 9565 | current->sched_class = &fair_sched_class; |
9541 | 9566 | ||
9542 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | 9567 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ |
9543 | alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); | 9568 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); |
9544 | #ifdef CONFIG_SMP | 9569 | #ifdef CONFIG_SMP |
9545 | #ifdef CONFIG_NO_HZ | 9570 | #ifdef CONFIG_NO_HZ |
9546 | alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); | 9571 | zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); |
9547 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); | 9572 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); |
9548 | #endif | 9573 | #endif |
9549 | alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); | 9574 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); |
9550 | #endif /* SMP */ | 9575 | #endif /* SMP */ |
9551 | 9576 | ||
9552 | perf_counter_init(); | 9577 | perf_event_init(); |
9553 | 9578 | ||
9554 | scheduler_running = 1; | 9579 | scheduler_running = 1; |
9555 | } | 9580 | } |
@@ -10321,7 +10346,7 @@ static int sched_rt_global_constraints(void) | |||
10321 | #endif /* CONFIG_RT_GROUP_SCHED */ | 10346 | #endif /* CONFIG_RT_GROUP_SCHED */ |
10322 | 10347 | ||
10323 | int sched_rt_handler(struct ctl_table *table, int write, | 10348 | int sched_rt_handler(struct ctl_table *table, int write, |
10324 | struct file *filp, void __user *buffer, size_t *lenp, | 10349 | void __user *buffer, size_t *lenp, |
10325 | loff_t *ppos) | 10350 | loff_t *ppos) |
10326 | { | 10351 | { |
10327 | int ret; | 10352 | int ret; |
@@ -10332,7 +10357,7 @@ int sched_rt_handler(struct ctl_table *table, int write, | |||
10332 | old_period = sysctl_sched_rt_period; | 10357 | old_period = sysctl_sched_rt_period; |
10333 | old_runtime = sysctl_sched_rt_runtime; | 10358 | old_runtime = sysctl_sched_rt_runtime; |
10334 | 10359 | ||
10335 | ret = proc_dointvec(table, write, filp, buffer, lenp, ppos); | 10360 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
10336 | 10361 | ||
10337 | if (!ret && write) { | 10362 | if (!ret && write) { |
10338 | ret = sched_rt_global_constraints(); | 10363 | ret = sched_rt_global_constraints(); |
@@ -10386,8 +10411,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
10386 | } | 10411 | } |
10387 | 10412 | ||
10388 | static int | 10413 | static int |
10389 | cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | 10414 | cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) |
10390 | struct task_struct *tsk) | ||
10391 | { | 10415 | { |
10392 | #ifdef CONFIG_RT_GROUP_SCHED | 10416 | #ifdef CONFIG_RT_GROUP_SCHED |
10393 | if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) | 10417 | if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) |
@@ -10397,15 +10421,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | |||
10397 | if (tsk->sched_class != &fair_sched_class) | 10421 | if (tsk->sched_class != &fair_sched_class) |
10398 | return -EINVAL; | 10422 | return -EINVAL; |
10399 | #endif | 10423 | #endif |
10424 | return 0; | ||
10425 | } | ||
10400 | 10426 | ||
10427 | static int | ||
10428 | cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | ||
10429 | struct task_struct *tsk, bool threadgroup) | ||
10430 | { | ||
10431 | int retval = cpu_cgroup_can_attach_task(cgrp, tsk); | ||
10432 | if (retval) | ||
10433 | return retval; | ||
10434 | if (threadgroup) { | ||
10435 | struct task_struct *c; | ||
10436 | rcu_read_lock(); | ||
10437 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | ||
10438 | retval = cpu_cgroup_can_attach_task(cgrp, c); | ||
10439 | if (retval) { | ||
10440 | rcu_read_unlock(); | ||
10441 | return retval; | ||
10442 | } | ||
10443 | } | ||
10444 | rcu_read_unlock(); | ||
10445 | } | ||
10401 | return 0; | 10446 | return 0; |
10402 | } | 10447 | } |
10403 | 10448 | ||
10404 | static void | 10449 | static void |
10405 | cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | 10450 | cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, |
10406 | struct cgroup *old_cont, struct task_struct *tsk) | 10451 | struct cgroup *old_cont, struct task_struct *tsk, |
10452 | bool threadgroup) | ||
10407 | { | 10453 | { |
10408 | sched_move_task(tsk); | 10454 | sched_move_task(tsk); |
10455 | if (threadgroup) { | ||
10456 | struct task_struct *c; | ||
10457 | rcu_read_lock(); | ||
10458 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | ||
10459 | sched_move_task(c); | ||
10460 | } | ||
10461 | rcu_read_unlock(); | ||
10462 | } | ||
10409 | } | 10463 | } |
10410 | 10464 | ||
10411 | #ifdef CONFIG_FAIR_GROUP_SCHED | 10465 | #ifdef CONFIG_FAIR_GROUP_SCHED |
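The cpu cgroup callbacks above now take the new bool threadgroup argument: when it is set, can_attach vets every thread in the group and attach moves them all, not just the leader. A minimal sketch of the vetting pattern as used in the hunk (cpu_cgroup_can_attach_task() is the per-task helper shown above; recovery from a partially-moved group is not attempted, matching the original):

	/* sketch: vetting a whole thread group under RCU before attach */
	struct task_struct *c;
	int retval = cpu_cgroup_can_attach_task(cgrp, tsk);

	if (retval)
		return retval;

	rcu_read_lock();
	list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
		retval = cpu_cgroup_can_attach_task(cgrp, c);
		if (retval)
			break;		/* the first refusal vetoes the attach */
	}
	rcu_read_unlock();

The attach side mirrors this: sched_move_task() is called for the leader and then, under the same RCU walk, for each remaining thread in the group.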
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index e1d16c9a7680..479ce5682d7c 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c | |||
@@ -48,13 +48,6 @@ static __read_mostly int sched_clock_running; | |||
48 | __read_mostly int sched_clock_stable; | 48 | __read_mostly int sched_clock_stable; |
49 | 49 | ||
50 | struct sched_clock_data { | 50 | struct sched_clock_data { |
51 | /* | ||
52 | * Raw spinlock - this is a special case: this might be called | ||
53 | * from within instrumentation code so we dont want to do any | ||
54 | * instrumentation ourselves. | ||
55 | */ | ||
56 | raw_spinlock_t lock; | ||
57 | |||
58 | u64 tick_raw; | 51 | u64 tick_raw; |
59 | u64 tick_gtod; | 52 | u64 tick_gtod; |
60 | u64 clock; | 53 | u64 clock; |
@@ -80,7 +73,6 @@ void sched_clock_init(void) | |||
80 | for_each_possible_cpu(cpu) { | 73 | for_each_possible_cpu(cpu) { |
81 | struct sched_clock_data *scd = cpu_sdc(cpu); | 74 | struct sched_clock_data *scd = cpu_sdc(cpu); |
82 | 75 | ||
83 | scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
84 | scd->tick_raw = 0; | 76 | scd->tick_raw = 0; |
85 | scd->tick_gtod = ktime_now; | 77 | scd->tick_gtod = ktime_now; |
86 | scd->clock = ktime_now; | 78 | scd->clock = ktime_now; |
@@ -109,14 +101,19 @@ static inline u64 wrap_max(u64 x, u64 y) | |||
109 | * - filter out backward motion | 101 | * - filter out backward motion |
110 | * - use the GTOD tick value to create a window to filter crazy TSC values | 102 | * - use the GTOD tick value to create a window to filter crazy TSC values |
111 | */ | 103 | */ |
112 | static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) | 104 | static u64 sched_clock_local(struct sched_clock_data *scd) |
113 | { | 105 | { |
114 | s64 delta = now - scd->tick_raw; | 106 | u64 now, clock, old_clock, min_clock, max_clock; |
115 | u64 clock, min_clock, max_clock; | 107 | s64 delta; |
116 | 108 | ||
109 | again: | ||
110 | now = sched_clock(); | ||
111 | delta = now - scd->tick_raw; | ||
117 | if (unlikely(delta < 0)) | 112 | if (unlikely(delta < 0)) |
118 | delta = 0; | 113 | delta = 0; |
119 | 114 | ||
115 | old_clock = scd->clock; | ||
116 | |||
120 | /* | 117 | /* |
121 | * scd->clock = clamp(scd->tick_gtod + delta, | 118 | * scd->clock = clamp(scd->tick_gtod + delta, |
122 | * max(scd->tick_gtod, scd->clock), | 119 | * max(scd->tick_gtod, scd->clock), |
@@ -124,84 +121,73 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) | |||
124 | */ | 121 | */ |
125 | 122 | ||
126 | clock = scd->tick_gtod + delta; | 123 | clock = scd->tick_gtod + delta; |
127 | min_clock = wrap_max(scd->tick_gtod, scd->clock); | 124 | min_clock = wrap_max(scd->tick_gtod, old_clock); |
128 | max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC); | 125 | max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC); |
129 | 126 | ||
130 | clock = wrap_max(clock, min_clock); | 127 | clock = wrap_max(clock, min_clock); |
131 | clock = wrap_min(clock, max_clock); | 128 | clock = wrap_min(clock, max_clock); |
132 | 129 | ||
133 | scd->clock = clock; | 130 | if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock) |
131 | goto again; | ||
134 | 132 | ||
135 | return scd->clock; | 133 | return clock; |
136 | } | 134 | } |
137 | 135 | ||
138 | static void lock_double_clock(struct sched_clock_data *data1, | 136 | static u64 sched_clock_remote(struct sched_clock_data *scd) |
139 | struct sched_clock_data *data2) | ||
140 | { | 137 | { |
141 | if (data1 < data2) { | 138 | struct sched_clock_data *my_scd = this_scd(); |
142 | __raw_spin_lock(&data1->lock); | 139 | u64 this_clock, remote_clock; |
143 | __raw_spin_lock(&data2->lock); | 140 | u64 *ptr, old_val, val; |
141 | |||
142 | sched_clock_local(my_scd); | ||
143 | again: | ||
144 | this_clock = my_scd->clock; | ||
145 | remote_clock = scd->clock; | ||
146 | |||
147 | /* | ||
148 | * Use the opportunity that we have both locks | ||
149 | * taken to couple the two clocks: we take the | ||
150 | * larger time as the latest time for both | ||
151 | * runqueues. (this creates monotonic movement) | ||
152 | */ | ||
153 | if (likely((s64)(remote_clock - this_clock) < 0)) { | ||
154 | ptr = &scd->clock; | ||
155 | old_val = remote_clock; | ||
156 | val = this_clock; | ||
144 | } else { | 157 | } else { |
145 | __raw_spin_lock(&data2->lock); | 158 | /* |
146 | __raw_spin_lock(&data1->lock); | 159 | * Should be rare, but possible: |
160 | */ | ||
161 | ptr = &my_scd->clock; | ||
162 | old_val = this_clock; | ||
163 | val = remote_clock; | ||
147 | } | 164 | } |
165 | |||
166 | if (cmpxchg64(ptr, old_val, val) != old_val) | ||
167 | goto again; | ||
168 | |||
169 | return val; | ||
148 | } | 170 | } |
149 | 171 | ||
150 | u64 sched_clock_cpu(int cpu) | 172 | u64 sched_clock_cpu(int cpu) |
151 | { | 173 | { |
152 | u64 now, clock, this_clock, remote_clock; | ||
153 | struct sched_clock_data *scd; | 174 | struct sched_clock_data *scd; |
175 | u64 clock; | ||
176 | |||
177 | WARN_ON_ONCE(!irqs_disabled()); | ||
154 | 178 | ||
155 | if (sched_clock_stable) | 179 | if (sched_clock_stable) |
156 | return sched_clock(); | 180 | return sched_clock(); |
157 | 181 | ||
158 | scd = cpu_sdc(cpu); | ||
159 | |||
160 | /* | ||
161 | * Normally this is not called in NMI context - but if it is, | ||
162 | * trying to do any locking here is totally lethal. | ||
163 | */ | ||
164 | if (unlikely(in_nmi())) | ||
165 | return scd->clock; | ||
166 | |||
167 | if (unlikely(!sched_clock_running)) | 182 | if (unlikely(!sched_clock_running)) |
168 | return 0ull; | 183 | return 0ull; |
169 | 184 | ||
170 | WARN_ON_ONCE(!irqs_disabled()); | 185 | scd = cpu_sdc(cpu); |
171 | now = sched_clock(); | ||
172 | |||
173 | if (cpu != raw_smp_processor_id()) { | ||
174 | struct sched_clock_data *my_scd = this_scd(); | ||
175 | |||
176 | lock_double_clock(scd, my_scd); | ||
177 | |||
178 | this_clock = __update_sched_clock(my_scd, now); | ||
179 | remote_clock = scd->clock; | ||
180 | |||
181 | /* | ||
182 | * Use the opportunity that we have both locks | ||
183 | * taken to couple the two clocks: we take the | ||
184 | * larger time as the latest time for both | ||
185 | * runqueues. (this creates monotonic movement) | ||
186 | */ | ||
187 | if (likely((s64)(remote_clock - this_clock) < 0)) { | ||
188 | clock = this_clock; | ||
189 | scd->clock = clock; | ||
190 | } else { | ||
191 | /* | ||
192 | * Should be rare, but possible: | ||
193 | */ | ||
194 | clock = remote_clock; | ||
195 | my_scd->clock = remote_clock; | ||
196 | } | ||
197 | |||
198 | __raw_spin_unlock(&my_scd->lock); | ||
199 | } else { | ||
200 | __raw_spin_lock(&scd->lock); | ||
201 | clock = __update_sched_clock(scd, now); | ||
202 | } | ||
203 | 186 | ||
204 | __raw_spin_unlock(&scd->lock); | 187 | if (cpu != smp_processor_id()) |
188 | clock = sched_clock_remote(scd); | ||
189 | else | ||
190 | clock = sched_clock_local(scd); | ||
205 | 191 | ||
206 | return clock; | 192 | return clock; |
207 | } | 193 | } |
@@ -223,11 +209,9 @@ void sched_clock_tick(void) | |||
223 | now_gtod = ktime_to_ns(ktime_get()); | 209 | now_gtod = ktime_to_ns(ktime_get()); |
224 | now = sched_clock(); | 210 | now = sched_clock(); |
225 | 211 | ||
226 | __raw_spin_lock(&scd->lock); | ||
227 | scd->tick_raw = now; | 212 | scd->tick_raw = now; |
228 | scd->tick_gtod = now_gtod; | 213 | scd->tick_gtod = now_gtod; |
229 | __update_sched_clock(scd, now); | 214 | sched_clock_local(scd); |
230 | __raw_spin_unlock(&scd->lock); | ||
231 | } | 215 | } |
232 | 216 | ||
233 | /* | 217 | /* |
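sched_clock_cpu() no longer takes a per-cpu raw spinlock (or a double lock for the cross-CPU case): both sched_clock_local() and sched_clock_remote() publish a new clock value with cmpxchg64() and simply retry if another CPU raced them. A stripped-down sketch of that retry pattern, assuming the struct sched_clock_data fields shown above; compute_clamped_clock() is a hypothetical stand-in for the tick_gtod/TICK_NSEC clamping that sched_clock_local() does inline:

	u64 old_clock, clock;

	do {
		old_clock = scd->clock;

		/* derive the candidate from sched_clock() and the GTOD window */
		clock = compute_clamped_clock(scd, old_clock);	/* hypothetical helper */

		/* publish only if nobody advanced scd->clock in the meantime */
	} while (cmpxchg64(&scd->clock, old_clock, clock) != old_clock);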
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 10d218ab69f2..37087a7fac22 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -384,10 +384,10 @@ static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) | |||
384 | 384 | ||
385 | #ifdef CONFIG_SCHED_DEBUG | 385 | #ifdef CONFIG_SCHED_DEBUG |
386 | int sched_nr_latency_handler(struct ctl_table *table, int write, | 386 | int sched_nr_latency_handler(struct ctl_table *table, int write, |
387 | struct file *filp, void __user *buffer, size_t *lenp, | 387 | void __user *buffer, size_t *lenp, |
388 | loff_t *ppos) | 388 | loff_t *ppos) |
389 | { | 389 | { |
390 | int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); | 390 | int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
391 | 391 | ||
392 | if (ret || !write) | 392 | if (ret || !write) |
393 | return ret; | 393 | return ret; |
@@ -513,6 +513,7 @@ static void update_curr(struct cfs_rq *cfs_rq) | |||
513 | if (entity_is_task(curr)) { | 513 | if (entity_is_task(curr)) { |
514 | struct task_struct *curtask = task_of(curr); | 514 | struct task_struct *curtask = task_of(curr); |
515 | 515 | ||
516 | trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); | ||
516 | cpuacct_charge(curtask, delta_exec); | 517 | cpuacct_charge(curtask, delta_exec); |
517 | account_group_exec_runtime(curtask, delta_exec); | 518 | account_group_exec_runtime(curtask, delta_exec); |
518 | } | 519 | } |
@@ -709,31 +710,28 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) | |||
709 | if (initial && sched_feat(START_DEBIT)) | 710 | if (initial && sched_feat(START_DEBIT)) |
710 | vruntime += sched_vslice(cfs_rq, se); | 711 | vruntime += sched_vslice(cfs_rq, se); |
711 | 712 | ||
712 | if (!initial) { | 713 | /* sleeps up to a single latency don't count. */ |
713 | /* sleeps upto a single latency don't count. */ | 714 | if (!initial && sched_feat(FAIR_SLEEPERS)) { |
714 | if (sched_feat(FAIR_SLEEPERS)) { | 715 | unsigned long thresh = sysctl_sched_latency; |
715 | unsigned long thresh = sysctl_sched_latency; | ||
716 | 716 | ||
717 | /* | 717 | /* |
718 | * Convert the sleeper threshold into virtual time. | 718 | * Convert the sleeper threshold into virtual time. |
719 | * SCHED_IDLE is a special sub-class. We care about | 719 | * SCHED_IDLE is a special sub-class. We care about |
720 | * fairness only relative to other SCHED_IDLE tasks, | 720 | * fairness only relative to other SCHED_IDLE tasks, |
721 | * all of which have the same weight. | 721 | * all of which have the same weight. |
722 | */ | 722 | */ |
723 | if (sched_feat(NORMALIZED_SLEEPER) && | 723 | if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) || |
724 | (!entity_is_task(se) || | 724 | task_of(se)->policy != SCHED_IDLE)) |
725 | task_of(se)->policy != SCHED_IDLE)) | 725 | thresh = calc_delta_fair(thresh, se); |
726 | thresh = calc_delta_fair(thresh, se); | ||
727 | 726 | ||
728 | /* | 727 | /* |
729 | * Halve their sleep time's effect, to allow | 728 | * Halve their sleep time's effect, to allow |
730 | * for a gentler effect of sleepers: | 729 | * for a gentler effect of sleepers: |
731 | */ | 730 | */ |
732 | if (sched_feat(GENTLE_FAIR_SLEEPERS)) | 731 | if (sched_feat(GENTLE_FAIR_SLEEPERS)) |
733 | thresh >>= 1; | 732 | thresh >>= 1; |
734 | 733 | ||
735 | vruntime -= thresh; | 734 | vruntime -= thresh; |
736 | } | ||
737 | } | 735 | } |
738 | 736 | ||
739 | /* ensure we never gain time by being placed backwards. */ | 737 | /* ensure we never gain time by being placed backwards. */ |
@@ -824,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | |||
824 | * re-elected due to buddy favours. | 822 | * re-elected due to buddy favours. |
825 | */ | 823 | */ |
826 | clear_buddies(cfs_rq, curr); | 824 | clear_buddies(cfs_rq, curr); |
825 | return; | ||
826 | } | ||
827 | |||
828 | /* | ||
829 | * Ensure that a task that missed wakeup preemption by a | ||
830 | * narrow margin doesn't have to wait for a full slice. | ||
831 | * This also mitigates buddy induced latencies under load. | ||
832 | */ | ||
833 | if (!sched_feat(WAKEUP_PREEMPT)) | ||
834 | return; | ||
835 | |||
836 | if (delta_exec < sysctl_sched_min_granularity) | ||
837 | return; | ||
838 | |||
839 | if (cfs_rq->nr_running > 1) { | ||
840 | struct sched_entity *se = __pick_next_entity(cfs_rq); | ||
841 | s64 delta = curr->vruntime - se->vruntime; | ||
842 | |||
843 | if (delta > ideal_runtime) | ||
844 | resched_task(rq_of(cfs_rq)->curr); | ||
827 | } | 845 | } |
828 | } | 846 | } |
829 | 847 | ||
@@ -863,12 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); | |||
863 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) | 881 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) |
864 | { | 882 | { |
865 | struct sched_entity *se = __pick_next_entity(cfs_rq); | 883 | struct sched_entity *se = __pick_next_entity(cfs_rq); |
884 | struct sched_entity *left = se; | ||
866 | 885 | ||
867 | if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1) | 886 | if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) |
868 | return cfs_rq->next; | 887 | se = cfs_rq->next; |
888 | |||
889 | /* | ||
890 | * Prefer last buddy, try to return the CPU to a preempted task. | ||
891 | */ | ||
892 | if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) | ||
893 | se = cfs_rq->last; | ||
869 | 894 | ||
870 | if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1) | 895 | clear_buddies(cfs_rq, se); |
871 | return cfs_rq->last; | ||
872 | 896 | ||
873 | return se; | 897 | return se; |
874 | } | 898 | } |
@@ -1342,7 +1366,8 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag | |||
1342 | int sync = wake_flags & WF_SYNC; | 1366 | int sync = wake_flags & WF_SYNC; |
1343 | 1367 | ||
1344 | if (sd_flag & SD_BALANCE_WAKE) { | 1368 | if (sd_flag & SD_BALANCE_WAKE) { |
1345 | if (sched_feat(AFFINE_WAKEUPS)) | 1369 | if (sched_feat(AFFINE_WAKEUPS) && |
1370 | cpumask_test_cpu(cpu, &p->cpus_allowed)) | ||
1346 | want_affine = 1; | 1371 | want_affine = 1; |
1347 | new_cpu = prev_cpu; | 1372 | new_cpu = prev_cpu; |
1348 | } | 1373 | } |
@@ -1569,6 +1594,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
1569 | struct sched_entity *se = &curr->se, *pse = &p->se; | 1594 | struct sched_entity *se = &curr->se, *pse = &p->se; |
1570 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 1595 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
1571 | int sync = wake_flags & WF_SYNC; | 1596 | int sync = wake_flags & WF_SYNC; |
1597 | int scale = cfs_rq->nr_running >= sched_nr_latency; | ||
1572 | 1598 | ||
1573 | update_curr(cfs_rq); | 1599 | update_curr(cfs_rq); |
1574 | 1600 | ||
@@ -1583,18 +1609,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
1583 | if (unlikely(se == pse)) | 1609 | if (unlikely(se == pse)) |
1584 | return; | 1610 | return; |
1585 | 1611 | ||
1586 | /* | 1612 | if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) |
1587 | * Only set the backward buddy when the current task is still on the | ||
1588 | * rq. This can happen when a wakeup gets interleaved with schedule on | ||
1589 | * the ->pre_schedule() or idle_balance() point, either of which can | ||
1590 | * drop the rq lock. | ||
1591 | * | ||
1592 | * Also, during early boot the idle thread is in the fair class, for | ||
1593 | * obvious reasons its a bad idea to schedule back to the idle thread. | ||
1594 | */ | ||
1595 | if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle)) | ||
1596 | set_last_buddy(se); | ||
1597 | if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) | ||
1598 | set_next_buddy(pse); | 1613 | set_next_buddy(pse); |
1599 | 1614 | ||
1600 | /* | 1615 | /* |
@@ -1640,8 +1655,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
1640 | 1655 | ||
1641 | BUG_ON(!pse); | 1656 | BUG_ON(!pse); |
1642 | 1657 | ||
1643 | if (wakeup_preempt_entity(se, pse) == 1) | 1658 | if (wakeup_preempt_entity(se, pse) == 1) { |
1644 | resched_task(curr); | 1659 | resched_task(curr); |
1660 | /* | ||
1661 | * Only set the backward buddy when the current task is still | ||
1662 | * on the rq. This can happen when a wakeup gets interleaved | ||
1663 | * with schedule on the ->pre_schedule() or idle_balance() | ||
1664 | * point, either of which can * drop the rq lock. | ||
1665 | * | ||
1666 | * Also, during early boot the idle thread is in the fair class, | ||
1667 | * for obvious reasons its a bad idea to schedule back to it. | ||
1668 | */ | ||
1669 | if (unlikely(!se->on_rq || curr == rq->idle)) | ||
1670 | return; | ||
1671 | if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) | ||
1672 | set_last_buddy(se); | ||
1673 | } | ||
1645 | } | 1674 | } |
1646 | 1675 | ||
1647 | static struct task_struct *pick_next_task_fair(struct rq *rq) | 1676 | static struct task_struct *pick_next_task_fair(struct rq *rq) |
@@ -1655,16 +1684,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq) | |||
1655 | 1684 | ||
1656 | do { | 1685 | do { |
1657 | se = pick_next_entity(cfs_rq); | 1686 | se = pick_next_entity(cfs_rq); |
1658 | /* | ||
1659 | * If se was a buddy, clear it so that it will have to earn | ||
1660 | * the favour again. | ||
1661 | * | ||
1662 | * If se was not a buddy, clear the buddies because neither | ||
1663 | * was elegible to run, let them earn it again. | ||
1664 | * | ||
1665 | * IOW. unconditionally clear buddies. | ||
1666 | */ | ||
1667 | __clear_buddies(cfs_rq, NULL); | ||
1668 | set_next_entity(cfs_rq, se); | 1687 | set_next_entity(cfs_rq, se); |
1669 | cfs_rq = group_cfs_rq(se); | 1688 | cfs_rq = group_cfs_rq(se); |
1670 | } while (cfs_rq); | 1689 | } while (cfs_rq); |
@@ -1940,6 +1959,25 @@ static void moved_group_fair(struct task_struct *p) | |||
1940 | } | 1959 | } |
1941 | #endif | 1960 | #endif |
1942 | 1961 | ||
1962 | unsigned int get_rr_interval_fair(struct task_struct *task) | ||
1963 | { | ||
1964 | struct sched_entity *se = &task->se; | ||
1965 | unsigned long flags; | ||
1966 | struct rq *rq; | ||
1967 | unsigned int rr_interval = 0; | ||
1968 | |||
1969 | /* | ||
1970 | * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise | ||
1971 | * idle runqueue: | ||
1972 | */ | ||
1973 | rq = task_rq_lock(task, &flags); | ||
1974 | if (rq->cfs.load.weight) | ||
1975 | rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); | ||
1976 | task_rq_unlock(rq, &flags); | ||
1977 | |||
1978 | return rr_interval; | ||
1979 | } | ||
1980 | |||
1943 | /* | 1981 | /* |
1944 | * All the scheduling class methods: | 1982 | * All the scheduling class methods: |
1945 | */ | 1983 | */ |
@@ -1968,6 +2006,8 @@ static const struct sched_class fair_sched_class = { | |||
1968 | .prio_changed = prio_changed_fair, | 2006 | .prio_changed = prio_changed_fair, |
1969 | .switched_to = switched_to_fair, | 2007 | .switched_to = switched_to_fair, |
1970 | 2008 | ||
2009 | .get_rr_interval = get_rr_interval_fair, | ||
2010 | |||
1971 | #ifdef CONFIG_FAIR_GROUP_SCHED | 2011 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1972 | .moved_group = moved_group_fair, | 2012 | .moved_group = moved_group_fair, |
1973 | #endif | 2013 | #endif |
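With this series, sys_sched_rr_get_interval() (see the kernel/sched.c hunk earlier in this diff) stops open-coding per-policy timeslice rules and asks the task's class through the new ->get_rr_interval() hook; get_rr_interval_fair() above, and the rt and idle variants below, supply the per-class answers. A short sketch of the consumer side, assuming only the hook signature shown here:

	/* sketch: the syscall path after the refactor */
	struct timespec t;
	unsigned int time_slice;

	time_slice = p->sched_class->get_rr_interval(p);
	jiffies_to_timespec(time_slice, &t);
	/*
	 * SCHED_FIFO ends up with 0, SCHED_RR with DEF_TIMESLICE, and
	 * SCHED_OTHER with its CFS slice (or 0 on an otherwise idle rq).
	 */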
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c index a8b448af004b..b133a28fcde3 100644 --- a/kernel/sched_idletask.c +++ b/kernel/sched_idletask.c | |||
@@ -97,6 +97,11 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p, | |||
97 | check_preempt_curr(rq, p, 0); | 97 | check_preempt_curr(rq, p, 0); |
98 | } | 98 | } |
99 | 99 | ||
100 | unsigned int get_rr_interval_idle(struct task_struct *task) | ||
101 | { | ||
102 | return 0; | ||
103 | } | ||
104 | |||
100 | /* | 105 | /* |
101 | * Simple, special scheduling class for the per-CPU idle tasks: | 106 | * Simple, special scheduling class for the per-CPU idle tasks: |
102 | */ | 107 | */ |
@@ -122,6 +127,8 @@ static const struct sched_class idle_sched_class = { | |||
122 | .set_curr_task = set_curr_task_idle, | 127 | .set_curr_task = set_curr_task_idle, |
123 | .task_tick = task_tick_idle, | 128 | .task_tick = task_tick_idle, |
124 | 129 | ||
130 | .get_rr_interval = get_rr_interval_idle, | ||
131 | |||
125 | .prio_changed = prio_changed_idle, | 132 | .prio_changed = prio_changed_idle, |
126 | .switched_to = switched_to_idle, | 133 | .switched_to = switched_to_idle, |
127 | 134 | ||
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 13de7126a6ab..a4d790cddb19 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -1734,6 +1734,17 @@ static void set_curr_task_rt(struct rq *rq) | |||
1734 | dequeue_pushable_task(rq, p); | 1734 | dequeue_pushable_task(rq, p); |
1735 | } | 1735 | } |
1736 | 1736 | ||
1737 | unsigned int get_rr_interval_rt(struct task_struct *task) | ||
1738 | { | ||
1739 | /* | ||
1740 | * Time slice is 0 for SCHED_FIFO tasks | ||
1741 | */ | ||
1742 | if (task->policy == SCHED_RR) | ||
1743 | return DEF_TIMESLICE; | ||
1744 | else | ||
1745 | return 0; | ||
1746 | } | ||
1747 | |||
1737 | static const struct sched_class rt_sched_class = { | 1748 | static const struct sched_class rt_sched_class = { |
1738 | .next = &fair_sched_class, | 1749 | .next = &fair_sched_class, |
1739 | .enqueue_task = enqueue_task_rt, | 1750 | .enqueue_task = enqueue_task_rt, |
@@ -1762,6 +1773,8 @@ static const struct sched_class rt_sched_class = { | |||
1762 | .set_curr_task = set_curr_task_rt, | 1773 | .set_curr_task = set_curr_task_rt, |
1763 | .task_tick = task_tick_rt, | 1774 | .task_tick = task_tick_rt, |
1764 | 1775 | ||
1776 | .get_rr_interval = get_rr_interval_rt, | ||
1777 | |||
1765 | .prio_changed = prio_changed_rt, | 1778 | .prio_changed = prio_changed_rt, |
1766 | .switched_to = switched_to_rt, | 1779 | .switched_to = switched_to_rt, |
1767 | }; | 1780 | }; |
diff --git a/kernel/signal.c b/kernel/signal.c index 64c5deeaca5d..6705320784fd 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -705,7 +705,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) | |||
705 | 705 | ||
706 | if (why) { | 706 | if (why) { |
707 | /* | 707 | /* |
708 | * The first thread which returns from finish_stop() | 708 | * The first thread which returns from do_signal_stop() |
709 | * will take ->siglock, notice SIGNAL_CLD_MASK, and | 709 | * will take ->siglock, notice SIGNAL_CLD_MASK, and |
710 | * notify its parent. See get_signal_to_deliver(). | 710 | * notify its parent. See get_signal_to_deliver(). |
711 | */ | 711 | */ |
@@ -971,6 +971,20 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) | |||
971 | return send_signal(sig, info, t, 0); | 971 | return send_signal(sig, info, t, 0); |
972 | } | 972 | } |
973 | 973 | ||
974 | int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, | ||
975 | bool group) | ||
976 | { | ||
977 | unsigned long flags; | ||
978 | int ret = -ESRCH; | ||
979 | |||
980 | if (lock_task_sighand(p, &flags)) { | ||
981 | ret = send_signal(sig, info, p, group); | ||
982 | unlock_task_sighand(p, &flags); | ||
983 | } | ||
984 | |||
985 | return ret; | ||
986 | } | ||
987 | |||
974 | /* | 988 | /* |
975 | * Force a signal that the process can't ignore: if necessary | 989 | * Force a signal that the process can't ignore: if necessary |
976 | * we unblock the signal and change any SIG_IGN to SIG_DFL. | 990 | * we unblock the signal and change any SIG_IGN to SIG_DFL. |
@@ -1036,12 +1050,6 @@ void zap_other_threads(struct task_struct *p) | |||
1036 | } | 1050 | } |
1037 | } | 1051 | } |
1038 | 1052 | ||
1039 | int __fatal_signal_pending(struct task_struct *tsk) | ||
1040 | { | ||
1041 | return sigismember(&tsk->pending.signal, SIGKILL); | ||
1042 | } | ||
1043 | EXPORT_SYMBOL(__fatal_signal_pending); | ||
1044 | |||
1045 | struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) | 1053 | struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) |
1046 | { | 1054 | { |
1047 | struct sighand_struct *sighand; | 1055 | struct sighand_struct *sighand; |
@@ -1068,18 +1076,10 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long | |||
1068 | */ | 1076 | */ |
1069 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1077 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1070 | { | 1078 | { |
1071 | unsigned long flags; | 1079 | int ret = check_kill_permission(sig, info, p); |
1072 | int ret; | ||
1073 | 1080 | ||
1074 | ret = check_kill_permission(sig, info, p); | 1081 | if (!ret && sig) |
1075 | 1082 | ret = do_send_sig_info(sig, info, p, true); | |
1076 | if (!ret && sig) { | ||
1077 | ret = -ESRCH; | ||
1078 | if (lock_task_sighand(p, &flags)) { | ||
1079 | ret = __group_send_sig_info(sig, info, p); | ||
1080 | unlock_task_sighand(p, &flags); | ||
1081 | } | ||
1082 | } | ||
1083 | 1083 | ||
1084 | return ret; | 1084 | return ret; |
1085 | } | 1085 | } |
@@ -1224,15 +1224,9 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid) | |||
1224 | * These are for backward compatibility with the rest of the kernel source. | 1224 | * These are for backward compatibility with the rest of the kernel source. |
1225 | */ | 1225 | */ |
1226 | 1226 | ||
1227 | /* | ||
1228 | * The caller must ensure the task can't exit. | ||
1229 | */ | ||
1230 | int | 1227 | int |
1231 | send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1228 | send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1232 | { | 1229 | { |
1233 | int ret; | ||
1234 | unsigned long flags; | ||
1235 | |||
1236 | /* | 1230 | /* |
1237 | * Make sure legacy kernel users don't send in bad values | 1231 | * Make sure legacy kernel users don't send in bad values |
1238 | * (normal paths check this in check_kill_permission). | 1232 | * (normal paths check this in check_kill_permission). |
@@ -1240,10 +1234,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | |||
1240 | if (!valid_signal(sig)) | 1234 | if (!valid_signal(sig)) |
1241 | return -EINVAL; | 1235 | return -EINVAL; |
1242 | 1236 | ||
1243 | spin_lock_irqsave(&p->sighand->siglock, flags); | 1237 | return do_send_sig_info(sig, info, p, false); |
1244 | ret = specific_send_sig_info(sig, info, p); | ||
1245 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | ||
1246 | return ret; | ||
1247 | } | 1238 | } |
1248 | 1239 | ||
1249 | #define __si_special(priv) \ | 1240 | #define __si_special(priv) \ |
@@ -1383,15 +1374,6 @@ ret: | |||
1383 | } | 1374 | } |
1384 | 1375 | ||
1385 | /* | 1376 | /* |
1386 | * Wake up any threads in the parent blocked in wait* syscalls. | ||
1387 | */ | ||
1388 | static inline void __wake_up_parent(struct task_struct *p, | ||
1389 | struct task_struct *parent) | ||
1390 | { | ||
1391 | wake_up_interruptible_sync(&parent->signal->wait_chldexit); | ||
1392 | } | ||
1393 | |||
1394 | /* | ||
1395 | * Let a parent know about the death of a child. | 1377 | * Let a parent know about the death of a child. |
1396 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. | 1378 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. |
1397 | * | 1379 | * |
@@ -1673,29 +1655,6 @@ void ptrace_notify(int exit_code) | |||
1673 | spin_unlock_irq(&current->sighand->siglock); | 1655 | spin_unlock_irq(&current->sighand->siglock); |
1674 | } | 1656 | } |
1675 | 1657 | ||
1676 | static void | ||
1677 | finish_stop(int stop_count) | ||
1678 | { | ||
1679 | /* | ||
1680 | * If there are no other threads in the group, or if there is | ||
1681 | * a group stop in progress and we are the last to stop, | ||
1682 | * report to the parent. When ptraced, every thread reports itself. | ||
1683 | */ | ||
1684 | if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) { | ||
1685 | read_lock(&tasklist_lock); | ||
1686 | do_notify_parent_cldstop(current, CLD_STOPPED); | ||
1687 | read_unlock(&tasklist_lock); | ||
1688 | } | ||
1689 | |||
1690 | do { | ||
1691 | schedule(); | ||
1692 | } while (try_to_freeze()); | ||
1693 | /* | ||
1694 | * Now we don't run again until continued. | ||
1695 | */ | ||
1696 | current->exit_code = 0; | ||
1697 | } | ||
1698 | |||
1699 | /* | 1658 | /* |
1700 | * This performs the stopping for SIGSTOP and other stop signals. | 1659 | * This performs the stopping for SIGSTOP and other stop signals. |
1701 | * We have to stop all threads in the thread group. | 1660 | * We have to stop all threads in the thread group. |
@@ -1705,15 +1664,9 @@ finish_stop(int stop_count) | |||
1705 | static int do_signal_stop(int signr) | 1664 | static int do_signal_stop(int signr) |
1706 | { | 1665 | { |
1707 | struct signal_struct *sig = current->signal; | 1666 | struct signal_struct *sig = current->signal; |
1708 | int stop_count; | 1667 | int notify; |
1709 | 1668 | ||
1710 | if (sig->group_stop_count > 0) { | 1669 | if (!sig->group_stop_count) { |
1711 | /* | ||
1712 | * There is a group stop in progress. We don't need to | ||
1713 | * start another one. | ||
1714 | */ | ||
1715 | stop_count = --sig->group_stop_count; | ||
1716 | } else { | ||
1717 | struct task_struct *t; | 1670 | struct task_struct *t; |
1718 | 1671 | ||
1719 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || | 1672 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || |
@@ -1725,7 +1678,7 @@ static int do_signal_stop(int signr) | |||
1725 | */ | 1678 | */ |
1726 | sig->group_exit_code = signr; | 1679 | sig->group_exit_code = signr; |
1727 | 1680 | ||
1728 | stop_count = 0; | 1681 | sig->group_stop_count = 1; |
1729 | for (t = next_thread(current); t != current; t = next_thread(t)) | 1682 | for (t = next_thread(current); t != current; t = next_thread(t)) |
1730 | /* | 1683 | /* |
1731 | * Setting state to TASK_STOPPED for a group | 1684 | * Setting state to TASK_STOPPED for a group |
@@ -1734,19 +1687,44 @@ static int do_signal_stop(int signr) | |||
1734 | */ | 1687 | */ |
1735 | if (!(t->flags & PF_EXITING) && | 1688 | if (!(t->flags & PF_EXITING) && |
1736 | !task_is_stopped_or_traced(t)) { | 1689 | !task_is_stopped_or_traced(t)) { |
1737 | stop_count++; | 1690 | sig->group_stop_count++; |
1738 | signal_wake_up(t, 0); | 1691 | signal_wake_up(t, 0); |
1739 | } | 1692 | } |
1740 | sig->group_stop_count = stop_count; | ||
1741 | } | 1693 | } |
1694 | /* | ||
1695 | * If there are no other threads in the group, or if there is | ||
1696 | * a group stop in progress and we are the last to stop, report | ||
1697 | * to the parent. When ptraced, every thread reports itself. | ||
1698 | */ | ||
1699 | notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0; | ||
1700 | notify = tracehook_notify_jctl(notify, CLD_STOPPED); | ||
1701 | /* | ||
1702 | * tracehook_notify_jctl() can drop and reacquire siglock, so | ||
1703 | * we keep ->group_stop_count != 0 before the call. If SIGCONT | ||
1704 | * or SIGKILL comes in between ->group_stop_count == 0. | ||
1705 | */ | ||
1706 | if (sig->group_stop_count) { | ||
1707 | if (!--sig->group_stop_count) | ||
1708 | sig->flags = SIGNAL_STOP_STOPPED; | ||
1709 | current->exit_code = sig->group_exit_code; | ||
1710 | __set_current_state(TASK_STOPPED); | ||
1711 | } | ||
1712 | spin_unlock_irq(&current->sighand->siglock); | ||
1742 | 1713 | ||
1743 | if (stop_count == 0) | 1714 | if (notify) { |
1744 | sig->flags = SIGNAL_STOP_STOPPED; | 1715 | read_lock(&tasklist_lock); |
1745 | current->exit_code = sig->group_exit_code; | 1716 | do_notify_parent_cldstop(current, notify); |
1746 | __set_current_state(TASK_STOPPED); | 1717 | read_unlock(&tasklist_lock); |
1718 | } | ||
1719 | |||
1720 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | ||
1721 | do { | ||
1722 | schedule(); | ||
1723 | } while (try_to_freeze()); | ||
1724 | |||
1725 | tracehook_finish_jctl(); | ||
1726 | current->exit_code = 0; | ||
1747 | 1727 | ||
1748 | spin_unlock_irq(&current->sighand->siglock); | ||
1749 | finish_stop(stop_count); | ||
1750 | return 1; | 1728 | return 1; |
1751 | } | 1729 | } |
1752 | 1730 | ||
@@ -1815,14 +1793,15 @@ relock: | |||
1815 | int why = (signal->flags & SIGNAL_STOP_CONTINUED) | 1793 | int why = (signal->flags & SIGNAL_STOP_CONTINUED) |
1816 | ? CLD_CONTINUED : CLD_STOPPED; | 1794 | ? CLD_CONTINUED : CLD_STOPPED; |
1817 | signal->flags &= ~SIGNAL_CLD_MASK; | 1795 | signal->flags &= ~SIGNAL_CLD_MASK; |
1818 | spin_unlock_irq(&sighand->siglock); | ||
1819 | 1796 | ||
1820 | if (unlikely(!tracehook_notify_jctl(1, why))) | 1797 | why = tracehook_notify_jctl(why, CLD_CONTINUED); |
1821 | goto relock; | 1798 | spin_unlock_irq(&sighand->siglock); |
1822 | 1799 | ||
1823 | read_lock(&tasklist_lock); | 1800 | if (why) { |
1824 | do_notify_parent_cldstop(current->group_leader, why); | 1801 | read_lock(&tasklist_lock); |
1825 | read_unlock(&tasklist_lock); | 1802 | do_notify_parent_cldstop(current->group_leader, why); |
1803 | read_unlock(&tasklist_lock); | ||
1804 | } | ||
1826 | goto relock; | 1805 | goto relock; |
1827 | } | 1806 | } |
1828 | 1807 | ||
@@ -1987,14 +1966,14 @@ void exit_signals(struct task_struct *tsk) | |||
1987 | if (unlikely(tsk->signal->group_stop_count) && | 1966 | if (unlikely(tsk->signal->group_stop_count) && |
1988 | !--tsk->signal->group_stop_count) { | 1967 | !--tsk->signal->group_stop_count) { |
1989 | tsk->signal->flags = SIGNAL_STOP_STOPPED; | 1968 | tsk->signal->flags = SIGNAL_STOP_STOPPED; |
1990 | group_stop = 1; | 1969 | group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED); |
1991 | } | 1970 | } |
1992 | out: | 1971 | out: |
1993 | spin_unlock_irq(&tsk->sighand->siglock); | 1972 | spin_unlock_irq(&tsk->sighand->siglock); |
1994 | 1973 | ||
1995 | if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) { | 1974 | if (unlikely(group_stop)) { |
1996 | read_lock(&tasklist_lock); | 1975 | read_lock(&tasklist_lock); |
1997 | do_notify_parent_cldstop(tsk, CLD_STOPPED); | 1976 | do_notify_parent_cldstop(tsk, group_stop); |
1998 | read_unlock(&tasklist_lock); | 1977 | read_unlock(&tasklist_lock); |
1999 | } | 1978 | } |
2000 | } | 1979 | } |
@@ -2290,7 +2269,6 @@ static int | |||
2290 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) | 2269 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) |
2291 | { | 2270 | { |
2292 | struct task_struct *p; | 2271 | struct task_struct *p; |
2293 | unsigned long flags; | ||
2294 | int error = -ESRCH; | 2272 | int error = -ESRCH; |
2295 | 2273 | ||
2296 | rcu_read_lock(); | 2274 | rcu_read_lock(); |
@@ -2300,14 +2278,16 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) | |||
2300 | /* | 2278 | /* |
2301 | * The null signal is a permissions and process existence | 2279 | * The null signal is a permissions and process existence |
2302 | * probe. No signal is actually delivered. | 2280 | * probe. No signal is actually delivered. |
2303 | * | ||
2304 | * If lock_task_sighand() fails we pretend the task dies | ||
2305 | * after receiving the signal. The window is tiny, and the | ||
2306 | * signal is private anyway. | ||
2307 | */ | 2281 | */ |
2308 | if (!error && sig && lock_task_sighand(p, &flags)) { | 2282 | if (!error && sig) { |
2309 | error = specific_send_sig_info(sig, info, p); | 2283 | error = do_send_sig_info(sig, info, p, false); |
2310 | unlock_task_sighand(p, &flags); | 2284 | /* |
2285 | * If lock_task_sighand() failed we pretend the task | ||
2286 | * dies after receiving the signal. The window is tiny, | ||
2287 | * and the signal is private anyway. | ||
2288 | */ | ||
2289 | if (unlikely(error == -ESRCH)) | ||
2290 | error = 0; | ||
2311 | } | 2291 | } |
2312 | } | 2292 | } |
2313 | rcu_read_unlock(); | 2293 | rcu_read_unlock(); |
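The signal.c changes funnel group_send_sig_info(), send_sig_info() and do_send_specific() through the new do_send_sig_info() helper, which takes the sighand lock itself and delivers either to a single thread or to the whole group depending on the bool argument. A minimal usage sketch, assuming only the signature introduced in the hunk above:

	/* deliver to this task only (the old specific_send_sig_info() path) */
	ret = do_send_sig_info(sig, info, p, false);

	/* deliver to the whole thread group (the old __group_send_sig_info() path) */
	ret = do_send_sig_info(sig, info, p, true);

	/* -ESRCH means lock_task_sighand() failed, i.e. the task is already exiting */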
diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-proc.c new file mode 100644 index 000000000000..3988032571f5 --- /dev/null +++ b/kernel/slow-work-proc.c | |||
@@ -0,0 +1,227 @@ | |||
1 | /* Slow work debugging | ||
2 | * | ||
3 | * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/slow-work.h> | ||
14 | #include <linux/fs.h> | ||
15 | #include <linux/time.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include "slow-work.h" | ||
18 | |||
19 | #define ITERATOR_SHIFT (BITS_PER_LONG - 4) | ||
20 | #define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT) | ||
21 | #define ITERATOR_COUNTER (~ITERATOR_SELECTOR) | ||
22 | |||
23 | void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m) | ||
24 | { | ||
25 | seq_puts(m, "Slow-work: New thread"); | ||
26 | } | ||
27 | |||
28 | /* | ||
29 | * Render the time mark field on a work item into a 5-char time with units plus | ||
30 | * a space | ||
31 | */ | ||
32 | static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) | ||
33 | { | ||
34 | struct timespec now, diff; | ||
35 | |||
36 | now = CURRENT_TIME; | ||
37 | diff = timespec_sub(now, work->mark); | ||
38 | |||
39 | if (diff.tv_sec < 0) | ||
40 | seq_puts(m, " -ve "); | ||
41 | else if (diff.tv_sec == 0 && diff.tv_nsec < 1000) | ||
42 | seq_printf(m, "%3luns ", diff.tv_nsec); | ||
43 | else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000) | ||
44 | seq_printf(m, "%3luus ", diff.tv_nsec / 1000); | ||
45 | else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000) | ||
46 | seq_printf(m, "%3lums ", diff.tv_nsec / 1000000); | ||
47 | else if (diff.tv_sec <= 1) | ||
48 | seq_puts(m, " 1s "); | ||
49 | else if (diff.tv_sec < 60) | ||
50 | seq_printf(m, "%4lus ", diff.tv_sec); | ||
51 | else if (diff.tv_sec < 60 * 60) | ||
52 | seq_printf(m, "%4lum ", diff.tv_sec / 60); | ||
53 | else if (diff.tv_sec < 60 * 60 * 24) | ||
54 | seq_printf(m, "%4luh ", diff.tv_sec / 3600); | ||
55 | else | ||
56 | seq_puts(m, "exces "); | ||
57 | } | ||
58 | |||
59 | /* | ||
60 | * Describe a slow work item for /proc | ||
61 | */ | ||
62 | static int slow_work_runqueue_show(struct seq_file *m, void *v) | ||
63 | { | ||
64 | struct slow_work *work; | ||
65 | struct list_head *p = v; | ||
66 | unsigned long id; | ||
67 | |||
68 | switch ((unsigned long) v) { | ||
69 | case 1: | ||
70 | seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n"); | ||
71 | return 0; | ||
72 | case 2: | ||
73 | seq_puts(m, "=== ===== ================ == ===== ==========\n"); | ||
74 | return 0; | ||
75 | |||
76 | case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1: | ||
77 | id = (unsigned long) v - 3; | ||
78 | |||
79 | read_lock(&slow_work_execs_lock); | ||
80 | work = slow_work_execs[id]; | ||
81 | if (work) { | ||
82 | smp_read_barrier_depends(); | ||
83 | |||
84 | seq_printf(m, "%3lu %5d %16p %2lx ", | ||
85 | id, slow_work_pids[id], work, work->flags); | ||
86 | slow_work_print_mark(m, work); | ||
87 | |||
88 | if (work->ops->desc) | ||
89 | work->ops->desc(work, m); | ||
90 | seq_putc(m, '\n'); | ||
91 | } | ||
92 | read_unlock(&slow_work_execs_lock); | ||
93 | return 0; | ||
94 | |||
95 | default: | ||
96 | work = list_entry(p, struct slow_work, link); | ||
97 | seq_printf(m, "%3s - %16p %2lx ", | ||
98 | work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq", | ||
99 | work, work->flags); | ||
100 | slow_work_print_mark(m, work); | ||
101 | |||
102 | if (work->ops->desc) | ||
103 | work->ops->desc(work, m); | ||
104 | seq_putc(m, '\n'); | ||
105 | return 0; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * map the iterator to a work item | ||
111 | */ | ||
112 | static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos) | ||
113 | { | ||
114 | struct list_head *p; | ||
115 | unsigned long count, id; | ||
116 | |||
117 | switch (*_pos >> ITERATOR_SHIFT) { | ||
118 | case 0x0: | ||
119 | if (*_pos == 0) | ||
120 | *_pos = 1; | ||
121 | if (*_pos < 3) | ||
122 | return (void *)(unsigned long) *_pos; | ||
123 | if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT) | ||
124 | for (id = *_pos - 3; | ||
125 | id < SLOW_WORK_THREAD_LIMIT; | ||
126 | id++, (*_pos)++) | ||
127 | if (slow_work_execs[id]) | ||
128 | return (void *)(unsigned long) *_pos; | ||
129 | *_pos = 0x1UL << ITERATOR_SHIFT; | ||
130 | |||
131 | case 0x1: | ||
132 | count = *_pos & ITERATOR_COUNTER; | ||
133 | list_for_each(p, &slow_work_queue) { | ||
134 | if (count == 0) | ||
135 | return p; | ||
136 | count--; | ||
137 | } | ||
138 | *_pos = 0x2UL << ITERATOR_SHIFT; | ||
139 | |||
140 | case 0x2: | ||
141 | count = *_pos & ITERATOR_COUNTER; | ||
142 | list_for_each(p, &vslow_work_queue) { | ||
143 | if (count == 0) | ||
144 | return p; | ||
145 | count--; | ||
146 | } | ||
147 | *_pos = 0x3UL << ITERATOR_SHIFT; | ||
148 | |||
149 | default: | ||
150 | return NULL; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * set up the iterator to start reading from the first line | ||
156 | */ | ||
157 | static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos) | ||
158 | { | ||
159 | spin_lock_irq(&slow_work_queue_lock); | ||
160 | return slow_work_runqueue_index(m, _pos); | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * move to the next line | ||
165 | */ | ||
166 | static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos) | ||
167 | { | ||
168 | struct list_head *p = v; | ||
169 | unsigned long selector = *_pos >> ITERATOR_SHIFT; | ||
170 | |||
171 | (*_pos)++; | ||
172 | switch (selector) { | ||
173 | case 0x0: | ||
174 | return slow_work_runqueue_index(m, _pos); | ||
175 | |||
176 | case 0x1: | ||
177 | if (*_pos >> ITERATOR_SHIFT == 0x1) { | ||
178 | p = p->next; | ||
179 | if (p != &slow_work_queue) | ||
180 | return p; | ||
181 | } | ||
182 | *_pos = 0x2UL << ITERATOR_SHIFT; | ||
183 | p = &vslow_work_queue; | ||
184 | |||
185 | case 0x2: | ||
186 | if (*_pos >> ITERATOR_SHIFT == 0x2) { | ||
187 | p = p->next; | ||
188 | if (p != &vslow_work_queue) | ||
189 | return p; | ||
190 | } | ||
191 | *_pos = 0x3UL << ITERATOR_SHIFT; | ||
192 | |||
193 | default: | ||
194 | return NULL; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * clean up after reading | ||
200 | */ | ||
201 | static void slow_work_runqueue_stop(struct seq_file *m, void *v) | ||
202 | { | ||
203 | spin_unlock_irq(&slow_work_queue_lock); | ||
204 | } | ||
205 | |||
206 | static const struct seq_operations slow_work_runqueue_ops = { | ||
207 | .start = slow_work_runqueue_start, | ||
208 | .stop = slow_work_runqueue_stop, | ||
209 | .next = slow_work_runqueue_next, | ||
210 | .show = slow_work_runqueue_show, | ||
211 | }; | ||
212 | |||
213 | /* | ||
214 | * open "/proc/slow_work_rq" to list queue contents | ||
215 | */ | ||
216 | static int slow_work_runqueue_open(struct inode *inode, struct file *file) | ||
217 | { | ||
218 | return seq_open(file, &slow_work_runqueue_ops); | ||
219 | } | ||
220 | |||
221 | const struct file_operations slow_work_runqueue_fops = { | ||
222 | .owner = THIS_MODULE, | ||
223 | .open = slow_work_runqueue_open, | ||
224 | .read = seq_read, | ||
225 | .llseek = seq_lseek, | ||
226 | .release = seq_release, | ||
227 | }; | ||
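The /proc iterator above packs its whole position into one loff_t: the top four bits select the section being walked (0x0 for the header lines and the currently-executing table, 0x1 for slow_work_queue, 0x2 for vslow_work_queue) and the remaining bits count entries within that section. A small worked example of the encoding, using the ITERATOR_* constants defined at the top of the file (the shift value assumes a 64-bit long):

	/* sketch: decoding a /proc/slow_work_rq iterator position */
	loff_t pos = (0x2UL << ITERATOR_SHIFT) | 7;	/* 8th item on vslow_work_queue */

	unsigned long section = pos >> ITERATOR_SHIFT;	/* 0x2 -> vslow_work_queue */
	unsigned long index   = pos & ITERATOR_COUNTER;	/* 7 */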
diff --git a/kernel/slow-work.c b/kernel/slow-work.c index 09d7519557d3..da94f3c101af 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c | |||
@@ -16,20 +16,17 @@ | |||
16 | #include <linux/kthread.h> | 16 | #include <linux/kthread.h> |
17 | #include <linux/freezer.h> | 17 | #include <linux/freezer.h> |
18 | #include <linux/wait.h> | 18 | #include <linux/wait.h> |
19 | 19 | #include <linux/proc_fs.h> | |
20 | #define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of | 20 | #include "slow-work.h" |
21 | * things to do */ | ||
22 | #define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after | ||
23 | * OOM */ | ||
24 | 21 | ||
25 | static void slow_work_cull_timeout(unsigned long); | 22 | static void slow_work_cull_timeout(unsigned long); |
26 | static void slow_work_oom_timeout(unsigned long); | 23 | static void slow_work_oom_timeout(unsigned long); |
27 | 24 | ||
28 | #ifdef CONFIG_SYSCTL | 25 | #ifdef CONFIG_SYSCTL |
29 | static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *, | 26 | static int slow_work_min_threads_sysctl(struct ctl_table *, int, |
30 | void __user *, size_t *, loff_t *); | 27 | void __user *, size_t *, loff_t *); |
31 | 28 | ||
32 | static int slow_work_max_threads_sysctl(struct ctl_table *, int , struct file *, | 29 | static int slow_work_max_threads_sysctl(struct ctl_table *, int , |
33 | void __user *, size_t *, loff_t *); | 30 | void __user *, size_t *, loff_t *); |
34 | #endif | 31 | #endif |
35 | 32 | ||
@@ -46,7 +43,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process | |||
46 | 43 | ||
47 | #ifdef CONFIG_SYSCTL | 44 | #ifdef CONFIG_SYSCTL |
48 | static const int slow_work_min_min_threads = 2; | 45 | static const int slow_work_min_min_threads = 2; |
49 | static int slow_work_max_max_threads = 255; | 46 | static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT; |
50 | static const int slow_work_min_vslow = 1; | 47 | static const int slow_work_min_vslow = 1; |
51 | static const int slow_work_max_vslow = 99; | 48 | static const int slow_work_max_vslow = 99; |
52 | 49 | ||
@@ -98,6 +95,32 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0); | |||
98 | static struct slow_work slow_work_new_thread; /* new thread starter */ | 95 | static struct slow_work slow_work_new_thread; /* new thread starter */ |
99 | 96 | ||
100 | /* | 97 | /* |
98 | * slow work ID allocation (use slow_work_queue_lock) | ||
99 | */ | ||
100 | static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT); | ||
101 | |||
102 | /* | ||
103 | * Unregistration tracking to prevent put_ref() from disappearing during module | ||
104 | * unload | ||
105 | */ | ||
106 | #ifdef CONFIG_MODULES | ||
107 | static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT]; | ||
108 | static struct module *slow_work_unreg_module; | ||
109 | static struct slow_work *slow_work_unreg_work_item; | ||
110 | static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); | ||
111 | static DEFINE_MUTEX(slow_work_unreg_sync_lock); | ||
112 | #endif | ||
113 | |||
114 | /* | ||
115 | * Data for tracking currently executing items for indication through /proc | ||
116 | */ | ||
117 | #ifdef CONFIG_SLOW_WORK_PROC | ||
118 | struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT]; | ||
119 | pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT]; | ||
120 | DEFINE_RWLOCK(slow_work_execs_lock); | ||
121 | #endif | ||
122 | |||
123 | /* | ||
101 | * The queues of work items and the lock governing access to them. These are | 124 | * The queues of work items and the lock governing access to them. These are |
102 | * shared between all the CPUs. It doesn't make sense to have per-CPU queues | 125 | * shared between all the CPUs. It doesn't make sense to have per-CPU queues |
103 | * as the number of threads bears no relation to the number of CPUs. | 126 | * as the number of threads bears no relation to the number of CPUs. |
@@ -105,9 +128,18 @@ static struct slow_work slow_work_new_thread; /* new thread starter */ | |||
105 | * There are two queues of work items: one for slow work items, and one for | 128 | * There are two queues of work items: one for slow work items, and one for |
106 | * very slow work items. | 129 | * very slow work items. |
107 | */ | 130 | */ |
108 | static LIST_HEAD(slow_work_queue); | 131 | LIST_HEAD(slow_work_queue); |
109 | static LIST_HEAD(vslow_work_queue); | 132 | LIST_HEAD(vslow_work_queue); |
110 | static DEFINE_SPINLOCK(slow_work_queue_lock); | 133 | DEFINE_SPINLOCK(slow_work_queue_lock); |
134 | |||
135 | /* | ||
136 | * The following are two wait queues that get pinged when a work item is placed | ||
137 | * on an empty queue. These allow work items that are hogging a thread by | ||
138 | * sleeping in a way that could be deferred to yield their thread and enqueue | ||
139 | * themselves. | ||
140 | */ | ||
141 | static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation); | ||
142 | static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation); | ||
111 | 143 | ||
112 | /* | 144 | /* |
113 | * The thread controls. A variable used to signal to the threads that they | 145 | * The thread controls. A variable used to signal to the threads that they |
@@ -126,6 +158,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited); | |||
126 | static int slow_work_user_count; | 158 | static int slow_work_user_count; |
127 | static DEFINE_MUTEX(slow_work_user_lock); | 159 | static DEFINE_MUTEX(slow_work_user_lock); |
128 | 160 | ||
161 | static inline int slow_work_get_ref(struct slow_work *work) | ||
162 | { | ||
163 | if (work->ops->get_ref) | ||
164 | return work->ops->get_ref(work); | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static inline void slow_work_put_ref(struct slow_work *work) | ||
170 | { | ||
171 | if (work->ops->put_ref) | ||
172 | work->ops->put_ref(work); | ||
173 | } | ||
174 | |||
129 | /* | 175 | /* |
130 | * Calculate the maximum number of active threads in the pool that are | 176 | * Calculate the maximum number of active threads in the pool that are |
131 | * permitted to process very slow work items. | 177 | * permitted to process very slow work items. |
@@ -149,8 +195,11 @@ static unsigned slow_work_calc_vsmax(void) | |||
149 | * Attempt to execute stuff queued on a slow thread. Return true if we managed | 195 | * Attempt to execute stuff queued on a slow thread. Return true if we managed |
150 | * it, false if there was nothing to do. | 196 | * it, false if there was nothing to do. |
151 | */ | 197 | */ |
152 | static bool slow_work_execute(void) | 198 | static noinline bool slow_work_execute(int id) |
153 | { | 199 | { |
200 | #ifdef CONFIG_MODULES | ||
201 | struct module *module; | ||
202 | #endif | ||
154 | struct slow_work *work = NULL; | 203 | struct slow_work *work = NULL; |
155 | unsigned vsmax; | 204 | unsigned vsmax; |
156 | bool very_slow; | 205 | bool very_slow; |
@@ -186,6 +235,16 @@ static bool slow_work_execute(void) | |||
186 | } else { | 235 | } else { |
187 | very_slow = false; /* avoid the compiler warning */ | 236 | very_slow = false; /* avoid the compiler warning */ |
188 | } | 237 | } |
238 | |||
239 | #ifdef CONFIG_MODULES | ||
240 | if (work) | ||
241 | slow_work_thread_processing[id] = work->owner; | ||
242 | #endif | ||
243 | if (work) { | ||
244 | slow_work_mark_time(work); | ||
245 | slow_work_begin_exec(id, work); | ||
246 | } | ||
247 | |||
189 | spin_unlock_irq(&slow_work_queue_lock); | 248 | spin_unlock_irq(&slow_work_queue_lock); |
190 | 249 | ||
191 | if (!work) | 250 | if (!work) |
@@ -194,12 +253,19 @@ static bool slow_work_execute(void) | |||
194 | if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) | 253 | if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) |
195 | BUG(); | 254 | BUG(); |
196 | 255 | ||
197 | work->ops->execute(work); | 256 | /* don't execute if the work is in the process of being cancelled */ |
257 | if (!test_bit(SLOW_WORK_CANCELLING, &work->flags)) | ||
258 | work->ops->execute(work); | ||
198 | 259 | ||
199 | if (very_slow) | 260 | if (very_slow) |
200 | atomic_dec(&vslow_work_executing_count); | 261 | atomic_dec(&vslow_work_executing_count); |
201 | clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); | 262 | clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); |
202 | 263 | ||
264 | /* wake up anyone waiting for this work to be complete */ | ||
265 | wake_up_bit(&work->flags, SLOW_WORK_EXECUTING); | ||
266 | |||
267 | slow_work_end_exec(id, work); | ||
268 | |||
203 | /* if someone tried to enqueue the item whilst we were executing it, | 269 | /* if someone tried to enqueue the item whilst we were executing it, |
204 | * then it'll be left unenqueued to avoid multiple threads trying to | 270 | * then it'll be left unenqueued to avoid multiple threads trying to |
205 | * execute it simultaneously | 271 | * execute it simultaneously |
@@ -219,7 +285,18 @@ static bool slow_work_execute(void) | |||
219 | spin_unlock_irq(&slow_work_queue_lock); | 285 | spin_unlock_irq(&slow_work_queue_lock); |
220 | } | 286 | } |
221 | 287 | ||
222 | work->ops->put_ref(work); | 288 | /* sort out the race between module unloading and put_ref() */ |
289 | slow_work_put_ref(work); | ||
290 | |||
291 | #ifdef CONFIG_MODULES | ||
292 | module = slow_work_thread_processing[id]; | ||
293 | slow_work_thread_processing[id] = NULL; | ||
294 | smp_mb(); | ||
295 | if (slow_work_unreg_work_item == work || | ||
296 | slow_work_unreg_module == module) | ||
297 | wake_up_all(&slow_work_unreg_wq); | ||
298 | #endif | ||
299 | |||
223 | return true; | 300 | return true; |
224 | 301 | ||
225 | auto_requeue: | 302 | auto_requeue: |
@@ -227,15 +304,61 @@ auto_requeue: | |||
227 | * - we transfer our ref on the item back to the appropriate queue | 304 | * - we transfer our ref on the item back to the appropriate queue |
228 | * - don't wake another thread up as we're awake already | 305 | * - don't wake another thread up as we're awake already |
229 | */ | 306 | */ |
307 | slow_work_mark_time(work); | ||
230 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) | 308 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) |
231 | list_add_tail(&work->link, &vslow_work_queue); | 309 | list_add_tail(&work->link, &vslow_work_queue); |
232 | else | 310 | else |
233 | list_add_tail(&work->link, &slow_work_queue); | 311 | list_add_tail(&work->link, &slow_work_queue); |
234 | spin_unlock_irq(&slow_work_queue_lock); | 312 | spin_unlock_irq(&slow_work_queue_lock); |
313 | slow_work_thread_processing[id] = NULL; | ||
235 | return true; | 314 | return true; |
236 | } | 315 | } |
237 | 316 | ||
238 | /** | 317 | /** |
318 | * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work | ||
319 | * @work: The work item under execution that wants to sleep | ||
320 | * @_timeout: Scheduler sleep timeout | ||
321 | * | ||
322 | * Allow a requeueable work item to sleep on a slow-work processor thread until | ||
323 | * that thread is needed to do some other work or the sleep is interrupted by | ||
324 | * some other event. | ||
325 | * | ||
326 | * The caller must set up a wake up event before calling this and must have set | ||
327 | * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own | ||
328 | * condition before calling this function as no test is made here. | ||
329 | * | ||
330 | * False is returned if there is nothing on the queue; true is returned if the | ||
331 | * work item should be requeued | ||
332 | */ | ||
333 | bool slow_work_sleep_till_thread_needed(struct slow_work *work, | ||
334 | signed long *_timeout) | ||
335 | { | ||
336 | wait_queue_head_t *wfo_wq; | ||
337 | struct list_head *queue; | ||
338 | |||
339 | DEFINE_WAIT(wait); | ||
340 | |||
341 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { | ||
342 | wfo_wq = &vslow_work_queue_waits_for_occupation; | ||
343 | queue = &vslow_work_queue; | ||
344 | } else { | ||
345 | wfo_wq = &slow_work_queue_waits_for_occupation; | ||
346 | queue = &slow_work_queue; | ||
347 | } | ||
348 | |||
349 | if (!list_empty(queue)) | ||
350 | return true; | ||
351 | |||
352 | add_wait_queue_exclusive(wfo_wq, &wait); | ||
353 | if (list_empty(queue)) | ||
354 | *_timeout = schedule_timeout(*_timeout); | ||
355 | finish_wait(wfo_wq, &wait); | ||
356 | |||
357 | return !list_empty(queue); | ||
358 | } | ||
359 | EXPORT_SYMBOL(slow_work_sleep_till_thread_needed); | ||
360 | |||
361 | /** | ||
239 | * slow_work_enqueue - Schedule a slow work item for processing | 362 | * slow_work_enqueue - Schedule a slow work item for processing |
240 | * @work: The work item to queue | 363 | * @work: The work item to queue |
241 | * | 364 | * |
@@ -260,16 +383,22 @@ auto_requeue: | |||
260 | * allowed to pick items to execute. This ensures that very slow items won't | 383 | * allowed to pick items to execute. This ensures that very slow items won't |
261 | * overly block ones that are just ordinarily slow. | 384 | * overly block ones that are just ordinarily slow. |
262 | * | 385 | * |
263 | * Returns 0 if successful, -EAGAIN if not. | 386 | * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if an attempt is |
387 | * made to queue cancelled work) | ||
264 | */ | 388 | */ |
265 | int slow_work_enqueue(struct slow_work *work) | 389 | int slow_work_enqueue(struct slow_work *work) |
266 | { | 390 | { |
391 | wait_queue_head_t *wfo_wq; | ||
392 | struct list_head *queue; | ||
267 | unsigned long flags; | 393 | unsigned long flags; |
394 | int ret; | ||
395 | |||
396 | if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) | ||
397 | return -ECANCELED; | ||
268 | 398 | ||
269 | BUG_ON(slow_work_user_count <= 0); | 399 | BUG_ON(slow_work_user_count <= 0); |
270 | BUG_ON(!work); | 400 | BUG_ON(!work); |
271 | BUG_ON(!work->ops); | 401 | BUG_ON(!work->ops); |
272 | BUG_ON(!work->ops->get_ref); | ||
273 | 402 | ||
274 | /* when honouring an enqueue request, we only promise that we will run | 403 | /* when honouring an enqueue request, we only promise that we will run |
275 | * the work function in the future; we do not promise to run it once | 404 | * the work function in the future; we do not promise to run it once |
@@ -280,8 +409,19 @@ int slow_work_enqueue(struct slow_work *work) | |||
280 | * maintaining our promise | 409 | * maintaining our promise |
281 | */ | 410 | */ |
282 | if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { | 411 | if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { |
412 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { | ||
413 | wfo_wq = &vslow_work_queue_waits_for_occupation; | ||
414 | queue = &vslow_work_queue; | ||
415 | } else { | ||
416 | wfo_wq = &slow_work_queue_waits_for_occupation; | ||
417 | queue = &slow_work_queue; | ||
418 | } | ||
419 | |||
283 | spin_lock_irqsave(&slow_work_queue_lock, flags); | 420 | spin_lock_irqsave(&slow_work_queue_lock, flags); |
284 | 421 | ||
422 | if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags))) | ||
423 | goto cancelled; | ||
424 | |||
285 | /* we promise that we will not attempt to execute the work | 425 | /* we promise that we will not attempt to execute the work |
286 | * function in more than one thread simultaneously | 426 | * function in more than one thread simultaneously |
287 | * | 427 | * |
@@ -299,25 +439,221 @@ int slow_work_enqueue(struct slow_work *work) | |||
299 | if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { | 439 | if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { |
300 | set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); | 440 | set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); |
301 | } else { | 441 | } else { |
302 | if (work->ops->get_ref(work) < 0) | 442 | ret = slow_work_get_ref(work); |
303 | goto cant_get_ref; | 443 | if (ret < 0) |
304 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) | 444 | goto failed; |
305 | list_add_tail(&work->link, &vslow_work_queue); | 445 | slow_work_mark_time(work); |
306 | else | 446 | list_add_tail(&work->link, queue); |
307 | list_add_tail(&work->link, &slow_work_queue); | ||
308 | wake_up(&slow_work_thread_wq); | 447 | wake_up(&slow_work_thread_wq); |
448 | |||
449 | /* if someone who could be requeued is sleeping on a | ||
450 | * thread, then ask them to yield their thread */ | ||
451 | if (work->link.prev == queue) | ||
452 | wake_up(wfo_wq); | ||
309 | } | 453 | } |
310 | 454 | ||
311 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); | 455 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); |
312 | } | 456 | } |
313 | return 0; | 457 | return 0; |
314 | 458 | ||
315 | cant_get_ref: | 459 | cancelled: |
460 | ret = -ECANCELED; | ||
461 | failed: | ||
316 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); | 462 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); |
317 | return -EAGAIN; | 463 | return ret; |
318 | } | 464 | } |
319 | EXPORT_SYMBOL(slow_work_enqueue); | 465 | EXPORT_SYMBOL(slow_work_enqueue); |
320 | 466 | ||
467 | static int slow_work_wait(void *word) | ||
468 | { | ||
469 | schedule(); | ||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | /** | ||
474 | * slow_work_cancel - Cancel a slow work item | ||
475 | * @work: The work item to cancel | ||
476 | * | ||
477 | * This function will cancel a previously enqueued work item. If we cannot | ||
478 | * cancel the work item, it is guaranteed to have run when this function | ||
479 | * returns. | ||
480 | */ | ||
481 | void slow_work_cancel(struct slow_work *work) | ||
482 | { | ||
483 | bool wait = true, put = false; | ||
484 | |||
485 | set_bit(SLOW_WORK_CANCELLING, &work->flags); | ||
486 | smp_mb(); | ||
487 | |||
488 | /* if the work item is a delayed work item with an active timer, we | ||
489 | * need to wait for the timer to finish _before_ getting the spinlock, | ||
490 | * lest we deadlock against the timer routine | ||
491 | * | ||
492 | * the timer routine will leave DELAYED set if it notices the | ||
493 | * CANCELLING flag in time | ||
494 | */ | ||
495 | if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { | ||
496 | struct delayed_slow_work *dwork = | ||
497 | container_of(work, struct delayed_slow_work, work); | ||
498 | del_timer_sync(&dwork->timer); | ||
499 | } | ||
500 | |||
501 | spin_lock_irq(&slow_work_queue_lock); | ||
502 | |||
503 | if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { | ||
504 | /* the timer routine aborted or never happened, so we are left | ||
505 | * holding the timer's reference on the item and should just | ||
506 | * drop the pending flag and wait for any ongoing execution to | ||
507 | * finish */ | ||
508 | struct delayed_slow_work *dwork = | ||
509 | container_of(work, struct delayed_slow_work, work); | ||
510 | |||
511 | BUG_ON(timer_pending(&dwork->timer)); | ||
512 | BUG_ON(!list_empty(&work->link)); | ||
513 | |||
514 | clear_bit(SLOW_WORK_DELAYED, &work->flags); | ||
515 | put = true; | ||
516 | clear_bit(SLOW_WORK_PENDING, &work->flags); | ||
517 | |||
518 | } else if (test_bit(SLOW_WORK_PENDING, &work->flags) && | ||
519 | !list_empty(&work->link)) { | ||
520 | /* the link in the pending queue holds a reference on the item | ||
521 | * that we will need to release */ | ||
522 | list_del_init(&work->link); | ||
523 | wait = false; | ||
524 | put = true; | ||
525 | clear_bit(SLOW_WORK_PENDING, &work->flags); | ||
526 | |||
527 | } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) { | ||
528 | /* the executor is holding our only reference on the item, so | ||
529 | * we merely need to wait for it to finish executing */ | ||
530 | clear_bit(SLOW_WORK_PENDING, &work->flags); | ||
531 | } | ||
532 | |||
533 | spin_unlock_irq(&slow_work_queue_lock); | ||
534 | |||
535 | /* the EXECUTING flag is set by the executor whilst the spinlock is set | ||
536 | * and before the item is dequeued - so assuming the above doesn't | ||
537 | * actually dequeue it, simply waiting for the EXECUTING flag to be | ||
538 | * released here should be sufficient */ | ||
539 | if (wait) | ||
540 | wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait, | ||
541 | TASK_UNINTERRUPTIBLE); | ||
542 | |||
543 | clear_bit(SLOW_WORK_CANCELLING, &work->flags); | ||
544 | if (put) | ||
545 | slow_work_put_ref(work); | ||
546 | } | ||
547 | EXPORT_SYMBOL(slow_work_cancel); | ||
548 | |||
549 | /* | ||
550 | * Handle expiry of the delay timer, indicating that a delayed slow work item | ||
551 | * should now be queued if not cancelled | ||
552 | */ | ||
553 | static void delayed_slow_work_timer(unsigned long data) | ||
554 | { | ||
555 | wait_queue_head_t *wfo_wq; | ||
556 | struct list_head *queue; | ||
557 | struct slow_work *work = (struct slow_work *) data; | ||
558 | unsigned long flags; | ||
559 | bool queued = false, put = false, first = false; | ||
560 | |||
561 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { | ||
562 | wfo_wq = &vslow_work_queue_waits_for_occupation; | ||
563 | queue = &vslow_work_queue; | ||
564 | } else { | ||
565 | wfo_wq = &slow_work_queue_waits_for_occupation; | ||
566 | queue = &slow_work_queue; | ||
567 | } | ||
568 | |||
569 | spin_lock_irqsave(&slow_work_queue_lock, flags); | ||
570 | if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) { | ||
571 | clear_bit(SLOW_WORK_DELAYED, &work->flags); | ||
572 | |||
573 | if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { | ||
574 | /* we discard the reference the timer was holding in | ||
575 | * favour of the one the executor holds */ | ||
576 | set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); | ||
577 | put = true; | ||
578 | } else { | ||
579 | slow_work_mark_time(work); | ||
580 | list_add_tail(&work->link, queue); | ||
581 | queued = true; | ||
582 | if (work->link.prev == queue) | ||
583 | first = true; | ||
584 | } | ||
585 | } | ||
586 | |||
587 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); | ||
588 | if (put) | ||
589 | slow_work_put_ref(work); | ||
590 | if (first) | ||
591 | wake_up(wfo_wq); | ||
592 | if (queued) | ||
593 | wake_up(&slow_work_thread_wq); | ||
594 | } | ||
595 | |||
596 | /** | ||
597 | * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing | ||
598 | * @dwork: The delayed work item to queue | ||
599 | * @delay: When to start executing the work, in jiffies from now | ||
600 | * | ||
601 | * This is similar to slow_work_enqueue(), but it adds a delay before the work | ||
602 | * is actually queued for processing. | ||
603 | * | ||
604 | * The item can have delayed processing requested on it whilst it is being | ||
605 | * executed. The delay will begin immediately, and if it expires before the | ||
606 | * item finishes executing, the item will be placed back on the queue when it | ||
607 | * has finished executing. | ||
608 | */ | ||
609 | int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, | ||
610 | unsigned long delay) | ||
611 | { | ||
612 | struct slow_work *work = &dwork->work; | ||
613 | unsigned long flags; | ||
614 | int ret; | ||
615 | |||
616 | if (delay == 0) | ||
617 | return slow_work_enqueue(&dwork->work); | ||
618 | |||
619 | BUG_ON(slow_work_user_count <= 0); | ||
620 | BUG_ON(!work); | ||
621 | BUG_ON(!work->ops); | ||
622 | |||
623 | if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) | ||
624 | return -ECANCELED; | ||
625 | |||
626 | if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { | ||
627 | spin_lock_irqsave(&slow_work_queue_lock, flags); | ||
628 | |||
629 | if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) | ||
630 | goto cancelled; | ||
631 | |||
632 | /* the timer holds a reference whilst it is pending */ | ||
633 | ret = work->ops->get_ref(work); | ||
634 | if (ret < 0) | ||
635 | goto cant_get_ref; | ||
636 | |||
637 | if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags)) | ||
638 | BUG(); | ||
639 | dwork->timer.expires = jiffies + delay; | ||
640 | dwork->timer.data = (unsigned long) work; | ||
641 | dwork->timer.function = delayed_slow_work_timer; | ||
642 | add_timer(&dwork->timer); | ||
643 | |||
644 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); | ||
645 | } | ||
646 | |||
647 | return 0; | ||
648 | |||
649 | cancelled: | ||
650 | ret = -ECANCELED; | ||
651 | cant_get_ref: | ||
652 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); | ||
653 | return ret; | ||
654 | } | ||
655 | EXPORT_SYMBOL(delayed_slow_work_enqueue); | ||
656 | |||
321 | /* | 657 | /* |
322 | * Schedule a cull of the thread pool at some time in the near future | 658 | * Schedule a cull of the thread pool at some time in the near future |
323 | */ | 659 | */ |
@@ -368,13 +704,23 @@ static inline bool slow_work_available(int vsmax) | |||
368 | */ | 704 | */ |
369 | static int slow_work_thread(void *_data) | 705 | static int slow_work_thread(void *_data) |
370 | { | 706 | { |
371 | int vsmax; | 707 | int vsmax, id; |
372 | 708 | ||
373 | DEFINE_WAIT(wait); | 709 | DEFINE_WAIT(wait); |
374 | 710 | ||
375 | set_freezable(); | 711 | set_freezable(); |
376 | set_user_nice(current, -5); | 712 | set_user_nice(current, -5); |
377 | 713 | ||
714 | /* allocate ourselves an ID */ | ||
715 | spin_lock_irq(&slow_work_queue_lock); | ||
716 | id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT); | ||
717 | BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT); | ||
718 | __set_bit(id, slow_work_ids); | ||
719 | slow_work_set_thread_pid(id, current->pid); | ||
720 | spin_unlock_irq(&slow_work_queue_lock); | ||
721 | |||
722 | sprintf(current->comm, "kslowd%03u", id); | ||
723 | |||
378 | for (;;) { | 724 | for (;;) { |
379 | vsmax = vslow_work_proportion; | 725 | vsmax = vslow_work_proportion; |
380 | vsmax *= atomic_read(&slow_work_thread_count); | 726 | vsmax *= atomic_read(&slow_work_thread_count); |
@@ -395,7 +741,7 @@ static int slow_work_thread(void *_data) | |||
395 | vsmax *= atomic_read(&slow_work_thread_count); | 741 | vsmax *= atomic_read(&slow_work_thread_count); |
396 | vsmax /= 100; | 742 | vsmax /= 100; |
397 | 743 | ||
398 | if (slow_work_available(vsmax) && slow_work_execute()) { | 744 | if (slow_work_available(vsmax) && slow_work_execute(id)) { |
399 | cond_resched(); | 745 | cond_resched(); |
400 | if (list_empty(&slow_work_queue) && | 746 | if (list_empty(&slow_work_queue) && |
401 | list_empty(&vslow_work_queue) && | 747 | list_empty(&vslow_work_queue) && |
@@ -412,6 +758,11 @@ static int slow_work_thread(void *_data) | |||
412 | break; | 758 | break; |
413 | } | 759 | } |
414 | 760 | ||
761 | spin_lock_irq(&slow_work_queue_lock); | ||
762 | slow_work_set_thread_pid(id, 0); | ||
763 | __clear_bit(id, slow_work_ids); | ||
764 | spin_unlock_irq(&slow_work_queue_lock); | ||
765 | |||
415 | if (atomic_dec_and_test(&slow_work_thread_count)) | 766 | if (atomic_dec_and_test(&slow_work_thread_count)) |
416 | complete_and_exit(&slow_work_last_thread_exited, 0); | 767 | complete_and_exit(&slow_work_last_thread_exited, 0); |
417 | return 0; | 768 | return 0; |
@@ -427,21 +778,6 @@ static void slow_work_cull_timeout(unsigned long data) | |||
427 | } | 778 | } |
428 | 779 | ||
429 | /* | 780 | /* |
430 | * Get a reference on slow work thread starter | ||
431 | */ | ||
432 | static int slow_work_new_thread_get_ref(struct slow_work *work) | ||
433 | { | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | /* | ||
438 | * Drop a reference on slow work thread starter | ||
439 | */ | ||
440 | static void slow_work_new_thread_put_ref(struct slow_work *work) | ||
441 | { | ||
442 | } | ||
443 | |||
444 | /* | ||
445 | * Start a new slow work thread | 781 | * Start a new slow work thread |
446 | */ | 782 | */ |
447 | static void slow_work_new_thread_execute(struct slow_work *work) | 783 | static void slow_work_new_thread_execute(struct slow_work *work) |
@@ -475,9 +811,11 @@ static void slow_work_new_thread_execute(struct slow_work *work) | |||
475 | } | 811 | } |
476 | 812 | ||
477 | static const struct slow_work_ops slow_work_new_thread_ops = { | 813 | static const struct slow_work_ops slow_work_new_thread_ops = { |
478 | .get_ref = slow_work_new_thread_get_ref, | 814 | .owner = THIS_MODULE, |
479 | .put_ref = slow_work_new_thread_put_ref, | ||
480 | .execute = slow_work_new_thread_execute, | 815 | .execute = slow_work_new_thread_execute, |
816 | #ifdef CONFIG_SLOW_WORK_PROC | ||
817 | .desc = slow_work_new_thread_desc, | ||
818 | #endif | ||
481 | }; | 819 | }; |
482 | 820 | ||
483 | /* | 821 | /* |
@@ -493,10 +831,10 @@ static void slow_work_oom_timeout(unsigned long data) | |||
493 | * Handle adjustment of the minimum number of threads | 831 | * Handle adjustment of the minimum number of threads |
494 | */ | 832 | */ |
495 | static int slow_work_min_threads_sysctl(struct ctl_table *table, int write, | 833 | static int slow_work_min_threads_sysctl(struct ctl_table *table, int write, |
496 | struct file *filp, void __user *buffer, | 834 | void __user *buffer, |
497 | size_t *lenp, loff_t *ppos) | 835 | size_t *lenp, loff_t *ppos) |
498 | { | 836 | { |
499 | int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); | 837 | int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
500 | int n; | 838 | int n; |
501 | 839 | ||
502 | if (ret == 0) { | 840 | if (ret == 0) { |
@@ -521,10 +859,10 @@ static int slow_work_min_threads_sysctl(struct ctl_table *table, int write, | |||
521 | * Handle adjustment of the maximum number of threads | 859 | * Handle adjustment of the maximum number of threads |
522 | */ | 860 | */ |
523 | static int slow_work_max_threads_sysctl(struct ctl_table *table, int write, | 861 | static int slow_work_max_threads_sysctl(struct ctl_table *table, int write, |
524 | struct file *filp, void __user *buffer, | 862 | void __user *buffer, |
525 | size_t *lenp, loff_t *ppos) | 863 | size_t *lenp, loff_t *ppos) |
526 | { | 864 | { |
527 | int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); | 865 | int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
528 | int n; | 866 | int n; |
529 | 867 | ||
530 | if (ret == 0) { | 868 | if (ret == 0) { |
@@ -546,12 +884,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write, | |||
546 | 884 | ||
547 | /** | 885 | /** |
548 | * slow_work_register_user - Register a user of the facility | 886 | * slow_work_register_user - Register a user of the facility |
887 | * @module: The module about to make use of the facility | ||
549 | * | 888 | * |
550 | * Register a user of the facility, starting up the initial threads if there | 889 | * Register a user of the facility, starting up the initial threads if there |
551 | * aren't any other users at this point. This will return 0 if successful, or | 890 | * aren't any other users at this point. This will return 0 if successful, or |
552 | * an error if not. | 891 | * an error if not. |
553 | */ | 892 | */ |
554 | int slow_work_register_user(void) | 893 | int slow_work_register_user(struct module *module) |
555 | { | 894 | { |
556 | struct task_struct *p; | 895 | struct task_struct *p; |
557 | int loop; | 896 | int loop; |
@@ -598,14 +937,79 @@ error: | |||
598 | } | 937 | } |
599 | EXPORT_SYMBOL(slow_work_register_user); | 938 | EXPORT_SYMBOL(slow_work_register_user); |
600 | 939 | ||
940 | /* | ||
941 | * wait for all outstanding items from the calling module to complete | ||
942 | * - note that more items may be queued whilst we're waiting | ||
943 | */ | ||
944 | static void slow_work_wait_for_items(struct module *module) | ||
945 | { | ||
946 | DECLARE_WAITQUEUE(myself, current); | ||
947 | struct slow_work *work; | ||
948 | int loop; | ||
949 | |||
950 | mutex_lock(&slow_work_unreg_sync_lock); | ||
951 | add_wait_queue(&slow_work_unreg_wq, &myself); | ||
952 | |||
953 | for (;;) { | ||
954 | spin_lock_irq(&slow_work_queue_lock); | ||
955 | |||
956 | /* first of all, we wait for the last queued item in each list | ||
957 | * to be processed */ | ||
958 | list_for_each_entry_reverse(work, &vslow_work_queue, link) { | ||
959 | if (work->owner == module) { | ||
960 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
961 | slow_work_unreg_work_item = work; | ||
962 | goto do_wait; | ||
963 | } | ||
964 | } | ||
965 | list_for_each_entry_reverse(work, &slow_work_queue, link) { | ||
966 | if (work->owner == module) { | ||
967 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
968 | slow_work_unreg_work_item = work; | ||
969 | goto do_wait; | ||
970 | } | ||
971 | } | ||
972 | |||
973 | /* then we wait for the items being processed to finish */ | ||
974 | slow_work_unreg_module = module; | ||
975 | smp_mb(); | ||
976 | for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) { | ||
977 | if (slow_work_thread_processing[loop] == module) | ||
978 | goto do_wait; | ||
979 | } | ||
980 | spin_unlock_irq(&slow_work_queue_lock); | ||
981 | break; /* okay, we're done */ | ||
982 | |||
983 | do_wait: | ||
984 | spin_unlock_irq(&slow_work_queue_lock); | ||
985 | schedule(); | ||
986 | slow_work_unreg_work_item = NULL; | ||
987 | slow_work_unreg_module = NULL; | ||
988 | } | ||
989 | |||
990 | remove_wait_queue(&slow_work_unreg_wq, &myself); | ||
991 | mutex_unlock(&slow_work_unreg_sync_lock); | ||
992 | } | ||
993 | |||
601 | /** | 994 | /** |
602 | * slow_work_unregister_user - Unregister a user of the facility | 995 | * slow_work_unregister_user - Unregister a user of the facility |
996 | * @module: The module whose items should be cleared | ||
603 | * | 997 | * |
604 | * Unregister a user of the facility, killing all the threads if this was the | 998 | * Unregister a user of the facility, killing all the threads if this was the |
605 | * last one. | 999 | * last one. |
1000 | * | ||
1001 | * This waits for all the work items belonging to the nominated module to go | ||
1002 | * away before proceeding. | ||
606 | */ | 1003 | */ |
607 | void slow_work_unregister_user(void) | 1004 | void slow_work_unregister_user(struct module *module) |
608 | { | 1005 | { |
1006 | /* first of all, wait for all outstanding items from the calling module | ||
1007 | * to complete */ | ||
1008 | if (module) | ||
1009 | slow_work_wait_for_items(module); | ||
1010 | |||
1011 | /* then we can actually go about shutting down the facility if need | ||
1012 | * be */ | ||
609 | mutex_lock(&slow_work_user_lock); | 1013 | mutex_lock(&slow_work_user_lock); |
610 | 1014 | ||
611 | BUG_ON(slow_work_user_count <= 0); | 1015 | BUG_ON(slow_work_user_count <= 0); |
@@ -639,6 +1043,10 @@ static int __init init_slow_work(void) | |||
639 | if (slow_work_max_max_threads < nr_cpus * 2) | 1043 | if (slow_work_max_max_threads < nr_cpus * 2) |
640 | slow_work_max_max_threads = nr_cpus * 2; | 1044 | slow_work_max_max_threads = nr_cpus * 2; |
641 | #endif | 1045 | #endif |
1046 | #ifdef CONFIG_SLOW_WORK_PROC | ||
1047 | proc_create("slow_work_rq", S_IFREG | 0400, NULL, | ||
1048 | &slow_work_runqueue_fops); | ||
1049 | #endif | ||
642 | return 0; | 1050 | return 0; |
643 | } | 1051 | } |
644 | 1052 | ||
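
Taken together, the slow-work.c changes make the facility module-aware: get_ref/put_ref become optional (wrapped by slow_work_get_ref()/slow_work_put_ref()), ops gain an .owner, enqueueing a cancelled item returns -ECANCELED, slow_work_cancel() waits for any in-flight execution, and unregistration blocks until the caller's outstanding items have drained. A hypothetical client module might now use the API as sketched below; my_work, my_ops and the slow_work_init() initialiser from <linux/slow-work.h> are assumptions for illustration, not shown in this diff.

#include <linux/module.h>
#include <linux/slow-work.h>

static void my_execute(struct slow_work *work)
{
	/* long-running, sleepable processing goes here */
}

static const struct slow_work_ops my_ops = {
	.owner   = THIS_MODULE,		/* lets unregistration wait for us */
	.execute = my_execute,		/* get_ref/put_ref are now optional */
};

static struct slow_work my_work;

static int __init my_init(void)
{
	int ret;

	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;

	slow_work_init(&my_work, &my_ops);	/* assumed initialiser */
	ret = slow_work_enqueue(&my_work);	/* -EAGAIN or -ECANCELED on failure */
	if (ret < 0)
		slow_work_unregister_user(THIS_MODULE);
	return ret;
}

static void __exit my_exit(void)
{
	slow_work_cancel(&my_work);		/* waits if it is currently executing */
	slow_work_unregister_user(THIS_MODULE);	/* drains our remaining items */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
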
diff --git a/kernel/slow-work.h b/kernel/slow-work.h new file mode 100644 index 000000000000..3c2f007f3ad6 --- /dev/null +++ b/kernel/slow-work.h | |||
@@ -0,0 +1,72 @@ | |||
1 | /* Slow work private definitions | ||
2 | * | ||
3 | * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of | ||
13 | * things to do */ | ||
14 | #define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after | ||
15 | * OOM */ | ||
16 | |||
17 | #define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */ | ||
18 | |||
19 | /* | ||
20 | * slow-work.c | ||
21 | */ | ||
22 | #ifdef CONFIG_SLOW_WORK_PROC | ||
23 | extern struct slow_work *slow_work_execs[]; | ||
24 | extern pid_t slow_work_pids[]; | ||
25 | extern rwlock_t slow_work_execs_lock; | ||
26 | #endif | ||
27 | |||
28 | extern struct list_head slow_work_queue; | ||
29 | extern struct list_head vslow_work_queue; | ||
30 | extern spinlock_t slow_work_queue_lock; | ||
31 | |||
32 | /* | ||
33 | * slow-work-proc.c | ||
34 | */ | ||
35 | #ifdef CONFIG_SLOW_WORK_PROC | ||
36 | extern const struct file_operations slow_work_runqueue_fops; | ||
37 | |||
38 | extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); | ||
39 | #endif | ||
40 | |||
41 | /* | ||
42 | * Helper functions | ||
43 | */ | ||
44 | static inline void slow_work_set_thread_pid(int id, pid_t pid) | ||
45 | { | ||
46 | #ifdef CONFIG_SLOW_WORK_PROC | ||
47 | slow_work_pids[id] = pid; | ||
48 | #endif | ||
49 | } | ||
50 | |||
51 | static inline void slow_work_mark_time(struct slow_work *work) | ||
52 | { | ||
53 | #ifdef CONFIG_SLOW_WORK_PROC | ||
54 | work->mark = CURRENT_TIME; | ||
55 | #endif | ||
56 | } | ||
57 | |||
58 | static inline void slow_work_begin_exec(int id, struct slow_work *work) | ||
59 | { | ||
60 | #ifdef CONFIG_SLOW_WORK_PROC | ||
61 | slow_work_execs[id] = work; | ||
62 | #endif | ||
63 | } | ||
64 | |||
65 | static inline void slow_work_end_exec(int id, struct slow_work *work) | ||
66 | { | ||
67 | #ifdef CONFIG_SLOW_WORK_PROC | ||
68 | write_lock(&slow_work_execs_lock); | ||
69 | slow_work_execs[id] = NULL; | ||
70 | write_unlock(&slow_work_execs_lock); | ||
71 | #endif | ||
72 | } | ||
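
The .desc operation referenced above (and declared here for the new-thread starter) lets each item identify itself in /proc/slow_work_rq. Its shape is inferred from the slow_work_new_thread_desc() declaration; a client wanting a readable entry might supply something like this hypothetical callback:

#ifdef CONFIG_SLOW_WORK_PROC
static void my_desc(struct slow_work *work, struct seq_file *m)
{
	/* one short line identifying the item in the runqueue listing */
	seq_puts(m, "my-module: object write-back");
}
#endif

static void my_execute(struct slow_work *work)
{
	/* actual processing elided */
}

static const struct slow_work_ops my_ops = {
	.owner   = THIS_MODULE,
	.execute = my_execute,
#ifdef CONFIG_SLOW_WORK_PROC
	.desc    = my_desc,		/* compiled out when proc support is off */
#endif
};
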
diff --git a/kernel/smp.c b/kernel/smp.c index 8e218500ab14..c9d1c7835c2f 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -29,8 +29,7 @@ enum { | |||
29 | 29 | ||
30 | struct call_function_data { | 30 | struct call_function_data { |
31 | struct call_single_data csd; | 31 | struct call_single_data csd; |
32 | spinlock_t lock; | 32 | atomic_t refs; |
33 | unsigned int refs; | ||
34 | cpumask_var_t cpumask; | 33 | cpumask_var_t cpumask; |
35 | }; | 34 | }; |
36 | 35 | ||
@@ -39,9 +38,7 @@ struct call_single_queue { | |||
39 | spinlock_t lock; | 38 | spinlock_t lock; |
40 | }; | 39 | }; |
41 | 40 | ||
42 | static DEFINE_PER_CPU(struct call_function_data, cfd_data) = { | 41 | static DEFINE_PER_CPU(struct call_function_data, cfd_data); |
43 | .lock = __SPIN_LOCK_UNLOCKED(cfd_data.lock), | ||
44 | }; | ||
45 | 42 | ||
46 | static int | 43 | static int |
47 | hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) | 44 | hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) |
@@ -196,25 +193,18 @@ void generic_smp_call_function_interrupt(void) | |||
196 | list_for_each_entry_rcu(data, &call_function.queue, csd.list) { | 193 | list_for_each_entry_rcu(data, &call_function.queue, csd.list) { |
197 | int refs; | 194 | int refs; |
198 | 195 | ||
199 | spin_lock(&data->lock); | 196 | if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) |
200 | if (!cpumask_test_cpu(cpu, data->cpumask)) { | ||
201 | spin_unlock(&data->lock); | ||
202 | continue; | 197 | continue; |
203 | } | ||
204 | cpumask_clear_cpu(cpu, data->cpumask); | ||
205 | spin_unlock(&data->lock); | ||
206 | 198 | ||
207 | data->csd.func(data->csd.info); | 199 | data->csd.func(data->csd.info); |
208 | 200 | ||
209 | spin_lock(&data->lock); | 201 | refs = atomic_dec_return(&data->refs); |
210 | WARN_ON(data->refs == 0); | 202 | WARN_ON(refs < 0); |
211 | refs = --data->refs; | ||
212 | if (!refs) { | 203 | if (!refs) { |
213 | spin_lock(&call_function.lock); | 204 | spin_lock(&call_function.lock); |
214 | list_del_rcu(&data->csd.list); | 205 | list_del_rcu(&data->csd.list); |
215 | spin_unlock(&call_function.lock); | 206 | spin_unlock(&call_function.lock); |
216 | } | 207 | } |
217 | spin_unlock(&data->lock); | ||
218 | 208 | ||
219 | if (refs) | 209 | if (refs) |
220 | continue; | 210 | continue; |
@@ -357,13 +347,6 @@ void __smp_call_function_single(int cpu, struct call_single_data *data, | |||
357 | generic_exec_single(cpu, data, wait); | 347 | generic_exec_single(cpu, data, wait); |
358 | } | 348 | } |
359 | 349 | ||
360 | /* Deprecated: shim for archs using old arch_send_call_function_ipi API. */ | ||
361 | |||
362 | #ifndef arch_send_call_function_ipi_mask | ||
363 | # define arch_send_call_function_ipi_mask(maskp) \ | ||
364 | arch_send_call_function_ipi(*(maskp)) | ||
365 | #endif | ||
366 | |||
367 | /** | 350 | /** |
368 | * smp_call_function_many(): Run a function on a set of other CPUs. | 351 | * smp_call_function_many(): Run a function on a set of other CPUs. |
369 | * @mask: The set of cpus to run on (only runs on online subset). | 352 | * @mask: The set of cpus to run on (only runs on online subset). |
@@ -419,23 +402,20 @@ void smp_call_function_many(const struct cpumask *mask, | |||
419 | data = &__get_cpu_var(cfd_data); | 402 | data = &__get_cpu_var(cfd_data); |
420 | csd_lock(&data->csd); | 403 | csd_lock(&data->csd); |
421 | 404 | ||
422 | spin_lock_irqsave(&data->lock, flags); | ||
423 | data->csd.func = func; | 405 | data->csd.func = func; |
424 | data->csd.info = info; | 406 | data->csd.info = info; |
425 | cpumask_and(data->cpumask, mask, cpu_online_mask); | 407 | cpumask_and(data->cpumask, mask, cpu_online_mask); |
426 | cpumask_clear_cpu(this_cpu, data->cpumask); | 408 | cpumask_clear_cpu(this_cpu, data->cpumask); |
427 | data->refs = cpumask_weight(data->cpumask); | 409 | atomic_set(&data->refs, cpumask_weight(data->cpumask)); |
428 | 410 | ||
429 | spin_lock(&call_function.lock); | 411 | spin_lock_irqsave(&call_function.lock, flags); |
430 | /* | 412 | /* |
431 | * Place entry at the _HEAD_ of the list, so that any cpu still | 413 | * Place entry at the _HEAD_ of the list, so that any cpu still |
432 | * observing the entry in generic_smp_call_function_interrupt() | 414 | * observing the entry in generic_smp_call_function_interrupt() |
433 | * will not miss any other list entries: | 415 | * will not miss any other list entries: |
434 | */ | 416 | */ |
435 | list_add_rcu(&data->csd.list, &call_function.queue); | 417 | list_add_rcu(&data->csd.list, &call_function.queue); |
436 | spin_unlock(&call_function.lock); | 418 | spin_unlock_irqrestore(&call_function.lock, flags); |
437 | |||
438 | spin_unlock_irqrestore(&data->lock, flags); | ||
439 | 419 | ||
440 | /* | 420 | /* |
441 | * Make the list addition visible before sending the ipi. | 421 | * Make the list addition visible before sending the ipi. |
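
The smp.c conversion drops the per-element spinlock by turning the reference count into an atomic_t: each target CPU claims its bit with cpumask_test_and_clear_cpu(), runs the function, and the last decrementer unhooks the element, so only the shared call_function list still needs locking. The underlying idiom, shown with a hypothetical demo_obj:

#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_obj {
	atomic_t refs;			/* one reference per pending consumer */
};

/* publisher: prime the count before making the object visible */
static void demo_publish(struct demo_obj *obj, int nr_consumers)
{
	atomic_set(&obj->refs, nr_consumers);
}

/* consumer side: safe in interrupt context, no per-object lock required */
static void demo_consume(struct demo_obj *obj)
{
	/* ... per-consumer work on obj ... */

	/* atomic_dec_return() implies a full memory barrier, so every
	 * consumer's stores are visible to the CPU that sees zero */
	if (atomic_dec_return(&obj->refs) == 0)
		kfree(obj);		/* last one out releases the object */
}
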
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 88796c330838..81324d12eb35 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -90,11 +90,11 @@ void touch_all_softlockup_watchdogs(void) | |||
90 | EXPORT_SYMBOL(touch_all_softlockup_watchdogs); | 90 | EXPORT_SYMBOL(touch_all_softlockup_watchdogs); |
91 | 91 | ||
92 | int proc_dosoftlockup_thresh(struct ctl_table *table, int write, | 92 | int proc_dosoftlockup_thresh(struct ctl_table *table, int write, |
93 | struct file *filp, void __user *buffer, | 93 | void __user *buffer, |
94 | size_t *lenp, loff_t *ppos) | 94 | size_t *lenp, loff_t *ppos) |
95 | { | 95 | { |
96 | touch_all_softlockup_watchdogs(); | 96 | touch_all_softlockup_watchdogs(); |
97 | return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos); | 97 | return proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
98 | } | 98 | } |
99 | 99 | ||
100 | /* | 100 | /* |
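
This hunk is one instance of a change threaded through the whole patch: sysctl proc handlers lose their struct file * parameter, and the proc_do*() helpers are called without it. A handler written against the new signature looks like the sketch below; my_threshold and my_apply_threshold() are placeholders.

static int my_threshold;

static void my_apply_threshold(int value)
{
	/* push the new value to wherever it is consumed */
}

static int my_thresh_sysctl(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* no struct file * between 'write' and 'buffer' any more */
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret == 0 && write)
		my_apply_threshold(my_threshold);
	return ret;
}
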
diff --git a/kernel/sys.c b/kernel/sys.c index b3f1097c76fa..ce17760d9c51 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/prctl.h> | 14 | #include <linux/prctl.h> |
15 | #include <linux/highuid.h> | 15 | #include <linux/highuid.h> |
16 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
17 | #include <linux/perf_counter.h> | 17 | #include <linux/perf_event.h> |
18 | #include <linux/resource.h> | 18 | #include <linux/resource.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/kexec.h> | 20 | #include <linux/kexec.h> |
@@ -1110,6 +1110,8 @@ SYSCALL_DEFINE0(setsid) | |||
1110 | err = session; | 1110 | err = session; |
1111 | out: | 1111 | out: |
1112 | write_unlock_irq(&tasklist_lock); | 1112 | write_unlock_irq(&tasklist_lock); |
1113 | if (err > 0) | ||
1114 | proc_sid_connector(group_leader); | ||
1113 | return err; | 1115 | return err; |
1114 | } | 1116 | } |
1115 | 1117 | ||
@@ -1338,6 +1340,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1338 | unsigned long flags; | 1340 | unsigned long flags; |
1339 | cputime_t utime, stime; | 1341 | cputime_t utime, stime; |
1340 | struct task_cputime cputime; | 1342 | struct task_cputime cputime; |
1343 | unsigned long maxrss = 0; | ||
1341 | 1344 | ||
1342 | memset((char *) r, 0, sizeof *r); | 1345 | memset((char *) r, 0, sizeof *r); |
1343 | utime = stime = cputime_zero; | 1346 | utime = stime = cputime_zero; |
@@ -1346,6 +1349,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1346 | utime = task_utime(current); | 1349 | utime = task_utime(current); |
1347 | stime = task_stime(current); | 1350 | stime = task_stime(current); |
1348 | accumulate_thread_rusage(p, r); | 1351 | accumulate_thread_rusage(p, r); |
1352 | maxrss = p->signal->maxrss; | ||
1349 | goto out; | 1353 | goto out; |
1350 | } | 1354 | } |
1351 | 1355 | ||
@@ -1363,6 +1367,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1363 | r->ru_majflt = p->signal->cmaj_flt; | 1367 | r->ru_majflt = p->signal->cmaj_flt; |
1364 | r->ru_inblock = p->signal->cinblock; | 1368 | r->ru_inblock = p->signal->cinblock; |
1365 | r->ru_oublock = p->signal->coublock; | 1369 | r->ru_oublock = p->signal->coublock; |
1370 | maxrss = p->signal->cmaxrss; | ||
1366 | 1371 | ||
1367 | if (who == RUSAGE_CHILDREN) | 1372 | if (who == RUSAGE_CHILDREN) |
1368 | break; | 1373 | break; |
@@ -1377,6 +1382,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1377 | r->ru_majflt += p->signal->maj_flt; | 1382 | r->ru_majflt += p->signal->maj_flt; |
1378 | r->ru_inblock += p->signal->inblock; | 1383 | r->ru_inblock += p->signal->inblock; |
1379 | r->ru_oublock += p->signal->oublock; | 1384 | r->ru_oublock += p->signal->oublock; |
1385 | if (maxrss < p->signal->maxrss) | ||
1386 | maxrss = p->signal->maxrss; | ||
1380 | t = p; | 1387 | t = p; |
1381 | do { | 1388 | do { |
1382 | accumulate_thread_rusage(t, r); | 1389 | accumulate_thread_rusage(t, r); |
@@ -1392,6 +1399,15 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1392 | out: | 1399 | out: |
1393 | cputime_to_timeval(utime, &r->ru_utime); | 1400 | cputime_to_timeval(utime, &r->ru_utime); |
1394 | cputime_to_timeval(stime, &r->ru_stime); | 1401 | cputime_to_timeval(stime, &r->ru_stime); |
1402 | |||
1403 | if (who != RUSAGE_CHILDREN) { | ||
1404 | struct mm_struct *mm = get_task_mm(p); | ||
1405 | if (mm) { | ||
1406 | setmax_mm_hiwater_rss(&maxrss, mm); | ||
1407 | mmput(mm); | ||
1408 | } | ||
1409 | } | ||
1410 | r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */ | ||
1395 | } | 1411 | } |
1396 | 1412 | ||
1397 | int getrusage(struct task_struct *p, int who, struct rusage __user *ru) | 1413 | int getrusage(struct task_struct *p, int who, struct rusage __user *ru) |
@@ -1511,11 +1527,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
1511 | case PR_SET_TSC: | 1527 | case PR_SET_TSC: |
1512 | error = SET_TSC_CTL(arg2); | 1528 | error = SET_TSC_CTL(arg2); |
1513 | break; | 1529 | break; |
1514 | case PR_TASK_PERF_COUNTERS_DISABLE: | 1530 | case PR_TASK_PERF_EVENTS_DISABLE: |
1515 | error = perf_counter_task_disable(); | 1531 | error = perf_event_task_disable(); |
1516 | break; | 1532 | break; |
1517 | case PR_TASK_PERF_COUNTERS_ENABLE: | 1533 | case PR_TASK_PERF_EVENTS_ENABLE: |
1518 | error = perf_counter_task_enable(); | 1534 | error = perf_event_task_enable(); |
1519 | break; | 1535 | break; |
1520 | case PR_GET_TIMERSLACK: | 1536 | case PR_GET_TIMERSLACK: |
1521 | error = current->timer_slack_ns; | 1537 | error = current->timer_slack_ns; |
@@ -1528,6 +1544,41 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
1528 | current->timer_slack_ns = arg2; | 1544 | current->timer_slack_ns = arg2; |
1529 | error = 0; | 1545 | error = 0; |
1530 | break; | 1546 | break; |
1547 | case PR_MCE_KILL: | ||
1548 | if (arg4 | arg5) | ||
1549 | return -EINVAL; | ||
1550 | switch (arg2) { | ||
1551 | case PR_MCE_KILL_CLEAR: | ||
1552 | if (arg3 != 0) | ||
1553 | return -EINVAL; | ||
1554 | current->flags &= ~PF_MCE_PROCESS; | ||
1555 | break; | ||
1556 | case PR_MCE_KILL_SET: | ||
1557 | current->flags |= PF_MCE_PROCESS; | ||
1558 | if (arg3 == PR_MCE_KILL_EARLY) | ||
1559 | current->flags |= PF_MCE_EARLY; | ||
1560 | else if (arg3 == PR_MCE_KILL_LATE) | ||
1561 | current->flags &= ~PF_MCE_EARLY; | ||
1562 | else if (arg3 == PR_MCE_KILL_DEFAULT) | ||
1563 | current->flags &= | ||
1564 | ~(PF_MCE_EARLY|PF_MCE_PROCESS); | ||
1565 | else | ||
1566 | return -EINVAL; | ||
1567 | break; | ||
1568 | default: | ||
1569 | return -EINVAL; | ||
1570 | } | ||
1571 | error = 0; | ||
1572 | break; | ||
1573 | case PR_MCE_KILL_GET: | ||
1574 | if (arg2 | arg3 | arg4 | arg5) | ||
1575 | return -EINVAL; | ||
1576 | if (current->flags & PF_MCE_PROCESS) | ||
1577 | error = (current->flags & PF_MCE_EARLY) ? | ||
1578 | PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE; | ||
1579 | else | ||
1580 | error = PR_MCE_KILL_DEFAULT; | ||
1581 | break; | ||
1531 | default: | 1582 | default: |
1532 | error = -EINVAL; | 1583 | error = -EINVAL; |
1533 | break; | 1584 | break; |
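
Besides the perf_event renames and the ru_maxrss reporting, sys.c gains a PR_MCE_KILL / PR_MCE_KILL_GET prctl pair that lets a process choose its memory-failure kill policy. From userspace it might be exercised as below; this is illustrative only and assumes headers that already carry the new PR_MCE_KILL* constants.

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int policy;

	/* opt this process in to early killing on memory failure;
	 * arg4 and arg5 must be zero or the kernel returns -EINVAL */
	if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0) < 0)
		perror("PR_MCE_KILL");

	/* read the policy back: EARLY, LATE or DEFAULT */
	policy = prctl(PR_MCE_KILL_GET, 0, 0, 0, 0);
	printf("mce kill policy: %d\n", policy);
	return 0;
}
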
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 68320f6b07b5..e06d0b8d1951 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c | |||
@@ -49,6 +49,7 @@ cond_syscall(sys_sendmsg); | |||
49 | cond_syscall(compat_sys_sendmsg); | 49 | cond_syscall(compat_sys_sendmsg); |
50 | cond_syscall(sys_recvmsg); | 50 | cond_syscall(sys_recvmsg); |
51 | cond_syscall(compat_sys_recvmsg); | 51 | cond_syscall(compat_sys_recvmsg); |
52 | cond_syscall(compat_sys_recvfrom); | ||
52 | cond_syscall(sys_socketcall); | 53 | cond_syscall(sys_socketcall); |
53 | cond_syscall(sys_futex); | 54 | cond_syscall(sys_futex); |
54 | cond_syscall(compat_sys_futex); | 55 | cond_syscall(compat_sys_futex); |
@@ -177,4 +178,4 @@ cond_syscall(sys_eventfd); | |||
177 | cond_syscall(sys_eventfd2); | 178 | cond_syscall(sys_eventfd2); |
178 | 179 | ||
179 | /* performance counters: */ | 180 | /* performance counters: */ |
180 | cond_syscall(sys_perf_counter_open); | 181 | cond_syscall(sys_perf_event_open); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 1a631ba684a4..0d949c517412 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/proc_fs.h> | 26 | #include <linux/proc_fs.h> |
27 | #include <linux/security.h> | 27 | #include <linux/security.h> |
28 | #include <linux/ctype.h> | 28 | #include <linux/ctype.h> |
29 | #include <linux/utsname.h> | ||
30 | #include <linux/kmemcheck.h> | 29 | #include <linux/kmemcheck.h> |
31 | #include <linux/smp_lock.h> | 30 | #include <linux/smp_lock.h> |
32 | #include <linux/fs.h> | 31 | #include <linux/fs.h> |
@@ -50,7 +49,7 @@ | |||
50 | #include <linux/reboot.h> | 49 | #include <linux/reboot.h> |
51 | #include <linux/ftrace.h> | 50 | #include <linux/ftrace.h> |
52 | #include <linux/slow-work.h> | 51 | #include <linux/slow-work.h> |
53 | #include <linux/perf_counter.h> | 52 | #include <linux/perf_event.h> |
54 | 53 | ||
55 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
56 | #include <asm/processor.h> | 55 | #include <asm/processor.h> |
@@ -77,6 +76,7 @@ extern int max_threads; | |||
77 | extern int core_uses_pid; | 76 | extern int core_uses_pid; |
78 | extern int suid_dumpable; | 77 | extern int suid_dumpable; |
79 | extern char core_pattern[]; | 78 | extern char core_pattern[]; |
79 | extern unsigned int core_pipe_limit; | ||
80 | extern int pid_max; | 80 | extern int pid_max; |
81 | extern int min_free_kbytes; | 81 | extern int min_free_kbytes; |
82 | extern int pid_max_min, pid_max_max; | 82 | extern int pid_max_min, pid_max_max; |
@@ -106,6 +106,9 @@ static int __maybe_unused one = 1; | |||
106 | static int __maybe_unused two = 2; | 106 | static int __maybe_unused two = 2; |
107 | static unsigned long one_ul = 1; | 107 | static unsigned long one_ul = 1; |
108 | static int one_hundred = 100; | 108 | static int one_hundred = 100; |
109 | #ifdef CONFIG_PRINTK | ||
110 | static int ten_thousand = 10000; | ||
111 | #endif | ||
109 | 112 | ||
110 | /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ | 113 | /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ |
111 | static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; | 114 | static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; |
@@ -160,9 +163,9 @@ extern int max_lock_depth; | |||
160 | #endif | 163 | #endif |
161 | 164 | ||
162 | #ifdef CONFIG_PROC_SYSCTL | 165 | #ifdef CONFIG_PROC_SYSCTL |
163 | static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp, | 166 | static int proc_do_cad_pid(struct ctl_table *table, int write, |
164 | void __user *buffer, size_t *lenp, loff_t *ppos); | 167 | void __user *buffer, size_t *lenp, loff_t *ppos); |
165 | static int proc_taint(struct ctl_table *table, int write, struct file *filp, | 168 | static int proc_taint(struct ctl_table *table, int write, |
166 | void __user *buffer, size_t *lenp, loff_t *ppos); | 169 | void __user *buffer, size_t *lenp, loff_t *ppos); |
167 | #endif | 170 | #endif |
168 | 171 | ||
@@ -421,6 +424,14 @@ static struct ctl_table kern_table[] = { | |||
421 | .proc_handler = &proc_dostring, | 424 | .proc_handler = &proc_dostring, |
422 | .strategy = &sysctl_string, | 425 | .strategy = &sysctl_string, |
423 | }, | 426 | }, |
427 | { | ||
428 | .ctl_name = CTL_UNNUMBERED, | ||
429 | .procname = "core_pipe_limit", | ||
430 | .data = &core_pipe_limit, | ||
431 | .maxlen = sizeof(unsigned int), | ||
432 | .mode = 0644, | ||
433 | .proc_handler = &proc_dointvec, | ||
434 | }, | ||
424 | #ifdef CONFIG_PROC_SYSCTL | 435 | #ifdef CONFIG_PROC_SYSCTL |
425 | { | 436 | { |
426 | .procname = "tainted", | 437 | .procname = "tainted", |
@@ -722,6 +733,17 @@ static struct ctl_table kern_table[] = { | |||
722 | .mode = 0644, | 733 | .mode = 0644, |
723 | .proc_handler = &proc_dointvec, | 734 | .proc_handler = &proc_dointvec, |
724 | }, | 735 | }, |
736 | { | ||
737 | .ctl_name = CTL_UNNUMBERED, | ||
738 | .procname = "printk_delay", | ||
739 | .data = &printk_delay_msec, | ||
740 | .maxlen = sizeof(int), | ||
741 | .mode = 0644, | ||
742 | .proc_handler = &proc_dointvec_minmax, | ||
743 | .strategy = &sysctl_intvec, | ||
744 | .extra1 = &zero, | ||
745 | .extra2 = &ten_thousand, | ||
746 | }, | ||
725 | #endif | 747 | #endif |
726 | { | 748 | { |
727 | .ctl_name = KERN_NGROUPS_MAX, | 749 | .ctl_name = KERN_NGROUPS_MAX, |
@@ -964,28 +986,28 @@ static struct ctl_table kern_table[] = { | |||
964 | .child = slow_work_sysctls, | 986 | .child = slow_work_sysctls, |
965 | }, | 987 | }, |
966 | #endif | 988 | #endif |
967 | #ifdef CONFIG_PERF_COUNTERS | 989 | #ifdef CONFIG_PERF_EVENTS |
968 | { | 990 | { |
969 | .ctl_name = CTL_UNNUMBERED, | 991 | .ctl_name = CTL_UNNUMBERED, |
970 | .procname = "perf_counter_paranoid", | 992 | .procname = "perf_event_paranoid", |
971 | .data = &sysctl_perf_counter_paranoid, | 993 | .data = &sysctl_perf_event_paranoid, |
972 | .maxlen = sizeof(sysctl_perf_counter_paranoid), | 994 | .maxlen = sizeof(sysctl_perf_event_paranoid), |
973 | .mode = 0644, | 995 | .mode = 0644, |
974 | .proc_handler = &proc_dointvec, | 996 | .proc_handler = &proc_dointvec, |
975 | }, | 997 | }, |
976 | { | 998 | { |
977 | .ctl_name = CTL_UNNUMBERED, | 999 | .ctl_name = CTL_UNNUMBERED, |
978 | .procname = "perf_counter_mlock_kb", | 1000 | .procname = "perf_event_mlock_kb", |
979 | .data = &sysctl_perf_counter_mlock, | 1001 | .data = &sysctl_perf_event_mlock, |
980 | .maxlen = sizeof(sysctl_perf_counter_mlock), | 1002 | .maxlen = sizeof(sysctl_perf_event_mlock), |
981 | .mode = 0644, | 1003 | .mode = 0644, |
982 | .proc_handler = &proc_dointvec, | 1004 | .proc_handler = &proc_dointvec, |
983 | }, | 1005 | }, |
984 | { | 1006 | { |
985 | .ctl_name = CTL_UNNUMBERED, | 1007 | .ctl_name = CTL_UNNUMBERED, |
986 | .procname = "perf_counter_max_sample_rate", | 1008 | .procname = "perf_event_max_sample_rate", |
987 | .data = &sysctl_perf_counter_sample_rate, | 1009 | .data = &sysctl_perf_event_sample_rate, |
988 | .maxlen = sizeof(sysctl_perf_counter_sample_rate), | 1010 | .maxlen = sizeof(sysctl_perf_event_sample_rate), |
989 | .mode = 0644, | 1011 | .mode = 0644, |
990 | .proc_handler = &proc_dointvec, | 1012 | .proc_handler = &proc_dointvec, |
991 | }, | 1013 | }, |
@@ -1376,6 +1398,31 @@ static struct ctl_table vm_table[] = { | |||
1376 | .mode = 0644, | 1398 | .mode = 0644, |
1377 | .proc_handler = &scan_unevictable_handler, | 1399 | .proc_handler = &scan_unevictable_handler, |
1378 | }, | 1400 | }, |
1401 | #ifdef CONFIG_MEMORY_FAILURE | ||
1402 | { | ||
1403 | .ctl_name = CTL_UNNUMBERED, | ||
1404 | .procname = "memory_failure_early_kill", | ||
1405 | .data = &sysctl_memory_failure_early_kill, | ||
1406 | .maxlen = sizeof(sysctl_memory_failure_early_kill), | ||
1407 | .mode = 0644, | ||
1408 | .proc_handler = &proc_dointvec_minmax, | ||
1409 | .strategy = &sysctl_intvec, | ||
1410 | .extra1 = &zero, | ||
1411 | .extra2 = &one, | ||
1412 | }, | ||
1413 | { | ||
1414 | .ctl_name = CTL_UNNUMBERED, | ||
1415 | .procname = "memory_failure_recovery", | ||
1416 | .data = &sysctl_memory_failure_recovery, | ||
1417 | .maxlen = sizeof(sysctl_memory_failure_recovery), | ||
1418 | .mode = 0644, | ||
1419 | .proc_handler = &proc_dointvec_minmax, | ||
1420 | .strategy = &sysctl_intvec, | ||
1421 | .extra1 = &zero, | ||
1422 | .extra2 = &one, | ||
1423 | }, | ||
1424 | #endif | ||
1425 | |||
1379 | /* | 1426 | /* |
1380 | * NOTE: do not add new entries to this table unless you have read | 1427 | * NOTE: do not add new entries to this table unless you have read |
1381 | * Documentation/sysctl/ctl_unnumbered.txt | 1428 | * Documentation/sysctl/ctl_unnumbered.txt |
@@ -2204,7 +2251,7 @@ void sysctl_head_put(struct ctl_table_header *head) | |||
2204 | #ifdef CONFIG_PROC_SYSCTL | 2251 | #ifdef CONFIG_PROC_SYSCTL |
2205 | 2252 | ||
2206 | static int _proc_do_string(void* data, int maxlen, int write, | 2253 | static int _proc_do_string(void* data, int maxlen, int write, |
2207 | struct file *filp, void __user *buffer, | 2254 | void __user *buffer, |
2208 | size_t *lenp, loff_t *ppos) | 2255 | size_t *lenp, loff_t *ppos) |
2209 | { | 2256 | { |
2210 | size_t len; | 2257 | size_t len; |
@@ -2265,7 +2312,6 @@ static int _proc_do_string(void* data, int maxlen, int write, | |||
2265 | * proc_dostring - read a string sysctl | 2312 | * proc_dostring - read a string sysctl |
2266 | * @table: the sysctl table | 2313 | * @table: the sysctl table |
2267 | * @write: %TRUE if this is a write to the sysctl file | 2314 | * @write: %TRUE if this is a write to the sysctl file |
2268 | * @filp: the file structure | ||
2269 | * @buffer: the user buffer | 2315 | * @buffer: the user buffer |
2270 | * @lenp: the size of the user buffer | 2316 | * @lenp: the size of the user buffer |
2271 | * @ppos: file position | 2317 | * @ppos: file position |
@@ -2279,10 +2325,10 @@ static int _proc_do_string(void* data, int maxlen, int write, | |||
2279 | * | 2325 | * |
2280 | * Returns 0 on success. | 2326 | * Returns 0 on success. |
2281 | */ | 2327 | */ |
2282 | int proc_dostring(struct ctl_table *table, int write, struct file *filp, | 2328 | int proc_dostring(struct ctl_table *table, int write, |
2283 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2329 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2284 | { | 2330 | { |
2285 | return _proc_do_string(table->data, table->maxlen, write, filp, | 2331 | return _proc_do_string(table->data, table->maxlen, write, |
2286 | buffer, lenp, ppos); | 2332 | buffer, lenp, ppos); |
2287 | } | 2333 | } |
2288 | 2334 | ||
@@ -2307,7 +2353,7 @@ static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp, | |||
2307 | } | 2353 | } |
2308 | 2354 | ||
2309 | static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, | 2355 | static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, |
2310 | int write, struct file *filp, void __user *buffer, | 2356 | int write, void __user *buffer, |
2311 | size_t *lenp, loff_t *ppos, | 2357 | size_t *lenp, loff_t *ppos, |
2312 | int (*conv)(int *negp, unsigned long *lvalp, int *valp, | 2358 | int (*conv)(int *negp, unsigned long *lvalp, int *valp, |
2313 | int write, void *data), | 2359 | int write, void *data), |
@@ -2414,13 +2460,13 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, | |||
2414 | #undef TMPBUFLEN | 2460 | #undef TMPBUFLEN |
2415 | } | 2461 | } |
2416 | 2462 | ||
2417 | static int do_proc_dointvec(struct ctl_table *table, int write, struct file *filp, | 2463 | static int do_proc_dointvec(struct ctl_table *table, int write, |
2418 | void __user *buffer, size_t *lenp, loff_t *ppos, | 2464 | void __user *buffer, size_t *lenp, loff_t *ppos, |
2419 | int (*conv)(int *negp, unsigned long *lvalp, int *valp, | 2465 | int (*conv)(int *negp, unsigned long *lvalp, int *valp, |
2420 | int write, void *data), | 2466 | int write, void *data), |
2421 | void *data) | 2467 | void *data) |
2422 | { | 2468 | { |
2423 | return __do_proc_dointvec(table->data, table, write, filp, | 2469 | return __do_proc_dointvec(table->data, table, write, |
2424 | buffer, lenp, ppos, conv, data); | 2470 | buffer, lenp, ppos, conv, data); |
2425 | } | 2471 | } |
2426 | 2472 | ||
@@ -2428,7 +2474,6 @@ static int do_proc_dointvec(struct ctl_table *table, int write, struct file *fil | |||
2428 | * proc_dointvec - read a vector of integers | 2474 | * proc_dointvec - read a vector of integers |
2429 | * @table: the sysctl table | 2475 | * @table: the sysctl table |
2430 | * @write: %TRUE if this is a write to the sysctl file | 2476 | * @write: %TRUE if this is a write to the sysctl file |
2431 | * @filp: the file structure | ||
2432 | * @buffer: the user buffer | 2477 | * @buffer: the user buffer |
2433 | * @lenp: the size of the user buffer | 2478 | * @lenp: the size of the user buffer |
2434 | * @ppos: file position | 2479 | * @ppos: file position |
@@ -2438,10 +2483,10 @@ static int do_proc_dointvec(struct ctl_table *table, int write, struct file *fil | |||
2438 | * | 2483 | * |
2439 | * Returns 0 on success. | 2484 | * Returns 0 on success. |
2440 | */ | 2485 | */ |
2441 | int proc_dointvec(struct ctl_table *table, int write, struct file *filp, | 2486 | int proc_dointvec(struct ctl_table *table, int write, |
2442 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2487 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2443 | { | 2488 | { |
2444 | return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, | 2489 | return do_proc_dointvec(table,write,buffer,lenp,ppos, |
2445 | NULL,NULL); | 2490 | NULL,NULL); |
2446 | } | 2491 | } |
2447 | 2492 | ||
@@ -2449,7 +2494,7 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp, | |||
2449 | * Taint values can only be increased | 2494 | * Taint values can only be increased |
2450 | * This means we can safely use a temporary. | 2495 | * This means we can safely use a temporary. |
2451 | */ | 2496 | */ |
2452 | static int proc_taint(struct ctl_table *table, int write, struct file *filp, | 2497 | static int proc_taint(struct ctl_table *table, int write, |
2453 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2498 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2454 | { | 2499 | { |
2455 | struct ctl_table t; | 2500 | struct ctl_table t; |
@@ -2461,7 +2506,7 @@ static int proc_taint(struct ctl_table *table, int write, struct file *filp, | |||
2461 | 2506 | ||
2462 | t = *table; | 2507 | t = *table; |
2463 | t.data = &tmptaint; | 2508 | t.data = &tmptaint; |
2464 | err = proc_doulongvec_minmax(&t, write, filp, buffer, lenp, ppos); | 2509 | err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos); |
2465 | if (err < 0) | 2510 | if (err < 0) |
2466 | return err; | 2511 | return err; |
2467 | 2512 | ||
@@ -2513,7 +2558,6 @@ static int do_proc_dointvec_minmax_conv(int *negp, unsigned long *lvalp, | |||
2513 | * proc_dointvec_minmax - read a vector of integers with min/max values | 2558 | * proc_dointvec_minmax - read a vector of integers with min/max values |
2514 | * @table: the sysctl table | 2559 | * @table: the sysctl table |
2515 | * @write: %TRUE if this is a write to the sysctl file | 2560 | * @write: %TRUE if this is a write to the sysctl file |
2516 | * @filp: the file structure | ||
2517 | * @buffer: the user buffer | 2561 | * @buffer: the user buffer |
2518 | * @lenp: the size of the user buffer | 2562 | * @lenp: the size of the user buffer |
2519 | * @ppos: file position | 2563 | * @ppos: file position |
@@ -2526,19 +2570,18 @@ static int do_proc_dointvec_minmax_conv(int *negp, unsigned long *lvalp, | |||
2526 | * | 2570 | * |
2527 | * Returns 0 on success. | 2571 | * Returns 0 on success. |
2528 | */ | 2572 | */ |
2529 | int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp, | 2573 | int proc_dointvec_minmax(struct ctl_table *table, int write, |
2530 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2574 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2531 | { | 2575 | { |
2532 | struct do_proc_dointvec_minmax_conv_param param = { | 2576 | struct do_proc_dointvec_minmax_conv_param param = { |
2533 | .min = (int *) table->extra1, | 2577 | .min = (int *) table->extra1, |
2534 | .max = (int *) table->extra2, | 2578 | .max = (int *) table->extra2, |
2535 | }; | 2579 | }; |
2536 | return do_proc_dointvec(table, write, filp, buffer, lenp, ppos, | 2580 | return do_proc_dointvec(table, write, buffer, lenp, ppos, |
2537 | do_proc_dointvec_minmax_conv, ¶m); | 2581 | do_proc_dointvec_minmax_conv, ¶m); |
2538 | } | 2582 | } |
2539 | 2583 | ||
2540 | static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write, | 2584 | static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write, |
2541 | struct file *filp, | ||
2542 | void __user *buffer, | 2585 | void __user *buffer, |
2543 | size_t *lenp, loff_t *ppos, | 2586 | size_t *lenp, loff_t *ppos, |
2544 | unsigned long convmul, | 2587 | unsigned long convmul, |
@@ -2643,21 +2686,19 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int | |||
2643 | } | 2686 | } |
2644 | 2687 | ||
2645 | static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, | 2688 | static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, |
2646 | struct file *filp, | ||
2647 | void __user *buffer, | 2689 | void __user *buffer, |
2648 | size_t *lenp, loff_t *ppos, | 2690 | size_t *lenp, loff_t *ppos, |
2649 | unsigned long convmul, | 2691 | unsigned long convmul, |
2650 | unsigned long convdiv) | 2692 | unsigned long convdiv) |
2651 | { | 2693 | { |
2652 | return __do_proc_doulongvec_minmax(table->data, table, write, | 2694 | return __do_proc_doulongvec_minmax(table->data, table, write, |
2653 | filp, buffer, lenp, ppos, convmul, convdiv); | 2695 | buffer, lenp, ppos, convmul, convdiv); |
2654 | } | 2696 | } |
2655 | 2697 | ||
2656 | /** | 2698 | /** |
2657 | * proc_doulongvec_minmax - read a vector of long integers with min/max values | 2699 | * proc_doulongvec_minmax - read a vector of long integers with min/max values |
2658 | * @table: the sysctl table | 2700 | * @table: the sysctl table |
2659 | * @write: %TRUE if this is a write to the sysctl file | 2701 | * @write: %TRUE if this is a write to the sysctl file |
2660 | * @filp: the file structure | ||
2661 | * @buffer: the user buffer | 2702 | * @buffer: the user buffer |
2662 | * @lenp: the size of the user buffer | 2703 | * @lenp: the size of the user buffer |
2663 | * @ppos: file position | 2704 | * @ppos: file position |
@@ -2670,17 +2711,16 @@ static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, | |||
2670 | * | 2711 | * |
2671 | * Returns 0 on success. | 2712 | * Returns 0 on success. |
2672 | */ | 2713 | */ |
2673 | int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp, | 2714 | int proc_doulongvec_minmax(struct ctl_table *table, int write, |
2674 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2715 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2675 | { | 2716 | { |
2676 | return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l); | 2717 | return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l); |
2677 | } | 2718 | } |
2678 | 2719 | ||
2679 | /** | 2720 | /** |
2680 | * proc_doulongvec_ms_jiffies_minmax - read a vector of millisecond values with min/max values | 2721 | * proc_doulongvec_ms_jiffies_minmax - read a vector of millisecond values with min/max values |
2681 | * @table: the sysctl table | 2722 | * @table: the sysctl table |
2682 | * @write: %TRUE if this is a write to the sysctl file | 2723 | * @write: %TRUE if this is a write to the sysctl file |
2683 | * @filp: the file structure | ||
2684 | * @buffer: the user buffer | 2724 | * @buffer: the user buffer |
2685 | * @lenp: the size of the user buffer | 2725 | * @lenp: the size of the user buffer |
2686 | * @ppos: file position | 2726 | * @ppos: file position |
@@ -2695,11 +2735,10 @@ int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp | |||
2695 | * Returns 0 on success. | 2735 | * Returns 0 on success. |
2696 | */ | 2736 | */ |
2697 | int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, | 2737 | int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, |
2698 | struct file *filp, | ||
2699 | void __user *buffer, | 2738 | void __user *buffer, |
2700 | size_t *lenp, loff_t *ppos) | 2739 | size_t *lenp, loff_t *ppos) |
2701 | { | 2740 | { |
2702 | return do_proc_doulongvec_minmax(table, write, filp, buffer, | 2741 | return do_proc_doulongvec_minmax(table, write, buffer, |
2703 | lenp, ppos, HZ, 1000l); | 2742 | lenp, ppos, HZ, 1000l); |
2704 | } | 2743 | } |
2705 | 2744 | ||
@@ -2775,7 +2814,6 @@ static int do_proc_dointvec_ms_jiffies_conv(int *negp, unsigned long *lvalp, | |||
2775 | * proc_dointvec_jiffies - read a vector of integers as seconds | 2814 | * proc_dointvec_jiffies - read a vector of integers as seconds |
2776 | * @table: the sysctl table | 2815 | * @table: the sysctl table |
2777 | * @write: %TRUE if this is a write to the sysctl file | 2816 | * @write: %TRUE if this is a write to the sysctl file |
2778 | * @filp: the file structure | ||
2779 | * @buffer: the user buffer | 2817 | * @buffer: the user buffer |
2780 | * @lenp: the size of the user buffer | 2818 | * @lenp: the size of the user buffer |
2781 | * @ppos: file position | 2819 | * @ppos: file position |
@@ -2787,10 +2825,10 @@ static int do_proc_dointvec_ms_jiffies_conv(int *negp, unsigned long *lvalp, | |||
2787 | * | 2825 | * |
2788 | * Returns 0 on success. | 2826 | * Returns 0 on success. |
2789 | */ | 2827 | */ |
2790 | int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp, | 2828 | int proc_dointvec_jiffies(struct ctl_table *table, int write, |
2791 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2829 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2792 | { | 2830 | { |
2793 | return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, | 2831 | return do_proc_dointvec(table,write,buffer,lenp,ppos, |
2794 | do_proc_dointvec_jiffies_conv,NULL); | 2832 | do_proc_dointvec_jiffies_conv,NULL); |
2795 | } | 2833 | } |
2796 | 2834 | ||
@@ -2798,7 +2836,6 @@ int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp, | |||
2798 | * proc_dointvec_userhz_jiffies - read a vector of integers as 1/USER_HZ seconds | 2836 | * proc_dointvec_userhz_jiffies - read a vector of integers as 1/USER_HZ seconds |
2799 | * @table: the sysctl table | 2837 | * @table: the sysctl table |
2800 | * @write: %TRUE if this is a write to the sysctl file | 2838 | * @write: %TRUE if this is a write to the sysctl file |
2801 | * @filp: the file structure | ||
2802 | * @buffer: the user buffer | 2839 | * @buffer: the user buffer |
2803 | * @lenp: the size of the user buffer | 2840 | * @lenp: the size of the user buffer |
2804 | * @ppos: pointer to the file position | 2841 | * @ppos: pointer to the file position |
@@ -2810,10 +2847,10 @@ int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp, | |||
2810 | * | 2847 | * |
2811 | * Returns 0 on success. | 2848 | * Returns 0 on success. |
2812 | */ | 2849 | */ |
2813 | int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp, | 2850 | int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, |
2814 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2851 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2815 | { | 2852 | { |
2816 | return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, | 2853 | return do_proc_dointvec(table,write,buffer,lenp,ppos, |
2817 | do_proc_dointvec_userhz_jiffies_conv,NULL); | 2854 | do_proc_dointvec_userhz_jiffies_conv,NULL); |
2818 | } | 2855 | } |
2819 | 2856 | ||
@@ -2821,7 +2858,6 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file | |||
2821 | * proc_dointvec_ms_jiffies - read a vector of integers as 1 milliseconds | 2858 | * proc_dointvec_ms_jiffies - read a vector of integers as 1 milliseconds |
2822 | * @table: the sysctl table | 2859 | * @table: the sysctl table |
2823 | * @write: %TRUE if this is a write to the sysctl file | 2860 | * @write: %TRUE if this is a write to the sysctl file |
2824 | * @filp: the file structure | ||
2825 | * @buffer: the user buffer | 2861 | * @buffer: the user buffer |
2826 | * @lenp: the size of the user buffer | 2862 | * @lenp: the size of the user buffer |
2827 | * @ppos: file position | 2863 | * @ppos: file position |
@@ -2834,14 +2870,14 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file | |||
2834 | * | 2870 | * |
2835 | * Returns 0 on success. | 2871 | * Returns 0 on success. |
2836 | */ | 2872 | */ |
2837 | int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp, | 2873 | int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, |
2838 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2874 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2839 | { | 2875 | { |
2840 | return do_proc_dointvec(table, write, filp, buffer, lenp, ppos, | 2876 | return do_proc_dointvec(table, write, buffer, lenp, ppos, |
2841 | do_proc_dointvec_ms_jiffies_conv, NULL); | 2877 | do_proc_dointvec_ms_jiffies_conv, NULL); |
2842 | } | 2878 | } |
2843 | 2879 | ||
2844 | static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp, | 2880 | static int proc_do_cad_pid(struct ctl_table *table, int write, |
2845 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2881 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2846 | { | 2882 | { |
2847 | struct pid *new_pid; | 2883 | struct pid *new_pid; |
@@ -2850,7 +2886,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp | |||
2850 | 2886 | ||
2851 | tmp = pid_vnr(cad_pid); | 2887 | tmp = pid_vnr(cad_pid); |
2852 | 2888 | ||
2853 | r = __do_proc_dointvec(&tmp, table, write, filp, buffer, | 2889 | r = __do_proc_dointvec(&tmp, table, write, buffer, |
2854 | lenp, ppos, NULL, NULL); | 2890 | lenp, ppos, NULL, NULL); |
2855 | if (r || !write) | 2891 | if (r || !write) |
2856 | return r; | 2892 | return r; |
@@ -2865,50 +2901,49 @@ static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp | |||
2865 | 2901 | ||
2866 | #else /* CONFIG_PROC_FS */ | 2902 | #else /* CONFIG_PROC_FS */ |
2867 | 2903 | ||
2868 | int proc_dostring(struct ctl_table *table, int write, struct file *filp, | 2904 | int proc_dostring(struct ctl_table *table, int write, |
2869 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2905 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2870 | { | 2906 | { |
2871 | return -ENOSYS; | 2907 | return -ENOSYS; |
2872 | } | 2908 | } |
2873 | 2909 | ||
2874 | int proc_dointvec(struct ctl_table *table, int write, struct file *filp, | 2910 | int proc_dointvec(struct ctl_table *table, int write, |
2875 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2911 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2876 | { | 2912 | { |
2877 | return -ENOSYS; | 2913 | return -ENOSYS; |
2878 | } | 2914 | } |
2879 | 2915 | ||
2880 | int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp, | 2916 | int proc_dointvec_minmax(struct ctl_table *table, int write, |
2881 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2917 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2882 | { | 2918 | { |
2883 | return -ENOSYS; | 2919 | return -ENOSYS; |
2884 | } | 2920 | } |
2885 | 2921 | ||
2886 | int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp, | 2922 | int proc_dointvec_jiffies(struct ctl_table *table, int write, |
2887 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2923 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2888 | { | 2924 | { |
2889 | return -ENOSYS; | 2925 | return -ENOSYS; |
2890 | } | 2926 | } |
2891 | 2927 | ||
2892 | int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp, | 2928 | int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, |
2893 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2929 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2894 | { | 2930 | { |
2895 | return -ENOSYS; | 2931 | return -ENOSYS; |
2896 | } | 2932 | } |
2897 | 2933 | ||
2898 | int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp, | 2934 | int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, |
2899 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2935 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2900 | { | 2936 | { |
2901 | return -ENOSYS; | 2937 | return -ENOSYS; |
2902 | } | 2938 | } |
2903 | 2939 | ||
2904 | int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp, | 2940 | int proc_doulongvec_minmax(struct ctl_table *table, int write, |
2905 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2941 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2906 | { | 2942 | { |
2907 | return -ENOSYS; | 2943 | return -ENOSYS; |
2908 | } | 2944 | } |
2909 | 2945 | ||
2910 | int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, | 2946 | int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, |
2911 | struct file *filp, | ||
2912 | void __user *buffer, | 2947 | void __user *buffer, |
2913 | size_t *lenp, loff_t *ppos) | 2948 | size_t *lenp, loff_t *ppos) |
2914 | { | 2949 | { |
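The common thread of this sysctl.c hunk is dropping the unused struct file *filp argument from every proc handler. A minimal sketch of what a handler and its table entry look like with the new prototype, modeled on proc_taint() and the memory_failure entries above; the names proc_example_knob and example_knob are illustrative, not part of the patch:

static int example_zero;
static int example_one = 1;
static int example_knob;

/* New-style handler: no struct file * parameter anymore. */
static int proc_example_knob(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Delegate to the generic clamped-integer handler. */
	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table example_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(example_knob),
		.mode		= 0644,
		.proc_handler	= &proc_example_knob,
		.strategy	= &sysctl_intvec,
		.extra1		= &example_zero,
		.extra2		= &example_one,
	},
	{ .ctl_name = 0 }
};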
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c index b38423ca711a..b6e7aaea4604 100644 --- a/kernel/sysctl_check.c +++ b/kernel/sysctl_check.c | |||
@@ -1521,7 +1521,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) | |||
1521 | if (!table->ctl_name && table->strategy) | 1521 | if (!table->ctl_name && table->strategy) |
1522 | set_fail(&fail, table, "Strategy without ctl_name"); | 1522 | set_fail(&fail, table, "Strategy without ctl_name"); |
1523 | #endif | 1523 | #endif |
1524 | #ifdef CONFIG_PROC_FS | 1524 | #ifdef CONFIG_PROC_SYSCTL |
1525 | if (table->procname && !table->proc_handler) | 1525 | if (table->procname && !table->proc_handler) |
1526 | set_fail(&fail, table, "No proc_handler"); | 1526 | set_fail(&fail, table, "No proc_handler"); |
1527 | #endif | 1527 | #endif |
diff --git a/kernel/time/Makefile b/kernel/time/Makefile index 0b0a6366c9d4..ee266620b06c 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o | 1 | obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o timeconv.o |
2 | 2 | ||
3 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o | 3 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o |
4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o | 4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 09113347d328..5e18c6ab2c6a 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -394,15 +394,11 @@ void clocksource_resume(void) | |||
394 | { | 394 | { |
395 | struct clocksource *cs; | 395 | struct clocksource *cs; |
396 | 396 | ||
397 | mutex_lock(&clocksource_mutex); | ||
398 | |||
399 | list_for_each_entry(cs, &clocksource_list, list) | 397 | list_for_each_entry(cs, &clocksource_list, list) |
400 | if (cs->resume) | 398 | if (cs->resume) |
401 | cs->resume(); | 399 | cs->resume(); |
402 | 400 | ||
403 | clocksource_resume_watchdog(); | 401 | clocksource_resume_watchdog(); |
404 | |||
405 | mutex_unlock(&clocksource_mutex); | ||
406 | } | 402 | } |
407 | 403 | ||
408 | /** | 404 | /** |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index e0f59a21c061..89aed5933ed4 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -231,6 +231,13 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
231 | if (!inidle && !ts->inidle) | 231 | if (!inidle && !ts->inidle) |
232 | goto end; | 232 | goto end; |
233 | 233 | ||
234 | /* | ||
235 | * Set ts->inidle unconditionally. Even if the system did not | ||
236 | * switch to NOHZ mode, the cpu frequency governors rely on the | ||
237 | * update of the idle time accounting in tick_nohz_start_idle(). | ||
238 | */ | ||
239 | ts->inidle = 1; | ||
240 | |||
234 | now = tick_nohz_start_idle(ts); | 241 | now = tick_nohz_start_idle(ts); |
235 | 242 | ||
236 | /* | 243 | /* |
@@ -248,8 +255,6 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
248 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | 255 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) |
249 | goto end; | 256 | goto end; |
250 | 257 | ||
251 | ts->inidle = 1; | ||
252 | |||
253 | if (need_resched()) | 258 | if (need_resched()) |
254 | goto end; | 259 | goto end; |
255 | 260 | ||
diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c new file mode 100644 index 000000000000..86628e755f38 --- /dev/null +++ b/kernel/time/timeconv.c | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc. | ||
3 | * This file is part of the GNU C Library. | ||
4 | * Contributed by Paul Eggert (eggert@twinsun.com). | ||
5 | * | ||
6 | * The GNU C Library is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU Library General Public License as | ||
8 | * published by the Free Software Foundation; either version 2 of the | ||
9 | * License, or (at your option) any later version. | ||
10 | * | ||
11 | * The GNU C Library is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * Library General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU Library General Public | ||
17 | * License along with the GNU C Library; see the file COPYING.LIB. If not, | ||
18 | * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
19 | * Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | /* | ||
23 | * Converts the calendar time to broken-down time representation | ||
24 | * Based on code from glibc-2.6 | ||
25 | * | ||
26 | * 2009-7-14: | ||
27 | * Moved from glibc-2.6 to kernel by Zhaolei<zhaolei@cn.fujitsu.com> | ||
28 | */ | ||
29 | |||
30 | #include <linux/time.h> | ||
31 | #include <linux/module.h> | ||
32 | |||
33 | /* | ||
34 | * Nonzero if YEAR is a leap year (every 4 years, | ||
35 | * except every 100th isn't, and every 400th is). | ||
36 | */ | ||
37 | static int __isleap(long year) | ||
38 | { | ||
39 | return (year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0); | ||
40 | } | ||
41 | |||
42 | /* do a mathdiv for long type */ | ||
43 | static long math_div(long a, long b) | ||
44 | { | ||
45 | return a / b - (a % b < 0); | ||
46 | } | ||
47 | |||
48 | /* How many leap years between y1 and y2; y1 must be less than or equal to y2 */ | ||
49 | static long leaps_between(long y1, long y2) | ||
50 | { | ||
51 | long leaps1 = math_div(y1 - 1, 4) - math_div(y1 - 1, 100) | ||
52 | + math_div(y1 - 1, 400); | ||
53 | long leaps2 = math_div(y2 - 1, 4) - math_div(y2 - 1, 100) | ||
54 | + math_div(y2 - 1, 400); | ||
55 | return leaps2 - leaps1; | ||
56 | } | ||
57 | |||
58 | /* How many days come before each month (0-12). */ | ||
59 | static const unsigned short __mon_yday[2][13] = { | ||
60 | /* Normal years. */ | ||
61 | {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}, | ||
62 | /* Leap years. */ | ||
63 | {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366} | ||
64 | }; | ||
65 | |||
66 | #define SECS_PER_HOUR (60 * 60) | ||
67 | #define SECS_PER_DAY (SECS_PER_HOUR * 24) | ||
68 | |||
69 | /** | ||
70 | * time_to_tm - converts the calendar time to local broken-down time | ||
71 | * | ||
72 | * @totalsecs:	the number of seconds elapsed since 00:00:00 on January 1, 1970, | ||
73 | *		Coordinated Universal Time (UTC). | ||
74 | * @offset:	offset in seconds to add to totalsecs. | ||
75 | * @result:	pointer to struct tm variable to receive broken-down time | ||
76 | */ | ||
77 | void time_to_tm(time_t totalsecs, int offset, struct tm *result) | ||
78 | { | ||
79 | long days, rem, y; | ||
80 | const unsigned short *ip; | ||
81 | |||
82 | days = totalsecs / SECS_PER_DAY; | ||
83 | rem = totalsecs % SECS_PER_DAY; | ||
84 | rem += offset; | ||
85 | while (rem < 0) { | ||
86 | rem += SECS_PER_DAY; | ||
87 | --days; | ||
88 | } | ||
89 | while (rem >= SECS_PER_DAY) { | ||
90 | rem -= SECS_PER_DAY; | ||
91 | ++days; | ||
92 | } | ||
93 | |||
94 | result->tm_hour = rem / SECS_PER_HOUR; | ||
95 | rem %= SECS_PER_HOUR; | ||
96 | result->tm_min = rem / 60; | ||
97 | result->tm_sec = rem % 60; | ||
98 | |||
99 | /* January 1, 1970 was a Thursday. */ | ||
100 | result->tm_wday = (4 + days) % 7; | ||
101 | if (result->tm_wday < 0) | ||
102 | result->tm_wday += 7; | ||
103 | |||
104 | y = 1970; | ||
105 | |||
106 | while (days < 0 || days >= (__isleap(y) ? 366 : 365)) { | ||
107 | /* Guess a corrected year, assuming 365 days per year. */ | ||
108 | long yg = y + math_div(days, 365); | ||
109 | |||
110 | /* Adjust DAYS and Y to match the guessed year. */ | ||
111 | days -= (yg - y) * 365 + leaps_between(y, yg); | ||
112 | y = yg; | ||
113 | } | ||
114 | |||
115 | result->tm_year = y - 1900; | ||
116 | |||
117 | result->tm_yday = days; | ||
118 | |||
119 | ip = __mon_yday[__isleap(y)]; | ||
120 | for (y = 11; days < ip[y]; y--) | ||
121 | continue; | ||
122 | days -= ip[y]; | ||
123 | |||
124 | result->tm_mon = y; | ||
125 | result->tm_mday = days + 1; | ||
126 | } | ||
127 | EXPORT_SYMBOL(time_to_tm); | ||
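A quick usage sketch for the new helper; the wrapper function and printk format are illustrative only, but the field semantics follow the code above (tm_year counts from 1900, tm_mon from 0):

#include <linux/time.h>
#include <linux/kernel.h>

/* Print a time_t as a calendar date, interpreting it as UTC (offset 0). */
static void print_utc_date(time_t stamp)
{
	struct tm tm;

	time_to_tm(stamp, 0, &tm);
	printk(KERN_INFO "%04ld-%02d-%02d %02d:%02d:%02d UTC\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
}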
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index fb0f46fa1ecd..c3a4e2907eaa 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/percpu.h> | 13 | #include <linux/percpu.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
16 | #include <linux/sched.h> | ||
16 | #include <linux/sysdev.h> | 17 | #include <linux/sysdev.h> |
17 | #include <linux/clocksource.h> | 18 | #include <linux/clocksource.h> |
18 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index fddd69d16e03..1b5b7aa2fdfd 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c | |||
@@ -275,7 +275,7 @@ static int timer_list_open(struct inode *inode, struct file *filp) | |||
275 | return single_open(filp, timer_list_show, NULL); | 275 | return single_open(filp, timer_list_show, NULL); |
276 | } | 276 | } |
277 | 277 | ||
278 | static struct file_operations timer_list_fops = { | 278 | static const struct file_operations timer_list_fops = { |
279 | .open = timer_list_open, | 279 | .open = timer_list_open, |
280 | .read = seq_read, | 280 | .read = seq_read, |
281 | .llseek = seq_lseek, | 281 | .llseek = seq_lseek, |
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index 4cde8b9c716f..ee5681f8d7ec 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c | |||
@@ -395,7 +395,7 @@ static int tstats_open(struct inode *inode, struct file *filp) | |||
395 | return single_open(filp, tstats_show, NULL); | 395 | return single_open(filp, tstats_show, NULL); |
396 | } | 396 | } |
397 | 397 | ||
398 | static struct file_operations tstats_fops = { | 398 | static const struct file_operations tstats_fops = { |
399 | .open = tstats_open, | 399 | .open = tstats_open, |
400 | .read = seq_read, | 400 | .read = seq_read, |
401 | .write = tstats_write, | 401 | .write = tstats_write, |
diff --git a/kernel/timer.c b/kernel/timer.c index bbb51074680e..5db5a8d26811 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <linux/delay.h> | 37 | #include <linux/delay.h> |
38 | #include <linux/tick.h> | 38 | #include <linux/tick.h> |
39 | #include <linux/kallsyms.h> | 39 | #include <linux/kallsyms.h> |
40 | #include <linux/perf_counter.h> | 40 | #include <linux/perf_event.h> |
41 | #include <linux/sched.h> | 41 | #include <linux/sched.h> |
42 | 42 | ||
43 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
@@ -46,6 +46,9 @@ | |||
46 | #include <asm/timex.h> | 46 | #include <asm/timex.h> |
47 | #include <asm/io.h> | 47 | #include <asm/io.h> |
48 | 48 | ||
49 | #define CREATE_TRACE_POINTS | ||
50 | #include <trace/events/timer.h> | ||
51 | |||
49 | u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; | 52 | u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; |
50 | 53 | ||
51 | EXPORT_SYMBOL(jiffies_64); | 54 | EXPORT_SYMBOL(jiffies_64); |
@@ -521,6 +524,25 @@ static inline void debug_timer_activate(struct timer_list *timer) { } | |||
521 | static inline void debug_timer_deactivate(struct timer_list *timer) { } | 524 | static inline void debug_timer_deactivate(struct timer_list *timer) { } |
522 | #endif | 525 | #endif |
523 | 526 | ||
527 | static inline void debug_init(struct timer_list *timer) | ||
528 | { | ||
529 | debug_timer_init(timer); | ||
530 | trace_timer_init(timer); | ||
531 | } | ||
532 | |||
533 | static inline void | ||
534 | debug_activate(struct timer_list *timer, unsigned long expires) | ||
535 | { | ||
536 | debug_timer_activate(timer); | ||
537 | trace_timer_start(timer, expires); | ||
538 | } | ||
539 | |||
540 | static inline void debug_deactivate(struct timer_list *timer) | ||
541 | { | ||
542 | debug_timer_deactivate(timer); | ||
543 | trace_timer_cancel(timer); | ||
544 | } | ||
545 | |||
524 | static void __init_timer(struct timer_list *timer, | 546 | static void __init_timer(struct timer_list *timer, |
525 | const char *name, | 547 | const char *name, |
526 | struct lock_class_key *key) | 548 | struct lock_class_key *key) |
@@ -549,7 +571,7 @@ void init_timer_key(struct timer_list *timer, | |||
549 | const char *name, | 571 | const char *name, |
550 | struct lock_class_key *key) | 572 | struct lock_class_key *key) |
551 | { | 573 | { |
552 | debug_timer_init(timer); | 574 | debug_init(timer); |
553 | __init_timer(timer, name, key); | 575 | __init_timer(timer, name, key); |
554 | } | 576 | } |
555 | EXPORT_SYMBOL(init_timer_key); | 577 | EXPORT_SYMBOL(init_timer_key); |
@@ -568,7 +590,7 @@ static inline void detach_timer(struct timer_list *timer, | |||
568 | { | 590 | { |
569 | struct list_head *entry = &timer->entry; | 591 | struct list_head *entry = &timer->entry; |
570 | 592 | ||
571 | debug_timer_deactivate(timer); | 593 | debug_deactivate(timer); |
572 | 594 | ||
573 | __list_del(entry->prev, entry->next); | 595 | __list_del(entry->prev, entry->next); |
574 | if (clear_pending) | 596 | if (clear_pending) |
@@ -632,7 +654,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, | |||
632 | goto out_unlock; | 654 | goto out_unlock; |
633 | } | 655 | } |
634 | 656 | ||
635 | debug_timer_activate(timer); | 657 | debug_activate(timer, expires); |
636 | 658 | ||
637 | new_base = __get_cpu_var(tvec_bases); | 659 | new_base = __get_cpu_var(tvec_bases); |
638 | 660 | ||
@@ -787,7 +809,7 @@ void add_timer_on(struct timer_list *timer, int cpu) | |||
787 | BUG_ON(timer_pending(timer) || !timer->function); | 809 | BUG_ON(timer_pending(timer) || !timer->function); |
788 | spin_lock_irqsave(&base->lock, flags); | 810 | spin_lock_irqsave(&base->lock, flags); |
789 | timer_set_base(timer, base); | 811 | timer_set_base(timer, base); |
790 | debug_timer_activate(timer); | 812 | debug_activate(timer, timer->expires); |
791 | if (time_before(timer->expires, base->next_timer) && | 813 | if (time_before(timer->expires, base->next_timer) && |
792 | !tbase_get_deferrable(timer->base)) | 814 | !tbase_get_deferrable(timer->base)) |
793 | base->next_timer = timer->expires; | 815 | base->next_timer = timer->expires; |
@@ -1000,7 +1022,9 @@ static inline void __run_timers(struct tvec_base *base) | |||
1000 | */ | 1022 | */ |
1001 | lock_map_acquire(&lockdep_map); | 1023 | lock_map_acquire(&lockdep_map); |
1002 | 1024 | ||
1025 | trace_timer_expire_entry(timer); | ||
1003 | fn(data); | 1026 | fn(data); |
1027 | trace_timer_expire_exit(timer); | ||
1004 | 1028 | ||
1005 | lock_map_release(&lockdep_map); | 1029 | lock_map_release(&lockdep_map); |
1006 | 1030 | ||
@@ -1187,7 +1211,7 @@ static void run_timer_softirq(struct softirq_action *h) | |||
1187 | { | 1211 | { |
1188 | struct tvec_base *base = __get_cpu_var(tvec_bases); | 1212 | struct tvec_base *base = __get_cpu_var(tvec_bases); |
1189 | 1213 | ||
1190 | perf_counter_do_pending(); | 1214 | perf_event_do_pending(); |
1191 | 1215 | ||
1192 | hrtimer_run_pending(); | 1216 | hrtimer_run_pending(); |
1193 | 1217 | ||
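The debug_init()/debug_activate()/debug_deactivate() wrappers above pair the existing debug-objects hooks with the new timer tracepoints, so an ordinary timer now emits timer_init, timer_start, timer_expire_entry/exit and timer_cancel events along its lifecycle. A hypothetical sketch of a timer that exercises those hooks (demo_timer and demo_timer_fn are made-up names, not from this patch):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

/* Runs out of __run_timers(), bracketed by timer_expire_entry/exit. */
static void demo_timer_fn(unsigned long data)
{
}

static void demo_timer_arm(void)
{
	init_timer(&demo_timer);		/* debug_init() -> trace_timer_init() */
	demo_timer.function = demo_timer_fn;
	demo_timer.data = 0;
	mod_timer(&demo_timer, jiffies + HZ);	/* debug_activate() -> trace_timer_start() */
}

static void demo_timer_disarm(void)
{
	del_timer_sync(&demo_timer);		/* debug_deactivate() -> trace_timer_cancel() */
}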
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index e71634604400..b416512ad17f 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -83,7 +83,7 @@ config RING_BUFFER_ALLOW_SWAP | |||
83 | # This allows those options to appear when no other tracer is selected. But the | 83 | # This allows those options to appear when no other tracer is selected. But the |
84 | # options do not appear when something else selects it. We need the two options | 84 | # options do not appear when something else selects it. We need the two options |
85 | # GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the | 85 | # GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the |
86 | # hiding of the automatic options options. | 86 | # hiding of the automatic options. |
87 | 87 | ||
88 | config TRACING | 88 | config TRACING |
89 | bool | 89 | bool |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 844164dca90a..26f03ac07c2b 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -42,7 +42,6 @@ obj-$(CONFIG_BOOT_TRACER) += trace_boot.o | |||
42 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o | 42 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o |
43 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o | 43 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o |
44 | obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o | 44 | obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o |
45 | obj-$(CONFIG_POWER_TRACER) += trace_power.o | ||
46 | obj-$(CONFIG_KMEMTRACE) += kmemtrace.o | 45 | obj-$(CONFIG_KMEMTRACE) += kmemtrace.o |
47 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o | 46 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o |
48 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o | 47 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o |
@@ -54,5 +53,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o | |||
54 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o | 53 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o |
55 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o | 54 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o |
56 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | 55 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
56 | obj-$(CONFIG_EVENT_TRACING) += power-traces.o | ||
57 | 57 | ||
58 | libftrace-y := ftrace.o | 58 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 3eb159c277c8..d9d6206e0b14 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio, | |||
856 | } | 856 | } |
857 | 857 | ||
858 | /** | 858 | /** |
859 | * blk_add_trace_rq_remap - Add a trace for a request-remap operation | ||
860 | * @q: queue the io is for | ||
861 | * @rq: the source request | ||
862 | * @dev: target device | ||
863 | * @from: source sector | ||
864 | * | ||
865 | * Description: | ||
866 | * Device mapper remaps requests to other devices. | ||
867 | * Add a trace for that action. | ||
868 | * | ||
869 | **/ | ||
870 | static void blk_add_trace_rq_remap(struct request_queue *q, | ||
871 | struct request *rq, dev_t dev, | ||
872 | sector_t from) | ||
873 | { | ||
874 | struct blk_trace *bt = q->blk_trace; | ||
875 | struct blk_io_trace_remap r; | ||
876 | |||
877 | if (likely(!bt)) | ||
878 | return; | ||
879 | |||
880 | r.device_from = cpu_to_be32(dev); | ||
881 | r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); | ||
882 | r.sector_from = cpu_to_be64(from); | ||
883 | |||
884 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), | ||
885 | rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors, | ||
886 | sizeof(r), &r); | ||
887 | } | ||
888 | |||
889 | /** | ||
859 | * blk_add_driver_data - Add binary message with driver-specific data | 890 | * blk_add_driver_data - Add binary message with driver-specific data |
860 | * @q: queue the io is for | 891 | * @q: queue the io is for |
861 | * @rq: io request | 892 | * @rq: io request |
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void) | |||
922 | WARN_ON(ret); | 953 | WARN_ON(ret); |
923 | ret = register_trace_block_remap(blk_add_trace_remap); | 954 | ret = register_trace_block_remap(blk_add_trace_remap); |
924 | WARN_ON(ret); | 955 | WARN_ON(ret); |
956 | ret = register_trace_block_rq_remap(blk_add_trace_rq_remap); | ||
957 | WARN_ON(ret); | ||
925 | } | 958 | } |
926 | 959 | ||
927 | static void blk_unregister_tracepoints(void) | 960 | static void blk_unregister_tracepoints(void) |
928 | { | 961 | { |
962 | unregister_trace_block_rq_remap(blk_add_trace_rq_remap); | ||
929 | unregister_trace_block_remap(blk_add_trace_remap); | 963 | unregister_trace_block_remap(blk_add_trace_remap); |
930 | unregister_trace_block_split(blk_add_trace_split); | 964 | unregister_trace_block_split(blk_add_trace_split); |
931 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io); | 965 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io); |
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev) | |||
1657 | return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); | 1691 | return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); |
1658 | } | 1692 | } |
1659 | 1693 | ||
1694 | void blk_trace_remove_sysfs(struct device *dev) | ||
1695 | { | ||
1696 | sysfs_remove_group(&dev->kobj, &blk_trace_attr_group); | ||
1697 | } | ||
1698 | |||
1660 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ | 1699 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ |
1661 | 1700 | ||
1662 | #ifdef CONFIG_EVENT_TRACING | 1701 | #ifdef CONFIG_EVENT_TRACING |
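The new blk_add_trace_rq_remap() probe hooks the block_rq_remap tracepoint, whose arguments mirror the probe's signature above (queue, request, original device, original sector). A hedged sketch of how a request-based remapper could fire it; the surrounding function and variable names are invented for illustration:

#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Hypothetical: after redirecting rq to a new device, trace the remap. */
static void demo_remap_rq(struct request_queue *q, struct request *rq,
			  dev_t old_dev, sector_t old_sector)
{
	/* ... rq->rq_disk and the request position already point at the target ... */
	trace_block_rq_remap(q, rq, old_dev, old_sector);
}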
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index cc615f84751b..6dc4e5ef7a01 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void) | |||
225 | if (ftrace_trace_function == ftrace_stub) | 225 | if (ftrace_trace_function == ftrace_stub) |
226 | return; | 226 | return; |
227 | 227 | ||
228 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
228 | func = ftrace_trace_function; | 229 | func = ftrace_trace_function; |
230 | #else | ||
231 | func = __ftrace_trace_function; | ||
232 | #endif | ||
229 | 233 | ||
230 | if (ftrace_pid_trace) { | 234 | if (ftrace_pid_trace) { |
231 | set_ftrace_pid_function(func); | 235 | set_ftrace_pid_function(func); |
@@ -736,7 +740,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf, | |||
736 | out: | 740 | out: |
737 | mutex_unlock(&ftrace_profile_lock); | 741 | mutex_unlock(&ftrace_profile_lock); |
738 | 742 | ||
739 | filp->f_pos += cnt; | 743 | *ppos += cnt; |
740 | 744 | ||
741 | return cnt; | 745 | return cnt; |
742 | } | 746 | } |
@@ -1074,14 +1078,9 @@ static void ftrace_replace_code(int enable) | |||
1074 | failed = __ftrace_replace_code(rec, enable); | 1078 | failed = __ftrace_replace_code(rec, enable); |
1075 | if (failed) { | 1079 | if (failed) { |
1076 | rec->flags |= FTRACE_FL_FAILED; | 1080 | rec->flags |= FTRACE_FL_FAILED; |
1077 | if ((system_state == SYSTEM_BOOTING) || | 1081 | ftrace_bug(failed, rec->ip); |
1078 | !core_kernel_text(rec->ip)) { | 1082 | /* Stop processing */ |
1079 | ftrace_free_rec(rec); | 1083 | return; |
1080 | } else { | ||
1081 | ftrace_bug(failed, rec->ip); | ||
1082 | /* Stop processing */ | ||
1083 | return; | ||
1084 | } | ||
1085 | } | 1084 | } |
1086 | } while_for_each_ftrace_rec(); | 1085 | } while_for_each_ftrace_rec(); |
1087 | } | 1086 | } |
@@ -1520,7 +1519,7 @@ static int t_show(struct seq_file *m, void *v) | |||
1520 | return 0; | 1519 | return 0; |
1521 | } | 1520 | } |
1522 | 1521 | ||
1523 | static struct seq_operations show_ftrace_seq_ops = { | 1522 | static const struct seq_operations show_ftrace_seq_ops = { |
1524 | .start = t_start, | 1523 | .start = t_start, |
1525 | .next = t_next, | 1524 | .next = t_next, |
1526 | .stop = t_stop, | 1525 | .stop = t_stop, |
@@ -1621,8 +1620,10 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
1621 | if (!ret) { | 1620 | if (!ret) { |
1622 | struct seq_file *m = file->private_data; | 1621 | struct seq_file *m = file->private_data; |
1623 | m->private = iter; | 1622 | m->private = iter; |
1624 | } else | 1623 | } else { |
1624 | trace_parser_put(&iter->parser); | ||
1625 | kfree(iter); | 1625 | kfree(iter); |
1626 | } | ||
1626 | } else | 1627 | } else |
1627 | file->private_data = iter; | 1628 | file->private_data = iter; |
1628 | mutex_unlock(&ftrace_regex_lock); | 1629 | mutex_unlock(&ftrace_regex_lock); |
@@ -2202,7 +2203,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
2202 | struct trace_parser *parser; | 2203 | struct trace_parser *parser; |
2203 | ssize_t ret, read; | 2204 | ssize_t ret, read; |
2204 | 2205 | ||
2205 | if (!cnt || cnt < 0) | 2206 | if (!cnt) |
2206 | return 0; | 2207 | return 0; |
2207 | 2208 | ||
2208 | mutex_lock(&ftrace_regex_lock); | 2209 | mutex_lock(&ftrace_regex_lock); |
@@ -2216,20 +2217,20 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
2216 | parser = &iter->parser; | 2217 | parser = &iter->parser; |
2217 | read = trace_get_user(parser, ubuf, cnt, ppos); | 2218 | read = trace_get_user(parser, ubuf, cnt, ppos); |
2218 | 2219 | ||
2219 | if (trace_parser_loaded(parser) && | 2220 | if (read >= 0 && trace_parser_loaded(parser) && |
2220 | !trace_parser_cont(parser)) { | 2221 | !trace_parser_cont(parser)) { |
2221 | ret = ftrace_process_regex(parser->buffer, | 2222 | ret = ftrace_process_regex(parser->buffer, |
2222 | parser->idx, enable); | 2223 | parser->idx, enable); |
2223 | if (ret) | 2224 | if (ret) |
2224 | goto out; | 2225 | goto out_unlock; |
2225 | 2226 | ||
2226 | trace_parser_clear(parser); | 2227 | trace_parser_clear(parser); |
2227 | } | 2228 | } |
2228 | 2229 | ||
2229 | ret = read; | 2230 | ret = read; |
2230 | 2231 | out_unlock: | |
2231 | mutex_unlock(&ftrace_regex_lock); | 2232 | mutex_unlock(&ftrace_regex_lock); |
2232 | out: | 2233 | |
2233 | return ret; | 2234 | return ret; |
2234 | } | 2235 | } |
2235 | 2236 | ||
@@ -2414,11 +2415,9 @@ unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; | |||
2414 | static void * | 2415 | static void * |
2415 | __g_next(struct seq_file *m, loff_t *pos) | 2416 | __g_next(struct seq_file *m, loff_t *pos) |
2416 | { | 2417 | { |
2417 | unsigned long *array = m->private; | ||
2418 | |||
2419 | if (*pos >= ftrace_graph_count) | 2418 | if (*pos >= ftrace_graph_count) |
2420 | return NULL; | 2419 | return NULL; |
2421 | return &array[*pos]; | 2420 | return &ftrace_graph_funcs[*pos]; |
2422 | } | 2421 | } |
2423 | 2422 | ||
2424 | static void * | 2423 | static void * |
@@ -2461,7 +2460,7 @@ static int g_show(struct seq_file *m, void *v) | |||
2461 | return 0; | 2460 | return 0; |
2462 | } | 2461 | } |
2463 | 2462 | ||
2464 | static struct seq_operations ftrace_graph_seq_ops = { | 2463 | static const struct seq_operations ftrace_graph_seq_ops = { |
2465 | .start = g_start, | 2464 | .start = g_start, |
2466 | .next = g_next, | 2465 | .next = g_next, |
2467 | .stop = g_stop, | 2466 | .stop = g_stop, |
@@ -2482,16 +2481,10 @@ ftrace_graph_open(struct inode *inode, struct file *file) | |||
2482 | ftrace_graph_count = 0; | 2481 | ftrace_graph_count = 0; |
2483 | memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); | 2482 | memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); |
2484 | } | 2483 | } |
2484 | mutex_unlock(&graph_lock); | ||
2485 | 2485 | ||
2486 | if (file->f_mode & FMODE_READ) { | 2486 | if (file->f_mode & FMODE_READ) |
2487 | ret = seq_open(file, &ftrace_graph_seq_ops); | 2487 | ret = seq_open(file, &ftrace_graph_seq_ops); |
2488 | if (!ret) { | ||
2489 | struct seq_file *m = file->private_data; | ||
2490 | m->private = ftrace_graph_funcs; | ||
2491 | } | ||
2492 | } else | ||
2493 | file->private_data = ftrace_graph_funcs; | ||
2494 | mutex_unlock(&graph_lock); | ||
2495 | 2488 | ||
2496 | return ret; | 2489 | return ret; |
2497 | } | 2490 | } |
@@ -2560,9 +2553,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf, | |||
2560 | size_t cnt, loff_t *ppos) | 2553 | size_t cnt, loff_t *ppos) |
2561 | { | 2554 | { |
2562 | struct trace_parser parser; | 2555 | struct trace_parser parser; |
2563 | unsigned long *array; | 2556 | ssize_t read, ret; |
2564 | size_t read = 0; | ||
2565 | ssize_t ret; | ||
2566 | 2557 | ||
2567 | if (!cnt || cnt < 0) | 2558 | if (!cnt || cnt < 0) |
2568 | return 0; | 2559 | return 0; |
@@ -2571,35 +2562,31 @@ ftrace_graph_write(struct file *file, const char __user *ubuf, | |||
2571 | 2562 | ||
2572 | if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) { | 2563 | if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) { |
2573 | ret = -EBUSY; | 2564 | ret = -EBUSY; |
2574 | goto out; | 2565 | goto out_unlock; |
2575 | } | 2566 | } |
2576 | 2567 | ||
2577 | if (file->f_mode & FMODE_READ) { | ||
2578 | struct seq_file *m = file->private_data; | ||
2579 | array = m->private; | ||
2580 | } else | ||
2581 | array = file->private_data; | ||
2582 | |||
2583 | if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) { | 2568 | if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) { |
2584 | ret = -ENOMEM; | 2569 | ret = -ENOMEM; |
2585 | goto out; | 2570 | goto out_unlock; |
2586 | } | 2571 | } |
2587 | 2572 | ||
2588 | read = trace_get_user(&parser, ubuf, cnt, ppos); | 2573 | read = trace_get_user(&parser, ubuf, cnt, ppos); |
2589 | 2574 | ||
2590 | if (trace_parser_loaded((&parser))) { | 2575 | if (read >= 0 && trace_parser_loaded((&parser))) { |
2591 | parser.buffer[parser.idx] = 0; | 2576 | parser.buffer[parser.idx] = 0; |
2592 | 2577 | ||
2593 | /* we allow only one expression at a time */ | 2578 | /* we allow only one expression at a time */ |
2594 | ret = ftrace_set_func(array, &ftrace_graph_count, | 2579 | ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, |
2595 | parser.buffer); | 2580 | parser.buffer); |
2596 | if (ret) | 2581 | if (ret) |
2597 | goto out; | 2582 | goto out_free; |
2598 | } | 2583 | } |
2599 | 2584 | ||
2600 | ret = read; | 2585 | ret = read; |
2601 | out: | 2586 | |
2587 | out_free: | ||
2602 | trace_parser_put(&parser); | 2588 | trace_parser_put(&parser); |
2589 | out_unlock: | ||
2603 | mutex_unlock(&graph_lock); | 2590 | mutex_unlock(&graph_lock); |
2604 | 2591 | ||
2605 | return ret; | 2592 | return ret; |
@@ -2670,19 +2657,17 @@ static int ftrace_convert_nops(struct module *mod, | |||
2670 | } | 2657 | } |
2671 | 2658 | ||
2672 | #ifdef CONFIG_MODULES | 2659 | #ifdef CONFIG_MODULES |
2673 | void ftrace_release(void *start, void *end) | 2660 | void ftrace_release_mod(struct module *mod) |
2674 | { | 2661 | { |
2675 | struct dyn_ftrace *rec; | 2662 | struct dyn_ftrace *rec; |
2676 | struct ftrace_page *pg; | 2663 | struct ftrace_page *pg; |
2677 | unsigned long s = (unsigned long)start; | ||
2678 | unsigned long e = (unsigned long)end; | ||
2679 | 2664 | ||
2680 | if (ftrace_disabled || !start || start == end) | 2665 | if (ftrace_disabled) |
2681 | return; | 2666 | return; |
2682 | 2667 | ||
2683 | mutex_lock(&ftrace_lock); | 2668 | mutex_lock(&ftrace_lock); |
2684 | do_for_each_ftrace_rec(pg, rec) { | 2669 | do_for_each_ftrace_rec(pg, rec) { |
2685 | if ((rec->ip >= s) && (rec->ip < e)) { | 2670 | if (within_module_core(rec->ip, mod)) { |
2686 | /* | 2671 | /* |
2687 | * rec->ip is changed in ftrace_free_rec() | 2672 | * rec->ip is changed in ftrace_free_rec() |
2688 | * It should not be between s and e if record was freed. | 2673 | * It should not be between s and e if record was freed. |
@@ -2714,9 +2699,7 @@ static int ftrace_module_notify(struct notifier_block *self, | |||
2714 | mod->num_ftrace_callsites); | 2699 | mod->num_ftrace_callsites); |
2715 | break; | 2700 | break; |
2716 | case MODULE_STATE_GOING: | 2701 | case MODULE_STATE_GOING: |
2717 | ftrace_release(mod->ftrace_callsites, | 2702 | ftrace_release_mod(mod); |
2718 | mod->ftrace_callsites + | ||
2719 | mod->num_ftrace_callsites); | ||
2720 | break; | 2703 | break; |
2721 | } | 2704 | } |
2722 | 2705 | ||
@@ -3030,7 +3013,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops) | |||
3030 | 3013 | ||
3031 | int | 3014 | int |
3032 | ftrace_enable_sysctl(struct ctl_table *table, int write, | 3015 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
3033 | struct file *file, void __user *buffer, size_t *lenp, | 3016 | void __user *buffer, size_t *lenp, |
3034 | loff_t *ppos) | 3017 | loff_t *ppos) |
3035 | { | 3018 | { |
3036 | int ret; | 3019 | int ret; |
@@ -3040,7 +3023,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
3040 | 3023 | ||
3041 | mutex_lock(&ftrace_lock); | 3024 | mutex_lock(&ftrace_lock); |
3042 | 3025 | ||
3043 | ret = proc_dointvec(table, write, file, buffer, lenp, ppos); | 3026 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
3044 | 3027 | ||
3045 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) | 3028 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) |
3046 | goto out; | 3029 | goto out; |
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c index 81b1645c8549..a91da69f153a 100644 --- a/kernel/trace/kmemtrace.c +++ b/kernel/trace/kmemtrace.c | |||
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void) | |||
501 | return 1; | 501 | return 1; |
502 | } | 502 | } |
503 | 503 | ||
504 | if (!register_tracer(&kmem_tracer)) { | 504 | if (register_tracer(&kmem_tracer) != 0) { |
505 | pr_warning("Warning: could not register the kmem tracer\n"); | 505 | pr_warning("Warning: could not register the kmem tracer\n"); |
506 | return 1; | 506 | return 1; |
507 | } | 507 | } |
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c new file mode 100644 index 000000000000..e06c6e3d56a3 --- /dev/null +++ b/kernel/trace/power-traces.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Power trace points | ||
3 | * | ||
4 | * Copyright (C) 2009 Arjan van de Ven <arjan@linux.intel.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/string.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/workqueue.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/slab.h> | ||
13 | |||
14 | #define CREATE_TRACE_POINTS | ||
15 | #include <trace/events/power.h> | ||
16 | |||
17 | EXPORT_TRACEPOINT_SYMBOL_GPL(power_start); | ||
18 | EXPORT_TRACEPOINT_SYMBOL_GPL(power_end); | ||
19 | EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency); | ||
20 | |||
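power-traces.c follows the usual trace-event instantiation pattern, also visible in the timer.c hunk above: exactly one compilation unit defines CREATE_TRACE_POINTS before including the event header, turning the TRACE_EVENT() declarations into tracepoint definitions, and exports them for modular users. A hypothetical, minimal sketch of that pattern with a made-up "demo" event (this is not the actual content of trace/events/power.h):

/* include/trace/events/demo.h (hypothetical event header) */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_event,
	TP_PROTO(unsigned int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(unsigned int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value=%u", __entry->value)
);

#endif /* _TRACE_DEMO_H */
#include <trace/define_trace.h>

/* kernel/trace/demo-traces.c: the one file that instantiates the events */
#define CREATE_TRACE_POINTS
#include <trace/events/demo.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(demo_event);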
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 6eef38923b07..5dd017fea6f5 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -201,8 +201,6 @@ int tracing_is_on(void) | |||
201 | } | 201 | } |
202 | EXPORT_SYMBOL_GPL(tracing_is_on); | 202 | EXPORT_SYMBOL_GPL(tracing_is_on); |
203 | 203 | ||
204 | #include "trace.h" | ||
205 | |||
206 | #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) | 204 | #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) |
207 | #define RB_ALIGNMENT 4U | 205 | #define RB_ALIGNMENT 4U |
208 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | 206 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) |
@@ -485,7 +483,7 @@ struct ring_buffer_iter { | |||
485 | /* Up this if you want to test the TIME_EXTENTS and normalization */ | 483 | /* Up this if you want to test the TIME_EXTENTS and normalization */ |
486 | #define DEBUG_SHIFT 0 | 484 | #define DEBUG_SHIFT 0 |
487 | 485 | ||
488 | static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu) | 486 | static inline u64 rb_time_stamp(struct ring_buffer *buffer) |
489 | { | 487 | { |
490 | /* shift to debug/test normalization and TIME_EXTENTS */ | 488 | /* shift to debug/test normalization and TIME_EXTENTS */ |
491 | return buffer->clock() << DEBUG_SHIFT; | 489 | return buffer->clock() << DEBUG_SHIFT; |
@@ -496,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) | |||
496 | u64 time; | 494 | u64 time; |
497 | 495 | ||
498 | preempt_disable_notrace(); | 496 | preempt_disable_notrace(); |
499 | time = rb_time_stamp(buffer, cpu); | 497 | time = rb_time_stamp(buffer); |
500 | preempt_enable_no_resched_notrace(); | 498 | preempt_enable_no_resched_notrace(); |
501 | 499 | ||
502 | return time; | 500 | return time; |
@@ -601,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list) | |||
601 | } | 599 | } |
602 | 600 | ||
603 | /* | 601 | /* |
604 | * rb_is_head_page - test if the give page is the head page | 602 | * rb_is_head_page - test if the given page is the head page |
605 | * | 603 | * |
606 | * Because the reader may move the head_page pointer, we can | 604 | * Because the reader may move the head_page pointer, we can |
607 | * not trust what the head page is (it may be pointing to | 605 | * not trust what the head page is (it may be pointing to |
@@ -1195,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
1195 | atomic_inc(&cpu_buffer->record_disabled); | 1193 | atomic_inc(&cpu_buffer->record_disabled); |
1196 | synchronize_sched(); | 1194 | synchronize_sched(); |
1197 | 1195 | ||
1196 | spin_lock_irq(&cpu_buffer->reader_lock); | ||
1198 | rb_head_page_deactivate(cpu_buffer); | 1197 | rb_head_page_deactivate(cpu_buffer); |
1199 | 1198 | ||
1200 | for (i = 0; i < nr_pages; i++) { | 1199 | for (i = 0; i < nr_pages; i++) { |
@@ -1209,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
1209 | return; | 1208 | return; |
1210 | 1209 | ||
1211 | rb_reset_cpu(cpu_buffer); | 1210 | rb_reset_cpu(cpu_buffer); |
1211 | spin_unlock_irq(&cpu_buffer->reader_lock); | ||
1212 | 1212 | ||
1213 | rb_check_pages(cpu_buffer); | 1213 | rb_check_pages(cpu_buffer); |
1214 | 1214 | ||
@@ -1870,7 +1870,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, | |||
1870 | * Nested commits always have zero deltas, so | 1870 | * Nested commits always have zero deltas, so |
1871 | * just reread the time stamp | 1871 | * just reread the time stamp |
1872 | */ | 1872 | */ |
1873 | *ts = rb_time_stamp(buffer, cpu_buffer->cpu); | 1873 | *ts = rb_time_stamp(buffer); |
1874 | next_page->page->time_stamp = *ts; | 1874 | next_page->page->time_stamp = *ts; |
1875 | } | 1875 | } |
1876 | 1876 | ||
@@ -2113,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, | |||
2113 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) | 2113 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) |
2114 | goto out_fail; | 2114 | goto out_fail; |
2115 | 2115 | ||
2116 | ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); | 2116 | ts = rb_time_stamp(cpu_buffer->buffer); |
2117 | 2117 | ||
2118 | /* | 2118 | /* |
2119 | * Only the first commit can update the timestamp. | 2119 | * Only the first commit can update the timestamp. |
@@ -2683,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer) | |||
2683 | EXPORT_SYMBOL_GPL(ring_buffer_entries); | 2683 | EXPORT_SYMBOL_GPL(ring_buffer_entries); |
2684 | 2684 | ||
2685 | /** | 2685 | /** |
2686 | * ring_buffer_overrun_cpu - get the number of overruns in buffer | 2686 | * ring_buffer_overruns - get the number of overruns in buffer |
2687 | * @buffer: The ring buffer | 2687 | * @buffer: The ring buffer |
2688 | * | 2688 | * |
2689 | * Returns the total number of overruns in the ring buffer | 2689 | * Returns the total number of overruns in the ring buffer |
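Beyond dropping the unused cpu argument from rb_time_stamp(), the ring_buffer.c hunks take cpu_buffer->reader_lock around the page-removal path in rb_remove_pages(), so a reader cannot walk the page list while pages are being unlinked under it. A userspace analogy of the fixed pattern, with a pthread mutex standing in for the spinlock and a plain singly linked list for the buffer pages (all names invented); build with -pthread:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *next; int val; };

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *head;

	static void *reader(void *arg)
	{
		for (int i = 0; i < 1000; i++) {
			pthread_mutex_lock(&list_lock);
			for (struct node *n = head; n; n = n->next)
				(void)n->val;	/* safe: list cannot change here */
			pthread_mutex_unlock(&list_lock);
		}
		return NULL;
	}

	static void remove_first(void)
	{
		/* like rb_remove_pages(): unlink only while holding the lock */
		pthread_mutex_lock(&list_lock);
		if (head) {
			struct node *victim = head;
			head = victim->next;
			free(victim);
		}
		pthread_mutex_unlock(&list_lock);
	}

	int main(void)
	{
		for (int i = 0; i < 100; i++) {
			struct node *n = malloc(sizeof(*n));
			n->val = i;
			n->next = head;
			head = n;
		}

		pthread_t t;
		pthread_create(&t, NULL, reader, NULL);
		for (int i = 0; i < 50; i++)
			remove_first();
		pthread_join(t, NULL);
		return 0;
	}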
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index fd52a19dd172..b20d3ec75de9 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -125,13 +125,13 @@ int ftrace_dump_on_oops; | |||
125 | 125 | ||
126 | static int tracing_set_tracer(const char *buf); | 126 | static int tracing_set_tracer(const char *buf); |
127 | 127 | ||
128 | #define BOOTUP_TRACER_SIZE 100 | 128 | #define MAX_TRACER_SIZE 100 |
129 | static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata; | 129 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; |
130 | static char *default_bootup_tracer; | 130 | static char *default_bootup_tracer; |
131 | 131 | ||
132 | static int __init set_ftrace(char *str) | 132 | static int __init set_ftrace(char *str) |
133 | { | 133 | { |
134 | strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE); | 134 | strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); |
135 | default_bootup_tracer = bootup_tracer_buf; | 135 | default_bootup_tracer = bootup_tracer_buf; |
136 | /* We are using ftrace early, expand it */ | 136 | /* We are using ftrace early, expand it */ |
137 | ring_buffer_expanded = 1; | 137 | ring_buffer_expanded = 1; |
@@ -242,13 +242,6 @@ static struct tracer *trace_types __read_mostly; | |||
242 | static struct tracer *current_trace __read_mostly; | 242 | static struct tracer *current_trace __read_mostly; |
243 | 243 | ||
244 | /* | 244 | /* |
245 | * max_tracer_type_len is used to simplify the allocating of | ||
246 | * buffers to read userspace tracer names. We keep track of | ||
247 | * the longest tracer name registered. | ||
248 | */ | ||
249 | static int max_tracer_type_len; | ||
250 | |||
251 | /* | ||
252 | * trace_types_lock is used to protect the trace_types list. | 245 | * trace_types_lock is used to protect the trace_types list. |
253 | * This lock is also used to keep user access serialized. | 246 | * This lock is also used to keep user access serialized. |
254 | * Accesses from userspace will grab this lock while userspace | 247 | * Accesses from userspace will grab this lock while userspace |
@@ -275,12 +268,18 @@ static DEFINE_SPINLOCK(tracing_start_lock); | |||
275 | */ | 268 | */ |
276 | void trace_wake_up(void) | 269 | void trace_wake_up(void) |
277 | { | 270 | { |
271 | int cpu; | ||
272 | |||
273 | if (trace_flags & TRACE_ITER_BLOCK) | ||
274 | return; | ||
278 | /* | 275 | /* |
279 | * The runqueue_is_locked() can fail, but this is the best we | 276 | * The runqueue_is_locked() can fail, but this is the best we |
280 | * have for now: | 277 | * have for now: |
281 | */ | 278 | */ |
282 | if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked()) | 279 | cpu = get_cpu(); |
280 | if (!runqueue_is_locked(cpu)) | ||
283 | wake_up(&trace_wait); | 281 | wake_up(&trace_wait); |
282 | put_cpu(); | ||
284 | } | 283 | } |
285 | 284 | ||
286 | static int __init set_buf_size(char *str) | 285 | static int __init set_buf_size(char *str) |
@@ -416,7 +415,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, | |||
416 | 415 | ||
417 | /* read the non-space input */ | 416 | /* read the non-space input */ |
418 | while (cnt && !isspace(ch)) { | 417 | while (cnt && !isspace(ch)) { |
419 | if (parser->idx < parser->size) | 418 | if (parser->idx < parser->size - 1) |
420 | parser->buffer[parser->idx++] = ch; | 419 | parser->buffer[parser->idx++] = ch; |
421 | else { | 420 | else { |
422 | ret = -EINVAL; | 421 | ret = -EINVAL; |
@@ -619,7 +618,6 @@ __releases(kernel_lock) | |||
619 | __acquires(kernel_lock) | 618 | __acquires(kernel_lock) |
620 | { | 619 | { |
621 | struct tracer *t; | 620 | struct tracer *t; |
622 | int len; | ||
623 | int ret = 0; | 621 | int ret = 0; |
624 | 622 | ||
625 | if (!type->name) { | 623 | if (!type->name) { |
@@ -627,6 +625,11 @@ __acquires(kernel_lock) | |||
627 | return -1; | 625 | return -1; |
628 | } | 626 | } |
629 | 627 | ||
628 | if (strlen(type->name) > MAX_TRACER_SIZE) { | ||
629 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); | ||
630 | return -1; | ||
631 | } | ||
632 | |||
630 | /* | 633 | /* |
631 | * When this gets called we hold the BKL which means that | 634 | * When this gets called we hold the BKL which means that |
632 | * preemption is disabled. Various trace selftests however | 635 | * preemption is disabled. Various trace selftests however |
@@ -641,7 +644,7 @@ __acquires(kernel_lock) | |||
641 | for (t = trace_types; t; t = t->next) { | 644 | for (t = trace_types; t; t = t->next) { |
642 | if (strcmp(type->name, t->name) == 0) { | 645 | if (strcmp(type->name, t->name) == 0) { |
643 | /* already found */ | 646 | /* already found */ |
644 | pr_info("Trace %s already registered\n", | 647 | pr_info("Tracer %s already registered\n", |
645 | type->name); | 648 | type->name); |
646 | ret = -1; | 649 | ret = -1; |
647 | goto out; | 650 | goto out; |
@@ -692,9 +695,6 @@ __acquires(kernel_lock) | |||
692 | 695 | ||
693 | type->next = trace_types; | 696 | type->next = trace_types; |
694 | trace_types = type; | 697 | trace_types = type; |
695 | len = strlen(type->name); | ||
696 | if (len > max_tracer_type_len) | ||
697 | max_tracer_type_len = len; | ||
698 | 698 | ||
699 | out: | 699 | out: |
700 | tracing_selftest_running = false; | 700 | tracing_selftest_running = false; |
@@ -703,7 +703,7 @@ __acquires(kernel_lock) | |||
703 | if (ret || !default_bootup_tracer) | 703 | if (ret || !default_bootup_tracer) |
704 | goto out_unlock; | 704 | goto out_unlock; |
705 | 705 | ||
706 | if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE)) | 706 | if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) |
707 | goto out_unlock; | 707 | goto out_unlock; |
708 | 708 | ||
709 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); | 709 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); |
@@ -725,14 +725,13 @@ __acquires(kernel_lock) | |||
725 | void unregister_tracer(struct tracer *type) | 725 | void unregister_tracer(struct tracer *type) |
726 | { | 726 | { |
727 | struct tracer **t; | 727 | struct tracer **t; |
728 | int len; | ||
729 | 728 | ||
730 | mutex_lock(&trace_types_lock); | 729 | mutex_lock(&trace_types_lock); |
731 | for (t = &trace_types; *t; t = &(*t)->next) { | 730 | for (t = &trace_types; *t; t = &(*t)->next) { |
732 | if (*t == type) | 731 | if (*t == type) |
733 | goto found; | 732 | goto found; |
734 | } | 733 | } |
735 | pr_info("Trace %s not registered\n", type->name); | 734 | pr_info("Tracer %s not registered\n", type->name); |
736 | goto out; | 735 | goto out; |
737 | 736 | ||
738 | found: | 737 | found: |
@@ -745,17 +744,7 @@ void unregister_tracer(struct tracer *type) | |||
745 | current_trace->stop(&global_trace); | 744 | current_trace->stop(&global_trace); |
746 | current_trace = &nop_trace; | 745 | current_trace = &nop_trace; |
747 | } | 746 | } |
748 | 747 | out: | |
749 | if (strlen(type->name) != max_tracer_type_len) | ||
750 | goto out; | ||
751 | |||
752 | max_tracer_type_len = 0; | ||
753 | for (t = &trace_types; *t; t = &(*t)->next) { | ||
754 | len = strlen((*t)->name); | ||
755 | if (len > max_tracer_type_len) | ||
756 | max_tracer_type_len = len; | ||
757 | } | ||
758 | out: | ||
759 | mutex_unlock(&trace_types_lock); | 748 | mutex_unlock(&trace_types_lock); |
760 | } | 749 | } |
761 | 750 | ||
@@ -1404,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1404 | 1393 | ||
1405 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 1394 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) |
1406 | { | 1395 | { |
1407 | return trace_array_printk(&global_trace, ip, fmt, args); | 1396 | return trace_array_vprintk(&global_trace, ip, fmt, args); |
1408 | } | 1397 | } |
1409 | EXPORT_SYMBOL_GPL(trace_vprintk); | 1398 | EXPORT_SYMBOL_GPL(trace_vprintk); |
1410 | 1399 | ||
@@ -1960,7 +1949,7 @@ static int s_show(struct seq_file *m, void *v) | |||
1960 | return 0; | 1949 | return 0; |
1961 | } | 1950 | } |
1962 | 1951 | ||
1963 | static struct seq_operations tracer_seq_ops = { | 1952 | static const struct seq_operations tracer_seq_ops = { |
1964 | .start = s_start, | 1953 | .start = s_start, |
1965 | .next = s_next, | 1954 | .next = s_next, |
1966 | .stop = s_stop, | 1955 | .stop = s_stop, |
@@ -1995,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file) | |||
1995 | if (current_trace) | 1984 | if (current_trace) |
1996 | *iter->trace = *current_trace; | 1985 | *iter->trace = *current_trace; |
1997 | 1986 | ||
1998 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) | 1987 | if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) |
1999 | goto fail; | 1988 | goto fail; |
2000 | 1989 | ||
2001 | cpumask_clear(iter->started); | ||
2002 | |||
2003 | if (current_trace && current_trace->print_max) | 1990 | if (current_trace && current_trace->print_max) |
2004 | iter->tr = &max_tr; | 1991 | iter->tr = &max_tr; |
2005 | else | 1992 | else |
@@ -2174,7 +2161,7 @@ static int t_show(struct seq_file *m, void *v) | |||
2174 | return 0; | 2161 | return 0; |
2175 | } | 2162 | } |
2176 | 2163 | ||
2177 | static struct seq_operations show_traces_seq_ops = { | 2164 | static const struct seq_operations show_traces_seq_ops = { |
2178 | .start = t_start, | 2165 | .start = t_start, |
2179 | .next = t_next, | 2166 | .next = t_next, |
2180 | .stop = t_stop, | 2167 | .stop = t_stop, |
@@ -2453,7 +2440,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2453 | return ret; | 2440 | return ret; |
2454 | } | 2441 | } |
2455 | 2442 | ||
2456 | filp->f_pos += cnt; | 2443 | *ppos += cnt; |
2457 | 2444 | ||
2458 | return cnt; | 2445 | return cnt; |
2459 | } | 2446 | } |
@@ -2595,7 +2582,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2595 | } | 2582 | } |
2596 | mutex_unlock(&trace_types_lock); | 2583 | mutex_unlock(&trace_types_lock); |
2597 | 2584 | ||
2598 | filp->f_pos += cnt; | 2585 | *ppos += cnt; |
2599 | 2586 | ||
2600 | return cnt; | 2587 | return cnt; |
2601 | } | 2588 | } |
@@ -2604,7 +2591,7 @@ static ssize_t | |||
2604 | tracing_set_trace_read(struct file *filp, char __user *ubuf, | 2591 | tracing_set_trace_read(struct file *filp, char __user *ubuf, |
2605 | size_t cnt, loff_t *ppos) | 2592 | size_t cnt, loff_t *ppos) |
2606 | { | 2593 | { |
2607 | char buf[max_tracer_type_len+2]; | 2594 | char buf[MAX_TRACER_SIZE+2]; |
2608 | int r; | 2595 | int r; |
2609 | 2596 | ||
2610 | mutex_lock(&trace_types_lock); | 2597 | mutex_lock(&trace_types_lock); |
@@ -2754,15 +2741,15 @@ static ssize_t | |||
2754 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 2741 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, |
2755 | size_t cnt, loff_t *ppos) | 2742 | size_t cnt, loff_t *ppos) |
2756 | { | 2743 | { |
2757 | char buf[max_tracer_type_len+1]; | 2744 | char buf[MAX_TRACER_SIZE+1]; |
2758 | int i; | 2745 | int i; |
2759 | size_t ret; | 2746 | size_t ret; |
2760 | int err; | 2747 | int err; |
2761 | 2748 | ||
2762 | ret = cnt; | 2749 | ret = cnt; |
2763 | 2750 | ||
2764 | if (cnt > max_tracer_type_len) | 2751 | if (cnt > MAX_TRACER_SIZE) |
2765 | cnt = max_tracer_type_len; | 2752 | cnt = MAX_TRACER_SIZE; |
2766 | 2753 | ||
2767 | if (copy_from_user(&buf, ubuf, cnt)) | 2754 | if (copy_from_user(&buf, ubuf, cnt)) |
2768 | return -EFAULT; | 2755 | return -EFAULT; |
@@ -2777,7 +2764,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2777 | if (err) | 2764 | if (err) |
2778 | return err; | 2765 | return err; |
2779 | 2766 | ||
2780 | filp->f_pos += ret; | 2767 | *ppos += ret; |
2781 | 2768 | ||
2782 | return ret; | 2769 | return ret; |
2783 | } | 2770 | } |
@@ -3312,7 +3299,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
3312 | } | 3299 | } |
3313 | } | 3300 | } |
3314 | 3301 | ||
3315 | filp->f_pos += cnt; | 3302 | *ppos += cnt; |
3316 | 3303 | ||
3317 | /* If check pages failed, return ENOMEM */ | 3304 | /* If check pages failed, return ENOMEM */ |
3318 | if (tracing_disabled) | 3305 | if (tracing_disabled) |
@@ -4400,7 +4387,7 @@ __init static int tracer_alloc_buffers(void) | |||
4400 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 4387 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) |
4401 | goto out_free_buffer_mask; | 4388 | goto out_free_buffer_mask; |
4402 | 4389 | ||
4403 | if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) | 4390 | if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) |
4404 | goto out_free_tracing_cpumask; | 4391 | goto out_free_tracing_cpumask; |
4405 | 4392 | ||
4406 | /* To save memory, keep the ring buffer size to its minimum */ | 4393 | /* To save memory, keep the ring buffer size to its minimum */ |
@@ -4411,7 +4398,6 @@ __init static int tracer_alloc_buffers(void) | |||
4411 | 4398 | ||
4412 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 4399 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); |
4413 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 4400 | cpumask_copy(tracing_cpumask, cpu_all_mask); |
4414 | cpumask_clear(tracing_reader_cpumask); | ||
4415 | 4401 | ||
4416 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 4402 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
4417 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, | 4403 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, |
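Within the trace.c changes, trace_get_user() now stops filling the parser buffer at size - 1, leaving room for the NUL terminator appended later, and the write handlers advance *ppos instead of poking filp->f_pos directly. A standalone illustration of the off-by-one being fixed; read_token() and its caller are invented for the example:

	#include <ctype.h>
	#include <stdio.h>

	/* Copy one whitespace-delimited token into buf, always NUL-terminating.
	 * Returns the number of characters stored, or -1 if the token would not
	 * fit -- mirroring the idx < size - 1 check added to trace_get_user(). */
	static int read_token(const char *src, char *buf, size_t size)
	{
		size_t idx = 0;

		while (*src && !isspace((unsigned char)*src)) {
			if (idx < size - 1)
				buf[idx++] = *src++;
			else
				return -1;	/* overflow: no room left for '\0' */
		}
		buf[idx] = '\0';
		return (int)idx;
	}

	int main(void)
	{
		char buf[8];

		printf("%d '%s'\n", read_token("short input", buf, sizeof(buf)), buf);
		printf("%d\n", read_token("waytoolongtoken", buf, sizeof(buf)));
		return 0;
	}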
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 86bcff94791a..405cb850b75d 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <trace/boot.h> | 12 | #include <trace/boot.h> |
13 | #include <linux/kmemtrace.h> | 13 | #include <linux/kmemtrace.h> |
14 | #include <trace/power.h> | ||
15 | 14 | ||
16 | #include <linux/trace_seq.h> | 15 | #include <linux/trace_seq.h> |
17 | #include <linux/ftrace_event.h> | 16 | #include <linux/ftrace_event.h> |
@@ -37,7 +36,6 @@ enum trace_type { | |||
37 | TRACE_HW_BRANCHES, | 36 | TRACE_HW_BRANCHES, |
38 | TRACE_KMEM_ALLOC, | 37 | TRACE_KMEM_ALLOC, |
39 | TRACE_KMEM_FREE, | 38 | TRACE_KMEM_FREE, |
40 | TRACE_POWER, | ||
41 | TRACE_BLK, | 39 | TRACE_BLK, |
42 | 40 | ||
43 | __TRACE_LAST_TYPE, | 41 | __TRACE_LAST_TYPE, |
@@ -207,7 +205,6 @@ extern void __ftrace_bad_type(void); | |||
207 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ | 205 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ |
208 | TRACE_GRAPH_RET); \ | 206 | TRACE_GRAPH_RET); \ |
209 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ | 207 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ |
210 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ | ||
211 | IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ | 208 | IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ |
212 | TRACE_KMEM_ALLOC); \ | 209 | TRACE_KMEM_ALLOC); \ |
213 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | 210 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 7a7a9fd249a9..4a194f08f88c 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
34 | struct trace_array *tr = branch_tracer; | 34 | struct trace_array *tr = branch_tracer; |
35 | struct ring_buffer_event *event; | 35 | struct ring_buffer_event *event; |
36 | struct trace_branch *entry; | 36 | struct trace_branch *entry; |
37 | struct ring_buffer *buffer; | ||
37 | unsigned long flags; | 38 | unsigned long flags; |
38 | int cpu, pc; | 39 | int cpu, pc; |
39 | const char *p; | 40 | const char *p; |
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
54 | goto out; | 55 | goto out; |
55 | 56 | ||
56 | pc = preempt_count(); | 57 | pc = preempt_count(); |
57 | event = trace_buffer_lock_reserve(tr, TRACE_BRANCH, | 58 | buffer = tr->buffer; |
59 | event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH, | ||
58 | sizeof(*entry), flags, pc); | 60 | sizeof(*entry), flags, pc); |
59 | if (!event) | 61 | if (!event) |
60 | goto out; | 62 | goto out; |
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
74 | entry->line = f->line; | 76 | entry->line = f->line; |
75 | entry->correct = val == expect; | 77 | entry->correct = val == expect; |
76 | 78 | ||
77 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 79 | if (!filter_check_discard(call, entry, buffer, event)) |
78 | ring_buffer_unlock_commit(tr->buffer, event); | 80 | ring_buffer_unlock_commit(buffer, event); |
79 | 81 | ||
80 | out: | 82 | out: |
81 | atomic_dec(&tr->data[cpu]->disabled); | 83 | atomic_dec(&tr->data[cpu]->disabled); |
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index a431748ddd6e..ead3d724599d 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h | |||
@@ -330,23 +330,6 @@ FTRACE_ENTRY(hw_branch, hw_branch_entry, | |||
330 | F_printk("from: %llx to: %llx", __entry->from, __entry->to) | 330 | F_printk("from: %llx to: %llx", __entry->from, __entry->to) |
331 | ); | 331 | ); |
332 | 332 | ||
333 | FTRACE_ENTRY(power, trace_power, | ||
334 | |||
335 | TRACE_POWER, | ||
336 | |||
337 | F_STRUCT( | ||
338 | __field_struct( struct power_trace, state_data ) | ||
339 | __field_desc( s64, state_data, stamp ) | ||
340 | __field_desc( s64, state_data, end ) | ||
341 | __field_desc( int, state_data, type ) | ||
342 | __field_desc( int, state_data, state ) | ||
343 | ), | ||
344 | |||
345 | F_printk("%llx->%llx type:%u state:%u", | ||
346 | __entry->stamp, __entry->end, | ||
347 | __entry->type, __entry->state) | ||
348 | ); | ||
349 | |||
350 | FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry, | 333 | FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry, |
351 | 334 | ||
352 | TRACE_KMEM_ALLOC, | 335 | TRACE_KMEM_ALLOC, |
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index 55a25c933d15..8d5c171cc998 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c | |||
@@ -8,6 +8,62 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include "trace.h" | 9 | #include "trace.h" |
10 | 10 | ||
11 | /* | ||
12 | * We can't use a size but a type in alloc_percpu() | ||
13 | * So let's create a dummy type that matches the desired size | ||
14 | */ | ||
15 | typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t; | ||
16 | |||
17 | char *trace_profile_buf; | ||
18 | EXPORT_SYMBOL_GPL(trace_profile_buf); | ||
19 | |||
20 | char *trace_profile_buf_nmi; | ||
21 | EXPORT_SYMBOL_GPL(trace_profile_buf_nmi); | ||
22 | |||
23 | /* Count the events in use (per event id, not per instance) */ | ||
24 | static int total_profile_count; | ||
25 | |||
26 | static int ftrace_profile_enable_event(struct ftrace_event_call *event) | ||
27 | { | ||
28 | char *buf; | ||
29 | int ret = -ENOMEM; | ||
30 | |||
31 | if (atomic_inc_return(&event->profile_count)) | ||
32 | return 0; | ||
33 | |||
34 | if (!total_profile_count) { | ||
35 | buf = (char *)alloc_percpu(profile_buf_t); | ||
36 | if (!buf) | ||
37 | goto fail_buf; | ||
38 | |||
39 | rcu_assign_pointer(trace_profile_buf, buf); | ||
40 | |||
41 | buf = (char *)alloc_percpu(profile_buf_t); | ||
42 | if (!buf) | ||
43 | goto fail_buf_nmi; | ||
44 | |||
45 | rcu_assign_pointer(trace_profile_buf_nmi, buf); | ||
46 | } | ||
47 | |||
48 | ret = event->profile_enable(); | ||
49 | if (!ret) { | ||
50 | total_profile_count++; | ||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | fail_buf_nmi: | ||
55 | if (!total_profile_count) { | ||
56 | free_percpu(trace_profile_buf_nmi); | ||
57 | free_percpu(trace_profile_buf); | ||
58 | trace_profile_buf_nmi = NULL; | ||
59 | trace_profile_buf = NULL; | ||
60 | } | ||
61 | fail_buf: | ||
62 | atomic_dec(&event->profile_count); | ||
63 | |||
64 | return ret; | ||
65 | } | ||
66 | |||
11 | int ftrace_profile_enable(int event_id) | 67 | int ftrace_profile_enable(int event_id) |
12 | { | 68 | { |
13 | struct ftrace_event_call *event; | 69 | struct ftrace_event_call *event; |
@@ -17,7 +73,7 @@ int ftrace_profile_enable(int event_id) | |||
17 | list_for_each_entry(event, &ftrace_events, list) { | 73 | list_for_each_entry(event, &ftrace_events, list) { |
18 | if (event->id == event_id && event->profile_enable && | 74 | if (event->id == event_id && event->profile_enable && |
19 | try_module_get(event->mod)) { | 75 | try_module_get(event->mod)) { |
20 | ret = event->profile_enable(event); | 76 | ret = ftrace_profile_enable_event(event); |
21 | break; | 77 | break; |
22 | } | 78 | } |
23 | } | 79 | } |
@@ -26,6 +82,33 @@ int ftrace_profile_enable(int event_id) | |||
26 | return ret; | 82 | return ret; |
27 | } | 83 | } |
28 | 84 | ||
85 | static void ftrace_profile_disable_event(struct ftrace_event_call *event) | ||
86 | { | ||
87 | char *buf, *nmi_buf; | ||
88 | |||
89 | if (!atomic_add_negative(-1, &event->profile_count)) | ||
90 | return; | ||
91 | |||
92 | event->profile_disable(); | ||
93 | |||
94 | if (!--total_profile_count) { | ||
95 | buf = trace_profile_buf; | ||
96 | rcu_assign_pointer(trace_profile_buf, NULL); | ||
97 | |||
98 | nmi_buf = trace_profile_buf_nmi; | ||
99 | rcu_assign_pointer(trace_profile_buf_nmi, NULL); | ||
100 | |||
101 | /* | ||
102 | * Ensure all events in profiling have finished before | ||
103 | * releasing the buffers | ||
104 | */ | ||
105 | synchronize_sched(); | ||
106 | |||
107 | free_percpu(buf); | ||
108 | free_percpu(nmi_buf); | ||
109 | } | ||
110 | } | ||
111 | |||
29 | void ftrace_profile_disable(int event_id) | 112 | void ftrace_profile_disable(int event_id) |
30 | { | 113 | { |
31 | struct ftrace_event_call *event; | 114 | struct ftrace_event_call *event; |
@@ -33,7 +116,7 @@ void ftrace_profile_disable(int event_id) | |||
33 | mutex_lock(&event_mutex); | 116 | mutex_lock(&event_mutex); |
34 | list_for_each_entry(event, &ftrace_events, list) { | 117 | list_for_each_entry(event, &ftrace_events, list) { |
35 | if (event->id == event_id) { | 118 | if (event->id == event_id) { |
36 | event->profile_disable(event); | 119 | ftrace_profile_disable_event(event); |
37 | module_put(event->mod); | 120 | module_put(event->mod); |
38 | break; | 121 | break; |
39 | } | 122 | } |
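The new trace_event_profile.c code allocates the two per-cpu profile buffers the first time any event is enabled for profiling, counts users in total_profile_count, and frees the buffers only after the last user is gone and synchronize_sched() has let in-flight profiling paths drain. A single-threaded userspace sketch of that enable/disable lifetime, with plain counters standing in for the atomics and RCU (all names invented):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define PROFILE_BUF_SIZE 4096

	static char *profile_buf;		/* stands in for trace_profile_buf */
	static int total_profile_count;		/* events currently being profiled */

	static int profile_enable(void)
	{
		if (!total_profile_count) {
			profile_buf = malloc(PROFILE_BUF_SIZE);
			if (!profile_buf)
				return -1;
			memset(profile_buf, 0, PROFILE_BUF_SIZE);
		}
		total_profile_count++;
		return 0;
	}

	static void profile_disable(void)
	{
		if (--total_profile_count)
			return;

		/* the kernel calls synchronize_sched() here so that any profiling
		 * path still using the buffer finishes before it is released */
		free(profile_buf);
		profile_buf = NULL;
	}

	int main(void)
	{
		profile_enable();	/* first user: buffer allocated */
		profile_enable();	/* second user: just bumps the count */
		profile_disable();	/* buffer stays, one user left */
		profile_disable();	/* last user: buffer freed */
		printf("buffer after last disable: %p\n", (void *)profile_buf);
		return 0;
	}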
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 56c260b83a9c..d128f65778e6 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -232,10 +232,9 @@ ftrace_event_write(struct file *file, const char __user *ubuf, | |||
232 | size_t cnt, loff_t *ppos) | 232 | size_t cnt, loff_t *ppos) |
233 | { | 233 | { |
234 | struct trace_parser parser; | 234 | struct trace_parser parser; |
235 | size_t read = 0; | 235 | ssize_t read, ret; |
236 | ssize_t ret; | ||
237 | 236 | ||
238 | if (!cnt || cnt < 0) | 237 | if (!cnt) |
239 | return 0; | 238 | return 0; |
240 | 239 | ||
241 | ret = tracing_update_buffers(); | 240 | ret = tracing_update_buffers(); |
@@ -247,7 +246,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf, | |||
247 | 246 | ||
248 | read = trace_get_user(&parser, ubuf, cnt, ppos); | 247 | read = trace_get_user(&parser, ubuf, cnt, ppos); |
249 | 248 | ||
250 | if (trace_parser_loaded((&parser))) { | 249 | if (read >= 0 && trace_parser_loaded((&parser))) { |
251 | int set = 1; | 250 | int set = 1; |
252 | 251 | ||
253 | if (*parser.buffer == '!') | 252 | if (*parser.buffer == '!') |
@@ -271,42 +270,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf, | |||
271 | static void * | 270 | static void * |
272 | t_next(struct seq_file *m, void *v, loff_t *pos) | 271 | t_next(struct seq_file *m, void *v, loff_t *pos) |
273 | { | 272 | { |
274 | struct list_head *list = m->private; | 273 | struct ftrace_event_call *call = v; |
275 | struct ftrace_event_call *call; | ||
276 | 274 | ||
277 | (*pos)++; | 275 | (*pos)++; |
278 | 276 | ||
279 | for (;;) { | 277 | list_for_each_entry_continue(call, &ftrace_events, list) { |
280 | if (list == &ftrace_events) | ||
281 | return NULL; | ||
282 | |||
283 | call = list_entry(list, struct ftrace_event_call, list); | ||
284 | |||
285 | /* | 278 | /* |
286 | * The ftrace subsystem is for showing formats only. | 279 | * The ftrace subsystem is for showing formats only. |
287 | * They can not be enabled or disabled via the event files. | 280 | * They can not be enabled or disabled via the event files. |
288 | */ | 281 | */ |
289 | if (call->regfunc) | 282 | if (call->regfunc) |
290 | break; | 283 | return call; |
291 | |||
292 | list = list->next; | ||
293 | } | 284 | } |
294 | 285 | ||
295 | m->private = list->next; | 286 | return NULL; |
296 | |||
297 | return call; | ||
298 | } | 287 | } |
299 | 288 | ||
300 | static void *t_start(struct seq_file *m, loff_t *pos) | 289 | static void *t_start(struct seq_file *m, loff_t *pos) |
301 | { | 290 | { |
302 | struct ftrace_event_call *call = NULL; | 291 | struct ftrace_event_call *call; |
303 | loff_t l; | 292 | loff_t l; |
304 | 293 | ||
305 | mutex_lock(&event_mutex); | 294 | mutex_lock(&event_mutex); |
306 | 295 | ||
307 | m->private = ftrace_events.next; | 296 | call = list_entry(&ftrace_events, struct ftrace_event_call, list); |
308 | for (l = 0; l <= *pos; ) { | 297 | for (l = 0; l <= *pos; ) { |
309 | call = t_next(m, NULL, &l); | 298 | call = t_next(m, call, &l); |
310 | if (!call) | 299 | if (!call) |
311 | break; | 300 | break; |
312 | } | 301 | } |
@@ -316,37 +305,28 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
316 | static void * | 305 | static void * |
317 | s_next(struct seq_file *m, void *v, loff_t *pos) | 306 | s_next(struct seq_file *m, void *v, loff_t *pos) |
318 | { | 307 | { |
319 | struct list_head *list = m->private; | 308 | struct ftrace_event_call *call = v; |
320 | struct ftrace_event_call *call; | ||
321 | 309 | ||
322 | (*pos)++; | 310 | (*pos)++; |
323 | 311 | ||
324 | retry: | 312 | list_for_each_entry_continue(call, &ftrace_events, list) { |
325 | if (list == &ftrace_events) | 313 | if (call->enabled) |
326 | return NULL; | 314 | return call; |
327 | |||
328 | call = list_entry(list, struct ftrace_event_call, list); | ||
329 | |||
330 | if (!call->enabled) { | ||
331 | list = list->next; | ||
332 | goto retry; | ||
333 | } | 315 | } |
334 | 316 | ||
335 | m->private = list->next; | 317 | return NULL; |
336 | |||
337 | return call; | ||
338 | } | 318 | } |
339 | 319 | ||
340 | static void *s_start(struct seq_file *m, loff_t *pos) | 320 | static void *s_start(struct seq_file *m, loff_t *pos) |
341 | { | 321 | { |
342 | struct ftrace_event_call *call = NULL; | 322 | struct ftrace_event_call *call; |
343 | loff_t l; | 323 | loff_t l; |
344 | 324 | ||
345 | mutex_lock(&event_mutex); | 325 | mutex_lock(&event_mutex); |
346 | 326 | ||
347 | m->private = ftrace_events.next; | 327 | call = list_entry(&ftrace_events, struct ftrace_event_call, list); |
348 | for (l = 0; l <= *pos; ) { | 328 | for (l = 0; l <= *pos; ) { |
349 | call = s_next(m, NULL, &l); | 329 | call = s_next(m, call, &l); |
350 | if (!call) | 330 | if (!call) |
351 | break; | 331 | break; |
352 | } | 332 | } |
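The t_next()/s_next() rewrite replaces open-coded list walking with list_for_each_entry_continue(), which resumes from the entry passed in as v; the start functions prime it by casting the list head itself to an entry so the first call lands on the first real element. A compact userspace imitation of that continue-from-here idiom (the event struct and helper are made up):

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct event { const char *name; int enabled; struct list_head list; };

	/* return the next enabled event after 'cur', or NULL at the end */
	static struct event *next_enabled(struct list_head *head, struct event *cur)
	{
		struct list_head *pos;

		for (pos = cur->list.next; pos != head; pos = pos->next) {
			struct event *e = container_of(pos, struct event, list);
			if (e->enabled)
				return e;
		}
		return NULL;
	}

	int main(void)
	{
		struct list_head head = { &head, &head };
		struct event ev[3] = {
			{ "alpha", 1 }, { "beta", 0 }, { "gamma", 1 },
		};

		/* build head -> alpha -> beta -> gamma -> head */
		for (int i = 2; i >= 0; i--) {
			ev[i].list.next = head.next;
			ev[i].list.prev = &head;
			head.next->prev = &ev[i].list;
			head.next = &ev[i].list;
		}

		/* start from the head cast to an entry, exactly like s_start() */
		struct event *start = container_of(&head, struct event, list);
		for (struct event *e = next_enabled(&head, start); e;
		     e = next_enabled(&head, e))
			printf("%s\n", e->name);
		return 0;
	}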
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 23245785927f..98a6cc5c64ed 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -933,8 +933,9 @@ static void postfix_clear(struct filter_parse_state *ps) | |||
933 | 933 | ||
934 | while (!list_empty(&ps->postfix)) { | 934 | while (!list_empty(&ps->postfix)) { |
935 | elt = list_first_entry(&ps->postfix, struct postfix_elt, list); | 935 | elt = list_first_entry(&ps->postfix, struct postfix_elt, list); |
936 | kfree(elt->operand); | ||
937 | list_del(&elt->list); | 936 | list_del(&elt->list); |
937 | kfree(elt->operand); | ||
938 | kfree(elt); | ||
938 | } | 939 | } |
939 | } | 940 | } |
940 | 941 | ||
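The single filter hunk does two things: it unlinks each element from the postfix list before freeing its operand, and it adds the missing kfree(elt) so the element itself no longer leaks. The same drain-a-list pattern as standalone C, with invented node and payload names:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct elt {
		char *operand;		/* separately allocated payload */
		struct elt *next;
	};

	static void postfix_clear(struct elt **head)
	{
		while (*head) {
			struct elt *e = *head;

			*head = e->next;	/* unlink first ...            */
			free(e->operand);	/* ... then free the payload ... */
			free(e);		/* ... and the element itself   */
		}
	}

	int main(void)
	{
		struct elt *head = NULL;
		const char *ops[] = { "a", "b", "c" };

		for (int i = 0; i < 3; i++) {
			struct elt *e = malloc(sizeof(*e));
			e->operand = strdup(ops[i]);
			e->next = head;
			head = e;
		}

		postfix_clear(&head);
		printf("list empty: %s\n", head ? "no" : "yes");
		return 0;
	}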
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c index ca7d7c4d0c2a..69543a905cd5 100644 --- a/kernel/trace/trace_hw_branches.c +++ b/kernel/trace/trace_hw_branches.c | |||
@@ -155,7 +155,7 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | |||
155 | seq_print_ip_sym(seq, it->from, symflags) && | 155 | seq_print_ip_sym(seq, it->from, symflags) && |
156 | trace_seq_printf(seq, "\n")) | 156 | trace_seq_printf(seq, "\n")) |
157 | return TRACE_TYPE_HANDLED; | 157 | return TRACE_TYPE_HANDLED; |
158 | return TRACE_TYPE_PARTIAL_LINE;; | 158 | return TRACE_TYPE_PARTIAL_LINE; |
159 | } | 159 | } |
160 | return TRACE_TYPE_UNHANDLED; | 160 | return TRACE_TYPE_UNHANDLED; |
161 | } | 161 | } |
@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to) | |||
165 | struct ftrace_event_call *call = &event_hw_branch; | 165 | struct ftrace_event_call *call = &event_hw_branch; |
166 | struct trace_array *tr = hw_branch_trace; | 166 | struct trace_array *tr = hw_branch_trace; |
167 | struct ring_buffer_event *event; | 167 | struct ring_buffer_event *event; |
168 | struct ring_buffer *buf; | ||
168 | struct hw_branch_entry *entry; | 169 | struct hw_branch_entry *entry; |
169 | unsigned long irq1; | 170 | unsigned long irq1; |
170 | int cpu; | 171 | int cpu; |
@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to) | |||
180 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) | 181 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) |
181 | goto out; | 182 | goto out; |
182 | 183 | ||
183 | event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES, | 184 | buf = tr->buffer; |
185 | event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES, | ||
184 | sizeof(*entry), 0, 0); | 186 | sizeof(*entry), 0, 0); |
185 | if (!event) | 187 | if (!event) |
186 | goto out; | 188 | goto out; |
@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to) | |||
189 | entry->ent.type = TRACE_HW_BRANCHES; | 191 | entry->ent.type = TRACE_HW_BRANCHES; |
190 | entry->from = from; | 192 | entry->from = from; |
191 | entry->to = to; | 193 | entry->to = to; |
192 | if (!filter_check_discard(call, entry, tr->buffer, event)) | 194 | if (!filter_check_discard(call, entry, buf, event)) |
193 | trace_buffer_unlock_commit(tr, event, 0, 0); | 195 | trace_buffer_unlock_commit(buf, event, 0, 0); |
194 | 196 | ||
195 | out: | 197 | out: |
196 | atomic_dec(&tr->data[cpu]->disabled); | 198 | atomic_dec(&tr->data[cpu]->disabled); |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index f572f44c6e1e..b6c12c6a1bcd 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | |||
69 | * @s: trace sequence descriptor | 69 | * @s: trace sequence descriptor |
70 | * @fmt: printf format string | 70 | * @fmt: printf format string |
71 | * | 71 | * |
72 | * It returns 0 if the trace would overflow the buffer's free | ||
73 | * space, 1 otherwise. | ||
74 | * | ||
72 | * The tracer may use either sequence operations or its own | 75 | * The tracer may use either sequence operations or its own |
73 | * copy to user routines. To simplify formating of a trace | 76 | * copy to user routines. To simplify formating of a trace |
74 | * trace_seq_printf is used to store strings into a special | 77 | * trace_seq_printf is used to store strings into a special |
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | |||
95 | 98 | ||
96 | s->len += ret; | 99 | s->len += ret; |
97 | 100 | ||
98 | return len; | 101 | return 1; |
99 | } | 102 | } |
100 | EXPORT_SYMBOL_GPL(trace_seq_printf); | 103 | EXPORT_SYMBOL_GPL(trace_seq_printf); |
101 | 104 | ||
@@ -486,16 +489,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | |||
486 | hardirq ? 'h' : softirq ? 's' : '.')) | 489 | hardirq ? 'h' : softirq ? 's' : '.')) |
487 | return 0; | 490 | return 0; |
488 | 491 | ||
489 | if (entry->lock_depth < 0) | 492 | if (entry->preempt_count) |
490 | ret = trace_seq_putc(s, '.'); | 493 | ret = trace_seq_printf(s, "%x", entry->preempt_count); |
491 | else | 494 | else |
492 | ret = trace_seq_printf(s, "%d", entry->lock_depth); | 495 | ret = trace_seq_putc(s, '.'); |
496 | |||
493 | if (!ret) | 497 | if (!ret) |
494 | return 0; | 498 | return 0; |
495 | 499 | ||
496 | if (entry->preempt_count) | 500 | if (entry->lock_depth < 0) |
497 | return trace_seq_printf(s, "%x", entry->preempt_count); | 501 | return trace_seq_putc(s, '.'); |
498 | return trace_seq_putc(s, '.'); | 502 | |
503 | return trace_seq_printf(s, "%d", entry->lock_depth); | ||
499 | } | 504 | } |
500 | 505 | ||
501 | static int | 506 | static int |
@@ -883,7 +888,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S) | |||
883 | trace_assign_type(field, iter->ent); | 888 | trace_assign_type(field, iter->ent); |
884 | 889 | ||
885 | if (!S) | 890 | if (!S) |
886 | task_state_char(field->prev_state); | 891 | S = task_state_char(field->prev_state); |
887 | T = task_state_char(field->next_state); | 892 | T = task_state_char(field->next_state); |
888 | if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", | 893 | if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", |
889 | field->prev_pid, | 894 | field->prev_pid, |
@@ -918,7 +923,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S) | |||
918 | trace_assign_type(field, iter->ent); | 923 | trace_assign_type(field, iter->ent); |
919 | 924 | ||
920 | if (!S) | 925 | if (!S) |
921 | task_state_char(field->prev_state); | 926 | S = task_state_char(field->prev_state); |
922 | T = task_state_char(field->next_state); | 927 | T = task_state_char(field->next_state); |
923 | 928 | ||
924 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); | 929 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); |
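As the added comment says, trace_seq_printf() now returns 1 when the formatted text fits in the sequence buffer's remaining space and 0 when it does not; the old code leaked an internal length instead. A userspace model of that contract; the seq struct and function names are invented:

	#include <stdarg.h>
	#include <stdio.h>

	#define SEQ_SIZE 32

	struct seq {
		char buf[SEQ_SIZE];
		int len;
	};

	/* returns 1 if the text fit in the remaining space, 0 otherwise */
	static int seq_printf(struct seq *s, const char *fmt, ...)
	{
		int left = SEQ_SIZE - s->len;
		va_list ap;
		int ret;

		if (left <= 0)
			return 0;

		va_start(ap, fmt);
		ret = vsnprintf(s->buf + s->len, left, fmt, ap);
		va_end(ap);

		/* vsnprintf reports how much it wanted to write; more than
		 * 'left' means the line was truncated, so signal failure
		 * without advancing len */
		if (ret < 0 || ret >= left)
			return 0;

		s->len += ret;
		return 1;
	}

	int main(void)
	{
		struct seq s = { .len = 0 };

		printf("%d\n", seq_printf(&s, "pid=%d state=%c", 42, 'R')); /* fits: 1 */
		printf("%d\n", seq_printf(&s, "%64s", "way too long"));     /* overflow: 0 */
		return 0;
	}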
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c deleted file mode 100644 index fe1a00f1445a..000000000000 --- a/kernel/trace/trace_power.c +++ /dev/null | |||
@@ -1,218 +0,0 @@ | |||
1 | /* | ||
2 | * ring buffer based C-state tracer | ||
3 | * | ||
4 | * Arjan van de Ven <arjan@linux.intel.com> | ||
5 | * Copyright (C) 2008 Intel Corporation | ||
6 | * | ||
7 | * Much is borrowed from trace_boot.c which is | ||
8 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/debugfs.h> | ||
14 | #include <trace/power.h> | ||
15 | #include <linux/kallsyms.h> | ||
16 | #include <linux/module.h> | ||
17 | |||
18 | #include "trace.h" | ||
19 | #include "trace_output.h" | ||
20 | |||
21 | static struct trace_array *power_trace; | ||
22 | static int __read_mostly trace_power_enabled; | ||
23 | |||
24 | static void probe_power_start(struct power_trace *it, unsigned int type, | ||
25 | unsigned int level) | ||
26 | { | ||
27 | if (!trace_power_enabled) | ||
28 | return; | ||
29 | |||
30 | memset(it, 0, sizeof(struct power_trace)); | ||
31 | it->state = level; | ||
32 | it->type = type; | ||
33 | it->stamp = ktime_get(); | ||
34 | } | ||
35 | |||
36 | |||
37 | static void probe_power_end(struct power_trace *it) | ||
38 | { | ||
39 | struct ftrace_event_call *call = &event_power; | ||
40 | struct ring_buffer_event *event; | ||
41 | struct ring_buffer *buffer; | ||
42 | struct trace_power *entry; | ||
43 | struct trace_array_cpu *data; | ||
44 | struct trace_array *tr = power_trace; | ||
45 | |||
46 | if (!trace_power_enabled) | ||
47 | return; | ||
48 | |||
49 | buffer = tr->buffer; | ||
50 | |||
51 | preempt_disable(); | ||
52 | it->end = ktime_get(); | ||
53 | data = tr->data[smp_processor_id()]; | ||
54 | |||
55 | event = trace_buffer_lock_reserve(buffer, TRACE_POWER, | ||
56 | sizeof(*entry), 0, 0); | ||
57 | if (!event) | ||
58 | goto out; | ||
59 | entry = ring_buffer_event_data(event); | ||
60 | entry->state_data = *it; | ||
61 | if (!filter_check_discard(call, entry, buffer, event)) | ||
62 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
63 | out: | ||
64 | preempt_enable(); | ||
65 | } | ||
66 | |||
67 | static void probe_power_mark(struct power_trace *it, unsigned int type, | ||
68 | unsigned int level) | ||
69 | { | ||
70 | struct ftrace_event_call *call = &event_power; | ||
71 | struct ring_buffer_event *event; | ||
72 | struct ring_buffer *buffer; | ||
73 | struct trace_power *entry; | ||
74 | struct trace_array_cpu *data; | ||
75 | struct trace_array *tr = power_trace; | ||
76 | |||
77 | if (!trace_power_enabled) | ||
78 | return; | ||
79 | |||
80 | buffer = tr->buffer; | ||
81 | |||
82 | memset(it, 0, sizeof(struct power_trace)); | ||
83 | it->state = level; | ||
84 | it->type = type; | ||
85 | it->stamp = ktime_get(); | ||
86 | preempt_disable(); | ||
87 | it->end = it->stamp; | ||
88 | data = tr->data[smp_processor_id()]; | ||
89 | |||
90 | event = trace_buffer_lock_reserve(buffer, TRACE_POWER, | ||
91 | sizeof(*entry), 0, 0); | ||
92 | if (!event) | ||
93 | goto out; | ||
94 | entry = ring_buffer_event_data(event); | ||
95 | entry->state_data = *it; | ||
96 | if (!filter_check_discard(call, entry, buffer, event)) | ||
97 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
98 | out: | ||
99 | preempt_enable(); | ||
100 | } | ||
101 | |||
102 | static int tracing_power_register(void) | ||
103 | { | ||
104 | int ret; | ||
105 | |||
106 | ret = register_trace_power_start(probe_power_start); | ||
107 | if (ret) { | ||
108 | pr_info("power trace: Couldn't activate tracepoint" | ||
109 | " probe to trace_power_start\n"); | ||
110 | return ret; | ||
111 | } | ||
112 | ret = register_trace_power_end(probe_power_end); | ||
113 | if (ret) { | ||
114 | pr_info("power trace: Couldn't activate tracepoint" | ||
115 | " probe to trace_power_end\n"); | ||
116 | goto fail_start; | ||
117 | } | ||
118 | ret = register_trace_power_mark(probe_power_mark); | ||
119 | if (ret) { | ||
120 | pr_info("power trace: Couldn't activate tracepoint" | ||
121 | " probe to trace_power_mark\n"); | ||
122 | goto fail_end; | ||
123 | } | ||
124 | return ret; | ||
125 | fail_end: | ||
126 | unregister_trace_power_end(probe_power_end); | ||
127 | fail_start: | ||
128 | unregister_trace_power_start(probe_power_start); | ||
129 | return ret; | ||
130 | } | ||
131 | |||
132 | static void start_power_trace(struct trace_array *tr) | ||
133 | { | ||
134 | trace_power_enabled = 1; | ||
135 | } | ||
136 | |||
137 | static void stop_power_trace(struct trace_array *tr) | ||
138 | { | ||
139 | trace_power_enabled = 0; | ||
140 | } | ||
141 | |||
142 | static void power_trace_reset(struct trace_array *tr) | ||
143 | { | ||
144 | trace_power_enabled = 0; | ||
145 | unregister_trace_power_start(probe_power_start); | ||
146 | unregister_trace_power_end(probe_power_end); | ||
147 | unregister_trace_power_mark(probe_power_mark); | ||
148 | } | ||
149 | |||
150 | |||
151 | static int power_trace_init(struct trace_array *tr) | ||
152 | { | ||
153 | power_trace = tr; | ||
154 | |||
155 | trace_power_enabled = 1; | ||
156 | tracing_power_register(); | ||
157 | |||
158 | tracing_reset_online_cpus(tr); | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | static enum print_line_t power_print_line(struct trace_iterator *iter) | ||
163 | { | ||
164 | int ret = 0; | ||
165 | struct trace_entry *entry = iter->ent; | ||
166 | struct trace_power *field ; | ||
167 | struct power_trace *it; | ||
168 | struct trace_seq *s = &iter->seq; | ||
169 | struct timespec stamp; | ||
170 | struct timespec duration; | ||
171 | |||
172 | trace_assign_type(field, entry); | ||
173 | it = &field->state_data; | ||
174 | stamp = ktime_to_timespec(it->stamp); | ||
175 | duration = ktime_to_timespec(ktime_sub(it->end, it->stamp)); | ||
176 | |||
177 | if (entry->type == TRACE_POWER) { | ||
178 | if (it->type == POWER_CSTATE) | ||
179 | ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n", | ||
180 | stamp.tv_sec, | ||
181 | stamp.tv_nsec, | ||
182 | it->state, iter->cpu, | ||
183 | duration.tv_sec, | ||
184 | duration.tv_nsec); | ||
185 | if (it->type == POWER_PSTATE) | ||
186 | ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n", | ||
187 | stamp.tv_sec, | ||
188 | stamp.tv_nsec, | ||
189 | it->state, iter->cpu); | ||
190 | if (!ret) | ||
191 | return TRACE_TYPE_PARTIAL_LINE; | ||
192 | return TRACE_TYPE_HANDLED; | ||
193 | } | ||
194 | return TRACE_TYPE_UNHANDLED; | ||
195 | } | ||
196 | |||
197 | static void power_print_header(struct seq_file *s) | ||
198 | { | ||
199 | seq_puts(s, "# TIMESTAMP STATE EVENT\n"); | ||
200 | seq_puts(s, "# | | |\n"); | ||
201 | } | ||
202 | |||
203 | static struct tracer power_tracer __read_mostly = | ||
204 | { | ||
205 | .name = "power", | ||
206 | .init = power_trace_init, | ||
207 | .start = start_power_trace, | ||
208 | .stop = stop_power_trace, | ||
209 | .reset = power_trace_reset, | ||
210 | .print_line = power_print_line, | ||
211 | .print_header = power_print_header, | ||
212 | }; | ||
213 | |||
214 | static int init_power_trace(void) | ||
215 | { | ||
216 | return register_tracer(&power_tracer); | ||
217 | } | ||
218 | device_initcall(init_power_trace); | ||
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 687699d365ae..2547d8813cf0 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/marker.h> | ||
15 | #include <linux/mutex.h> | 14 | #include <linux/mutex.h> |
16 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
17 | #include <linux/list.h> | 16 | #include <linux/list.h> |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 0f6facb050a1..8504ac71e4e8 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -296,14 +296,14 @@ static const struct file_operations stack_trace_fops = { | |||
296 | 296 | ||
297 | int | 297 | int |
298 | stack_trace_sysctl(struct ctl_table *table, int write, | 298 | stack_trace_sysctl(struct ctl_table *table, int write, |
299 | struct file *file, void __user *buffer, size_t *lenp, | 299 | void __user *buffer, size_t *lenp, |
300 | loff_t *ppos) | 300 | loff_t *ppos) |
301 | { | 301 | { |
302 | int ret; | 302 | int ret; |
303 | 303 | ||
304 | mutex_lock(&stack_sysctl_mutex); | 304 | mutex_lock(&stack_sysctl_mutex); |
305 | 305 | ||
306 | ret = proc_dointvec(table, write, file, buffer, lenp, ppos); | 306 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
307 | 307 | ||
308 | if (ret || !write || | 308 | if (ret || !write || |
309 | (last_stack_tracer_enabled == !!stack_tracer_enabled)) | 309 | (last_stack_tracer_enabled == !!stack_tracer_enabled)) |
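stack_trace_sysctl() loses its struct file * argument because the proc_handler prototype, and proc_dointvec() with it, dropped that parameter across the tree in this series; the same change appears below for utsname_sysctl.c. A hedged sketch of a handler written against the new prototype; my_knob and its handler are hypothetical, and the ctl_table wiring (pointing .data at my_knob) is left out:

	#include <linux/kernel.h>
	#include <linux/mutex.h>
	#include <linux/sysctl.h>

	static int my_knob;
	static DEFINE_MUTEX(my_knob_mutex);

	/* new-style proc handler: no struct file * between 'write' and 'buffer' */
	static int my_knob_sysctl(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp, loff_t *ppos)
	{
		int ret;

		mutex_lock(&my_knob_mutex);
		ret = proc_dointvec(table, write, buffer, lenp, ppos);
		if (!ret && write)
			pr_info("my_knob is now %d\n", my_knob);
		mutex_unlock(&my_knob_mutex);

		return ret;
	}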
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 8712ce3c6a0e..527e17eae575 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <trace/events/syscalls.h> | 2 | #include <trace/events/syscalls.h> |
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/ftrace.h> | 4 | #include <linux/ftrace.h> |
5 | #include <linux/perf_counter.h> | 5 | #include <linux/perf_event.h> |
6 | #include <asm/syscall.h> | 6 | #include <asm/syscall.h> |
7 | 7 | ||
8 | #include "trace_output.h" | 8 | #include "trace_output.h" |
@@ -166,7 +166,7 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s) | |||
166 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" | 166 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" |
167 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", | 167 | "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", |
168 | SYSCALL_FIELD(int, nr), | 168 | SYSCALL_FIELD(int, nr), |
169 | SYSCALL_FIELD(unsigned long, ret)); | 169 | SYSCALL_FIELD(long, ret)); |
170 | if (!ret) | 170 | if (!ret) |
171 | return 0; | 171 | return 0; |
172 | 172 | ||
@@ -212,7 +212,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) | |||
212 | if (ret) | 212 | if (ret) |
213 | return ret; | 213 | return ret; |
214 | 214 | ||
215 | ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0, | 215 | ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0, |
216 | FILTER_OTHER); | 216 | FILTER_OTHER); |
217 | 217 | ||
218 | return ret; | 218 | return ret; |
@@ -384,10 +384,13 @@ static int sys_prof_refcount_exit; | |||
384 | 384 | ||
385 | static void prof_syscall_enter(struct pt_regs *regs, long id) | 385 | static void prof_syscall_enter(struct pt_regs *regs, long id) |
386 | { | 386 | { |
387 | struct syscall_trace_enter *rec; | ||
388 | struct syscall_metadata *sys_data; | 387 | struct syscall_metadata *sys_data; |
388 | struct syscall_trace_enter *rec; | ||
389 | unsigned long flags; | ||
390 | char *raw_data; | ||
389 | int syscall_nr; | 391 | int syscall_nr; |
390 | int size; | 392 | int size; |
393 | int cpu; | ||
391 | 394 | ||
392 | syscall_nr = syscall_get_nr(current, regs); | 395 | syscall_nr = syscall_get_nr(current, regs); |
393 | if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) | 396 | if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) |
@@ -402,20 +405,38 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) | |||
402 | size = ALIGN(size + sizeof(u32), sizeof(u64)); | 405 | size = ALIGN(size + sizeof(u32), sizeof(u64)); |
403 | size -= sizeof(u32); | 406 | size -= sizeof(u32); |
404 | 407 | ||
405 | do { | 408 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, |
406 | char raw_data[size]; | 409 | "profile buffer not large enough")) |
410 | return; | ||
411 | |||
412 | /* Protect the per cpu buffer, begin the rcu read side */ | ||
413 | local_irq_save(flags); | ||
407 | 414 | ||
408 | /* zero the dead bytes from align to not leak stack to user */ | 415 | cpu = smp_processor_id(); |
409 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; | 416 | |
417 | if (in_nmi()) | ||
418 | raw_data = rcu_dereference(trace_profile_buf_nmi); | ||
419 | else | ||
420 | raw_data = rcu_dereference(trace_profile_buf); | ||
421 | |||
422 | if (!raw_data) | ||
423 | goto end; | ||
410 | 424 | ||
411 | rec = (struct syscall_trace_enter *) raw_data; | 425 | raw_data = per_cpu_ptr(raw_data, cpu); |
412 | tracing_generic_entry_update(&rec->ent, 0, 0); | 426 | |
413 | rec->ent.type = sys_data->enter_id; | 427 | /* zero the dead bytes from align to not leak stack to user */ |
414 | rec->nr = syscall_nr; | 428 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; |
415 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | 429 | |
416 | (unsigned long *)&rec->args); | 430 | rec = (struct syscall_trace_enter *) raw_data; |
417 | perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size); | 431 | tracing_generic_entry_update(&rec->ent, 0, 0); |
418 | } while(0); | 432 | rec->ent.type = sys_data->enter_id; |
433 | rec->nr = syscall_nr; | ||
434 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | ||
435 | (unsigned long *)&rec->args); | ||
436 | perf_tp_event(sys_data->enter_id, 0, 1, rec, size); | ||
437 | |||
438 | end: | ||
439 | local_irq_restore(flags); | ||
419 | } | 440 | } |
420 | 441 | ||
421 | int reg_prof_syscall_enter(char *name) | 442 | int reg_prof_syscall_enter(char *name) |
@@ -460,8 +481,12 @@ void unreg_prof_syscall_enter(char *name) | |||
460 | static void prof_syscall_exit(struct pt_regs *regs, long ret) | 481 | static void prof_syscall_exit(struct pt_regs *regs, long ret) |
461 | { | 482 | { |
462 | struct syscall_metadata *sys_data; | 483 | struct syscall_metadata *sys_data; |
463 | struct syscall_trace_exit rec; | 484 | struct syscall_trace_exit *rec; |
485 | unsigned long flags; | ||
464 | int syscall_nr; | 486 | int syscall_nr; |
487 | char *raw_data; | ||
488 | int size; | ||
489 | int cpu; | ||
465 | 490 | ||
466 | syscall_nr = syscall_get_nr(current, regs); | 491 | syscall_nr = syscall_get_nr(current, regs); |
467 | if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) | 492 | if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) |
@@ -471,12 +496,46 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
471 | if (!sys_data) | 496 | if (!sys_data) |
472 | return; | 497 | return; |
473 | 498 | ||
474 | tracing_generic_entry_update(&rec.ent, 0, 0); | 499 | /* We can probably do that at build time */ |
475 | rec.ent.type = sys_data->exit_id; | 500 | size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); |
476 | rec.nr = syscall_nr; | 501 | size -= sizeof(u32); |
477 | rec.ret = syscall_get_return_value(current, regs); | ||
478 | 502 | ||
479 | perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec)); | 503 | /* |
504 | * Impossible, but be paranoid with the future | ||
505 | * How to put this check outside runtime? | ||
506 | */ | ||
507 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, | ||
508 | "exit event has grown above profile buffer size")) | ||
509 | return; | ||
510 | |||
511 | /* Protect the per cpu buffer, begin the rcu read side */ | ||
512 | local_irq_save(flags); | ||
513 | cpu = smp_processor_id(); | ||
514 | |||
515 | if (in_nmi()) | ||
516 | raw_data = rcu_dereference(trace_profile_buf_nmi); | ||
517 | else | ||
518 | raw_data = rcu_dereference(trace_profile_buf); | ||
519 | |||
520 | if (!raw_data) | ||
521 | goto end; | ||
522 | |||
523 | raw_data = per_cpu_ptr(raw_data, cpu); | ||
524 | |||
525 | /* zero the dead bytes from align to not leak stack to user */ | ||
526 | *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; | ||
527 | |||
528 | rec = (struct syscall_trace_exit *)raw_data; | ||
529 | |||
530 | tracing_generic_entry_update(&rec->ent, 0, 0); | ||
531 | rec->ent.type = sys_data->exit_id; | ||
532 | rec->nr = syscall_nr; | ||
533 | rec->ret = syscall_get_return_value(current, regs); | ||
534 | |||
535 | perf_tp_event(sys_data->exit_id, 0, 1, rec, size); | ||
536 | |||
537 | end: | ||
538 | local_irq_restore(flags); | ||
480 | } | 539 | } |
481 | 540 | ||
482 | int reg_prof_syscall_exit(char *name) | 541 | int reg_prof_syscall_exit(char *name) |
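prof_syscall_enter() and prof_syscall_exit() now build their records in the preallocated per-cpu profile buffers, guarded by local_irq_save() and rcu_dereference(), instead of a variable-length array on the stack. They keep the alignment dance: round the payload up so that payload plus the 4-byte perf header is u64-aligned, then zero the last eight bytes so the padding never leaks kernel stack contents. The arithmetic in isolation, with an invented record layout:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	struct sys_enter_rec {		/* invented stand-in for syscall_trace_enter */
		uint16_t type;
		uint8_t  flags;
		uint8_t  preempt;
		int32_t  nr;
		uint64_t args[3];
	};

	int main(void)
	{
		/* same computation as the tracer: make record + u32 header a
		 * multiple of u64, then drop the header again */
		size_t size = sizeof(struct sys_enter_rec);
		size = ALIGN_UP(size + sizeof(uint32_t), sizeof(uint64_t));
		size -= sizeof(uint32_t);

		char raw_data[64];
		memset(raw_data, 0xaa, sizeof(raw_data));	/* pretend stale data */

		/* zero the dead bytes at the end so alignment padding can't leak;
		 * the kernel does this with a single u64 store */
		memset(&raw_data[size - sizeof(uint64_t)], 0, sizeof(uint64_t));

		printf("record %zu bytes, padded size %zu\n",
		       sizeof(struct sys_enter_rec), size);
		return 0;
	}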
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 9489a0a9b1be..cc89be5bc0f8 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -48,7 +48,7 @@ static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; | |||
48 | 48 | ||
49 | /* | 49 | /* |
50 | * Note about RCU : | 50 | * Note about RCU : |
51 | * It is used to to delay the free of multiple probes array until a quiescent | 51 | * It is used to delay the free of multiple probes array until a quiescent |
52 | * state is reached. | 52 | * state is reached. |
53 | * Tracepoint entries modifications are protected by the tracepoints_mutex. | 53 | * Tracepoint entries modifications are protected by the tracepoints_mutex. |
54 | */ | 54 | */ |
diff --git a/kernel/uid16.c b/kernel/uid16.c index 0314501688b9..419209893d87 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c | |||
@@ -4,7 +4,6 @@ | |||
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/utsname.h> | ||
8 | #include <linux/mman.h> | 7 | #include <linux/mman.h> |
9 | #include <linux/notifier.h> | 8 | #include <linux/notifier.h> |
10 | #include <linux/reboot.h> | 9 | #include <linux/reboot.h> |
diff --git a/kernel/user.c b/kernel/user.c index 2c000e7132ac..46d0165ca70c 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -330,9 +330,9 @@ done: | |||
330 | */ | 330 | */ |
331 | static void free_user(struct user_struct *up, unsigned long flags) | 331 | static void free_user(struct user_struct *up, unsigned long flags) |
332 | { | 332 | { |
333 | spin_unlock_irqrestore(&uidhash_lock, flags); | ||
334 | INIT_DELAYED_WORK(&up->work, cleanup_user_struct); | 333 | INIT_DELAYED_WORK(&up->work, cleanup_user_struct); |
335 | schedule_delayed_work(&up->work, msecs_to_jiffies(1000)); | 334 | schedule_delayed_work(&up->work, msecs_to_jiffies(1000)); |
335 | spin_unlock_irqrestore(&uidhash_lock, flags); | ||
336 | } | 336 | } |
337 | 337 | ||
338 | #else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ | 338 | #else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ |
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c index 92359cc747a7..69eae358a726 100644 --- a/kernel/utsname_sysctl.c +++ b/kernel/utsname_sysctl.c | |||
@@ -42,14 +42,14 @@ static void put_uts(ctl_table *table, int write, void *which) | |||
42 | * Special case of dostring for the UTS structure. This has locks | 42 | * Special case of dostring for the UTS structure. This has locks |
43 | * to observe. Should this be in kernel/sys.c ???? | 43 | * to observe. Should this be in kernel/sys.c ???? |
44 | */ | 44 | */ |
45 | static int proc_do_uts_string(ctl_table *table, int write, struct file *filp, | 45 | static int proc_do_uts_string(ctl_table *table, int write, |
46 | void __user *buffer, size_t *lenp, loff_t *ppos) | 46 | void __user *buffer, size_t *lenp, loff_t *ppos) |
47 | { | 47 | { |
48 | struct ctl_table uts_table; | 48 | struct ctl_table uts_table; |
49 | int r; | 49 | int r; |
50 | memcpy(&uts_table, table, sizeof(uts_table)); | 50 | memcpy(&uts_table, table, sizeof(uts_table)); |
51 | uts_table.data = get_uts(table, write); | 51 | uts_table.data = get_uts(table, write); |
52 | r = proc_dostring(&uts_table,write,filp,buffer,lenp, ppos); | 52 | r = proc_dostring(&uts_table,write,buffer,lenp, ppos); |
53 | put_uts(table, write, uts_table.data); | 53 | put_uts(table, write, uts_table.data); |
54 | return r; | 54 | return r; |
55 | } | 55 | } |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index addfe2df93b1..67e526b6ae81 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork, | |||
640 | EXPORT_SYMBOL(schedule_delayed_work); | 640 | EXPORT_SYMBOL(schedule_delayed_work); |
641 | 641 | ||
642 | /** | 642 | /** |
643 | * flush_delayed_work - block until a dwork_struct's callback has terminated | ||
644 | * @dwork: the delayed work which is to be flushed | ||
645 | * | ||
646 | * Any timeout is cancelled, and any pending work is run immediately. | ||
647 | */ | ||
648 | void flush_delayed_work(struct delayed_work *dwork) | ||
649 | { | ||
650 | if (del_timer_sync(&dwork->timer)) { | ||
651 | struct cpu_workqueue_struct *cwq; | ||
652 | cwq = wq_per_cpu(keventd_wq, get_cpu()); | ||
653 | __queue_work(cwq, &dwork->work); | ||
654 | put_cpu(); | ||
655 | } | ||
656 | flush_work(&dwork->work); | ||
657 | } | ||
658 | EXPORT_SYMBOL(flush_delayed_work); | ||
659 | |||
660 | /** | ||
643 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | 661 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay |
644 | * @cpu: cpu to use | 662 | * @cpu: cpu to use |
645 | * @dwork: job to be done | 663 | * @dwork: job to be done |
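The new flush_delayed_work() helper fires a still-pending delayed work item immediately and then waits for its handler to finish, which is what teardown paths want. A hedged usage sketch, assuming a module with one delayed work item that does not re-arm itself; every name here is hypothetical:

	#include <linux/jiffies.h>
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static void my_refresh_fn(struct work_struct *work)
	{
		/* periodic housekeeping would go here */
	}

	static DECLARE_DELAYED_WORK(my_refresh_work, my_refresh_fn);

	static int __init my_mod_init(void)
	{
		schedule_delayed_work(&my_refresh_work, msecs_to_jiffies(1000));
		return 0;
	}

	static void __exit my_mod_exit(void)
	{
		/* run anything still pending now, and wait for it to finish */
		flush_delayed_work(&my_refresh_work);
	}

	module_init(my_mod_init);
	module_exit(my_mod_exit);
	MODULE_LICENSE("GPL");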
@@ -667,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on); | |||
667 | int schedule_on_each_cpu(work_func_t func) | 685 | int schedule_on_each_cpu(work_func_t func) |
668 | { | 686 | { |
669 | int cpu; | 687 | int cpu; |
688 | int orig = -1; | ||
670 | struct work_struct *works; | 689 | struct work_struct *works; |
671 | 690 | ||
672 | works = alloc_percpu(struct work_struct); | 691 | works = alloc_percpu(struct work_struct); |
@@ -674,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func) | |||
674 | return -ENOMEM; | 693 | return -ENOMEM; |
675 | 694 | ||
676 | get_online_cpus(); | 695 | get_online_cpus(); |
696 | |||
697 | /* | ||
698 | * When running in keventd don't schedule a work item on | ||
699 | * itself. Can just call directly because the work queue is | ||
700 | * already bound. This also is faster. | ||
701 | */ | ||
702 | if (current_is_keventd()) | ||
703 | orig = raw_smp_processor_id(); | ||
704 | |||
677 | for_each_online_cpu(cpu) { | 705 | for_each_online_cpu(cpu) { |
678 | struct work_struct *work = per_cpu_ptr(works, cpu); | 706 | struct work_struct *work = per_cpu_ptr(works, cpu); |
679 | 707 | ||
680 | INIT_WORK(work, func); | 708 | INIT_WORK(work, func); |
681 | schedule_work_on(cpu, work); | 709 | if (cpu != orig) |
710 | schedule_work_on(cpu, work); | ||
682 | } | 711 | } |
712 | if (orig >= 0) | ||
713 | func(per_cpu_ptr(works, orig)); | ||
714 | |||
683 | for_each_online_cpu(cpu) | 715 | for_each_online_cpu(cpu) |
684 | flush_work(per_cpu_ptr(works, cpu)); | 716 | flush_work(per_cpu_ptr(works, cpu)); |
717 | |||
685 | put_online_cpus(); | 718 | put_online_cpus(); |
686 | free_percpu(works); | 719 | free_percpu(works); |
687 | return 0; | 720 | return 0; |