Diffstat (limited to 'kernel')
91 files changed, 3458 insertions, 3479 deletions
diff --git a/kernel/capability.c b/kernel/capability.c
index 34019c57888d..a8d63df0c322 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -7,6 +7,8 @@
  * 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/audit.h>
 #include <linux/capability.h>
 #include <linux/mm.h>
@@ -42,15 +44,10 @@ __setup("no_file_caps", file_caps_disable);
 
 static void warn_legacy_capability_use(void)
 {
-	static int warned;
-	if (!warned) {
-		char name[sizeof(current->comm)];
-
-		printk(KERN_INFO "warning: `%s' uses 32-bit capabilities"
-		       " (legacy support in use)\n",
-		       get_task_comm(name, current));
-		warned = 1;
-	}
+	char name[sizeof(current->comm)];
+
+	pr_info_once("warning: `%s' uses 32-bit capabilities (legacy support in use)\n",
+		     get_task_comm(name, current));
 }
 
 /*
@@ -71,16 +68,10 @@ static void warn_legacy_capability_use(void)
 
 static void warn_deprecated_v2(void)
 {
-	static int warned;
-
-	if (!warned) {
-		char name[sizeof(current->comm)];
-
-		printk(KERN_INFO "warning: `%s' uses deprecated v2"
-		       " capabilities in a way that may be insecure.\n",
-		       get_task_comm(name, current));
-		warned = 1;
-	}
+	char name[sizeof(current->comm)];
+
+	pr_info_once("warning: `%s' uses deprecated v2 capabilities in a way that may be insecure\n",
+		     get_task_comm(name, current));
 }
 
 /*
@@ -380,7 +371,7 @@ bool has_capability_noaudit(struct task_struct *t, int cap)
 bool ns_capable(struct user_namespace *ns, int cap)
 {
 	if (unlikely(!cap_valid(cap))) {
-		printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
+		pr_crit("capable() called with invalid cap=%u\n", cap);
 		BUG();
 	}
 
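Note on the pattern used in the capability.c hunks above: every open-coded "static int warned" guard around printk(KERN_INFO ...) is collapsed into a single pr_info_once()/pr_crit() call, and the new pr_fmt() define makes each pr_*() message carry the KBUILD_MODNAME prefix automatically. A minimal sketch of the resulting style, assuming an ordinary kernel context (the function and message below are illustrative only, not part of the patch):

	/* pr_fmt() must be defined before printk.h is pulled in */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>

	static void warn_once_example(void)
	{
		/*
		 * Replaces the old "static int warned; if (!warned) {
		 * printk(...); warned = 1; }" idiom: pr_info_once() keeps
		 * its own one-shot guard and prints at most once per boot.
		 */
		pr_info_once("example condition hit, printed only once\n");
	}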
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0c753ddd223b..9fcdaa705b6c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -40,23 +40,20 @@
 #include <linux/proc_fs.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
-#include <linux/backing-dev.h>
 #include <linux/slab.h>
-#include <linux/magic.h>
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/string.h>
 #include <linux/sort.h>
 #include <linux/kmod.h>
-#include <linux/module.h>
 #include <linux/delayacct.h>
 #include <linux/cgroupstats.h>
 #include <linux/hashtable.h>
-#include <linux/namei.h>
 #include <linux/pid_namespace.h>
 #include <linux/idr.h>
 #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
-#include <linux/flex_array.h> /* used in cgroup_attach_task */
 #include <linux/kthread.h>
+#include <linux/delay.h>
 
 #include <linux/atomic.h>
 
@@ -68,43 +65,49 @@
  */
 #define CGROUP_PIDLIST_DESTROY_DELAY	HZ
 
+#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
+					 MAX_CFTYPE_NAME + 2)
+
+/*
+ * cgroup_tree_mutex nests above cgroup_mutex and protects cftypes, file
+ * creation/removal and hierarchy changing operations including cgroup
+ * creation, removal, css association and controller rebinding.  This outer
+ * lock is needed mainly to resolve the circular dependency between kernfs
+ * active ref and cgroup_mutex.  cgroup_tree_mutex nests above both.
+ */
+static DEFINE_MUTEX(cgroup_tree_mutex);
+
 /*
  * cgroup_mutex is the master lock.  Any modification to cgroup or its
  * hierarchy must be performed while holding it.
  *
- * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify
- * cgroupfs_root of any cgroup hierarchy - subsys list, flags,
- * release_agent_path and so on.  Modifying requires both cgroup_mutex and
- * cgroup_root_mutex.  Readers can acquire either of the two.  This is to
- * break the following locking order cycle.
- *
- * A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem
- * B. namespace_sem -> cgroup_mutex
+ * css_set_rwsem protects task->cgroups pointer, the list of css_set
+ * objects, and the chain of tasks off each css_set.
  *
- * B happens only through cgroup_show_options() and using cgroup_root_mutex
- * breaks it.
+ * These locks are exported if CONFIG_PROVE_RCU so that accessors in
+ * cgroup.h can use them for lockdep annotations.
  */
 #ifdef CONFIG_PROVE_RCU
 DEFINE_MUTEX(cgroup_mutex);
-EXPORT_SYMBOL_GPL(cgroup_mutex);	/* only for lockdep */
+DECLARE_RWSEM(css_set_rwsem);
+EXPORT_SYMBOL_GPL(cgroup_mutex);
+EXPORT_SYMBOL_GPL(css_set_rwsem);
 #else
 static DEFINE_MUTEX(cgroup_mutex);
+static DECLARE_RWSEM(css_set_rwsem);
 #endif
 
-static DEFINE_MUTEX(cgroup_root_mutex);
+/*
+ * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
+ * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
+ */
+static DEFINE_SPINLOCK(release_agent_path_lock);
 
-#define cgroup_assert_mutex_or_rcu_locked()				\
+#define cgroup_assert_mutexes_or_rcu_locked()				\
 	rcu_lockdep_assert(rcu_read_lock_held() ||			\
+			   lockdep_is_held(&cgroup_tree_mutex) ||	\
 			   lockdep_is_held(&cgroup_mutex),		\
-			   "cgroup_mutex or RCU read lock required");
-
-#ifdef CONFIG_LOCKDEP
-#define cgroup_assert_mutex_or_root_locked()				\
-	WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&cgroup_mutex) &&	\
-				     !lockdep_is_held(&cgroup_root_mutex)))
-#else
-#define cgroup_assert_mutex_or_root_locked()	do { } while (0)
-#endif
+			   "cgroup_[tree_]mutex or RCU read lock required");
 
 /*
  * cgroup destruction makes heavy use of work items and there can be a lot
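The locking comment added above defines the nesting order that the rest of this diff relies on: cgroup_tree_mutex is the outermost lock, cgroup_mutex nests inside it, and css_set_rwsem (which replaces the old css_set_lock rwlock) is taken innermost when walking css_set/task links; cgroup_destroy_root() later in this patch acquires them in exactly that order. A hedged sketch of the expected order, using a hypothetical helper that is not part of the patch:

	static void example_nested_locking(void)
	{
		mutex_lock(&cgroup_tree_mutex);		/* hierarchy/file topology */
		mutex_lock(&cgroup_mutex);		/* cgroup state proper */
		down_read(&css_set_rwsem);		/* css_set <-> task links */

		/* ... read cgroup/css_set state here ... */

		up_read(&css_set_rwsem);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&cgroup_tree_mutex);
	}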
@@ -120,42 +123,41 @@ static struct workqueue_struct *cgroup_destroy_wq;
  */
 static struct workqueue_struct *cgroup_pidlist_destroy_wq;
 
-/*
- * Generate an array of cgroup subsystem pointers. At boot time, this is
- * populated with the built in subsystems, and modular subsystems are
- * registered after that. The mutable section of this array is protected by
- * cgroup_mutex.
- */
-#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys,
-#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
-static struct cgroup_subsys *cgroup_subsys[CGROUP_SUBSYS_COUNT] = {
+/* generate an array of cgroup subsystem pointers */
+#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+static struct cgroup_subsys *cgroup_subsys[] = {
+#include <linux/cgroup_subsys.h>
+};
+#undef SUBSYS
+
+/* array of cgroup subsystem names */
+#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
+static const char *cgroup_subsys_name[] = {
 #include <linux/cgroup_subsys.h>
 };
+#undef SUBSYS
 
 /*
- * The dummy hierarchy, reserved for the subsystems that are otherwise
+ * The default hierarchy, reserved for the subsystems that are otherwise
  * unattached - it never has more than a single cgroup, and all tasks are
  * part of that cgroup.
  */
-static struct cgroupfs_root cgroup_dummy_root;
+struct cgroup_root cgrp_dfl_root;
 
-/* dummy_top is a shorthand for the dummy hierarchy's top cgroup */
-static struct cgroup * const cgroup_dummy_top = &cgroup_dummy_root.top_cgroup;
+/*
+ * The default hierarchy always exists but is hidden until mounted for the
+ * first time.  This is for backward compatibility.
+ */
+static bool cgrp_dfl_root_visible;
 
 /* The list of hierarchy roots */
 
 static LIST_HEAD(cgroup_roots);
 static int cgroup_root_count;
 
-/*
- * Hierarchy ID allocation and mapping.  It follows the same exclusion
- * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for
- * writes, either for reads.
- */
+/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
 static DEFINE_IDR(cgroup_hierarchy_idr);
 
-static struct cgroup_name root_cgroup_name = { .name = "/" };
-
 /*
  * Assign a monotonically increasing serial number to cgroups.  It
  * guarantees cgroups with bigger numbers are newer than those with smaller
@@ -175,11 +177,13 @@ static int need_forkexit_callback __read_mostly;
 
 static struct cftype cgroup_base_files[];
 
+static void cgroup_put(struct cgroup *cgrp);
+static int rebind_subsystems(struct cgroup_root *dst_root,
+			     unsigned long ss_mask);
 static void cgroup_destroy_css_killed(struct cgroup *cgrp);
 static int cgroup_destroy_locked(struct cgroup *cgrp);
 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
 			      bool is_add);
-static int cgroup_file_release(struct inode *inode, struct file *file);
 static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);
 
 /**
@@ -197,8 +201,9 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
 					      struct cgroup_subsys *ss)
 {
 	if (ss)
-		return rcu_dereference_check(cgrp->subsys[ss->subsys_id],
-					     lockdep_is_held(&cgroup_mutex));
+		return rcu_dereference_check(cgrp->subsys[ss->id],
+					lockdep_is_held(&cgroup_tree_mutex) ||
+					lockdep_is_held(&cgroup_mutex));
 	else
 		return &cgrp->dummy_css;
 }
@@ -209,6 +214,27 @@ static inline bool cgroup_is_dead(const struct cgroup *cgrp)
 	return test_bit(CGRP_DEAD, &cgrp->flags);
 }
 
+struct cgroup_subsys_state *seq_css(struct seq_file *seq)
+{
+	struct kernfs_open_file *of = seq->private;
+	struct cgroup *cgrp = of->kn->parent->priv;
+	struct cftype *cft = seq_cft(seq);
+
+	/*
+	 * This is open and unprotected implementation of cgroup_css().
+	 * seq_css() is only called from a kernfs file operation which has
+	 * an active reference on the file.  Because all the subsystem
+	 * files are drained before a css is disassociated with a cgroup,
+	 * the matching css from the cgroup's subsys table is guaranteed to
+	 * be and stay valid until the enclosing operation is complete.
+	 */
+	if (cft->ss)
+		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
+	else
+		return &cgrp->dummy_css;
+}
+EXPORT_SYMBOL_GPL(seq_css);
+
 /**
  * cgroup_is_descendant - test ancestry
  * @cgrp: the cgroup to be tested
@@ -227,7 +253,6 @@ bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
 	}
 	return false;
 }
-EXPORT_SYMBOL_GPL(cgroup_is_descendant);
 
 static int cgroup_is_releasable(const struct cgroup *cgrp)
 {
@@ -254,54 +279,23 @@ static int notify_on_release(const struct cgroup *cgrp)
 	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
 		if (!((css) = rcu_dereference_check(			\
 				(cgrp)->subsys[(ssid)],			\
+				lockdep_is_held(&cgroup_tree_mutex) ||	\
 				lockdep_is_held(&cgroup_mutex)))) { }	\
 		else
 
 /**
- * for_each_subsys - iterate all loaded cgroup subsystems
+ * for_each_subsys - iterate all enabled cgroup subsystems
  * @ss: the iteration cursor
  * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
- *
- * Iterates through all loaded subsystems.  Should be called under
- * cgroup_mutex or cgroup_root_mutex.
  */
 #define for_each_subsys(ss, ssid)					\
-	for (({ cgroup_assert_mutex_or_root_locked(); (ssid) = 0; });	\
-	     (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)			\
-		if (!((ss) = cgroup_subsys[(ssid)])) { }		\
-		else
+	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
+	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
 
-/**
- * for_each_builtin_subsys - iterate all built-in cgroup subsystems
- * @ss: the iteration cursor
- * @i: the index of @ss, CGROUP_BUILTIN_SUBSYS_COUNT after reaching the end
- *
- * Bulit-in subsystems are always present and iteration itself doesn't
- * require any synchronization.
- */
-#define for_each_builtin_subsys(ss, i)					\
-	for ((i) = 0; (i) < CGROUP_BUILTIN_SUBSYS_COUNT &&		\
-	     (((ss) = cgroup_subsys[i]) || true); (i)++)
-
-/* iterate across the active hierarchies */
-#define for_each_active_root(root)					\
+/* iterate across the hierarchies */
+#define for_each_root(root)						\
 	list_for_each_entry((root), &cgroup_roots, root_list)
 
-static inline struct cgroup *__d_cgrp(struct dentry *dentry)
-{
-	return dentry->d_fsdata;
-}
-
-static inline struct cfent *__d_cfe(struct dentry *dentry)
-{
-	return dentry->d_fsdata;
-}
-
-static inline struct cftype *__d_cft(struct dentry *dentry)
-{
-	return __d_cfe(dentry)->type;
-}
-
 /**
  * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
  * @cgrp: the cgroup to be checked for liveness
@@ -347,23 +341,23 @@ struct cgrp_cset_link {
 	struct list_head	cgrp_link;
 };
 
-/* The default css_set - used by init and its children prior to any
+/*
+ * The default css_set - used by init and its children prior to any
  * hierarchies being mounted. It contains a pointer to the root state
  * for each subsystem. Also used to anchor the list of css_sets. Not
  * reference-counted, to improve performance when child cgroups
  * haven't been created.
  */
+static struct css_set init_css_set = {
+	.refcount		= ATOMIC_INIT(1),
+	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
+	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
+	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
+	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
+	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
+};
 
-static struct css_set init_css_set;
-static struct cgrp_cset_link init_cgrp_cset_link;
-
-/*
- * css_set_lock protects the list of css_set objects, and the chain of
- * tasks off each css_set.  Nests outside task->alloc_lock due to
- * css_task_iter_start().
- */
-static DEFINE_RWLOCK(css_set_lock);
-static int css_set_count;
+static int css_set_count	= 1;	/* 1 for init_css_set */
 
 /*
  * hash table for cgroup groups. This improves the performance to find
@@ -386,30 +380,14 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
 	return key;
 }
 
-/*
- * We don't maintain the lists running through each css_set to its task
- * until after the first call to css_task_iter_start().  This reduces the
- * fork()/exit() overhead for people who have cgroups compiled into their
- * kernel but not actually in use.
- */
-static int use_task_css_set_links __read_mostly;
-
-static void __put_css_set(struct css_set *cset, int taskexit)
+static void put_css_set_locked(struct css_set *cset, bool taskexit)
 {
 	struct cgrp_cset_link *link, *tmp_link;
 
-	/*
-	 * Ensure that the refcount doesn't hit zero while any readers
-	 * can see it. Similar to atomic_dec_and_lock(), but for an
-	 * rwlock
-	 */
-	if (atomic_add_unless(&cset->refcount, -1, 1))
-		return;
-	write_lock(&css_set_lock);
-	if (!atomic_dec_and_test(&cset->refcount)) {
-		write_unlock(&css_set_lock);
+	lockdep_assert_held(&css_set_rwsem);
+
+	if (!atomic_dec_and_test(&cset->refcount))
 		return;
-	}
 
 	/* This css_set is dead. unlink it and release cgroup refcounts */
 	hash_del(&cset->hlist);
@@ -421,7 +399,7 @@ static void __put_css_set(struct css_set *cset, int taskexit)
 		list_del(&link->cset_link);
 		list_del(&link->cgrp_link);
 
-		/* @cgrp can't go away while we're holding css_set_lock */
+		/* @cgrp can't go away while we're holding css_set_rwsem */
 		if (list_empty(&cgrp->cset_links) && notify_on_release(cgrp)) {
 			if (taskexit)
 				set_bit(CGRP_RELEASABLE, &cgrp->flags);
@@ -431,10 +409,24 @@ static void __put_css_set(struct css_set *cset, int taskexit)
 		kfree(link);
 	}
 
-	write_unlock(&css_set_lock);
 	kfree_rcu(cset, rcu_head);
 }
 
+static void put_css_set(struct css_set *cset, bool taskexit)
+{
+	/*
+	 * Ensure that the refcount doesn't hit zero while any readers
+	 * can see it. Similar to atomic_dec_and_lock(), but for an
+	 * rwlock
+	 */
+	if (atomic_add_unless(&cset->refcount, -1, 1))
+		return;
+
+	down_write(&css_set_rwsem);
+	put_css_set_locked(cset, taskexit);
+	up_write(&css_set_rwsem);
+}
+
 /*
  * refcounted get/put for css_set objects
  */
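The put_css_set()/put_css_set_locked() split introduced above keeps the common case lock-free: atomic_add_unless(&cset->refcount, -1, 1) drops a reference without touching css_set_rwsem unless this put could be the final one, and only then does put_css_set() fall back to down_write() plus put_css_set_locked(). The same fast-path idea in a standalone C11 sketch (a userspace analogue with made-up names, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	/*
	 * Decrement *v unless it is currently 1; returns true if the decrement
	 * happened.  Mirrors atomic_add_unless(v, -1, 1): a "false" result
	 * means the caller may be dropping the last reference and must take
	 * the lock before retrying the decrement.
	 */
	static bool dec_unless_last(atomic_int *v)
	{
		int old = atomic_load(v);

		while (old != 1) {
			if (atomic_compare_exchange_weak(v, &old, old - 1))
				return true;	/* not the last ref: no lock needed */
		}
		return false;
	}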
@@ -443,16 +435,6 @@ static inline void get_css_set(struct css_set *cset)
 	atomic_inc(&cset->refcount);
 }
 
-static inline void put_css_set(struct css_set *cset)
-{
-	__put_css_set(cset, 0);
-}
-
-static inline void put_css_set_taskexit(struct css_set *cset)
-{
-	__put_css_set(cset, 1);
-}
-
 /**
  * compare_css_sets - helper function for find_existing_css_set().
  * @cset: candidate css_set being tested
@@ -535,7 +517,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
 					struct cgroup *cgrp,
 					struct cgroup_subsys_state *template[])
 {
-	struct cgroupfs_root *root = cgrp->root;
+	struct cgroup_root *root = cgrp->root;
 	struct cgroup_subsys *ss;
 	struct css_set *cset;
 	unsigned long key;
@@ -547,7 +529,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
 	 * won't change, so no need for locking.
 	 */
 	for_each_subsys(ss, i) {
-		if (root->subsys_mask & (1UL << i)) {
+		if (root->cgrp.subsys_mask & (1UL << i)) {
 			/* Subsystem is in this hierarchy. So we want
 			 * the subsystem state from the new
 			 * cgroup */
@@ -652,11 +634,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 
 	/* First see if we already have a cgroup group that matches
 	 * the desired set */
-	read_lock(&css_set_lock);
+	down_read(&css_set_rwsem);
 	cset = find_existing_css_set(old_cset, cgrp, template);
 	if (cset)
 		get_css_set(cset);
-	read_unlock(&css_set_lock);
+	up_read(&css_set_rwsem);
 
 	if (cset)
 		return cset;
@@ -674,13 +656,16 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 	atomic_set(&cset->refcount, 1);
 	INIT_LIST_HEAD(&cset->cgrp_links);
 	INIT_LIST_HEAD(&cset->tasks);
+	INIT_LIST_HEAD(&cset->mg_tasks);
+	INIT_LIST_HEAD(&cset->mg_preload_node);
+	INIT_LIST_HEAD(&cset->mg_node);
 	INIT_HLIST_NODE(&cset->hlist);
 
 	/* Copy the set of subsystem state objects generated in
 	 * find_existing_css_set() */
 	memcpy(cset->subsys, template, sizeof(cset->subsys));
 
-	write_lock(&css_set_lock);
+	down_write(&css_set_rwsem);
 	/* Add reference counts and links from the new css_set. */
 	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
 		struct cgroup *c = link->cgrp;
@@ -698,31 +683,105 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 	key = css_set_hash(cset->subsys);
 	hash_add(css_set_table, &cset->hlist, key);
 
-	write_unlock(&css_set_lock);
+	up_write(&css_set_rwsem);
 
 	return cset;
 }
 
-/*
- * Return the cgroup for "task" from the given hierarchy. Must be
- * called with cgroup_mutex held.
- */
-static struct cgroup *task_cgroup_from_root(struct task_struct *task,
-					    struct cgroupfs_root *root)
+static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
 {
-	struct css_set *cset;
-	struct cgroup *res = NULL;
+	struct cgroup *root_cgrp = kf_root->kn->priv;
+
+	return root_cgrp->root;
+}
+
+static int cgroup_init_root_id(struct cgroup_root *root)
+{
+	int id;
+
+	lockdep_assert_held(&cgroup_mutex);
+
+	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
+	if (id < 0)
+		return id;
+
+	root->hierarchy_id = id;
+	return 0;
+}
+
+static void cgroup_exit_root_id(struct cgroup_root *root)
+{
+	lockdep_assert_held(&cgroup_mutex);
+
+	if (root->hierarchy_id) {
+		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
+		root->hierarchy_id = 0;
+	}
+}
+
+static void cgroup_free_root(struct cgroup_root *root)
+{
+	if (root) {
+		/* hierarhcy ID shoulid already have been released */
+		WARN_ON_ONCE(root->hierarchy_id);
+
+		idr_destroy(&root->cgroup_idr);
+		kfree(root);
+	}
+}
+
+static void cgroup_destroy_root(struct cgroup_root *root)
+{
+	struct cgroup *cgrp = &root->cgrp;
+	struct cgrp_cset_link *link, *tmp_link;
+
+	mutex_lock(&cgroup_tree_mutex);
+	mutex_lock(&cgroup_mutex);
+
+	BUG_ON(atomic_read(&root->nr_cgrps));
+	BUG_ON(!list_empty(&cgrp->children));
+
+	/* Rebind all subsystems back to the default hierarchy */
+	rebind_subsystems(&cgrp_dfl_root, cgrp->subsys_mask);
 
-	BUG_ON(!mutex_is_locked(&cgroup_mutex));
-	read_lock(&css_set_lock);
 	/*
-	 * No need to lock the task - since we hold cgroup_mutex the
-	 * task can't change groups, so the only thing that can happen
-	 * is that it exits and its css is set back to init_css_set.
+	 * Release all the links from cset_links to this hierarchy's
+	 * root cgroup
 	 */
-	cset = task_css_set(task);
+	down_write(&css_set_rwsem);
+
+	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
+		list_del(&link->cset_link);
+		list_del(&link->cgrp_link);
+		kfree(link);
+	}
+	up_write(&css_set_rwsem);
+
+	if (!list_empty(&root->root_list)) {
+		list_del(&root->root_list);
+		cgroup_root_count--;
+	}
+
+	cgroup_exit_root_id(root);
+
+	mutex_unlock(&cgroup_mutex);
+	mutex_unlock(&cgroup_tree_mutex);
+
+	kernfs_destroy_root(root->kf_root);
+	cgroup_free_root(root);
+}
+
+/* look up cgroup associated with given css_set on the specified hierarchy */
+static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
+					    struct cgroup_root *root)
+{
+	struct cgroup *res = NULL;
+
+	lockdep_assert_held(&cgroup_mutex);
+	lockdep_assert_held(&css_set_rwsem);
+
 	if (cset == &init_css_set) {
-		res = &root->top_cgroup;
+		res = &root->cgrp;
 	} else {
 		struct cgrp_cset_link *link;
 
@@ -735,16 +794,27 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
 			}
 		}
 	}
-	read_unlock(&css_set_lock);
+
 	BUG_ON(!res);
 	return res;
 }
 
 /*
- * There is one global cgroup mutex.  We also require taking
- * task_lock() when dereferencing a task's cgroup subsys pointers.
- * See "The task_lock() exception", at the end of this comment.
- *
+ * Return the cgroup for "task" from the given hierarchy.  Must be
+ * called with cgroup_mutex and css_set_rwsem held.
+ */
+static struct cgroup *task_cgroup_from_root(struct task_struct *task,
+					    struct cgroup_root *root)
+{
+	/*
+	 * No need to lock the task - since we hold cgroup_mutex the
+	 * task can't change groups, so the only thing that can happen
+	 * is that it exits and its css is set back to init_css_set.
+	 */
+	return cset_cgroup_from_root(task_css_set(task), root);
+}
+
+/*
  * A task must hold cgroup_mutex to modify cgroups.
  *
  * Any task can increment and decrement the count field without lock.
@@ -770,98 +840,79 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
  * A cgroup can only be deleted if both its 'count' of using tasks
  * is zero, and its list of 'children' cgroups is empty.  Since all
  * tasks in the system use _some_ cgroup, and since there is always at
- * least one task in the system (init, pid == 1), therefore, top_cgroup
+ * least one task in the system (init, pid == 1), therefore, root cgroup
  * always has either children cgroups and/or using tasks.  So we don't
- * need a special hack to ensure that top_cgroup cannot be deleted.
- *
- * The task_lock() exception
- *
- * The need for this exception arises from the action of
- * cgroup_attach_task(), which overwrites one task's cgroup pointer with
- * another.  It does so using cgroup_mutex, however there are
- * several performance critical places that need to reference
- * task->cgroup without the expense of grabbing a system global
- * mutex.  Therefore except as noted below, when dereferencing or, as
- * in cgroup_attach_task(), modifying a task's cgroup pointer we use
- * task_lock(), which acts on a spinlock (task->alloc_lock) already in
- * the task_struct routinely used for such matters.
+ * need a special hack to ensure that root cgroup cannot be deleted.
  *
  * P.S.  One more locking exception.  RCU is used to guard the
  * update of a tasks cgroup pointer by cgroup_attach_task()
  */
 
-/*
- * A couple of forward declarations required, due to cyclic reference loop:
- * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
- * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
- * -> cgroup_mkdir.
- */
-
-static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
-static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
-static const struct inode_operations cgroup_dir_inode_operations;
+static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
 static const struct file_operations proc_cgroupstats_operations;
 
-static struct backing_dev_info cgroup_backing_dev_info = {
-	.name		= "cgroup",
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
-static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
+static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
+			      char *buf)
 {
-	struct inode *inode = new_inode(sb);
-
-	if (inode) {
-		inode->i_ino = get_next_ino();
-		inode->i_mode = mode;
-		inode->i_uid = current_fsuid();
-		inode->i_gid = current_fsgid();
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
-	}
-	return inode;
+	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
+	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
+		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
+			 cft->ss->name, cft->name);
+	else
+		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
+	return buf;
 }
 
-static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry)
+/**
+ * cgroup_file_mode - deduce file mode of a control file
+ * @cft: the control file in question
+ *
+ * returns cft->mode if ->mode is not 0
+ * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
+ * returns S_IRUGO if it has only a read handler
+ * returns S_IWUSR if it has only a write hander
+ */
+static umode_t cgroup_file_mode(const struct cftype *cft)
 {
-	struct cgroup_name *name;
+	umode_t mode = 0;
 
-	name = kmalloc(sizeof(*name) + dentry->d_name.len + 1, GFP_KERNEL);
-	if (!name)
-		return NULL;
-	strcpy(name->name, dentry->d_name.name);
-	return name;
+	if (cft->mode)
+		return cft->mode;
+
+	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
+		mode |= S_IRUGO;
+
+	if (cft->write_u64 || cft->write_s64 || cft->write_string ||
+	    cft->trigger)
+		mode |= S_IWUSR;
+
+	return mode;
 }
 
 static void cgroup_free_fn(struct work_struct *work)
 {
 	struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);
 
-	mutex_lock(&cgroup_mutex);
-	cgrp->root->number_of_cgroups--;
-	mutex_unlock(&cgroup_mutex);
-
-	/*
-	 * We get a ref to the parent's dentry, and put the ref when
-	 * this cgroup is being freed, so it's guaranteed that the
-	 * parent won't be destroyed before its children.
-	 */
-	dput(cgrp->parent->dentry);
-
-	/*
-	 * Drop the active superblock reference that we took when we
-	 * created the cgroup.  This will free cgrp->root, if we are
-	 * holding the last reference to @sb.
-	 */
-	deactivate_super(cgrp->root->sb);
-
+	atomic_dec(&cgrp->root->nr_cgrps);
 	cgroup_pidlist_destroy_all(cgrp);
 
-	simple_xattrs_free(&cgrp->xattrs);
-
-	kfree(rcu_dereference_raw(cgrp->name));
-	kfree(cgrp);
+	if (cgrp->parent) {
+		/*
+		 * We get a ref to the parent, and put the ref when this
+		 * cgroup is being freed, so it's guaranteed that the
+		 * parent won't be destroyed before its children.
+		 */
+		cgroup_put(cgrp->parent);
+		kernfs_put(cgrp->kn);
+		kfree(cgrp);
+	} else {
+		/*
+		 * This is root cgroup's refcnt reaching zero, which
+		 * indicates that the root should be released.
+		 */
+		cgroup_destroy_root(cgrp->root);
+	}
 }
 
 static void cgroup_free_rcu(struct rcu_head *head)
@@ -872,73 +923,40 @@ static void cgroup_free_rcu(struct rcu_head *head)
 	queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
 }
 
-static void cgroup_diput(struct dentry *dentry, struct inode *inode)
-{
-	/* is dentry a directory ? if so, kfree() associated cgroup */
-	if (S_ISDIR(inode->i_mode)) {
-		struct cgroup *cgrp = dentry->d_fsdata;
-
-		BUG_ON(!(cgroup_is_dead(cgrp)));
-
-		/*
-		 * XXX: cgrp->id is only used to look up css's.  As cgroup
-		 * and css's lifetimes will be decoupled, it should be made
-		 * per-subsystem and moved to css->id so that lookups are
-		 * successful until the target css is released.
-		 */
-		mutex_lock(&cgroup_mutex);
-		idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
-		mutex_unlock(&cgroup_mutex);
-		cgrp->id = -1;
-
-		call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
-	} else {
-		struct cfent *cfe = __d_cfe(dentry);
-		struct cgroup *cgrp = dentry->d_parent->d_fsdata;
-
-		WARN_ONCE(!list_empty(&cfe->node) &&
-			  cgrp != &cgrp->root->top_cgroup,
-			  "cfe still linked for %s\n", cfe->type->name);
-		simple_xattrs_free(&cfe->xattrs);
-		kfree(cfe);
-	}
-	iput(inode);
-}
-
-static void remove_dir(struct dentry *d)
-{
-	struct dentry *parent = dget(d->d_parent);
-
-	d_delete(d);
-	simple_rmdir(parent->d_inode, d);
-	dput(parent);
-}
-
-static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
-{
-	struct cfent *cfe;
-
-	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
-	lockdep_assert_held(&cgroup_mutex);
-
-	/*
-	 * If we're doing cleanup due to failure of cgroup_create(),
-	 * the corresponding @cfe may not exist.
-	 */
-	list_for_each_entry(cfe, &cgrp->files, node) {
-		struct dentry *d = cfe->dentry;
-
-		if (cft && cfe->type != cft)
-			continue;
-
-		dget(d);
-		d_delete(d);
-		simple_unlink(cgrp->dentry->d_inode, d);
-		list_del_init(&cfe->node);
-		dput(d);
-
-		break;
-	}
-}
+static void cgroup_get(struct cgroup *cgrp)
+{
+	WARN_ON_ONCE(cgroup_is_dead(cgrp));
+	WARN_ON_ONCE(atomic_read(&cgrp->refcnt) <= 0);
+	atomic_inc(&cgrp->refcnt);
+}
+
+static void cgroup_put(struct cgroup *cgrp)
+{
+	if (!atomic_dec_and_test(&cgrp->refcnt))
+		return;
+	if (WARN_ON_ONCE(cgrp->parent && !cgroup_is_dead(cgrp)))
+		return;
+
+	/*
+	 * XXX: cgrp->id is only used to look up css's.  As cgroup and
+	 * css's lifetimes will be decoupled, it should be made
+	 * per-subsystem and moved to css->id so that lookups are
+	 * successful until the target css is released.
+	 */
+	mutex_lock(&cgroup_mutex);
+	idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+	mutex_unlock(&cgroup_mutex);
+	cgrp->id = -1;
+
+	call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
+}
+
+static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
+{
+	char name[CGROUP_FILE_NAME_MAX];
+
+	lockdep_assert_held(&cgroup_tree_mutex);
+	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
+}
 
 /**
@@ -952,144 +970,106 @@ static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
 	int i;
 
 	for_each_subsys(ss, i) {
-		struct cftype_set *set;
+		struct cftype *cfts;
 
 		if (!test_bit(i, &subsys_mask))
 			continue;
-		list_for_each_entry(set, &ss->cftsets, node)
-			cgroup_addrm_files(cgrp, set->cfts, false);
+		list_for_each_entry(cfts, &ss->cfts, node)
+			cgroup_addrm_files(cgrp, cfts, false);
 	}
 }
 
-/*
- * NOTE : the dentry must have been dget()'ed
- */
-static void cgroup_d_remove_dir(struct dentry *dentry)
-{
-	struct dentry *parent;
-
-	parent = dentry->d_parent;
-	spin_lock(&parent->d_lock);
-	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-	list_del_init(&dentry->d_u.d_child);
-	spin_unlock(&dentry->d_lock);
-	spin_unlock(&parent->d_lock);
-	remove_dir(dentry);
-}
-
-/*
- * Call with cgroup_mutex held. Drops reference counts on modules, including
- * any duplicate ones that parse_cgroupfs_options took. If this function
- * returns an error, no reference counts are touched.
- */
-static int rebind_subsystems(struct cgroupfs_root *root,
-			     unsigned long added_mask, unsigned removed_mask)
+static int rebind_subsystems(struct cgroup_root *dst_root,
+			     unsigned long ss_mask)
 {
-	struct cgroup *cgrp = &root->top_cgroup;
 	struct cgroup_subsys *ss;
-	unsigned long pinned = 0;
-	int i, ret;
+	int ssid, ret;
 
-	BUG_ON(!mutex_is_locked(&cgroup_mutex));
-	BUG_ON(!mutex_is_locked(&cgroup_root_mutex));
+	lockdep_assert_held(&cgroup_tree_mutex);
+	lockdep_assert_held(&cgroup_mutex);
 
-	/* Check that any added subsystems are currently free */
-	for_each_subsys(ss, i) {
-		if (!(added_mask & (1 << i)))
+	for_each_subsys(ss, ssid) {
+		if (!(ss_mask & (1 << ssid)))
 			continue;
 
-		/* is the subsystem mounted elsewhere? */
-		if (ss->root != &cgroup_dummy_root) {
-			ret = -EBUSY;
-			goto out_put;
-		}
+		/* if @ss is on the dummy_root, we can always move it */
+		if (ss->root == &cgrp_dfl_root)
+			continue;
 
-		/* pin the module */
-		if (!try_module_get(ss->module)) {
-			ret = -ENOENT;
-			goto out_put;
-		}
-		pinned |= 1 << i;
-	}
+		/* if @ss has non-root cgroups attached to it, can't move */
+		if (!list_empty(&ss->root->cgrp.children))
+			return -EBUSY;
 
-	/* subsys could be missing if unloaded between parsing and here */
-	if (added_mask != pinned) {
-		ret = -ENOENT;
-		goto out_put;
+		/* can't move between two non-dummy roots either */
+		if (dst_root != &cgrp_dfl_root)
+			return -EBUSY;
 	}
 
-	ret = cgroup_populate_dir(cgrp, added_mask);
-	if (ret)
-		goto out_put;
+	ret = cgroup_populate_dir(&dst_root->cgrp, ss_mask);
+	if (ret) {
+		if (dst_root != &cgrp_dfl_root)
+			return ret;
+
+		/*
+		 * Rebinding back to the default root is not allowed to
+		 * fail.  Using both default and non-default roots should
+		 * be rare.  Moving subsystems back and forth even more so.
+		 * Just warn about it and continue.
+		 */
+		if (cgrp_dfl_root_visible) {
+			pr_warning("cgroup: failed to create files (%d) while rebinding 0x%lx to default root\n",
+				   ret, ss_mask);
+			pr_warning("cgroup: you may retry by moving them to a different hierarchy and unbinding\n");
+		}
+	}
 
 	/*
 	 * Nothing can fail from this point on.  Remove files for the
 	 * removed subsystems and rebind each subsystem.
 	 */
-	cgroup_clear_dir(cgrp, removed_mask);
-
-	for_each_subsys(ss, i) {
-		unsigned long bit = 1UL << i;
-
-		if (bit & added_mask) {
-			/* We're binding this subsystem to this hierarchy */
-			BUG_ON(cgroup_css(cgrp, ss));
-			BUG_ON(!cgroup_css(cgroup_dummy_top, ss));
-			BUG_ON(cgroup_css(cgroup_dummy_top, ss)->cgroup != cgroup_dummy_top);
+	mutex_unlock(&cgroup_mutex);
+	for_each_subsys(ss, ssid)
+		if (ss_mask & (1 << ssid))
+			cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);
+	mutex_lock(&cgroup_mutex);
 
-			rcu_assign_pointer(cgrp->subsys[i],
-					   cgroup_css(cgroup_dummy_top, ss));
-			cgroup_css(cgrp, ss)->cgroup = cgrp;
+	for_each_subsys(ss, ssid) {
+		struct cgroup_root *src_root;
+		struct cgroup_subsys_state *css;
 
-			ss->root = root;
-			if (ss->bind)
-				ss->bind(cgroup_css(cgrp, ss));
+		if (!(ss_mask & (1 << ssid)))
+			continue;
 
-			/* refcount was already taken, and we're keeping it */
-			root->subsys_mask |= bit;
-		} else if (bit & removed_mask) {
-			/* We're removing this subsystem */
-			BUG_ON(cgroup_css(cgrp, ss) != cgroup_css(cgroup_dummy_top, ss));
-			BUG_ON(cgroup_css(cgrp, ss)->cgroup != cgrp);
+		src_root = ss->root;
+		css = cgroup_css(&src_root->cgrp, ss);
 
-			if (ss->bind)
-				ss->bind(cgroup_css(cgroup_dummy_top, ss));
+		WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));
 
-			cgroup_css(cgroup_dummy_top, ss)->cgroup = cgroup_dummy_top;
-			RCU_INIT_POINTER(cgrp->subsys[i], NULL);
+		RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
+		rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
+		ss->root = dst_root;
+		css->cgroup = &dst_root->cgrp;
 
-			cgroup_subsys[i]->root = &cgroup_dummy_root;
+		src_root->cgrp.subsys_mask &= ~(1 << ssid);
+		dst_root->cgrp.subsys_mask |= 1 << ssid;
 
-			/* subsystem is now free - drop reference on module */
-			module_put(ss->module);
-			root->subsys_mask &= ~bit;
-		}
+		if (ss->bind)
+			ss->bind(css);
 	}
 
-	/*
-	 * Mark @root has finished binding subsystems.  @root->subsys_mask
-	 * now matches the bound subsystems.
-	 */
-	root->flags |= CGRP_ROOT_SUBSYS_BOUND;
-
+	kernfs_activate(dst_root->cgrp.kn);
 	return 0;
-
-out_put:
-	for_each_subsys(ss, i)
-		if (pinned & (1 << i))
-			module_put(ss->module);
-	return ret;
 }
 
-static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
+static int cgroup_show_options(struct seq_file *seq,
+			       struct kernfs_root *kf_root)
 {
-	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
+	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
 	struct cgroup_subsys *ss;
 	int ssid;
 
-	mutex_lock(&cgroup_root_mutex);
 	for_each_subsys(ss, ssid)
-		if (root->subsys_mask & (1 << ssid))
+		if (root->cgrp.subsys_mask & (1 << ssid))
 			seq_printf(seq, ",%s", ss->name);
 	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
 		seq_puts(seq, ",sane_behavior");
@@ -1097,13 +1077,16 @@ static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
 		seq_puts(seq, ",noprefix");
 	if (root->flags & CGRP_ROOT_XATTR)
 		seq_puts(seq, ",xattr");
+
+	spin_lock(&release_agent_path_lock);
 	if (strlen(root->release_agent_path))
 		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
-	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags))
+	spin_unlock(&release_agent_path_lock);
+
+	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
 		seq_puts(seq, ",clone_children");
 	if (strlen(root->name))
 		seq_printf(seq, ",name=%s", root->name);
-	mutex_unlock(&cgroup_root_mutex);
 	return 0;
 }
 
@@ -1115,9 +1098,6 @@ struct cgroup_sb_opts {
 	char *name;
 	/* User explicitly requested empty subsystem */
 	bool none;
-
-	struct cgroupfs_root *new_root;
-
 };
 
 /*
@@ -1137,7 +1117,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
 	BUG_ON(!mutex_is_locked(&cgroup_mutex));
 
 #ifdef CONFIG_CPUSETS
-	mask = ~(1UL << cpuset_subsys_id);
+	mask = ~(1UL << cpuset_cgrp_id);
 #endif
 
 	memset(opts, 0, sizeof(*opts));
@@ -1227,30 +1207,34 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
 			return -ENOENT;
 	}
 
-	/*
-	 * If the 'all' option was specified select all the subsystems,
-	 * otherwise if 'none', 'name=' and a subsystem name options
-	 * were not specified, let's default to 'all'
-	 */
-	if (all_ss || (!one_ss && !opts->none && !opts->name))
-		for_each_subsys(ss, i)
-			if (!ss->disabled)
-				set_bit(i, &opts->subsys_mask);
-
 	/* Consistency checks */
 
 	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
 		pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
 
-		if (opts->flags & CGRP_ROOT_NOPREFIX) {
-			pr_err("cgroup: sane_behavior: noprefix is not allowed\n");
+		if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) ||
+		    opts->cpuset_clone_children || opts->release_agent ||
+		    opts->name) {
+			pr_err("cgroup: sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n");
 			return -EINVAL;
 		}
+	} else {
+		/*
+		 * If the 'all' option was specified select all the
+		 * subsystems, otherwise if 'none', 'name=' and a subsystem
+		 * name options were not specified, let's default to 'all'
+		 */
+		if (all_ss || (!one_ss && !opts->none && !opts->name))
+			for_each_subsys(ss, i)
+				if (!ss->disabled)
+					set_bit(i, &opts->subsys_mask);
 
-		if (opts->cpuset_clone_children) {
-			pr_err("cgroup: sane_behavior: clone_children is not allowed\n");
+		/*
+		 * We either have to specify by name or by subsystems. (So
+		 * all empty hierarchies must have a name).
+		 */
+		if (!opts->subsys_mask && !opts->name)
 			return -EINVAL;
-		}
 	}
 
 	/*
| @@ -1266,21 +1250,13 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) | |||
| 1266 | if (opts->subsys_mask && opts->none) | 1250 | if (opts->subsys_mask && opts->none) |
| 1267 | return -EINVAL; | 1251 | return -EINVAL; |
| 1268 | 1252 | ||
| 1269 | /* | ||
| 1270 | * We either have to specify by name or by subsystems. (So all | ||
| 1271 | * empty hierarchies must have a name). | ||
| 1272 | */ | ||
| 1273 | if (!opts->subsys_mask && !opts->name) | ||
| 1274 | return -EINVAL; | ||
| 1275 | |||
| 1276 | return 0; | 1253 | return 0; |
| 1277 | } | 1254 | } |
| 1278 | 1255 | ||
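The consistency checks above reduce to two branches depending on sane_behavior. A condensed restatement as a hypothetical helper (same flags and struct cgroup_sb_opts fields as in this file; no such function exists in the kernel, and it assumes the "default to all subsystems" step has already run):

/* hypothetical distillation of the checks in parse_cgroupfs_options() */
static int check_opts_consistency(struct cgroup_sb_opts *opts)
{
	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		/* sane_behavior rejects every legacy-only option */
		if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) ||
		    opts->cpuset_clone_children || opts->release_agent ||
		    opts->name)
			return -EINVAL;
	} else {
		/* legacy mode: an empty hierarchy must at least carry a name */
		if (!opts->subsys_mask && !opts->name)
			return -EINVAL;
	}

	/* 'none' and an explicit subsystem list stay mutually exclusive */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
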
| 1279 | static int cgroup_remount(struct super_block *sb, int *flags, char *data) | 1256 | static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) |
| 1280 | { | 1257 | { |
| 1281 | int ret = 0; | 1258 | int ret = 0; |
| 1282 | struct cgroupfs_root *root = sb->s_fs_info; | 1259 | struct cgroup_root *root = cgroup_root_from_kf(kf_root); |
| 1283 | struct cgroup *cgrp = &root->top_cgroup; | ||
| 1284 | struct cgroup_sb_opts opts; | 1260 | struct cgroup_sb_opts opts; |
| 1285 | unsigned long added_mask, removed_mask; | 1261 | unsigned long added_mask, removed_mask; |
| 1286 | 1262 | ||
| @@ -1289,21 +1265,20 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) | |||
| 1289 | return -EINVAL; | 1265 | return -EINVAL; |
| 1290 | } | 1266 | } |
| 1291 | 1267 | ||
| 1292 | mutex_lock(&cgrp->dentry->d_inode->i_mutex); | 1268 | mutex_lock(&cgroup_tree_mutex); |
| 1293 | mutex_lock(&cgroup_mutex); | 1269 | mutex_lock(&cgroup_mutex); |
| 1294 | mutex_lock(&cgroup_root_mutex); | ||
| 1295 | 1270 | ||
| 1296 | /* See what subsystems are wanted */ | 1271 | /* See what subsystems are wanted */ |
| 1297 | ret = parse_cgroupfs_options(data, &opts); | 1272 | ret = parse_cgroupfs_options(data, &opts); |
| 1298 | if (ret) | 1273 | if (ret) |
| 1299 | goto out_unlock; | 1274 | goto out_unlock; |
| 1300 | 1275 | ||
| 1301 | if (opts.subsys_mask != root->subsys_mask || opts.release_agent) | 1276 | if (opts.subsys_mask != root->cgrp.subsys_mask || opts.release_agent) |
| 1302 | pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n", | 1277 | pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n", |
| 1303 | task_tgid_nr(current), current->comm); | 1278 | task_tgid_nr(current), current->comm); |
| 1304 | 1279 | ||
| 1305 | added_mask = opts.subsys_mask & ~root->subsys_mask; | 1280 | added_mask = opts.subsys_mask & ~root->cgrp.subsys_mask; |
| 1306 | removed_mask = root->subsys_mask & ~opts.subsys_mask; | 1281 | removed_mask = root->cgrp.subsys_mask & ~opts.subsys_mask; |
| 1307 | 1282 | ||
| 1308 | /* Don't allow flags or name to change at remount */ | 1283 | /* Don't allow flags or name to change at remount */ |
| 1309 | if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) || | 1284 | if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) || |
| @@ -1316,422 +1291,332 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) | |||
| 1316 | } | 1291 | } |
| 1317 | 1292 | ||
| 1318 | /* remounting is not allowed for populated hierarchies */ | 1293 | /* remounting is not allowed for populated hierarchies */ |
| 1319 | if (root->number_of_cgroups > 1) { | 1294 | if (!list_empty(&root->cgrp.children)) { |
| 1320 | ret = -EBUSY; | 1295 | ret = -EBUSY; |
| 1321 | goto out_unlock; | 1296 | goto out_unlock; |
| 1322 | } | 1297 | } |
| 1323 | 1298 | ||
| 1324 | ret = rebind_subsystems(root, added_mask, removed_mask); | 1299 | ret = rebind_subsystems(root, added_mask); |
| 1325 | if (ret) | 1300 | if (ret) |
| 1326 | goto out_unlock; | 1301 | goto out_unlock; |
| 1327 | 1302 | ||
| 1328 | if (opts.release_agent) | 1303 | rebind_subsystems(&cgrp_dfl_root, removed_mask); |
| 1304 | |||
| 1305 | if (opts.release_agent) { | ||
| 1306 | spin_lock(&release_agent_path_lock); | ||
| 1329 | strcpy(root->release_agent_path, opts.release_agent); | 1307 | strcpy(root->release_agent_path, opts.release_agent); |
| 1308 | spin_unlock(&release_agent_path_lock); | ||
| 1309 | } | ||
| 1330 | out_unlock: | 1310 | out_unlock: |
| 1331 | kfree(opts.release_agent); | 1311 | kfree(opts.release_agent); |
| 1332 | kfree(opts.name); | 1312 | kfree(opts.name); |
| 1333 | mutex_unlock(&cgroup_root_mutex); | ||
| 1334 | mutex_unlock(&cgroup_mutex); | 1313 | mutex_unlock(&cgroup_mutex); |
| 1335 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | 1314 | mutex_unlock(&cgroup_tree_mutex); |
| 1336 | return ret; | 1315 | return ret; |
| 1337 | } | 1316 | } |
| 1338 | 1317 | ||
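The added_mask/removed_mask computation above is ordinary bit-set difference between the subsystems currently bound to the root and the ones the remount requests. A stand-alone illustration with made-up bit positions (the real positions come from the *_cgrp_id subsystem IDs):

#include <stdio.h>

int main(void)
{
	unsigned long cur = (1UL << 0) | (1UL << 2);	/* say: cpu + cpuacct bound   */
	unsigned long req = (1UL << 0) | (1UL << 4);	/* remount asks: cpu + memory */
	unsigned long added   = req & ~cur;		/* -> only the memory bit     */
	unsigned long removed = cur & ~req;		/* -> only the cpuacct bit    */

	printf("added=%#lx removed=%#lx\n", added, removed);
	return 0;
}
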
| 1339 | static const struct super_operations cgroup_ops = { | 1318 | /* |
| 1340 | .statfs = simple_statfs, | 1319 | * To reduce the fork() overhead for systems that are not actually using |
| 1341 | .drop_inode = generic_delete_inode, | 1320 | * their cgroups capability, we don't maintain the lists running through |
| 1342 | .show_options = cgroup_show_options, | 1321 | * each css_set to its tasks until we see the list actually used - in other |
| 1343 | .remount_fs = cgroup_remount, | 1322 | * words after the first mount. |
| 1344 | }; | 1323 | */ |
| 1324 | static bool use_task_css_set_links __read_mostly; | ||
| 1325 | |||
| 1326 | static void cgroup_enable_task_cg_lists(void) | ||
| 1327 | { | ||
| 1328 | struct task_struct *p, *g; | ||
| 1329 | |||
| 1330 | down_write(&css_set_rwsem); | ||
| 1331 | |||
| 1332 | if (use_task_css_set_links) | ||
| 1333 | goto out_unlock; | ||
| 1334 | |||
| 1335 | use_task_css_set_links = true; | ||
| 1336 | |||
| 1337 | /* | ||
| 1338 | * We need tasklist_lock because RCU is not safe against | ||
| 1339 | * while_each_thread(). Besides, a forking task that has passed | ||
| 1340 | * cgroup_post_fork() without seeing use_task_css_set_links = 1 | ||
| 1341 | * is not guaranteed to have its child immediately visible in the | ||
| 1342 | * tasklist if we walk through it with RCU. | ||
| 1343 | */ | ||
| 1344 | read_lock(&tasklist_lock); | ||
| 1345 | do_each_thread(g, p) { | ||
| 1346 | WARN_ON_ONCE(!list_empty(&p->cg_list) || | ||
| 1347 | task_css_set(p) != &init_css_set); | ||
| 1348 | |||
| 1349 | /* | ||
| 1350 | * We should check if the process is exiting, otherwise | ||
| 1351 | * it will race with cgroup_exit() in that the list | ||
| 1352 | * entry won't be deleted though the process has exited. | ||
| 1353 | * Do it while holding siglock so that we don't end up | ||
| 1354 | * racing against cgroup_exit(). | ||
| 1355 | */ | ||
| 1356 | spin_lock_irq(&p->sighand->siglock); | ||
| 1357 | if (!(p->flags & PF_EXITING)) { | ||
| 1358 | struct css_set *cset = task_css_set(p); | ||
| 1359 | |||
| 1360 | list_add(&p->cg_list, &cset->tasks); | ||
| 1361 | get_css_set(cset); | ||
| 1362 | } | ||
| 1363 | spin_unlock_irq(&p->sighand->siglock); | ||
| 1364 | } while_each_thread(g, p); | ||
| 1365 | read_unlock(&tasklist_lock); | ||
| 1366 | out_unlock: | ||
| 1367 | up_write(&css_set_rwsem); | ||
| 1368 | } | ||
| 1345 | 1369 | ||
| 1346 | static void init_cgroup_housekeeping(struct cgroup *cgrp) | 1370 | static void init_cgroup_housekeeping(struct cgroup *cgrp) |
| 1347 | { | 1371 | { |
| 1372 | atomic_set(&cgrp->refcnt, 1); | ||
| 1348 | INIT_LIST_HEAD(&cgrp->sibling); | 1373 | INIT_LIST_HEAD(&cgrp->sibling); |
| 1349 | INIT_LIST_HEAD(&cgrp->children); | 1374 | INIT_LIST_HEAD(&cgrp->children); |
| 1350 | INIT_LIST_HEAD(&cgrp->files); | ||
| 1351 | INIT_LIST_HEAD(&cgrp->cset_links); | 1375 | INIT_LIST_HEAD(&cgrp->cset_links); |
| 1352 | INIT_LIST_HEAD(&cgrp->release_list); | 1376 | INIT_LIST_HEAD(&cgrp->release_list); |
| 1353 | INIT_LIST_HEAD(&cgrp->pidlists); | 1377 | INIT_LIST_HEAD(&cgrp->pidlists); |
| 1354 | mutex_init(&cgrp->pidlist_mutex); | 1378 | mutex_init(&cgrp->pidlist_mutex); |
| 1355 | cgrp->dummy_css.cgroup = cgrp; | 1379 | cgrp->dummy_css.cgroup = cgrp; |
| 1356 | simple_xattrs_init(&cgrp->xattrs); | ||
| 1357 | } | 1380 | } |
| 1358 | 1381 | ||
| 1359 | static void init_cgroup_root(struct cgroupfs_root *root) | 1382 | static void init_cgroup_root(struct cgroup_root *root, |
| 1383 | struct cgroup_sb_opts *opts) | ||
| 1360 | { | 1384 | { |
| 1361 | struct cgroup *cgrp = &root->top_cgroup; | 1385 | struct cgroup *cgrp = &root->cgrp; |
| 1362 | 1386 | ||
| 1363 | INIT_LIST_HEAD(&root->root_list); | 1387 | INIT_LIST_HEAD(&root->root_list); |
| 1364 | root->number_of_cgroups = 1; | 1388 | atomic_set(&root->nr_cgrps, 1); |
| 1365 | cgrp->root = root; | 1389 | cgrp->root = root; |
| 1366 | RCU_INIT_POINTER(cgrp->name, &root_cgroup_name); | ||
| 1367 | init_cgroup_housekeeping(cgrp); | 1390 | init_cgroup_housekeeping(cgrp); |
| 1368 | idr_init(&root->cgroup_idr); | 1391 | idr_init(&root->cgroup_idr); |
| 1369 | } | ||
| 1370 | |||
| 1371 | static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end) | ||
| 1372 | { | ||
| 1373 | int id; | ||
| 1374 | 1392 | ||
| 1375 | lockdep_assert_held(&cgroup_mutex); | ||
| 1376 | lockdep_assert_held(&cgroup_root_mutex); | ||
| 1377 | |||
| 1378 | id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end, | ||
| 1379 | GFP_KERNEL); | ||
| 1380 | if (id < 0) | ||
| 1381 | return id; | ||
| 1382 | |||
| 1383 | root->hierarchy_id = id; | ||
| 1384 | return 0; | ||
| 1385 | } | ||
| 1386 | |||
| 1387 | static void cgroup_exit_root_id(struct cgroupfs_root *root) | ||
| 1388 | { | ||
| 1389 | lockdep_assert_held(&cgroup_mutex); | ||
| 1390 | lockdep_assert_held(&cgroup_root_mutex); | ||
| 1391 | |||
| 1392 | if (root->hierarchy_id) { | ||
| 1393 | idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); | ||
| 1394 | root->hierarchy_id = 0; | ||
| 1395 | } | ||
| 1396 | } | ||
| 1397 | |||
| 1398 | static int cgroup_test_super(struct super_block *sb, void *data) | ||
| 1399 | { | ||
| 1400 | struct cgroup_sb_opts *opts = data; | ||
| 1401 | struct cgroupfs_root *root = sb->s_fs_info; | ||
| 1402 | |||
| 1403 | /* If we asked for a name then it must match */ | ||
| 1404 | if (opts->name && strcmp(opts->name, root->name)) | ||
| 1405 | return 0; | ||
| 1406 | |||
| 1407 | /* | ||
| 1408 | * If we asked for subsystems (or explicitly for no | ||
| 1409 | * subsystems) then they must match | ||
| 1410 | */ | ||
| 1411 | if ((opts->subsys_mask || opts->none) | ||
| 1412 | && (opts->subsys_mask != root->subsys_mask)) | ||
| 1413 | return 0; | ||
| 1414 | |||
| 1415 | return 1; | ||
| 1416 | } | ||
| 1417 | |||
| 1418 | static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) | ||
| 1419 | { | ||
| 1420 | struct cgroupfs_root *root; | ||
| 1421 | |||
| 1422 | if (!opts->subsys_mask && !opts->none) | ||
| 1423 | return NULL; | ||
| 1424 | |||
| 1425 | root = kzalloc(sizeof(*root), GFP_KERNEL); | ||
| 1426 | if (!root) | ||
| 1427 | return ERR_PTR(-ENOMEM); | ||
| 1428 | |||
| 1429 | init_cgroup_root(root); | ||
| 1430 | |||
| 1431 | /* | ||
| 1432 | * We need to set @root->subsys_mask now so that @root can be | ||
| 1433 | * matched by cgroup_test_super() before it finishes | ||
| 1434 | * initialization; otherwise, competing mounts with the same | ||
| 1435 | * options may try to bind the same subsystems instead of waiting | ||
| 1436 | * for the first one leading to unexpected mount errors. | ||
| 1437 | * SUBSYS_BOUND will be set once actual binding is complete. | ||
| 1438 | */ | ||
| 1439 | root->subsys_mask = opts->subsys_mask; | ||
| 1440 | root->flags = opts->flags; | 1393 | root->flags = opts->flags; |
| 1441 | if (opts->release_agent) | 1394 | if (opts->release_agent) |
| 1442 | strcpy(root->release_agent_path, opts->release_agent); | 1395 | strcpy(root->release_agent_path, opts->release_agent); |
| 1443 | if (opts->name) | 1396 | if (opts->name) |
| 1444 | strcpy(root->name, opts->name); | 1397 | strcpy(root->name, opts->name); |
| 1445 | if (opts->cpuset_clone_children) | 1398 | if (opts->cpuset_clone_children) |
| 1446 | set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags); | 1399 | set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); |
| 1447 | return root; | ||
| 1448 | } | 1400 | } |
| 1449 | 1401 | ||
| 1450 | static void cgroup_free_root(struct cgroupfs_root *root) | 1402 | static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask) |
| 1451 | { | 1403 | { |
| 1452 | if (root) { | 1404 | LIST_HEAD(tmp_links); |
| 1453 | /* hierarchy ID should already have been released */ | 1405 | struct cgroup *root_cgrp = &root->cgrp; |

| 1454 | WARN_ON_ONCE(root->hierarchy_id); | 1406 | struct css_set *cset; |
| 1455 | 1407 | int i, ret; | |
| 1456 | idr_destroy(&root->cgroup_idr); | ||
| 1457 | kfree(root); | ||
| 1458 | } | ||
| 1459 | } | ||
| 1460 | 1408 | ||
| 1461 | static int cgroup_set_super(struct super_block *sb, void *data) | 1409 | lockdep_assert_held(&cgroup_tree_mutex); |
| 1462 | { | 1410 | lockdep_assert_held(&cgroup_mutex); |
| 1463 | int ret; | ||
| 1464 | struct cgroup_sb_opts *opts = data; | ||
| 1465 | 1411 | ||
| 1466 | /* If we don't have a new root, we can't set up a new sb */ | 1412 | ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); |
| 1467 | if (!opts->new_root) | 1413 | if (ret < 0) |
| 1468 | return -EINVAL; | 1414 | goto out; |
| 1415 | root_cgrp->id = ret; | ||
| 1469 | 1416 | ||
| 1470 | BUG_ON(!opts->subsys_mask && !opts->none); | 1417 | /* |
| 1418 | * We're accessing css_set_count without locking css_set_rwsem here, | ||
| 1419 | * but that's OK - it can only be increased by someone holding | ||
| 1420 | * cgroup_lock, and that's us. The worst that can happen is that we | ||
| 1421 | * have some link structures left over | ||
| 1422 | */ | ||
| 1423 | ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); | ||
| 1424 | if (ret) | ||
| 1425 | goto out; | ||
| 1471 | 1426 | ||
| 1472 | ret = set_anon_super(sb, NULL); | 1427 | ret = cgroup_init_root_id(root); |
| 1473 | if (ret) | 1428 | if (ret) |
| 1474 | return ret; | 1429 | goto out; |
| 1475 | 1430 | ||
| 1476 | sb->s_fs_info = opts->new_root; | 1431 | root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops, |
| 1477 | opts->new_root->sb = sb; | 1432 | KERNFS_ROOT_CREATE_DEACTIVATED, |
| 1433 | root_cgrp); | ||
| 1434 | if (IS_ERR(root->kf_root)) { | ||
| 1435 | ret = PTR_ERR(root->kf_root); | ||
| 1436 | goto exit_root_id; | ||
| 1437 | } | ||
| 1438 | root_cgrp->kn = root->kf_root->kn; | ||
| 1478 | 1439 | ||
| 1479 | sb->s_blocksize = PAGE_CACHE_SIZE; | 1440 | ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true); |
| 1480 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | 1441 | if (ret) |
| 1481 | sb->s_magic = CGROUP_SUPER_MAGIC; | 1442 | goto destroy_root; |
| 1482 | sb->s_op = &cgroup_ops; | ||
| 1483 | 1443 | ||
| 1484 | return 0; | 1444 | ret = rebind_subsystems(root, ss_mask); |
| 1485 | } | 1445 | if (ret) |
| 1446 | goto destroy_root; | ||
| 1486 | 1447 | ||
| 1487 | static int cgroup_get_rootdir(struct super_block *sb) | 1448 | /* |
| 1488 | { | 1449 | * There must be no failure case after here, since rebinding takes |
| 1489 | static const struct dentry_operations cgroup_dops = { | 1450 | * care of subsystems' refcounts, which are explicitly dropped in |
| 1490 | .d_iput = cgroup_diput, | 1451 | * the failure exit path. |
| 1491 | .d_delete = always_delete_dentry, | 1452 | */ |
| 1492 | }; | 1453 | list_add(&root->root_list, &cgroup_roots); |
| 1454 | cgroup_root_count++; | ||
| 1493 | 1455 | ||
| 1494 | struct inode *inode = | 1456 | /* |
| 1495 | cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb); | 1457 | * Link the root cgroup in this hierarchy into all the css_set |
| 1458 | * objects. | ||
| 1459 | */ | ||
| 1460 | down_write(&css_set_rwsem); | ||
| 1461 | hash_for_each(css_set_table, i, cset, hlist) | ||
| 1462 | link_css_set(&tmp_links, cset, root_cgrp); | ||
| 1463 | up_write(&css_set_rwsem); | ||
| 1496 | 1464 | ||
| 1497 | if (!inode) | 1465 | BUG_ON(!list_empty(&root_cgrp->children)); |
| 1498 | return -ENOMEM; | 1466 | BUG_ON(atomic_read(&root->nr_cgrps) != 1); |
| 1499 | 1467 | ||
| 1500 | inode->i_fop = &simple_dir_operations; | 1468 | kernfs_activate(root_cgrp->kn); |
| 1501 | inode->i_op = &cgroup_dir_inode_operations; | 1469 | ret = 0; |
| 1502 | /* directories start off with i_nlink == 2 (for "." entry) */ | 1470 | goto out; |
| 1503 | inc_nlink(inode); | 1471 | |
| 1504 | sb->s_root = d_make_root(inode); | 1472 | destroy_root: |
| 1505 | if (!sb->s_root) | 1473 | kernfs_destroy_root(root->kf_root); |
| 1506 | return -ENOMEM; | 1474 | root->kf_root = NULL; |
| 1507 | /* for everything else we want ->d_op set */ | 1475 | exit_root_id: |
| 1508 | sb->s_d_op = &cgroup_dops; | 1476 | cgroup_exit_root_id(root); |
| 1509 | return 0; | 1477 | out: |
| 1478 | free_cgrp_cset_links(&tmp_links); | ||
| 1479 | return ret; | ||
| 1510 | } | 1480 | } |
| 1511 | 1481 | ||
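cgroup_setup_root() above follows the usual kernel unwind-with-gotos shape: each failure label undoes only the steps that already succeeded, and nothing is allowed to fail once the root is published. A self-contained user-space sketch of that shape, with stand-in step names (none of these helpers exist in the kernel):

#include <stdio.h>

/* stand-ins for the real steps (id allocation, kernfs root, base files) */
static int step_alloc_id(void)     { return 0; }
static int step_create_root(void)  { return 0; }
static int step_populate(void)     { return -1; }	/* pretend this one fails */
static void undo_create_root(void) { puts("destroy_root"); }
static void undo_alloc_id(void)    { puts("exit_root_id"); }

static int setup(void)
{
	int ret;

	ret = step_alloc_id();
	if (ret)
		goto out;
	ret = step_create_root();
	if (ret)
		goto exit_root_id;
	ret = step_populate();
	if (ret)
		goto destroy_root;
	return 0;			/* no failure allowed past this point */

destroy_root:
	undo_create_root();
exit_root_id:
	undo_alloc_id();
out:
	return ret;
}

int main(void)
{
	printf("setup() -> %d\n", setup());
	return 0;
}
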
| 1512 | static struct dentry *cgroup_mount(struct file_system_type *fs_type, | 1482 | static struct dentry *cgroup_mount(struct file_system_type *fs_type, |
| 1513 | int flags, const char *unused_dev_name, | 1483 | int flags, const char *unused_dev_name, |
| 1514 | void *data) | 1484 | void *data) |
| 1515 | { | 1485 | { |
| 1486 | struct cgroup_root *root; | ||
| 1516 | struct cgroup_sb_opts opts; | 1487 | struct cgroup_sb_opts opts; |
| 1517 | struct cgroupfs_root *root; | 1488 | struct dentry *dentry; |
| 1518 | int ret = 0; | 1489 | int ret; |
| 1519 | struct super_block *sb; | 1490 | bool new_sb; |
| 1520 | struct cgroupfs_root *new_root; | ||
| 1521 | struct list_head tmp_links; | ||
| 1522 | struct inode *inode; | ||
| 1523 | const struct cred *cred; | ||
| 1524 | 1491 | ||
| 1525 | /* First find the desired set of subsystems */ | 1492 | /* |
| 1493 | * The first time anyone tries to mount a cgroup, enable the list | ||
| 1494 | * linking each css_set to its tasks and fix up all existing tasks. | ||
| 1495 | */ | ||
| 1496 | if (!use_task_css_set_links) | ||
| 1497 | cgroup_enable_task_cg_lists(); | ||
| 1498 | retry: | ||
| 1499 | mutex_lock(&cgroup_tree_mutex); | ||
| 1526 | mutex_lock(&cgroup_mutex); | 1500 | mutex_lock(&cgroup_mutex); |
| 1501 | |||
| 1502 | /* First find the desired set of subsystems */ | ||
| 1527 | ret = parse_cgroupfs_options(data, &opts); | 1503 | ret = parse_cgroupfs_options(data, &opts); |
| 1528 | mutex_unlock(&cgroup_mutex); | ||
| 1529 | if (ret) | 1504 | if (ret) |
| 1530 | goto out_err; | 1505 | goto out_unlock; |
| 1531 | |||
| 1532 | /* | ||
| 1533 | * Allocate a new cgroup root. We may not need it if we're | ||
| 1534 | * reusing an existing hierarchy. | ||
| 1535 | */ | ||
| 1536 | new_root = cgroup_root_from_opts(&opts); | ||
| 1537 | if (IS_ERR(new_root)) { | ||
| 1538 | ret = PTR_ERR(new_root); | ||
| 1539 | goto out_err; | ||
| 1540 | } | ||
| 1541 | opts.new_root = new_root; | ||
| 1542 | 1506 | ||
| 1543 | /* Locate an existing or new sb for this hierarchy */ | 1507 | /* look for a matching existing root */ |
| 1544 | sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts); | 1508 | if (!opts.subsys_mask && !opts.none && !opts.name) { |
| 1545 | if (IS_ERR(sb)) { | 1509 | cgrp_dfl_root_visible = true; |
| 1546 | ret = PTR_ERR(sb); | 1510 | root = &cgrp_dfl_root; |
| 1547 | cgroup_free_root(opts.new_root); | 1511 | cgroup_get(&root->cgrp); |
| 1548 | goto out_err; | 1512 | ret = 0; |
| 1513 | goto out_unlock; | ||
| 1549 | } | 1514 | } |
| 1550 | 1515 | ||
| 1551 | root = sb->s_fs_info; | 1516 | for_each_root(root) { |
| 1552 | BUG_ON(!root); | 1517 | bool name_match = false; |
| 1553 | if (root == opts.new_root) { | ||
| 1554 | /* We used the new root structure, so this is a new hierarchy */ | ||
| 1555 | struct cgroup *root_cgrp = &root->top_cgroup; | ||
| 1556 | struct cgroupfs_root *existing_root; | ||
| 1557 | int i; | ||
| 1558 | struct css_set *cset; | ||
| 1559 | |||
| 1560 | BUG_ON(sb->s_root != NULL); | ||
| 1561 | 1518 | ||
| 1562 | ret = cgroup_get_rootdir(sb); | 1519 | if (root == &cgrp_dfl_root) |
| 1563 | if (ret) | 1520 | continue; |
| 1564 | goto drop_new_super; | ||
| 1565 | inode = sb->s_root->d_inode; | ||
| 1566 | |||
| 1567 | mutex_lock(&inode->i_mutex); | ||
| 1568 | mutex_lock(&cgroup_mutex); | ||
| 1569 | mutex_lock(&cgroup_root_mutex); | ||
| 1570 | |||
| 1571 | ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); | ||
| 1572 | if (ret < 0) | ||
| 1573 | goto unlock_drop; | ||
| 1574 | root_cgrp->id = ret; | ||
| 1575 | |||
| 1576 | /* Check for name clashes with existing mounts */ | ||
| 1577 | ret = -EBUSY; | ||
| 1578 | if (strlen(root->name)) | ||
| 1579 | for_each_active_root(existing_root) | ||
| 1580 | if (!strcmp(existing_root->name, root->name)) | ||
| 1581 | goto unlock_drop; | ||
| 1582 | |||
| 1583 | /* | ||
| 1584 | * We're accessing css_set_count without locking | ||
| 1585 | * css_set_lock here, but that's OK - it can only be | ||
| 1586 | * increased by someone holding cgroup_lock, and | ||
| 1587 | * that's us. The worst that can happen is that we | ||
| 1588 | * have some link structures left over | ||
| 1589 | */ | ||
| 1590 | ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); | ||
| 1591 | if (ret) | ||
| 1592 | goto unlock_drop; | ||
| 1593 | |||
| 1594 | /* ID 0 is reserved for dummy root, 1 for unified hierarchy */ | ||
| 1595 | ret = cgroup_init_root_id(root, 2, 0); | ||
| 1596 | if (ret) | ||
| 1597 | goto unlock_drop; | ||
| 1598 | |||
| 1599 | sb->s_root->d_fsdata = root_cgrp; | ||
| 1600 | root_cgrp->dentry = sb->s_root; | ||
| 1601 | |||
| 1602 | /* | ||
| 1603 | * We're inside get_sb() and will call lookup_one_len() to | ||
| 1604 | * create the root files, which doesn't work if SELinux is | ||
| 1605 | * in use. The following cred dancing somehow works around | ||
| 1606 | * it. See 2ce9738ba ("cgroupfs: use init_cred when | ||
| 1607 | * populating new cgroupfs mount") for more details. | ||
| 1608 | */ | ||
| 1609 | cred = override_creds(&init_cred); | ||
| 1610 | |||
| 1611 | ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true); | ||
| 1612 | if (ret) | ||
| 1613 | goto rm_base_files; | ||
| 1614 | |||
| 1615 | ret = rebind_subsystems(root, root->subsys_mask, 0); | ||
| 1616 | if (ret) | ||
| 1617 | goto rm_base_files; | ||
| 1618 | |||
| 1619 | revert_creds(cred); | ||
| 1620 | 1521 | ||
| 1621 | /* | 1522 | /* |
| 1622 | * There must be no failure case after here, since rebinding | 1523 | * If we asked for a name then it must match. Also, if |
| 1623 | * takes care of subsystems' refcounts, which are explicitly | 1524 | * name matches but sybsys_mask doesn't, we should fail. |
| 1624 | * dropped in the failure exit path. | 1525 | * Remember whether name matched. |
| 1625 | */ | 1526 | */ |
| 1527 | if (opts.name) { | ||
| 1528 | if (strcmp(opts.name, root->name)) | ||
| 1529 | continue; | ||
| 1530 | name_match = true; | ||
| 1531 | } | ||
| 1626 | 1532 | ||
| 1627 | list_add(&root->root_list, &cgroup_roots); | ||
| 1628 | cgroup_root_count++; | ||
| 1629 | |||
| 1630 | /* Link the top cgroup in this hierarchy into all | ||
| 1631 | * the css_set objects */ | ||
| 1632 | write_lock(&css_set_lock); | ||
| 1633 | hash_for_each(css_set_table, i, cset, hlist) | ||
| 1634 | link_css_set(&tmp_links, cset, root_cgrp); | ||
| 1635 | write_unlock(&css_set_lock); | ||
| 1636 | |||
| 1637 | free_cgrp_cset_links(&tmp_links); | ||
| 1638 | |||
| 1639 | BUG_ON(!list_empty(&root_cgrp->children)); | ||
| 1640 | BUG_ON(root->number_of_cgroups != 1); | ||
| 1641 | |||
| 1642 | mutex_unlock(&cgroup_root_mutex); | ||
| 1643 | mutex_unlock(&cgroup_mutex); | ||
| 1644 | mutex_unlock(&inode->i_mutex); | ||
| 1645 | } else { | ||
| 1646 | /* | 1533 | /* |
| 1647 | * We re-used an existing hierarchy - the new root (if | 1534 | * If we asked for subsystems (or explicitly for no |
| 1648 | * any) is not needed | 1535 | * subsystems) then they must match. |
| 1649 | */ | 1536 | */ |
| 1650 | cgroup_free_root(opts.new_root); | 1537 | if ((opts.subsys_mask || opts.none) && |
| 1538 | (opts.subsys_mask != root->cgrp.subsys_mask)) { | ||
| 1539 | if (!name_match) | ||
| 1540 | continue; | ||
| 1541 | ret = -EBUSY; | ||
| 1542 | goto out_unlock; | ||
| 1543 | } | ||
| 1651 | 1544 | ||
| 1652 | if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) { | 1545 | if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) { |
| 1653 | if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) { | 1546 | if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) { |
| 1654 | pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n"); | 1547 | pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n"); |
| 1655 | ret = -EINVAL; | 1548 | ret = -EINVAL; |
| 1656 | goto drop_new_super; | 1549 | goto out_unlock; |
| 1657 | } else { | 1550 | } else { |
| 1658 | pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n"); | 1551 | pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n"); |
| 1659 | } | 1552 | } |
| 1660 | } | 1553 | } |
| 1661 | } | ||
| 1662 | |||
| 1663 | kfree(opts.release_agent); | ||
| 1664 | kfree(opts.name); | ||
| 1665 | return dget(sb->s_root); | ||
| 1666 | 1554 | ||
| 1667 | rm_base_files: | 1555 | /* |
| 1668 | free_cgrp_cset_links(&tmp_links); | 1556 | * A root's lifetime is governed by its root cgroup. Zero |
| 1669 | cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false); | 1557 | * ref indicates that the root is being destroyed. Wait for |
| 1670 | revert_creds(cred); | 1558 | * destruction to complete so that the subsystems are free. |
| 1671 | unlock_drop: | 1559 | * We can use wait_queue for the wait but this path is |
| 1672 | cgroup_exit_root_id(root); | 1560 | * super cold. Let's just sleep for a bit and retry. |
| 1673 | mutex_unlock(&cgroup_root_mutex); | 1561 | */ |
| 1674 | mutex_unlock(&cgroup_mutex); | 1562 | if (!atomic_inc_not_zero(&root->cgrp.refcnt)) { |
| 1675 | mutex_unlock(&inode->i_mutex); | 1563 | mutex_unlock(&cgroup_mutex); |
| 1676 | drop_new_super: | 1564 | mutex_unlock(&cgroup_tree_mutex); |
| 1677 | deactivate_locked_super(sb); | 1565 | kfree(opts.release_agent); |
| 1678 | out_err: | 1566 | kfree(opts.name); |
| 1679 | kfree(opts.release_agent); | 1567 | msleep(10); |
| 1680 | kfree(opts.name); | 1568 | goto retry; |
| 1681 | return ERR_PTR(ret); | 1569 | } |
| 1682 | } | ||
| 1683 | |||
| 1684 | static void cgroup_kill_sb(struct super_block *sb) | ||
| 1685 | { | ||
| 1686 | struct cgroupfs_root *root = sb->s_fs_info; | ||
| 1687 | struct cgroup *cgrp = &root->top_cgroup; | ||
| 1688 | struct cgrp_cset_link *link, *tmp_link; | ||
| 1689 | int ret; | ||
| 1690 | |||
| 1691 | BUG_ON(!root); | ||
| 1692 | |||
| 1693 | BUG_ON(root->number_of_cgroups != 1); | ||
| 1694 | BUG_ON(!list_empty(&cgrp->children)); | ||
| 1695 | |||
| 1696 | mutex_lock(&cgrp->dentry->d_inode->i_mutex); | ||
| 1697 | mutex_lock(&cgroup_mutex); | ||
| 1698 | mutex_lock(&cgroup_root_mutex); | ||
| 1699 | 1570 | ||
| 1700 | /* Rebind all subsystems back to the default hierarchy */ | 1571 | ret = 0; |
| 1701 | if (root->flags & CGRP_ROOT_SUBSYS_BOUND) { | 1572 | goto out_unlock; |
| 1702 | ret = rebind_subsystems(root, 0, root->subsys_mask); | ||
| 1703 | /* Shouldn't be able to fail ... */ | ||
| 1704 | BUG_ON(ret); | ||
| 1705 | } | 1573 | } |
| 1706 | 1574 | ||
| 1707 | /* | 1575 | /* |
| 1708 | * Release all the links from cset_links to this hierarchy's | 1576 | * No such thing, create a new one. name= matching without subsys |
| 1709 | * root cgroup | 1577 | * specification is allowed for already existing hierarchies but we |
| 1578 | * can't create new one without subsys specification. | ||
| 1710 | */ | 1579 | */ |
| 1711 | write_lock(&css_set_lock); | 1580 | if (!opts.subsys_mask && !opts.none) { |
| 1712 | 1581 | ret = -EINVAL; | |
| 1713 | list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { | 1582 | goto out_unlock; |
| 1714 | list_del(&link->cset_link); | ||
| 1715 | list_del(&link->cgrp_link); | ||
| 1716 | kfree(link); | ||
| 1717 | } | 1583 | } |
| 1718 | write_unlock(&css_set_lock); | ||
| 1719 | 1584 | ||
| 1720 | if (!list_empty(&root->root_list)) { | 1585 | root = kzalloc(sizeof(*root), GFP_KERNEL); |
| 1721 | list_del(&root->root_list); | 1586 | if (!root) { |
| 1722 | cgroup_root_count--; | 1587 | ret = -ENOMEM; |
| 1588 | goto out_unlock; | ||
| 1723 | } | 1589 | } |
| 1724 | 1590 | ||
| 1725 | cgroup_exit_root_id(root); | 1591 | init_cgroup_root(root, &opts); |
| 1592 | |||
| 1593 | ret = cgroup_setup_root(root, opts.subsys_mask); | ||
| 1594 | if (ret) | ||
| 1595 | cgroup_free_root(root); | ||
| 1726 | 1596 | ||
| 1727 | mutex_unlock(&cgroup_root_mutex); | 1597 | out_unlock: |
| 1728 | mutex_unlock(&cgroup_mutex); | 1598 | mutex_unlock(&cgroup_mutex); |
| 1729 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | 1599 | mutex_unlock(&cgroup_tree_mutex); |
| 1600 | |||
| 1601 | kfree(opts.release_agent); | ||
| 1602 | kfree(opts.name); | ||
| 1730 | 1603 | ||
| 1731 | simple_xattrs_free(&cgrp->xattrs); | 1604 | if (ret) |
| 1605 | return ERR_PTR(ret); | ||
| 1732 | 1606 | ||
| 1733 | kill_litter_super(sb); | 1607 | dentry = kernfs_mount(fs_type, flags, root->kf_root, &new_sb); |
| 1734 | cgroup_free_root(root); | 1608 | if (IS_ERR(dentry) || !new_sb) |
| 1609 | cgroup_put(&root->cgrp); | ||
| 1610 | return dentry; | ||
| 1611 | } | ||
| 1612 | |||
| 1613 | static void cgroup_kill_sb(struct super_block *sb) | ||
| 1614 | { | ||
| 1615 | struct kernfs_root *kf_root = kernfs_root_from_sb(sb); | ||
| 1616 | struct cgroup_root *root = cgroup_root_from_kf(kf_root); | ||
| 1617 | |||
| 1618 | cgroup_put(&root->cgrp); | ||
| 1619 | kernfs_kill_sb(sb); | ||
| 1735 | } | 1620 | } |
| 1736 | 1621 | ||
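The matching loop in cgroup_mount() above is what decides whether a mount(2) call reuses an existing hierarchy or sets up a fresh cgroup_root. A user-space sketch against the v1 interface (the mount point, name and controller list are only examples; the call needs root privileges and an existing directory):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/*
	 * Same name= and subsystem list as an existing hierarchy -> reuse it;
	 * a conflicting combination -> -EBUSY; an unused one -> new root.
	 */
	if (mount("cgroup", "/sys/fs/cgroup/cpu_mem", "cgroup", 0,
		  "name=demo,cpu,memory") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}
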
| 1737 | static struct file_system_type cgroup_fs_type = { | 1622 | static struct file_system_type cgroup_fs_type = { |
| @@ -1743,57 +1628,6 @@ static struct file_system_type cgroup_fs_type = { | |||
| 1743 | static struct kobject *cgroup_kobj; | 1628 | static struct kobject *cgroup_kobj; |
| 1744 | 1629 | ||
| 1745 | /** | 1630 | /** |
| 1746 | * cgroup_path - generate the path of a cgroup | ||
| 1747 | * @cgrp: the cgroup in question | ||
| 1748 | * @buf: the buffer to write the path into | ||
| 1749 | * @buflen: the length of the buffer | ||
| 1750 | * | ||
| 1751 | * Writes path of cgroup into buf. Returns 0 on success, -errno on error. | ||
| 1752 | * | ||
| 1753 | * We can't generate cgroup path using dentry->d_name, as accessing | ||
| 1754 | * dentry->name must be protected by irq-unsafe dentry->d_lock or parent | ||
| 1755 | * inode's i_mutex, while on the other hand cgroup_path() can be called | ||
| 1756 | * with some irq-safe spinlocks held. | ||
| 1757 | */ | ||
| 1758 | int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) | ||
| 1759 | { | ||
| 1760 | int ret = -ENAMETOOLONG; | ||
| 1761 | char *start; | ||
| 1762 | |||
| 1763 | if (!cgrp->parent) { | ||
| 1764 | if (strlcpy(buf, "/", buflen) >= buflen) | ||
| 1765 | return -ENAMETOOLONG; | ||
| 1766 | return 0; | ||
| 1767 | } | ||
| 1768 | |||
| 1769 | start = buf + buflen - 1; | ||
| 1770 | *start = '\0'; | ||
| 1771 | |||
| 1772 | rcu_read_lock(); | ||
| 1773 | do { | ||
| 1774 | const char *name = cgroup_name(cgrp); | ||
| 1775 | int len; | ||
| 1776 | |||
| 1777 | len = strlen(name); | ||
| 1778 | if ((start -= len) < buf) | ||
| 1779 | goto out; | ||
| 1780 | memcpy(start, name, len); | ||
| 1781 | |||
| 1782 | if (--start < buf) | ||
| 1783 | goto out; | ||
| 1784 | *start = '/'; | ||
| 1785 | |||
| 1786 | cgrp = cgrp->parent; | ||
| 1787 | } while (cgrp->parent); | ||
| 1788 | ret = 0; | ||
| 1789 | memmove(buf, start, buf + buflen - start); | ||
| 1790 | out: | ||
| 1791 | rcu_read_unlock(); | ||
| 1792 | return ret; | ||
| 1793 | } | ||
| 1794 | EXPORT_SYMBOL_GPL(cgroup_path); | ||
| 1795 | |||
| 1796 | /** | ||
| 1797 | * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy | 1631 | * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy |
| 1798 | * @task: target task | 1632 | * @task: target task |
| 1799 | * @buf: the buffer to write the path into | 1633 | * @buf: the buffer to write the path into |
| @@ -1804,49 +1638,55 @@ EXPORT_SYMBOL_GPL(cgroup_path); | |||
| 1804 | * function grabs cgroup_mutex and shouldn't be used inside locks used by | 1638 | * function grabs cgroup_mutex and shouldn't be used inside locks used by |
| 1805 | * cgroup controller callbacks. | 1639 | * cgroup controller callbacks. |
| 1806 | * | 1640 | * |
| 1807 | * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short. | 1641 | * Return value is the same as kernfs_path(). |
| 1808 | */ | 1642 | */ |
| 1809 | int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) | 1643 | char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) |
| 1810 | { | 1644 | { |
| 1811 | struct cgroupfs_root *root; | 1645 | struct cgroup_root *root; |
| 1812 | struct cgroup *cgrp; | 1646 | struct cgroup *cgrp; |
| 1813 | int hierarchy_id = 1, ret = 0; | 1647 | int hierarchy_id = 1; |
| 1814 | 1648 | char *path = NULL; | |
| 1815 | if (buflen < 2) | ||
| 1816 | return -ENAMETOOLONG; | ||
| 1817 | 1649 | ||
| 1818 | mutex_lock(&cgroup_mutex); | 1650 | mutex_lock(&cgroup_mutex); |
| 1651 | down_read(&css_set_rwsem); | ||
| 1819 | 1652 | ||
| 1820 | root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); | 1653 | root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); |
| 1821 | 1654 | ||
| 1822 | if (root) { | 1655 | if (root) { |
| 1823 | cgrp = task_cgroup_from_root(task, root); | 1656 | cgrp = task_cgroup_from_root(task, root); |
| 1824 | ret = cgroup_path(cgrp, buf, buflen); | 1657 | path = cgroup_path(cgrp, buf, buflen); |
| 1825 | } else { | 1658 | } else { |
| 1826 | /* if no hierarchy exists, everyone is in "/" */ | 1659 | /* if no hierarchy exists, everyone is in "/" */ |
| 1827 | memcpy(buf, "/", 2); | 1660 | if (strlcpy(buf, "/", buflen) < buflen) |
| 1661 | path = buf; | ||
| 1828 | } | 1662 | } |
| 1829 | 1663 | ||
| 1664 | up_read(&css_set_rwsem); | ||
| 1830 | mutex_unlock(&cgroup_mutex); | 1665 | mutex_unlock(&cgroup_mutex); |
| 1831 | return ret; | 1666 | return path; |
| 1832 | } | 1667 | } |
| 1833 | EXPORT_SYMBOL_GPL(task_cgroup_path); | 1668 | EXPORT_SYMBOL_GPL(task_cgroup_path); |
| 1834 | 1669 | ||
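With this change task_cgroup_path() follows the kernfs_path() convention: it returns a pointer into @buf on success and NULL when the path does not fit, instead of 0/-ENAMETOOLONG. An illustrative kernel-side caller (hypothetical helper, process context only, since the function takes cgroup_mutex):

/* illustrative caller, not part of this patch */
#include <linux/cgroup.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <linux/printk.h>

static void report_first_hierarchy_path(struct task_struct *task)
{
	char *buf, *path;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return;

	path = task_cgroup_path(task, buf, PATH_MAX);
	if (path)
		pr_info("pid %d is in %s\n", task_pid_nr(task), path);
	else
		pr_warn("pid %d: cgroup path did not fit\n", task_pid_nr(task));

	kfree(buf);
}
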
| 1835 | /* | 1670 | /* used to track tasks and other necessary states during migration */ |
| 1836 | * Control Group taskset | ||
| 1837 | */ | ||
| 1838 | struct task_and_cgroup { | ||
| 1839 | struct task_struct *task; | ||
| 1840 | struct cgroup *cgrp; | ||
| 1841 | struct css_set *cset; | ||
| 1842 | }; | ||
| 1843 | |||
| 1844 | struct cgroup_taskset { | 1671 | struct cgroup_taskset { |
| 1845 | struct task_and_cgroup single; | 1672 | /* the src and dst cset list running through cset->mg_node */ |
| 1846 | struct flex_array *tc_array; | 1673 | struct list_head src_csets; |
| 1847 | int tc_array_len; | 1674 | struct list_head dst_csets; |
| 1848 | int idx; | 1675 | |
| 1849 | struct cgroup *cur_cgrp; | 1676 | /* |
| 1677 | * Fields for cgroup_taskset_*() iteration. | ||
| 1678 | * | ||
| 1679 | * Before migration is committed, the target migration tasks are on | ||
| 1680 | * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of | ||
| 1681 | * the csets on ->dst_csets. ->csets point to either ->src_csets | ||
| 1682 | * or ->dst_csets depending on whether migration is committed. | ||
| 1683 | * | ||
| 1684 | * ->cur_cset and ->cur_task point to the current task position | ||
| 1685 | * during iteration. | ||
| 1686 | */ | ||
| 1687 | struct list_head *csets; | ||
| 1688 | struct css_set *cur_cset; | ||
| 1689 | struct task_struct *cur_task; | ||
| 1850 | }; | 1690 | }; |
| 1851 | 1691 | ||
| 1852 | /** | 1692 | /** |
| @@ -1857,15 +1697,11 @@ struct cgroup_taskset { | |||
| 1857 | */ | 1697 | */ |
| 1858 | struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) | 1698 | struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) |
| 1859 | { | 1699 | { |
| 1860 | if (tset->tc_array) { | 1700 | tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node); |
| 1861 | tset->idx = 0; | 1701 | tset->cur_task = NULL; |
| 1862 | return cgroup_taskset_next(tset); | 1702 | |
| 1863 | } else { | 1703 | return cgroup_taskset_next(tset); |
| 1864 | tset->cur_cgrp = tset->single.cgrp; | ||
| 1865 | return tset->single.task; | ||
| 1866 | } | ||
| 1867 | } | 1704 | } |
| 1868 | EXPORT_SYMBOL_GPL(cgroup_taskset_first); | ||
| 1869 | 1705 | ||
| 1870 | /** | 1706 | /** |
| 1871 | * cgroup_taskset_next - iterate to the next task in taskset | 1707 | * cgroup_taskset_next - iterate to the next task in taskset |
| @@ -1876,48 +1712,36 @@ EXPORT_SYMBOL_GPL(cgroup_taskset_first); | |||
| 1876 | */ | 1712 | */ |
| 1877 | struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) | 1713 | struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) |
| 1878 | { | 1714 | { |
| 1879 | struct task_and_cgroup *tc; | 1715 | struct css_set *cset = tset->cur_cset; |
| 1716 | struct task_struct *task = tset->cur_task; | ||
| 1880 | 1717 | ||
| 1881 | if (!tset->tc_array || tset->idx >= tset->tc_array_len) | 1718 | while (&cset->mg_node != tset->csets) { |
| 1882 | return NULL; | 1719 | if (!task) |
| 1720 | task = list_first_entry(&cset->mg_tasks, | ||
| 1721 | struct task_struct, cg_list); | ||
| 1722 | else | ||
| 1723 | task = list_next_entry(task, cg_list); | ||
| 1883 | 1724 | ||
| 1884 | tc = flex_array_get(tset->tc_array, tset->idx++); | 1725 | if (&task->cg_list != &cset->mg_tasks) { |
| 1885 | tset->cur_cgrp = tc->cgrp; | 1726 | tset->cur_cset = cset; |
| 1886 | return tc->task; | 1727 | tset->cur_task = task; |
| 1887 | } | 1728 | return task; |
| 1888 | EXPORT_SYMBOL_GPL(cgroup_taskset_next); | 1729 | } |
| 1889 | 1730 | ||
| 1890 | /** | 1731 | cset = list_next_entry(cset, mg_node); |
| 1891 | * cgroup_taskset_cur_css - return the matching css for the current task | 1732 | task = NULL; |
| 1892 | * @tset: taskset of interest | 1733 | } |
| 1893 | * @subsys_id: the ID of the target subsystem | ||
| 1894 | * | ||
| 1895 | * Return the css for the current (last returned) task of @tset for | ||
| 1896 | * subsystem specified by @subsys_id. This function must be preceded by | ||
| 1897 | * either cgroup_taskset_first() or cgroup_taskset_next(). | ||
| 1898 | */ | ||
| 1899 | struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset, | ||
| 1900 | int subsys_id) | ||
| 1901 | { | ||
| 1902 | return cgroup_css(tset->cur_cgrp, cgroup_subsys[subsys_id]); | ||
| 1903 | } | ||
| 1904 | EXPORT_SYMBOL_GPL(cgroup_taskset_cur_css); | ||
| 1905 | 1734 | ||
| 1906 | /** | 1735 | return NULL; |
| 1907 | * cgroup_taskset_size - return the number of tasks in taskset | ||
| 1908 | * @tset: taskset of interest | ||
| 1909 | */ | ||
| 1910 | int cgroup_taskset_size(struct cgroup_taskset *tset) | ||
| 1911 | { | ||
| 1912 | return tset->tc_array ? tset->tc_array_len : 1; | ||
| 1913 | } | 1736 | } |
| 1914 | EXPORT_SYMBOL_GPL(cgroup_taskset_size); | ||
| 1915 | |||
| 1916 | 1737 | ||
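Controllers walk the taskset above with cgroup_taskset_first()/cgroup_taskset_next(); the thread group leader is guaranteed to come back first. A hypothetical ->can_attach() callback showing only the iteration (the PF_KTHREAD veto is just an example policy, not from this patch):

#include <linux/cgroup.h>
#include <linux/sched.h>

/* hypothetical controller callback illustrating the iteration API */
static int demo_can_attach(struct cgroup_subsys_state *css,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;

	for (task = cgroup_taskset_first(tset); task;
	     task = cgroup_taskset_next(tset)) {
		/* e.g. refuse to pull kernel threads into this cgroup */
		if (task->flags & PF_KTHREAD)
			return -EINVAL;
	}
	return 0;
}
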
| 1917 | /* | 1738 | /** |
| 1918 | * cgroup_task_migrate - move a task from one cgroup to another. | 1739 | * cgroup_task_migrate - move a task from one cgroup to another. |
| 1740 | * @old_cgrp: the cgroup @tsk is being migrated from | ||
| 1741 | * @tsk: the task being migrated | ||
| 1742 | * @new_cset: the new css_set @tsk is being attached to | ||
| 1919 | * | 1743 | * |
| 1920 | * Must be called with cgroup_mutex and threadgroup locked. | 1744 | * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked. |
| 1921 | */ | 1745 | */ |
| 1922 | static void cgroup_task_migrate(struct cgroup *old_cgrp, | 1746 | static void cgroup_task_migrate(struct cgroup *old_cgrp, |
| 1923 | struct task_struct *tsk, | 1747 | struct task_struct *tsk, |
| @@ -1925,6 +1749,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, | |||
| 1925 | { | 1749 | { |
| 1926 | struct css_set *old_cset; | 1750 | struct css_set *old_cset; |
| 1927 | 1751 | ||
| 1752 | lockdep_assert_held(&cgroup_mutex); | ||
| 1753 | lockdep_assert_held(&css_set_rwsem); | ||
| 1754 | |||
| 1928 | /* | 1755 | /* |
| 1929 | * We are synchronized through threadgroup_lock() against PF_EXITING | 1756 | * We are synchronized through threadgroup_lock() against PF_EXITING |
| 1930 | * setting such that we can't race against cgroup_exit() changing the | 1757 | * setting such that we can't race against cgroup_exit() changing the |
| @@ -1933,15 +1760,16 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, | |||
| 1933 | WARN_ON_ONCE(tsk->flags & PF_EXITING); | 1760 | WARN_ON_ONCE(tsk->flags & PF_EXITING); |
| 1934 | old_cset = task_css_set(tsk); | 1761 | old_cset = task_css_set(tsk); |
| 1935 | 1762 | ||
| 1936 | task_lock(tsk); | 1763 | get_css_set(new_cset); |
| 1937 | rcu_assign_pointer(tsk->cgroups, new_cset); | 1764 | rcu_assign_pointer(tsk->cgroups, new_cset); |
| 1938 | task_unlock(tsk); | ||
| 1939 | 1765 | ||
| 1940 | /* Update the css_set linked lists if we're using them */ | 1766 | /* |
| 1941 | write_lock(&css_set_lock); | 1767 | * Use move_tail so that cgroup_taskset_first() still returns the |
| 1942 | if (!list_empty(&tsk->cg_list)) | 1768 | * leader after migration. This works because cgroup_migrate() |
| 1943 | list_move(&tsk->cg_list, &new_cset->tasks); | 1769 | * ensures that the dst_cset of the leader is the first on the |
| 1944 | write_unlock(&css_set_lock); | 1770 | * tset's dst_csets list. |
| 1771 | */ | ||
| 1772 | list_move_tail(&tsk->cg_list, &new_cset->mg_tasks); | ||
| 1945 | 1773 | ||
| 1946 | /* | 1774 | /* |
| 1947 | * We just gained a reference on old_cset by taking it from the | 1775 | * We just gained a reference on old_cset by taking it from the |
| @@ -1949,100 +1777,199 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, | |||
| 1949 | * we're safe to drop it here; it will be freed under RCU. | 1777 | * we're safe to drop it here; it will be freed under RCU. |
| 1950 | */ | 1778 | */ |
| 1951 | set_bit(CGRP_RELEASABLE, &old_cgrp->flags); | 1779 | set_bit(CGRP_RELEASABLE, &old_cgrp->flags); |
| 1952 | put_css_set(old_cset); | 1780 | put_css_set_locked(old_cset, false); |
| 1953 | } | 1781 | } |
| 1954 | 1782 | ||
| 1955 | /** | 1783 | /** |
| 1956 | * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup | 1784 | * cgroup_migrate_finish - cleanup after attach |
| 1957 | * @cgrp: the cgroup to attach to | 1785 | * @preloaded_csets: list of preloaded css_sets |
| 1958 | * @tsk: the task or the leader of the threadgroup to be attached | ||
| 1959 | * @threadgroup: attach the whole threadgroup? | ||
| 1960 | * | 1786 | * |
| 1961 | * Call holding cgroup_mutex and the group_rwsem of the leader. Will take | 1787 | * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See |
| 1962 | * task_lock of @tsk or each thread in the threadgroup individually in turn. | 1788 | * those functions for details. |
| 1963 | */ | 1789 | */ |
| 1964 | static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, | 1790 | static void cgroup_migrate_finish(struct list_head *preloaded_csets) |
| 1965 | bool threadgroup) | ||
| 1966 | { | 1791 | { |
| 1967 | int retval, i, group_size; | 1792 | struct css_set *cset, *tmp_cset; |
| 1968 | struct cgroupfs_root *root = cgrp->root; | ||
| 1969 | struct cgroup_subsys_state *css, *failed_css = NULL; | ||
| 1970 | /* threadgroup list cursor and array */ | ||
| 1971 | struct task_struct *leader = tsk; | ||
| 1972 | struct task_and_cgroup *tc; | ||
| 1973 | struct flex_array *group; | ||
| 1974 | struct cgroup_taskset tset = { }; | ||
| 1975 | 1793 | ||
| 1976 | /* | 1794 | lockdep_assert_held(&cgroup_mutex); |
| 1977 | * step 0: in order to do expensive, possibly blocking operations for | 1795 | |
| 1978 | * every thread, we cannot iterate the thread group list, since it needs | 1796 | down_write(&css_set_rwsem); |
| 1979 | * rcu or tasklist locked. instead, build an array of all threads in the | 1797 | list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) { |
| 1980 | * group - group_rwsem prevents new threads from appearing, and if | 1798 | cset->mg_src_cgrp = NULL; |
| 1981 | * threads exit, this will just be an over-estimate. | 1799 | cset->mg_dst_cset = NULL; |
| 1982 | */ | 1800 | list_del_init(&cset->mg_preload_node); |
| 1983 | if (threadgroup) | 1801 | put_css_set_locked(cset, false); |
| 1984 | group_size = get_nr_threads(tsk); | 1802 | } |
| 1985 | else | 1803 | up_write(&css_set_rwsem); |
| 1986 | group_size = 1; | 1804 | } |
| 1987 | /* flex_array supports very large thread-groups better than kmalloc. */ | 1805 | |
| 1988 | group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL); | 1806 | /** |
| 1989 | if (!group) | 1807 | * cgroup_migrate_add_src - add a migration source css_set |
| 1990 | return -ENOMEM; | 1808 | * @src_cset: the source css_set to add |
| 1991 | /* pre-allocate to guarantee space while iterating in rcu read-side. */ | 1809 | * @dst_cgrp: the destination cgroup |
| 1992 | retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL); | 1810 | * @preloaded_csets: list of preloaded css_sets |
| 1993 | if (retval) | 1811 | * |
| 1994 | goto out_free_group_list; | 1812 | * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin |
| 1813 | * @src_cset and add it to @preloaded_csets, which should later be cleaned | ||
| 1814 | * up by cgroup_migrate_finish(). | ||
| 1815 | * | ||
| 1816 | * This function may be called without holding threadgroup_lock even if the | ||
| 1817 | * target is a process. Threads may be created and destroyed but as long | ||
| 1818 | * as cgroup_mutex is not dropped, no new css_set can be put into play and | ||
| 1819 | * the preloaded css_sets are guaranteed to cover all migrations. | ||
| 1820 | */ | ||
| 1821 | static void cgroup_migrate_add_src(struct css_set *src_cset, | ||
| 1822 | struct cgroup *dst_cgrp, | ||
| 1823 | struct list_head *preloaded_csets) | ||
| 1824 | { | ||
| 1825 | struct cgroup *src_cgrp; | ||
| 1826 | |||
| 1827 | lockdep_assert_held(&cgroup_mutex); | ||
| 1828 | lockdep_assert_held(&css_set_rwsem); | ||
| 1829 | |||
| 1830 | src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root); | ||
| 1831 | |||
| 1832 | /* nothing to do if this cset already belongs to the cgroup */ | ||
| 1833 | if (src_cgrp == dst_cgrp) | ||
| 1834 | return; | ||
| 1835 | |||
| 1836 | if (!list_empty(&src_cset->mg_preload_node)) | ||
| 1837 | return; | ||
| 1838 | |||
| 1839 | WARN_ON(src_cset->mg_src_cgrp); | ||
| 1840 | WARN_ON(!list_empty(&src_cset->mg_tasks)); | ||
| 1841 | WARN_ON(!list_empty(&src_cset->mg_node)); | ||
| 1842 | |||
| 1843 | src_cset->mg_src_cgrp = src_cgrp; | ||
| 1844 | get_css_set(src_cset); | ||
| 1845 | list_add(&src_cset->mg_preload_node, preloaded_csets); | ||
| 1846 | } | ||
| 1847 | |||
| 1848 | /** | ||
| 1849 | * cgroup_migrate_prepare_dst - prepare destination css_sets for migration | ||
| 1850 | * @dst_cgrp: the destination cgroup | ||
| 1851 | * @preloaded_csets: list of preloaded source css_sets | ||
| 1852 | * | ||
| 1853 | * Tasks are about to be moved to @dst_cgrp and all the source css_sets | ||
| 1854 | * have been preloaded to @preloaded_csets. This function looks up and | ||
| 1855 | * pins all destination css_sets, links each to its source, and put them on | ||
| 1856 | * @preloaded_csets. | ||
| 1857 | * | ||
| 1858 | * This function must be called after cgroup_migrate_add_src() has been | ||
| 1859 | * called on each migration source css_set. After migration is performed | ||
| 1860 | * using cgroup_migrate(), cgroup_migrate_finish() must be called on | ||
| 1861 | * @preloaded_csets. | ||
| 1862 | */ | ||
| 1863 | static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp, | ||
| 1864 | struct list_head *preloaded_csets) | ||
| 1865 | { | ||
| 1866 | LIST_HEAD(csets); | ||
| 1867 | struct css_set *src_cset; | ||
| 1868 | |||
| 1869 | lockdep_assert_held(&cgroup_mutex); | ||
| 1870 | |||
| 1871 | /* look up the dst cset for each src cset and link it to src */ | ||
| 1872 | list_for_each_entry(src_cset, preloaded_csets, mg_preload_node) { | ||
| 1873 | struct css_set *dst_cset; | ||
| 1874 | |||
| 1875 | dst_cset = find_css_set(src_cset, dst_cgrp); | ||
| 1876 | if (!dst_cset) | ||
| 1877 | goto err; | ||
| 1878 | |||
| 1879 | WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset); | ||
| 1880 | src_cset->mg_dst_cset = dst_cset; | ||
| 1881 | |||
| 1882 | if (list_empty(&dst_cset->mg_preload_node)) | ||
| 1883 | list_add(&dst_cset->mg_preload_node, &csets); | ||
| 1884 | else | ||
| 1885 | put_css_set(dst_cset, false); | ||
| 1886 | } | ||
| 1887 | |||
| 1888 | list_splice(&csets, preloaded_csets); | ||
| 1889 | return 0; | ||
| 1890 | err: | ||
| 1891 | cgroup_migrate_finish(&csets); | ||
| 1892 | return -ENOMEM; | ||
| 1893 | } | ||
| 1894 | |||
| 1895 | /** | ||
| 1896 | * cgroup_migrate - migrate a process or task to a cgroup | ||
| 1897 | * @cgrp: the destination cgroup | ||
| 1898 | * @leader: the leader of the process or the task to migrate | ||
| 1899 | * @threadgroup: whether @leader points to the whole process or a single task | ||
| 1900 | * | ||
| 1901 | * Migrate a process or task denoted by @leader to @cgrp. If migrating a | ||
| 1902 | * process, the caller must be holding threadgroup_lock of @leader. The | ||
| 1903 | * caller is also responsible for invoking cgroup_migrate_add_src() and | ||
| 1904 | * cgroup_migrate_prepare_dst() on the targets before invoking this | ||
| 1905 | * function and following up with cgroup_migrate_finish(). | ||
| 1906 | * | ||
| 1907 | * As long as a controller's ->can_attach() doesn't fail, this function is | ||
| 1908 | * guaranteed to succeed. This means that, excluding ->can_attach() | ||
| 1909 | * failure, when migrating multiple targets, the success or failure can be | ||
| 1910 | * decided for all targets by invoking group_migrate_prepare_dst() before | ||
| 1911 | * actually starting migrating. | ||
| 1912 | */ | ||
| 1913 | static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader, | ||
| 1914 | bool threadgroup) | ||
| 1915 | { | ||
| 1916 | struct cgroup_taskset tset = { | ||
| 1917 | .src_csets = LIST_HEAD_INIT(tset.src_csets), | ||
| 1918 | .dst_csets = LIST_HEAD_INIT(tset.dst_csets), | ||
| 1919 | .csets = &tset.src_csets, | ||
| 1920 | }; | ||
| 1921 | struct cgroup_subsys_state *css, *failed_css = NULL; | ||
| 1922 | struct css_set *cset, *tmp_cset; | ||
| 1923 | struct task_struct *task, *tmp_task; | ||
| 1924 | int i, ret; | ||
| 1995 | 1925 | ||
| 1996 | i = 0; | ||
| 1997 | /* | 1926 | /* |
| 1998 | * Prevent freeing of tasks while we take a snapshot. Tasks that are | 1927 | * Prevent freeing of tasks while we take a snapshot. Tasks that are |
| 1999 | * already PF_EXITING could be freed from underneath us unless we | 1928 | * already PF_EXITING could be freed from underneath us unless we |
| 2000 | * take an rcu_read_lock. | 1929 | * take an rcu_read_lock. |
| 2001 | */ | 1930 | */ |
| 1931 | down_write(&css_set_rwsem); | ||
| 2002 | rcu_read_lock(); | 1932 | rcu_read_lock(); |
| 1933 | task = leader; | ||
| 2003 | do { | 1934 | do { |
| 2004 | struct task_and_cgroup ent; | 1935 | /* @task either already exited or can't exit until the end */ |
| 1936 | if (task->flags & PF_EXITING) | ||
| 1937 | goto next; | ||
| 2005 | 1938 | ||
| 2006 | /* @tsk either already exited or can't exit until the end */ | 1939 | /* leave @task alone if post_fork() hasn't linked it yet */ |
| 2007 | if (tsk->flags & PF_EXITING) | 1940 | if (list_empty(&task->cg_list)) |
| 2008 | goto next; | 1941 | goto next; |
| 2009 | 1942 | ||
| 2010 | /* as per above, nr_threads may decrease, but not increase. */ | 1943 | cset = task_css_set(task); |
| 2011 | BUG_ON(i >= group_size); | 1944 | if (!cset->mg_src_cgrp) |
| 2012 | ent.task = tsk; | ||
| 2013 | ent.cgrp = task_cgroup_from_root(tsk, root); | ||
| 2014 | /* nothing to do if this task is already in the cgroup */ | ||
| 2015 | if (ent.cgrp == cgrp) | ||
| 2016 | goto next; | 1945 | goto next; |
| 1946 | |||
| 2017 | /* | 1947 | /* |
| 2018 | * saying GFP_ATOMIC has no effect here because we did prealloc | 1948 | * cgroup_taskset_first() must always return the leader. |
| 2019 | * earlier, but it's good form to communicate our expectations. | 1949 | * Take care to avoid disturbing the ordering. |
| 2020 | */ | 1950 | */ |
| 2021 | retval = flex_array_put(group, i, &ent, GFP_ATOMIC); | 1951 | list_move_tail(&task->cg_list, &cset->mg_tasks); |
| 2022 | BUG_ON(retval != 0); | 1952 | if (list_empty(&cset->mg_node)) |
| 2023 | i++; | 1953 | list_add_tail(&cset->mg_node, &tset.src_csets); |
| 1954 | if (list_empty(&cset->mg_dst_cset->mg_node)) | ||
| 1955 | list_move_tail(&cset->mg_dst_cset->mg_node, | ||
| 1956 | &tset.dst_csets); | ||
| 2024 | next: | 1957 | next: |
| 2025 | if (!threadgroup) | 1958 | if (!threadgroup) |
| 2026 | break; | 1959 | break; |
| 2027 | } while_each_thread(leader, tsk); | 1960 | } while_each_thread(leader, task); |
| 2028 | rcu_read_unlock(); | 1961 | rcu_read_unlock(); |
| 2029 | /* remember the number of threads in the array for later. */ | 1962 | up_write(&css_set_rwsem); |
| 2030 | group_size = i; | ||
| 2031 | tset.tc_array = group; | ||
| 2032 | tset.tc_array_len = group_size; | ||
| 2033 | 1963 | ||
| 2034 | /* methods shouldn't be called if no task is actually migrating */ | 1964 | /* methods shouldn't be called if no task is actually migrating */ |
| 2035 | retval = 0; | 1965 | if (list_empty(&tset.src_csets)) |
| 2036 | if (!group_size) | 1966 | return 0; |
| 2037 | goto out_free_group_list; | ||
| 2038 | 1967 | ||
| 2039 | /* | 1968 | /* check that we can legitimately attach to the cgroup */ |
| 2040 | * step 1: check that we can legitimately attach to the cgroup. | ||
| 2041 | */ | ||
| 2042 | for_each_css(css, i, cgrp) { | 1969 | for_each_css(css, i, cgrp) { |
| 2043 | if (css->ss->can_attach) { | 1970 | if (css->ss->can_attach) { |
| 2044 | retval = css->ss->can_attach(css, &tset); | 1971 | ret = css->ss->can_attach(css, &tset); |
| 2045 | if (retval) { | 1972 | if (ret) { |
| 2046 | failed_css = css; | 1973 | failed_css = css; |
| 2047 | goto out_cancel_attach; | 1974 | goto out_cancel_attach; |
| 2048 | } | 1975 | } |
| @@ -2050,70 +1977,91 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, | |||
| 2050 | } | 1977 | } |
| 2051 | 1978 | ||
| 2052 | /* | 1979 | /* |
| 2053 | * step 2: make sure css_sets exist for all threads to be migrated. | 1980 | * Now that we're guaranteed success, proceed to move all tasks to |
| 2054 | * we use find_css_set, which allocates a new one if necessary. | 1981 | * the new cgroup. There are no failure cases after here, so this |
| 1982 | * is the commit point. | ||
| 2055 | */ | 1983 | */ |
| 2056 | for (i = 0; i < group_size; i++) { | 1984 | down_write(&css_set_rwsem); |
| 2057 | struct css_set *old_cset; | 1985 | list_for_each_entry(cset, &tset.src_csets, mg_node) { |
| 2058 | 1986 | list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) | |
| 2059 | tc = flex_array_get(group, i); | 1987 | cgroup_task_migrate(cset->mg_src_cgrp, task, |
| 2060 | old_cset = task_css_set(tc->task); | 1988 | cset->mg_dst_cset); |
| 2061 | tc->cset = find_css_set(old_cset, cgrp); | ||
| 2062 | if (!tc->cset) { | ||
| 2063 | retval = -ENOMEM; | ||
| 2064 | goto out_put_css_set_refs; | ||
| 2065 | } | ||
| 2066 | } | 1989 | } |
| 1990 | up_write(&css_set_rwsem); | ||
| 2067 | 1991 | ||
| 2068 | /* | 1992 | /* |
| 2069 | * step 3: now that we're guaranteed success wrt the css_sets, | 1993 | * Migration is committed, all target tasks are now on dst_csets. |
| 2070 | * proceed to move all tasks to the new cgroup. There are no | 1994 | * Nothing is sensitive to fork() after this point. Notify |
| 2071 | * failure cases after here, so this is the commit point. | 1995 | * controllers that migration is complete. |
| 2072 | */ | 1996 | */ |
| 2073 | for (i = 0; i < group_size; i++) { | 1997 | tset.csets = &tset.dst_csets; |
| 2074 | tc = flex_array_get(group, i); | ||
| 2075 | cgroup_task_migrate(tc->cgrp, tc->task, tc->cset); | ||
| 2076 | } | ||
| 2077 | /* nothing is sensitive to fork() after this point. */ | ||
| 2078 | 1998 | ||
| 2079 | /* | ||
| 2080 | * step 4: do subsystem attach callbacks. | ||
| 2081 | */ | ||
| 2082 | for_each_css(css, i, cgrp) | 1999 | for_each_css(css, i, cgrp) |
| 2083 | if (css->ss->attach) | 2000 | if (css->ss->attach) |
| 2084 | css->ss->attach(css, &tset); | 2001 | css->ss->attach(css, &tset); |
| 2085 | 2002 | ||
| 2086 | /* | 2003 | ret = 0; |
| 2087 | * step 5: success! and cleanup | 2004 | goto out_release_tset; |
| 2088 | */ | 2005 | |
| 2089 | retval = 0; | ||
| 2090 | out_put_css_set_refs: | ||
| 2091 | if (retval) { | ||
| 2092 | for (i = 0; i < group_size; i++) { | ||
| 2093 | tc = flex_array_get(group, i); | ||
| 2094 | if (!tc->cset) | ||
| 2095 | break; | ||
| 2096 | put_css_set(tc->cset); | ||
| 2097 | } | ||
| 2098 | } | ||
| 2099 | out_cancel_attach: | 2006 | out_cancel_attach: |
| 2100 | if (retval) { | 2007 | for_each_css(css, i, cgrp) { |
| 2101 | for_each_css(css, i, cgrp) { | 2008 | if (css == failed_css) |
| 2102 | if (css == failed_css) | 2009 | break; |
| 2103 | break; | 2010 | if (css->ss->cancel_attach) |
| 2104 | if (css->ss->cancel_attach) | 2011 | css->ss->cancel_attach(css, &tset); |
| 2105 | css->ss->cancel_attach(css, &tset); | ||
| 2106 | } | ||
| 2107 | } | 2012 | } |
| 2108 | out_free_group_list: | 2013 | out_release_tset: |
| 2109 | flex_array_free(group); | 2014 | down_write(&css_set_rwsem); |
| 2110 | return retval; | 2015 | list_splice_init(&tset.dst_csets, &tset.src_csets); |
| 2016 | list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) { | ||
| 2017 | list_splice_tail_init(&cset->mg_tasks, &cset->tasks); | ||
| 2018 | list_del_init(&cset->mg_node); | ||
| 2019 | } | ||
| 2020 | up_write(&css_set_rwsem); | ||
| 2021 | return ret; | ||
| 2022 | } | ||
| 2023 | |||
| 2024 | /** | ||
| 2025 | * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup | ||
| 2026 | * @dst_cgrp: the cgroup to attach to | ||
| 2027 | * @leader: the task or the leader of the threadgroup to be attached | ||
| 2028 | * @threadgroup: attach the whole threadgroup? | ||
| 2029 | * | ||
| 2030 | * Call holding cgroup_mutex and threadgroup_lock of @leader. | ||
| 2031 | */ | ||
| 2032 | static int cgroup_attach_task(struct cgroup *dst_cgrp, | ||
| 2033 | struct task_struct *leader, bool threadgroup) | ||
| 2034 | { | ||
| 2035 | LIST_HEAD(preloaded_csets); | ||
| 2036 | struct task_struct *task; | ||
| 2037 | int ret; | ||
| 2038 | |||
| 2039 | /* look up all src csets */ | ||
| 2040 | down_read(&css_set_rwsem); | ||
| 2041 | rcu_read_lock(); | ||
| 2042 | task = leader; | ||
| 2043 | do { | ||
| 2044 | cgroup_migrate_add_src(task_css_set(task), dst_cgrp, | ||
| 2045 | &preloaded_csets); | ||
| 2046 | if (!threadgroup) | ||
| 2047 | break; | ||
| 2048 | } while_each_thread(leader, task); | ||
| 2049 | rcu_read_unlock(); | ||
| 2050 | up_read(&css_set_rwsem); | ||
| 2051 | |||
| 2052 | /* prepare dst csets and commit */ | ||
| 2053 | ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets); | ||
| 2054 | if (!ret) | ||
| 2055 | ret = cgroup_migrate(dst_cgrp, leader, threadgroup); | ||
| 2056 | |||
| 2057 | cgroup_migrate_finish(&preloaded_csets); | ||
| 2058 | return ret; | ||
| 2111 | } | 2059 | } |
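
The rework above keeps everything that can fail (preparing the destination css_sets, the ->can_attach() checks) before an explicit commit point, after which only list moves and ->attach() notifications happen, with cgroup_migrate_finish() as the unconditional cleanup. A minimal userspace sketch of that prepare/commit/finish shape follows; it uses only the C standard library and every name in it is illustrative, not a kernel API.

#include <stdio.h>
#include <stdlib.h>

struct item { char name[32]; struct item *next; };

/* prepare: every allocation that can fail happens here */
static int prepare(struct item **staged, const char **names, int n)
{
    for (int i = 0; i < n; i++) {
        struct item *it = malloc(sizeof(*it));
        if (!it)
            return -1;            /* caller still runs finish() */
        snprintf(it->name, sizeof(it->name), "%s", names[i]);
        it->next = *staged;
        *staged = it;
    }
    return 0;
}

/* commit: past this point nothing can fail, only pointer moves */
static void commit(struct item **staged, struct item **dst)
{
    while (*staged) {
        struct item *it = *staged;
        *staged = it->next;
        it->next = *dst;
        *dst = it;
    }
}

/* finish: drop whatever prepare() staged but commit() never consumed */
static void finish(struct item **staged)
{
    while (*staged) {
        struct item *it = *staged;
        *staged = it->next;
        free(it);
    }
}

int main(void)
{
    const char *names[] = { "task-1", "task-2" };
    struct item *staged = NULL, *dst = NULL;

    if (prepare(&staged, names, 2) == 0)
        commit(&staged, &dst);
    finish(&staged);

    for (struct item *it = dst; it; it = it->next)
        printf("migrated %s\n", it->name);
    return 0;
}
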
| 2112 | 2060 | ||
| 2113 | /* | 2061 | /* |
| 2114 | * Find the task_struct of the task to attach by vpid and pass it along to the | 2062 | * Find the task_struct of the task to attach by vpid and pass it along to the |
| 2115 | * function to attach either it or all tasks in its threadgroup. Will lock | 2063 | * function to attach either it or all tasks in its threadgroup. Will lock |
| 2116 | * cgroup_mutex and threadgroup; may take task_lock of task. | 2064 | * cgroup_mutex and threadgroup. |
| 2117 | */ | 2065 | */ |
| 2118 | static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) | 2066 | static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) |
| 2119 | { | 2067 | { |
| @@ -2198,12 +2146,19 @@ out_unlock_cgroup: | |||
| 2198 | */ | 2146 | */ |
| 2199 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) | 2147 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) |
| 2200 | { | 2148 | { |
| 2201 | struct cgroupfs_root *root; | 2149 | struct cgroup_root *root; |
| 2202 | int retval = 0; | 2150 | int retval = 0; |
| 2203 | 2151 | ||
| 2204 | mutex_lock(&cgroup_mutex); | 2152 | mutex_lock(&cgroup_mutex); |
| 2205 | for_each_active_root(root) { | 2153 | for_each_root(root) { |
| 2206 | struct cgroup *from_cgrp = task_cgroup_from_root(from, root); | 2154 | struct cgroup *from_cgrp; |
| 2155 | |||
| 2156 | if (root == &cgrp_dfl_root) | ||
| 2157 | continue; | ||
| 2158 | |||
| 2159 | down_read(&css_set_rwsem); | ||
| 2160 | from_cgrp = task_cgroup_from_root(from, root); | ||
| 2161 | up_read(&css_set_rwsem); | ||
| 2207 | 2162 | ||
| 2208 | retval = cgroup_attach_task(from_cgrp, tsk, false); | 2163 | retval = cgroup_attach_task(from_cgrp, tsk, false); |
| 2209 | if (retval) | 2164 | if (retval) |
| @@ -2228,16 +2183,17 @@ static int cgroup_procs_write(struct cgroup_subsys_state *css, | |||
| 2228 | } | 2183 | } |
| 2229 | 2184 | ||
| 2230 | static int cgroup_release_agent_write(struct cgroup_subsys_state *css, | 2185 | static int cgroup_release_agent_write(struct cgroup_subsys_state *css, |
| 2231 | struct cftype *cft, const char *buffer) | 2186 | struct cftype *cft, char *buffer) |
| 2232 | { | 2187 | { |
| 2233 | BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX); | 2188 | struct cgroup_root *root = css->cgroup->root; |
| 2234 | if (strlen(buffer) >= PATH_MAX) | 2189 | |
| 2235 | return -EINVAL; | 2190 | BUILD_BUG_ON(sizeof(root->release_agent_path) < PATH_MAX); |
| 2236 | if (!cgroup_lock_live_group(css->cgroup)) | 2191 | if (!cgroup_lock_live_group(css->cgroup)) |
| 2237 | return -ENODEV; | 2192 | return -ENODEV; |
| 2238 | mutex_lock(&cgroup_root_mutex); | 2193 | spin_lock(&release_agent_path_lock); |
| 2239 | strcpy(css->cgroup->root->release_agent_path, buffer); | 2194 | strlcpy(root->release_agent_path, buffer, |
| 2240 | mutex_unlock(&cgroup_root_mutex); | 2195 | sizeof(root->release_agent_path)); |
| 2196 | spin_unlock(&release_agent_path_lock); | ||
| 2241 | mutex_unlock(&cgroup_mutex); | 2197 | mutex_unlock(&cgroup_mutex); |
| 2242 | return 0; | 2198 | return 0; |
| 2243 | } | 2199 | } |
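
The hunk above narrows the locking around release_agent_path: instead of an unbounded strcpy() plus a separate strlen() check under the broad cgroup_root_mutex, the new code does a bounded, NUL-terminating strlcpy() under a small spinlock dedicated to that one buffer, so overlong input is truncated rather than rejected. A hedged userspace sketch of the same idea, with a pthread mutex standing in for the spinlock and snprintf() for strlcpy():

#include <pthread.h>
#include <stdio.h>

#define AGENT_PATH_MAX 256

static char agent_path[AGENT_PATH_MAX];
static pthread_mutex_t agent_path_lock = PTHREAD_MUTEX_INITIALIZER;

/* bounded copy under a lock that protects only this buffer; truncation
 * replaces the separate "is it too long?" check */
static void set_agent_path(const char *buf)
{
    pthread_mutex_lock(&agent_path_lock);
    snprintf(agent_path, sizeof(agent_path), "%s", buf);
    pthread_mutex_unlock(&agent_path_lock);
}

int main(void)
{
    set_agent_path("/sbin/my-release-agent");
    printf("%s\n", agent_path);
    return 0;
}
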
| @@ -2262,32 +2218,23 @@ static int cgroup_sane_behavior_show(struct seq_file *seq, void *v) | |||
| 2262 | return 0; | 2218 | return 0; |
| 2263 | } | 2219 | } |
| 2264 | 2220 | ||
| 2265 | /* A buffer size big enough for numbers or short strings */ | 2221 | static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, |
| 2266 | #define CGROUP_LOCAL_BUFFER_SIZE 64 | 2222 | size_t nbytes, loff_t off) |
| 2267 | |||
| 2268 | static ssize_t cgroup_file_write(struct file *file, const char __user *userbuf, | ||
| 2269 | size_t nbytes, loff_t *ppos) | ||
| 2270 | { | 2223 | { |
| 2271 | struct cfent *cfe = __d_cfe(file->f_dentry); | 2224 | struct cgroup *cgrp = of->kn->parent->priv; |
| 2272 | struct cftype *cft = __d_cft(file->f_dentry); | 2225 | struct cftype *cft = of->kn->priv; |
| 2273 | struct cgroup_subsys_state *css = cfe->css; | 2226 | struct cgroup_subsys_state *css; |
| 2274 | size_t max_bytes = cft->max_write_len ?: CGROUP_LOCAL_BUFFER_SIZE - 1; | ||
| 2275 | char *buf; | ||
| 2276 | int ret; | 2227 | int ret; |
| 2277 | 2228 | ||
| 2278 | if (nbytes >= max_bytes) | 2229 | /* |
| 2279 | return -E2BIG; | 2230 | * kernfs guarantees that a file isn't deleted with operations in |
| 2280 | 2231 | * flight, which means that the matching css is and stays alive and | |
| 2281 | buf = kmalloc(nbytes + 1, GFP_KERNEL); | 2232 | * doesn't need to be pinned. The RCU locking is not necessary |
| 2282 | if (!buf) | 2233 | * either. It's just for the convenience of using cgroup_css(). |
| 2283 | return -ENOMEM; | 2234 | */ |
| 2284 | 2235 | rcu_read_lock(); | |
| 2285 | if (copy_from_user(buf, userbuf, nbytes)) { | 2236 | css = cgroup_css(cgrp, cft->ss); |
| 2286 | ret = -EFAULT; | 2237 | rcu_read_unlock(); |
| 2287 | goto out_free; | ||
| 2288 | } | ||
| 2289 | |||
| 2290 | buf[nbytes] = '\0'; | ||
| 2291 | 2238 | ||
| 2292 | if (cft->write_string) { | 2239 | if (cft->write_string) { |
| 2293 | ret = cft->write_string(css, cft, strstrip(buf)); | 2240 | ret = cft->write_string(css, cft, strstrip(buf)); |
| @@ -2306,53 +2253,23 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *userbuf, | |||
| 2306 | } else { | 2253 | } else { |
| 2307 | ret = -EINVAL; | 2254 | ret = -EINVAL; |
| 2308 | } | 2255 | } |
| 2309 | out_free: | 2256 | |
| 2310 | kfree(buf); | ||
| 2311 | return ret ?: nbytes; | 2257 | return ret ?: nbytes; |
| 2312 | } | 2258 | } |
| 2313 | 2259 | ||
| 2314 | /* | ||
| 2315 | * seqfile ops/methods for returning structured data. Currently just | ||
| 2316 | * supports string->u64 maps, but can be extended in future. | ||
| 2317 | */ | ||
| 2318 | |||
| 2319 | static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) | 2260 | static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) |
| 2320 | { | 2261 | { |
| 2321 | struct cftype *cft = seq_cft(seq); | 2262 | return seq_cft(seq)->seq_start(seq, ppos); |
| 2322 | |||
| 2323 | if (cft->seq_start) { | ||
| 2324 | return cft->seq_start(seq, ppos); | ||
| 2325 | } else { | ||
| 2326 | /* | ||
| 2327 | * The same behavior and code as single_open(). Returns | ||
| 2328 | * !NULL if pos is at the beginning; otherwise, NULL. | ||
| 2329 | */ | ||
| 2330 | return NULL + !*ppos; | ||
| 2331 | } | ||
| 2332 | } | 2263 | } |
| 2333 | 2264 | ||
| 2334 | static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) | 2265 | static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) |
| 2335 | { | 2266 | { |
| 2336 | struct cftype *cft = seq_cft(seq); | 2267 | return seq_cft(seq)->seq_next(seq, v, ppos); |
| 2337 | |||
| 2338 | if (cft->seq_next) { | ||
| 2339 | return cft->seq_next(seq, v, ppos); | ||
| 2340 | } else { | ||
| 2341 | /* | ||
| 2342 | * The same behavior and code as single_open(), always | ||
| 2343 | * terminate after the initial read. | ||
| 2344 | */ | ||
| 2345 | ++*ppos; | ||
| 2346 | return NULL; | ||
| 2347 | } | ||
| 2348 | } | 2268 | } |
| 2349 | 2269 | ||
| 2350 | static void cgroup_seqfile_stop(struct seq_file *seq, void *v) | 2270 | static void cgroup_seqfile_stop(struct seq_file *seq, void *v) |
| 2351 | { | 2271 | { |
| 2352 | struct cftype *cft = seq_cft(seq); | 2272 | seq_cft(seq)->seq_stop(seq, v); |
| 2353 | |||
| 2354 | if (cft->seq_stop) | ||
| 2355 | cft->seq_stop(seq, v); | ||
| 2356 | } | 2273 | } |
| 2357 | 2274 | ||
| 2358 | static int cgroup_seqfile_show(struct seq_file *m, void *arg) | 2275 | static int cgroup_seqfile_show(struct seq_file *m, void *arg) |
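
With kernfs, the start/next/stop wrappers above forward unconditionally to the cftype's seq callbacks; the old per-call "is this a single_open()-style file?" fallbacks disappear because that choice is now made once, when the cftype is initialized, by picking cgroup_kf_single_ops or cgroup_kf_ops (see the following hunks). A rough userspace sketch of making that single-vs-iterating choice at registration time, with invented names:

#include <stdio.h>

struct record_ops {
    int (*show)(int pos);
    int (*start)(void);
    int (*next)(int pos);       /* returns -1 when exhausted */
};

static void dump_single(const struct record_ops *ops)
{
    ops->show(0);               /* single_open()-like: exactly one record */
}

static void dump_iter(const struct record_ops *ops)
{
    /* forwards unconditionally; ops registered this way provide all hooks */
    for (int pos = ops->start(); pos >= 0; pos = ops->next(pos))
        ops->show(pos);
}

static int hello_show(int pos) { (void)pos; printf("hello\n"); return 0; }

static int seq_start(void)    { return 0; }
static int seq_next(int pos)  { return pos < 2 ? pos + 1 : -1; }
static int seq_show(int pos)  { printf("record %d\n", pos); return 0; }

int main(void)
{
    struct record_ops hello = { .show = hello_show };
    struct record_ops seq   = { .show = seq_show, .start = seq_start,
                                .next = seq_next };

    /* the single/iterating decision is made once, at registration time,
     * instead of being re-checked inside every start/next/stop call */
    void (*dump)(const struct record_ops *);

    dump = hello.start ? dump_iter : dump_single;
    dump(&hello);

    dump = seq.start ? dump_iter : dump_single;
    dump(&seq);
    return 0;
}
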
| @@ -2372,96 +2289,35 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg) | |||
| 2372 | return 0; | 2289 | return 0; |
| 2373 | } | 2290 | } |
| 2374 | 2291 | ||
| 2375 | static struct seq_operations cgroup_seq_operations = { | 2292 | static struct kernfs_ops cgroup_kf_single_ops = { |
| 2376 | .start = cgroup_seqfile_start, | 2293 | .atomic_write_len = PAGE_SIZE, |
| 2377 | .next = cgroup_seqfile_next, | 2294 | .write = cgroup_file_write, |
| 2378 | .stop = cgroup_seqfile_stop, | 2295 | .seq_show = cgroup_seqfile_show, |
| 2379 | .show = cgroup_seqfile_show, | ||
| 2380 | }; | 2296 | }; |
| 2381 | 2297 | ||
| 2382 | static int cgroup_file_open(struct inode *inode, struct file *file) | 2298 | static struct kernfs_ops cgroup_kf_ops = { |
| 2383 | { | 2299 | .atomic_write_len = PAGE_SIZE, |
| 2384 | struct cfent *cfe = __d_cfe(file->f_dentry); | 2300 | .write = cgroup_file_write, |
| 2385 | struct cftype *cft = __d_cft(file->f_dentry); | 2301 | .seq_start = cgroup_seqfile_start, |
| 2386 | struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent); | 2302 | .seq_next = cgroup_seqfile_next, |
| 2387 | struct cgroup_subsys_state *css; | 2303 | .seq_stop = cgroup_seqfile_stop, |
| 2388 | struct cgroup_open_file *of; | 2304 | .seq_show = cgroup_seqfile_show, |
| 2389 | int err; | 2305 | }; |
| 2390 | |||
| 2391 | err = generic_file_open(inode, file); | ||
| 2392 | if (err) | ||
| 2393 | return err; | ||
| 2394 | |||
| 2395 | /* | ||
| 2396 | * If the file belongs to a subsystem, pin the css. Will be | ||
| 2397 | * unpinned either on open failure or release. This ensures that | ||
| 2398 | * @css stays alive for all file operations. | ||
| 2399 | */ | ||
| 2400 | rcu_read_lock(); | ||
| 2401 | css = cgroup_css(cgrp, cft->ss); | ||
| 2402 | if (cft->ss && !css_tryget(css)) | ||
| 2403 | css = NULL; | ||
| 2404 | rcu_read_unlock(); | ||
| 2405 | |||
| 2406 | if (!css) | ||
| 2407 | return -ENODEV; | ||
| 2408 | |||
| 2409 | /* | ||
| 2410 | * @cfe->css is used by read/write/close to determine the | ||
| 2411 | * associated css. @file->private_data would be a better place but | ||
| 2412 | * that's already used by seqfile. Multiple accessors may use it | ||
| 2413 | * simultaneously which is okay as the association never changes. | ||
| 2414 | */ | ||
| 2415 | WARN_ON_ONCE(cfe->css && cfe->css != css); | ||
| 2416 | cfe->css = css; | ||
| 2417 | |||
| 2418 | of = __seq_open_private(file, &cgroup_seq_operations, | ||
| 2419 | sizeof(struct cgroup_open_file)); | ||
| 2420 | if (of) { | ||
| 2421 | of->cfe = cfe; | ||
| 2422 | return 0; | ||
| 2423 | } | ||
| 2424 | |||
| 2425 | if (css->ss) | ||
| 2426 | css_put(css); | ||
| 2427 | return -ENOMEM; | ||
| 2428 | } | ||
| 2429 | |||
| 2430 | static int cgroup_file_release(struct inode *inode, struct file *file) | ||
| 2431 | { | ||
| 2432 | struct cfent *cfe = __d_cfe(file->f_dentry); | ||
| 2433 | struct cgroup_subsys_state *css = cfe->css; | ||
| 2434 | |||
| 2435 | if (css->ss) | ||
| 2436 | css_put(css); | ||
| 2437 | return seq_release_private(inode, file); | ||
| 2438 | } | ||
| 2439 | 2306 | ||
| 2440 | /* | 2307 | /* |
| 2441 | * cgroup_rename - Only allow simple rename of directories in place. | 2308 | * cgroup_rename - Only allow simple rename of directories in place. |
| 2442 | */ | 2309 | */ |
| 2443 | static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, | 2310 | static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, |
| 2444 | struct inode *new_dir, struct dentry *new_dentry) | 2311 | const char *new_name_str) |
| 2445 | { | 2312 | { |
| 2313 | struct cgroup *cgrp = kn->priv; | ||
| 2446 | int ret; | 2314 | int ret; |
| 2447 | struct cgroup_name *name, *old_name; | ||
| 2448 | struct cgroup *cgrp; | ||
| 2449 | |||
| 2450 | /* | ||
| 2451 | * It's convenient to use parent dir's i_mutex to protect | ||
| 2452 | * cgrp->name. | ||
| 2453 | */ | ||
| 2454 | lockdep_assert_held(&old_dir->i_mutex); | ||
| 2455 | 2315 | ||
| 2456 | if (!S_ISDIR(old_dentry->d_inode->i_mode)) | 2316 | if (kernfs_type(kn) != KERNFS_DIR) |
| 2457 | return -ENOTDIR; | 2317 | return -ENOTDIR; |
| 2458 | if (new_dentry->d_inode) | 2318 | if (kn->parent != new_parent) |
| 2459 | return -EEXIST; | ||
| 2460 | if (old_dir != new_dir) | ||
| 2461 | return -EIO; | 2319 | return -EIO; |
| 2462 | 2320 | ||
| 2463 | cgrp = __d_cgrp(old_dentry); | ||
| 2464 | |||
| 2465 | /* | 2321 | /* |
| 2466 | * This isn't a proper migration and its usefulness is very | 2322 | * This isn't a proper migration and its usefulness is very |
| 2467 | * limited. Disallow if sane_behavior. | 2323 | * limited. Disallow if sane_behavior. |
| @@ -2469,218 +2325,61 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 2469 | if (cgroup_sane_behavior(cgrp)) | 2325 | if (cgroup_sane_behavior(cgrp)) |
| 2470 | return -EPERM; | 2326 | return -EPERM; |
| 2471 | 2327 | ||
| 2472 | name = cgroup_alloc_name(new_dentry); | 2328 | /* |
| 2473 | if (!name) | 2329 | * We're gonna grab cgroup_tree_mutex which nests outside kernfs |
| 2474 | return -ENOMEM; | 2330 | * active_ref. kernfs_rename() doesn't require active_ref |
| 2475 | 2331 | * protection. Break them before grabbing cgroup_tree_mutex. | |
| 2476 | ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry); | 2332 | */ |
| 2477 | if (ret) { | 2333 | kernfs_break_active_protection(new_parent); |
| 2478 | kfree(name); | 2334 | kernfs_break_active_protection(kn); |
| 2479 | return ret; | ||
| 2480 | } | ||
| 2481 | |||
| 2482 | old_name = rcu_dereference_protected(cgrp->name, true); | ||
| 2483 | rcu_assign_pointer(cgrp->name, name); | ||
| 2484 | |||
| 2485 | kfree_rcu(old_name, rcu_head); | ||
| 2486 | return 0; | ||
| 2487 | } | ||
| 2488 | |||
| 2489 | static struct simple_xattrs *__d_xattrs(struct dentry *dentry) | ||
| 2490 | { | ||
| 2491 | if (S_ISDIR(dentry->d_inode->i_mode)) | ||
| 2492 | return &__d_cgrp(dentry)->xattrs; | ||
| 2493 | else | ||
| 2494 | return &__d_cfe(dentry)->xattrs; | ||
| 2495 | } | ||
| 2496 | |||
| 2497 | static inline int xattr_enabled(struct dentry *dentry) | ||
| 2498 | { | ||
| 2499 | struct cgroupfs_root *root = dentry->d_sb->s_fs_info; | ||
| 2500 | return root->flags & CGRP_ROOT_XATTR; | ||
| 2501 | } | ||
| 2502 | |||
| 2503 | static bool is_valid_xattr(const char *name) | ||
| 2504 | { | ||
| 2505 | if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || | ||
| 2506 | !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) | ||
| 2507 | return true; | ||
| 2508 | return false; | ||
| 2509 | } | ||
| 2510 | |||
| 2511 | static int cgroup_setxattr(struct dentry *dentry, const char *name, | ||
| 2512 | const void *val, size_t size, int flags) | ||
| 2513 | { | ||
| 2514 | if (!xattr_enabled(dentry)) | ||
| 2515 | return -EOPNOTSUPP; | ||
| 2516 | if (!is_valid_xattr(name)) | ||
| 2517 | return -EINVAL; | ||
| 2518 | return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags); | ||
| 2519 | } | ||
| 2520 | |||
| 2521 | static int cgroup_removexattr(struct dentry *dentry, const char *name) | ||
| 2522 | { | ||
| 2523 | if (!xattr_enabled(dentry)) | ||
| 2524 | return -EOPNOTSUPP; | ||
| 2525 | if (!is_valid_xattr(name)) | ||
| 2526 | return -EINVAL; | ||
| 2527 | return simple_xattr_remove(__d_xattrs(dentry), name); | ||
| 2528 | } | ||
| 2529 | |||
| 2530 | static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name, | ||
| 2531 | void *buf, size_t size) | ||
| 2532 | { | ||
| 2533 | if (!xattr_enabled(dentry)) | ||
| 2534 | return -EOPNOTSUPP; | ||
| 2535 | if (!is_valid_xattr(name)) | ||
| 2536 | return -EINVAL; | ||
| 2537 | return simple_xattr_get(__d_xattrs(dentry), name, buf, size); | ||
| 2538 | } | ||
| 2539 | |||
| 2540 | static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size) | ||
| 2541 | { | ||
| 2542 | if (!xattr_enabled(dentry)) | ||
| 2543 | return -EOPNOTSUPP; | ||
| 2544 | return simple_xattr_list(__d_xattrs(dentry), buf, size); | ||
| 2545 | } | ||
| 2546 | |||
| 2547 | static const struct file_operations cgroup_file_operations = { | ||
| 2548 | .read = seq_read, | ||
| 2549 | .write = cgroup_file_write, | ||
| 2550 | .llseek = generic_file_llseek, | ||
| 2551 | .open = cgroup_file_open, | ||
| 2552 | .release = cgroup_file_release, | ||
| 2553 | }; | ||
| 2554 | |||
| 2555 | static const struct inode_operations cgroup_file_inode_operations = { | ||
| 2556 | .setxattr = cgroup_setxattr, | ||
| 2557 | .getxattr = cgroup_getxattr, | ||
| 2558 | .listxattr = cgroup_listxattr, | ||
| 2559 | .removexattr = cgroup_removexattr, | ||
| 2560 | }; | ||
| 2561 | |||
| 2562 | static const struct inode_operations cgroup_dir_inode_operations = { | ||
| 2563 | .lookup = simple_lookup, | ||
| 2564 | .mkdir = cgroup_mkdir, | ||
| 2565 | .rmdir = cgroup_rmdir, | ||
| 2566 | .rename = cgroup_rename, | ||
| 2567 | .setxattr = cgroup_setxattr, | ||
| 2568 | .getxattr = cgroup_getxattr, | ||
| 2569 | .listxattr = cgroup_listxattr, | ||
| 2570 | .removexattr = cgroup_removexattr, | ||
| 2571 | }; | ||
| 2572 | |||
| 2573 | static int cgroup_create_file(struct dentry *dentry, umode_t mode, | ||
| 2574 | struct super_block *sb) | ||
| 2575 | { | ||
| 2576 | struct inode *inode; | ||
| 2577 | |||
| 2578 | if (!dentry) | ||
| 2579 | return -ENOENT; | ||
| 2580 | if (dentry->d_inode) | ||
| 2581 | return -EEXIST; | ||
| 2582 | 2335 | ||
| 2583 | inode = cgroup_new_inode(mode, sb); | 2336 | mutex_lock(&cgroup_tree_mutex); |
| 2584 | if (!inode) | 2337 | mutex_lock(&cgroup_mutex); |
| 2585 | return -ENOMEM; | ||
| 2586 | 2338 | ||
| 2587 | if (S_ISDIR(mode)) { | 2339 | ret = kernfs_rename(kn, new_parent, new_name_str); |
| 2588 | inode->i_op = &cgroup_dir_inode_operations; | ||
| 2589 | inode->i_fop = &simple_dir_operations; | ||
| 2590 | 2340 | ||
| 2591 | /* start off with i_nlink == 2 (for "." entry) */ | 2341 | mutex_unlock(&cgroup_mutex); |
| 2592 | inc_nlink(inode); | 2342 | mutex_unlock(&cgroup_tree_mutex); |
| 2593 | inc_nlink(dentry->d_parent->d_inode); | ||
| 2594 | 2343 | ||
| 2595 | /* | 2344 | kernfs_unbreak_active_protection(kn); |
| 2596 | * Control reaches here with cgroup_mutex held. | 2345 | kernfs_unbreak_active_protection(new_parent); |
| 2597 | * @inode->i_mutex should nest outside cgroup_mutex but we | 2346 | return ret; |
| 2598 | * want to populate it immediately without releasing | ||
| 2599 | * cgroup_mutex. As @inode isn't visible to anyone else | ||
| 2600 | * yet, trylock will always succeed without affecting | ||
| 2601 | * lockdep checks. | ||
| 2602 | */ | ||
| 2603 | WARN_ON_ONCE(!mutex_trylock(&inode->i_mutex)); | ||
| 2604 | } else if (S_ISREG(mode)) { | ||
| 2605 | inode->i_size = 0; | ||
| 2606 | inode->i_fop = &cgroup_file_operations; | ||
| 2607 | inode->i_op = &cgroup_file_inode_operations; | ||
| 2608 | } | ||
| 2609 | d_instantiate(dentry, inode); | ||
| 2610 | dget(dentry); /* Extra count - pin the dentry in core */ | ||
| 2611 | return 0; | ||
| 2612 | } | 2347 | } |
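
The rename path above has to honour a lock ordering: cgroup_tree_mutex nests outside kernfs active protection, so the active references are broken before the mutexes are taken and restored only after they are released. A much-simplified userspace sketch of that "drop the inner protection before taking the outer lock" rule, using plain pthread mutexes as stand-ins (nothing here is a kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

struct node {
    pthread_mutex_t lock;       /* stands in for active protection */
    char name[32];
};

static void rename_node(struct node *n, const char *new_name)
{
    /* caller arrived holding n->lock; release it first ... */
    pthread_mutex_unlock(&n->lock);

    /* ... so taking the outer lock cannot deadlock against a remover
     * that holds tree_lock while waiting for n->lock to be released */
    pthread_mutex_lock(&tree_lock);
    snprintf(n->name, sizeof(n->name), "%s", new_name);
    pthread_mutex_unlock(&tree_lock);

    /* restore the caller's expectation before returning */
    pthread_mutex_lock(&n->lock);
}

int main(void)
{
    struct node n = { PTHREAD_MUTEX_INITIALIZER, "old" };

    pthread_mutex_lock(&n.lock);
    rename_node(&n, "new");
    pthread_mutex_unlock(&n.lock);
    printf("%s\n", n.name);
    return 0;
}
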
| 2613 | 2348 | ||
| 2614 | /** | 2349 | /* set uid and gid of cgroup dirs and files to that of the creator */ |
| 2615 | * cgroup_file_mode - deduce file mode of a control file | 2350 | static int cgroup_kn_set_ugid(struct kernfs_node *kn) |
| 2616 | * @cft: the control file in question | ||
| 2617 | * | ||
| 2618 | * returns cft->mode if ->mode is not 0 | ||
| 2619 | * returns S_IRUGO|S_IWUSR if it has both a read and a write handler | ||
| 2620 | * returns S_IRUGO if it has only a read handler | ||
| 2621 | * returns S_IWUSR if it has only a write hander | ||
| 2622 | */ | ||
| 2623 | static umode_t cgroup_file_mode(const struct cftype *cft) | ||
| 2624 | { | 2351 | { |
| 2625 | umode_t mode = 0; | 2352 | struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, |
| 2626 | 2353 | .ia_uid = current_fsuid(), | |
| 2627 | if (cft->mode) | 2354 | .ia_gid = current_fsgid(), }; |
| 2628 | return cft->mode; | ||
| 2629 | |||
| 2630 | if (cft->read_u64 || cft->read_s64 || cft->seq_show) | ||
| 2631 | mode |= S_IRUGO; | ||
| 2632 | 2355 | ||
| 2633 | if (cft->write_u64 || cft->write_s64 || cft->write_string || | 2356 | if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && |
| 2634 | cft->trigger) | 2357 | gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) |
| 2635 | mode |= S_IWUSR; | 2358 | return 0; |
| 2636 | 2359 | ||
| 2637 | return mode; | 2360 | return kernfs_setattr(kn, &iattr); |
| 2638 | } | 2361 | } |
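
cgroup_kn_set_ugid() above hands ownership of a freshly created node to the creating user and skips the attribute change entirely when the creator is root, since root ownership is what a new node starts with. A small userspace analogue of the same short-circuit, assuming only POSIX chown():

#include <stdio.h>
#include <unistd.h>

/* skip the explicit ownership change when the caller is already root */
static int set_ugid(const char *path)
{
    uid_t uid = getuid();
    gid_t gid = getgid();

    if (uid == 0 && gid == 0)
        return 0;
    return chown(path, uid, gid);
}

int main(void)
{
    if (set_ugid(".") != 0)
        perror("chown");
    return 0;
}
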
| 2639 | 2362 | ||
| 2640 | static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) | 2363 | static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) |
| 2641 | { | 2364 | { |
| 2642 | struct dentry *dir = cgrp->dentry; | 2365 | char name[CGROUP_FILE_NAME_MAX]; |
| 2643 | struct cgroup *parent = __d_cgrp(dir); | 2366 | struct kernfs_node *kn; |
| 2644 | struct dentry *dentry; | 2367 | struct lock_class_key *key = NULL; |
| 2645 | struct cfent *cfe; | 2368 | int ret; |
| 2646 | int error; | ||
| 2647 | umode_t mode; | ||
| 2648 | char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; | ||
| 2649 | |||
| 2650 | if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) && | ||
| 2651 | !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) { | ||
| 2652 | strcpy(name, cft->ss->name); | ||
| 2653 | strcat(name, "."); | ||
| 2654 | } | ||
| 2655 | strcat(name, cft->name); | ||
| 2656 | |||
| 2657 | BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex)); | ||
| 2658 | |||
| 2659 | cfe = kzalloc(sizeof(*cfe), GFP_KERNEL); | ||
| 2660 | if (!cfe) | ||
| 2661 | return -ENOMEM; | ||
| 2662 | |||
| 2663 | dentry = lookup_one_len(name, dir, strlen(name)); | ||
| 2664 | if (IS_ERR(dentry)) { | ||
| 2665 | error = PTR_ERR(dentry); | ||
| 2666 | goto out; | ||
| 2667 | } | ||
| 2668 | 2369 | ||
| 2669 | cfe->type = (void *)cft; | 2370 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 2670 | cfe->dentry = dentry; | 2371 | key = &cft->lockdep_key; |
| 2671 | dentry->d_fsdata = cfe; | 2372 | #endif |
| 2672 | simple_xattrs_init(&cfe->xattrs); | 2373 | kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), |
| 2374 | cgroup_file_mode(cft), 0, cft->kf_ops, cft, | ||
| 2375 | NULL, false, key); | ||
| 2376 | if (IS_ERR(kn)) | ||
| 2377 | return PTR_ERR(kn); | ||
| 2673 | 2378 | ||
| 2674 | mode = cgroup_file_mode(cft); | 2379 | ret = cgroup_kn_set_ugid(kn); |
| 2675 | error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb); | 2380 | if (ret) |
| 2676 | if (!error) { | 2381 | kernfs_remove(kn); |
| 2677 | list_add_tail(&cfe->node, &parent->files); | 2382 | return ret; |
| 2678 | cfe = NULL; | ||
| 2679 | } | ||
| 2680 | dput(dentry); | ||
| 2681 | out: | ||
| 2682 | kfree(cfe); | ||
| 2683 | return error; | ||
| 2684 | } | 2383 | } |
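
The new cgroup_add_file() builds the file name through cgroup_file_name() into a fixed CGROUP_FILE_NAME_MAX buffer, replacing the old strcpy()/strcat() sequence that prefixed the subsystem name by hand. A hedged sketch of that bounded "<subsys>.<file>" construction in plain C; the struct names below are invented for the example:

#include <stdio.h>

#define FILE_NAME_MAX 64

struct subsys { const char *name; };
struct ctype  { const char *name; const struct subsys *ss; int no_prefix; };

/* build "subsys.file" (or just "file") into a fixed buffer, bounded,
 * instead of an unchecked strcpy()/strcat() sequence */
static const char *file_name(const struct ctype *cft, char *buf, size_t len)
{
    if (cft->ss && !cft->no_prefix)
        snprintf(buf, len, "%s.%s", cft->ss->name, cft->name);
    else
        snprintf(buf, len, "%s", cft->name);
    return buf;
}

int main(void)
{
    const struct subsys cpu = { "cpu" };
    const struct ctype shares = { "shares", &cpu, 0 };
    const struct ctype tasks  = { "tasks", NULL, 0 };
    char buf[FILE_NAME_MAX];

    printf("%s\n", file_name(&shares, buf, sizeof(buf)));
    printf("%s\n", file_name(&tasks, buf, sizeof(buf)));
    return 0;
}
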
| 2685 | 2384 | ||
| 2686 | /** | 2385 | /** |
| @@ -2700,11 +2399,12 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], | |||
| 2700 | struct cftype *cft; | 2399 | struct cftype *cft; |
| 2701 | int ret; | 2400 | int ret; |
| 2702 | 2401 | ||
| 2703 | lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); | 2402 | lockdep_assert_held(&cgroup_tree_mutex); |
| 2704 | lockdep_assert_held(&cgroup_mutex); | ||
| 2705 | 2403 | ||
| 2706 | for (cft = cfts; cft->name[0] != '\0'; cft++) { | 2404 | for (cft = cfts; cft->name[0] != '\0'; cft++) { |
| 2707 | /* does cft->flags tell us to skip this file on @cgrp? */ | 2405 | /* does cft->flags tell us to skip this file on @cgrp? */ |
| 2406 | if ((cft->flags & CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) | ||
| 2407 | continue; | ||
| 2708 | if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp)) | 2408 | if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp)) |
| 2709 | continue; | 2409 | continue; |
| 2710 | if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent) | 2410 | if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent) |
| @@ -2726,44 +2426,19 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], | |||
| 2726 | return 0; | 2426 | return 0; |
| 2727 | } | 2427 | } |
| 2728 | 2428 | ||
| 2729 | static void cgroup_cfts_prepare(void) | 2429 | static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add) |
| 2730 | __acquires(&cgroup_mutex) | ||
| 2731 | { | ||
| 2732 | /* | ||
| 2733 | * Thanks to the entanglement with vfs inode locking, we can't walk | ||
| 2734 | * the existing cgroups under cgroup_mutex and create files. | ||
| 2735 | * Instead, we use css_for_each_descendant_pre() and drop RCU read | ||
| 2736 | * lock before calling cgroup_addrm_files(). | ||
| 2737 | */ | ||
| 2738 | mutex_lock(&cgroup_mutex); | ||
| 2739 | } | ||
| 2740 | |||
| 2741 | static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) | ||
| 2742 | __releases(&cgroup_mutex) | ||
| 2743 | { | 2430 | { |
| 2744 | LIST_HEAD(pending); | 2431 | LIST_HEAD(pending); |
| 2745 | struct cgroup_subsys *ss = cfts[0].ss; | 2432 | struct cgroup_subsys *ss = cfts[0].ss; |
| 2746 | struct cgroup *root = &ss->root->top_cgroup; | 2433 | struct cgroup *root = &ss->root->cgrp; |
| 2747 | struct super_block *sb = ss->root->sb; | ||
| 2748 | struct dentry *prev = NULL; | ||
| 2749 | struct inode *inode; | ||
| 2750 | struct cgroup_subsys_state *css; | 2434 | struct cgroup_subsys_state *css; |
| 2751 | u64 update_before; | ||
| 2752 | int ret = 0; | 2435 | int ret = 0; |
| 2753 | 2436 | ||
| 2754 | /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ | 2437 | lockdep_assert_held(&cgroup_tree_mutex); |
| 2755 | if (!cfts || ss->root == &cgroup_dummy_root || | ||
| 2756 | !atomic_inc_not_zero(&sb->s_active)) { | ||
| 2757 | mutex_unlock(&cgroup_mutex); | ||
| 2758 | return 0; | ||
| 2759 | } | ||
| 2760 | 2438 | ||
| 2761 | /* | 2439 | /* don't bother if @ss isn't attached */ |
| 2762 | * All cgroups which are created after we drop cgroup_mutex will | 2440 | if (ss->root == &cgrp_dfl_root) |
| 2763 | * have the updated set of files, so we only need to update the | 2441 | return 0; |
| 2764 | * cgroups created before the current @cgroup_serial_nr_next. | ||
| 2765 | */ | ||
| 2766 | update_before = cgroup_serial_nr_next; | ||
| 2767 | 2442 | ||
| 2768 | /* add/rm files for all cgroups created before */ | 2443 | /* add/rm files for all cgroups created before */ |
| 2769 | css_for_each_descendant_pre(css, cgroup_css(root, ss)) { | 2444 | css_for_each_descendant_pre(css, cgroup_css(root, ss)) { |
| @@ -2772,62 +2447,75 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) | |||
| 2772 | if (cgroup_is_dead(cgrp)) | 2447 | if (cgroup_is_dead(cgrp)) |
| 2773 | continue; | 2448 | continue; |
| 2774 | 2449 | ||
| 2775 | inode = cgrp->dentry->d_inode; | 2450 | ret = cgroup_addrm_files(cgrp, cfts, is_add); |
| 2776 | dget(cgrp->dentry); | ||
| 2777 | dput(prev); | ||
| 2778 | prev = cgrp->dentry; | ||
| 2779 | |||
| 2780 | mutex_unlock(&cgroup_mutex); | ||
| 2781 | mutex_lock(&inode->i_mutex); | ||
| 2782 | mutex_lock(&cgroup_mutex); | ||
| 2783 | if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) | ||
| 2784 | ret = cgroup_addrm_files(cgrp, cfts, is_add); | ||
| 2785 | mutex_unlock(&inode->i_mutex); | ||
| 2786 | if (ret) | 2451 | if (ret) |
| 2787 | break; | 2452 | break; |
| 2788 | } | 2453 | } |
| 2789 | mutex_unlock(&cgroup_mutex); | 2454 | |
| 2790 | dput(prev); | 2455 | if (is_add && !ret) |
| 2791 | deactivate_super(sb); | 2456 | kernfs_activate(root->kn); |
| 2792 | return ret; | 2457 | return ret; |
| 2793 | } | 2458 | } |
| 2794 | 2459 | ||
| 2795 | /** | 2460 | static void cgroup_exit_cftypes(struct cftype *cfts) |
| 2796 | * cgroup_add_cftypes - add an array of cftypes to a subsystem | ||
| 2797 | * @ss: target cgroup subsystem | ||
| 2798 | * @cfts: zero-length name terminated array of cftypes | ||
| 2799 | * | ||
| 2800 | * Register @cfts to @ss. Files described by @cfts are created for all | ||
| 2801 | * existing cgroups to which @ss is attached and all future cgroups will | ||
| 2802 | * have them too. This function can be called anytime whether @ss is | ||
| 2803 | * attached or not. | ||
| 2804 | * | ||
| 2805 | * Returns 0 on successful registration, -errno on failure. Note that this | ||
| 2806 | * function currently returns 0 as long as @cfts registration is successful | ||
| 2807 | * even if some file creation attempts on existing cgroups fail. | ||
| 2808 | */ | ||
| 2809 | int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) | ||
| 2810 | { | 2461 | { |
| 2811 | struct cftype_set *set; | ||
| 2812 | struct cftype *cft; | 2462 | struct cftype *cft; |
| 2813 | int ret; | ||
| 2814 | 2463 | ||
| 2815 | set = kzalloc(sizeof(*set), GFP_KERNEL); | 2464 | for (cft = cfts; cft->name[0] != '\0'; cft++) { |
| 2816 | if (!set) | 2465 | /* free copy for custom atomic_write_len, see init_cftypes() */ |
| 2817 | return -ENOMEM; | 2466 | if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) |
| 2467 | kfree(cft->kf_ops); | ||
| 2468 | cft->kf_ops = NULL; | ||
| 2469 | cft->ss = NULL; | ||
| 2470 | } | ||
| 2471 | } | ||
| 2472 | |||
| 2473 | static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) | ||
| 2474 | { | ||
| 2475 | struct cftype *cft; | ||
| 2476 | |||
| 2477 | for (cft = cfts; cft->name[0] != '\0'; cft++) { | ||
| 2478 | struct kernfs_ops *kf_ops; | ||
| 2818 | 2479 | ||
| 2819 | for (cft = cfts; cft->name[0] != '\0'; cft++) | 2480 | WARN_ON(cft->ss || cft->kf_ops); |
| 2481 | |||
| 2482 | if (cft->seq_start) | ||
| 2483 | kf_ops = &cgroup_kf_ops; | ||
| 2484 | else | ||
| 2485 | kf_ops = &cgroup_kf_single_ops; | ||
| 2486 | |||
| 2487 | /* | ||
| 2488 | * Ugh... if @cft wants a custom max_write_len, we need to | ||
| 2489 | * make a copy of kf_ops to set its atomic_write_len. | ||
| 2490 | */ | ||
| 2491 | if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) { | ||
| 2492 | kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL); | ||
| 2493 | if (!kf_ops) { | ||
| 2494 | cgroup_exit_cftypes(cfts); | ||
| 2495 | return -ENOMEM; | ||
| 2496 | } | ||
| 2497 | kf_ops->atomic_write_len = cft->max_write_len; | ||
| 2498 | } | ||
| 2499 | |||
| 2500 | cft->kf_ops = kf_ops; | ||
| 2820 | cft->ss = ss; | 2501 | cft->ss = ss; |
| 2502 | } | ||
| 2821 | 2503 | ||
| 2822 | cgroup_cfts_prepare(); | 2504 | return 0; |
| 2823 | set->cfts = cfts; | 2505 | } |
| 2824 | list_add_tail(&set->node, &ss->cftsets); | 2506 | |
| 2825 | ret = cgroup_cfts_commit(cfts, true); | 2507 | static int cgroup_rm_cftypes_locked(struct cftype *cfts) |
| 2826 | if (ret) | 2508 | { |
| 2827 | cgroup_rm_cftypes(cfts); | 2509 | lockdep_assert_held(&cgroup_tree_mutex); |
| 2828 | return ret; | 2510 | |
| 2511 | if (!cfts || !cfts[0].ss) | ||
| 2512 | return -ENOENT; | ||
| 2513 | |||
| 2514 | list_del(&cfts->node); | ||
| 2515 | cgroup_apply_cftypes(cfts, false); | ||
| 2516 | cgroup_exit_cftypes(cfts); | ||
| 2517 | return 0; | ||
| 2829 | } | 2518 | } |
| 2830 | EXPORT_SYMBOL_GPL(cgroup_add_cftypes); | ||
| 2831 | 2519 | ||
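
cgroup_init_cftypes() above normally points every cftype at one of the two shared kernfs_ops tables and only duplicates a table (kmemdup()) when a cftype asks for a custom atomic_write_len, with cgroup_exit_cftypes() freeing exactly those private copies. A userspace sketch of this copy-on-customize pattern, with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct write_ops {
    size_t max_write_len;
    int  (*write)(const char *buf, size_t len);
};

static int do_write(const char *buf, size_t len)
{
    printf("writing %zu bytes: %s\n", len, buf);
    return 0;
}

/* shared default used by every file that doesn't ask for a custom limit */
static const struct write_ops default_ops = { 4096, do_write };

static const struct write_ops *get_ops(size_t custom_len)
{
    struct write_ops *ops;

    if (!custom_len || custom_len == default_ops.max_write_len)
        return &default_ops;    /* common case: no copy needed */

    /* rare case: duplicate the table just to change one field */
    ops = malloc(sizeof(*ops));
    if (!ops)
        return NULL;
    memcpy(ops, &default_ops, sizeof(*ops));
    ops->max_write_len = custom_len;
    return ops;
}

static void put_ops(const struct write_ops *ops)
{
    if (ops && ops != &default_ops)
        free((void *)ops);      /* only the private copies are freed */
}

int main(void)
{
    const struct write_ops *ops = get_ops(64);

    if (ops)
        ops->write("hello", 5);
    put_ops(ops);
    return 0;
}
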
| 2832 | /** | 2520 | /** |
| 2833 | * cgroup_rm_cftypes - remove an array of cftypes from a subsystem | 2521 | * cgroup_rm_cftypes - remove an array of cftypes from a subsystem |
| @@ -2842,24 +2530,48 @@ EXPORT_SYMBOL_GPL(cgroup_add_cftypes); | |||
| 2842 | */ | 2530 | */ |
| 2843 | int cgroup_rm_cftypes(struct cftype *cfts) | 2531 | int cgroup_rm_cftypes(struct cftype *cfts) |
| 2844 | { | 2532 | { |
| 2845 | struct cftype_set *set; | 2533 | int ret; |
| 2846 | 2534 | ||
| 2847 | if (!cfts || !cfts[0].ss) | 2535 | mutex_lock(&cgroup_tree_mutex); |
| 2848 | return -ENOENT; | 2536 | ret = cgroup_rm_cftypes_locked(cfts); |
| 2537 | mutex_unlock(&cgroup_tree_mutex); | ||
| 2538 | return ret; | ||
| 2539 | } | ||
| 2849 | 2540 | ||
| 2850 | cgroup_cfts_prepare(); | 2541 | /** |
| 2542 | * cgroup_add_cftypes - add an array of cftypes to a subsystem | ||
| 2543 | * @ss: target cgroup subsystem | ||
| 2544 | * @cfts: zero-length name terminated array of cftypes | ||
| 2545 | * | ||
| 2546 | * Register @cfts to @ss. Files described by @cfts are created for all | ||
| 2547 | * existing cgroups to which @ss is attached and all future cgroups will | ||
| 2548 | * have them too. This function can be called anytime whether @ss is | ||
| 2549 | * attached or not. | ||
| 2550 | * | ||
| 2551 | * Returns 0 on successful registration, -errno on failure. Note that this | ||
| 2552 | * function currently returns 0 as long as @cfts registration is successful | ||
| 2553 | * even if some file creation attempts on existing cgroups fail. | ||
| 2554 | */ | ||
| 2555 | int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) | ||
| 2556 | { | ||
| 2557 | int ret; | ||
| 2851 | 2558 | ||
| 2852 | list_for_each_entry(set, &cfts[0].ss->cftsets, node) { | 2559 | if (!cfts || cfts[0].name[0] == '\0') |
| 2853 | if (set->cfts == cfts) { | 2560 | return 0; |
| 2854 | list_del(&set->node); | ||
| 2855 | kfree(set); | ||
| 2856 | cgroup_cfts_commit(cfts, false); | ||
| 2857 | return 0; | ||
| 2858 | } | ||
| 2859 | } | ||
| 2860 | 2561 | ||
| 2861 | cgroup_cfts_commit(NULL, false); | 2562 | ret = cgroup_init_cftypes(ss, cfts); |
| 2862 | return -ENOENT; | 2563 | if (ret) |
| 2564 | return ret; | ||
| 2565 | |||
| 2566 | mutex_lock(&cgroup_tree_mutex); | ||
| 2567 | |||
| 2568 | list_add_tail(&cfts->node, &ss->cfts); | ||
| 2569 | ret = cgroup_apply_cftypes(cfts, true); | ||
| 2570 | if (ret) | ||
| 2571 | cgroup_rm_cftypes_locked(cfts); | ||
| 2572 | |||
| 2573 | mutex_unlock(&cgroup_tree_mutex); | ||
| 2574 | return ret; | ||
| 2863 | } | 2575 | } |
| 2864 | 2576 | ||
| 2865 | /** | 2577 | /** |
| @@ -2868,57 +2580,18 @@ int cgroup_rm_cftypes(struct cftype *cfts) | |||
| 2868 | * | 2580 | * |
| 2869 | * Return the number of tasks in the cgroup. | 2581 | * Return the number of tasks in the cgroup. |
| 2870 | */ | 2582 | */ |
| 2871 | int cgroup_task_count(const struct cgroup *cgrp) | 2583 | static int cgroup_task_count(const struct cgroup *cgrp) |
| 2872 | { | 2584 | { |
| 2873 | int count = 0; | 2585 | int count = 0; |
| 2874 | struct cgrp_cset_link *link; | 2586 | struct cgrp_cset_link *link; |
| 2875 | 2587 | ||
| 2876 | read_lock(&css_set_lock); | 2588 | down_read(&css_set_rwsem); |
| 2877 | list_for_each_entry(link, &cgrp->cset_links, cset_link) | 2589 | list_for_each_entry(link, &cgrp->cset_links, cset_link) |
| 2878 | count += atomic_read(&link->cset->refcount); | 2590 | count += atomic_read(&link->cset->refcount); |
| 2879 | read_unlock(&css_set_lock); | 2591 | up_read(&css_set_rwsem); |
| 2880 | return count; | 2592 | return count; |
| 2881 | } | 2593 | } |
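
cgroup_task_count() above sums the css_set reference counts under the read side of css_set_rwsem, which replaces the old read_lock(&css_set_lock); counting only needs shared access. A small userspace sketch of the same read-side walk using a pthread rwlock (illustrative only):

#include <pthread.h>
#include <stdio.h>

struct set { int refcount; struct set *next; };

static pthread_rwlock_t set_lock = PTHREAD_RWLOCK_INITIALIZER;

/* counting only reads the list, so the read side of the lock is enough */
static int task_count(const struct set *head)
{
    int count = 0;

    pthread_rwlock_rdlock(&set_lock);
    for (const struct set *s = head; s; s = s->next)
        count += s->refcount;
    pthread_rwlock_unlock(&set_lock);
    return count;
}

int main(void)
{
    struct set b = { 3, NULL }, a = { 2, &b };

    printf("%d tasks\n", task_count(&a));
    return 0;
}
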
| 2882 | 2594 | ||
| 2883 | /* | ||
| 2884 | * To reduce the fork() overhead for systems that are not actually using | ||
| 2885 | * their cgroups capability, we don't maintain the lists running through | ||
| 2886 | * each css_set to its tasks until we see the list actually used - in other | ||
| 2887 | * words after the first call to css_task_iter_start(). | ||
| 2888 | */ | ||
| 2889 | static void cgroup_enable_task_cg_lists(void) | ||
| 2890 | { | ||
| 2891 | struct task_struct *p, *g; | ||
| 2892 | write_lock(&css_set_lock); | ||
| 2893 | use_task_css_set_links = 1; | ||
| 2894 | /* | ||
| 2895 | * We need tasklist_lock because RCU is not safe against | ||
| 2896 | * while_each_thread(). Besides, a forking task that has passed | ||
| 2897 | * cgroup_post_fork() without seeing use_task_css_set_links = 1 | ||
| 2898 | * is not guaranteed to have its child immediately visible in the | ||
| 2899 | * tasklist if we walk through it with RCU. | ||
| 2900 | */ | ||
| 2901 | read_lock(&tasklist_lock); | ||
| 2902 | do_each_thread(g, p) { | ||
| 2903 | task_lock(p); | ||
| 2904 | /* | ||
| 2905 | * We should check if the process is exiting, otherwise | ||
| 2906 | * it will race with cgroup_exit() in that the list | ||
| 2907 | * entry won't be deleted though the process has exited. | ||
| 2908 | * Do it while holding siglock so that we don't end up | ||
| 2909 | * racing against cgroup_exit(). | ||
| 2910 | */ | ||
| 2911 | spin_lock_irq(&p->sighand->siglock); | ||
| 2912 | if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) | ||
| 2913 | list_add(&p->cg_list, &task_css_set(p)->tasks); | ||
| 2914 | spin_unlock_irq(&p->sighand->siglock); | ||
| 2915 | |||
| 2916 | task_unlock(p); | ||
| 2917 | } while_each_thread(g, p); | ||
| 2918 | read_unlock(&tasklist_lock); | ||
| 2919 | write_unlock(&css_set_lock); | ||
| 2920 | } | ||
| 2921 | |||
| 2922 | /** | 2595 | /** |
| 2923 | * css_next_child - find the next child of a given css | 2596 | * css_next_child - find the next child of a given css |
| 2924 | * @pos_css: the current position (%NULL to initiate traversal) | 2597 | * @pos_css: the current position (%NULL to initiate traversal) |
| @@ -2937,7 +2610,7 @@ css_next_child(struct cgroup_subsys_state *pos_css, | |||
| 2937 | struct cgroup *cgrp = parent_css->cgroup; | 2610 | struct cgroup *cgrp = parent_css->cgroup; |
| 2938 | struct cgroup *next; | 2611 | struct cgroup *next; |
| 2939 | 2612 | ||
| 2940 | cgroup_assert_mutex_or_rcu_locked(); | 2613 | cgroup_assert_mutexes_or_rcu_locked(); |
| 2941 | 2614 | ||
| 2942 | /* | 2615 | /* |
| 2943 | * @pos could already have been removed. Once a cgroup is removed, | 2616 | * @pos could already have been removed. Once a cgroup is removed, |
| @@ -2973,7 +2646,6 @@ css_next_child(struct cgroup_subsys_state *pos_css, | |||
| 2973 | 2646 | ||
| 2974 | return cgroup_css(next, parent_css->ss); | 2647 | return cgroup_css(next, parent_css->ss); |
| 2975 | } | 2648 | } |
| 2976 | EXPORT_SYMBOL_GPL(css_next_child); | ||
| 2977 | 2649 | ||
| 2978 | /** | 2650 | /** |
| 2979 | * css_next_descendant_pre - find the next descendant for pre-order walk | 2651 | * css_next_descendant_pre - find the next descendant for pre-order walk |
| @@ -2995,7 +2667,7 @@ css_next_descendant_pre(struct cgroup_subsys_state *pos, | |||
| 2995 | { | 2667 | { |
| 2996 | struct cgroup_subsys_state *next; | 2668 | struct cgroup_subsys_state *next; |
| 2997 | 2669 | ||
| 2998 | cgroup_assert_mutex_or_rcu_locked(); | 2670 | cgroup_assert_mutexes_or_rcu_locked(); |
| 2999 | 2671 | ||
| 3000 | /* if first iteration, visit @root */ | 2672 | /* if first iteration, visit @root */ |
| 3001 | if (!pos) | 2673 | if (!pos) |
| @@ -3016,7 +2688,6 @@ css_next_descendant_pre(struct cgroup_subsys_state *pos, | |||
| 3016 | 2688 | ||
| 3017 | return NULL; | 2689 | return NULL; |
| 3018 | } | 2690 | } |
| 3019 | EXPORT_SYMBOL_GPL(css_next_descendant_pre); | ||
| 3020 | 2691 | ||
| 3021 | /** | 2692 | /** |
| 3022 | * css_rightmost_descendant - return the rightmost descendant of a css | 2693 | * css_rightmost_descendant - return the rightmost descendant of a css |
| @@ -3036,7 +2707,7 @@ css_rightmost_descendant(struct cgroup_subsys_state *pos) | |||
| 3036 | { | 2707 | { |
| 3037 | struct cgroup_subsys_state *last, *tmp; | 2708 | struct cgroup_subsys_state *last, *tmp; |
| 3038 | 2709 | ||
| 3039 | cgroup_assert_mutex_or_rcu_locked(); | 2710 | cgroup_assert_mutexes_or_rcu_locked(); |
| 3040 | 2711 | ||
| 3041 | do { | 2712 | do { |
| 3042 | last = pos; | 2713 | last = pos; |
| @@ -3048,7 +2719,6 @@ css_rightmost_descendant(struct cgroup_subsys_state *pos) | |||
| 3048 | 2719 | ||
| 3049 | return last; | 2720 | return last; |
| 3050 | } | 2721 | } |
| 3051 | EXPORT_SYMBOL_GPL(css_rightmost_descendant); | ||
| 3052 | 2722 | ||
| 3053 | static struct cgroup_subsys_state * | 2723 | static struct cgroup_subsys_state * |
| 3054 | css_leftmost_descendant(struct cgroup_subsys_state *pos) | 2724 | css_leftmost_descendant(struct cgroup_subsys_state *pos) |
| @@ -3084,7 +2754,7 @@ css_next_descendant_post(struct cgroup_subsys_state *pos, | |||
| 3084 | { | 2754 | { |
| 3085 | struct cgroup_subsys_state *next; | 2755 | struct cgroup_subsys_state *next; |
| 3086 | 2756 | ||
| 3087 | cgroup_assert_mutex_or_rcu_locked(); | 2757 | cgroup_assert_mutexes_or_rcu_locked(); |
| 3088 | 2758 | ||
| 3089 | /* if first iteration, visit leftmost descendant which may be @root */ | 2759 | /* if first iteration, visit leftmost descendant which may be @root */ |
| 3090 | if (!pos) | 2760 | if (!pos) |
| @@ -3102,7 +2772,6 @@ css_next_descendant_post(struct cgroup_subsys_state *pos, | |||
| 3102 | /* no sibling left, visit parent */ | 2772 | /* no sibling left, visit parent */ |
| 3103 | return css_parent(pos); | 2773 | return css_parent(pos); |
| 3104 | } | 2774 | } |
| 3105 | EXPORT_SYMBOL_GPL(css_next_descendant_post); | ||
| 3106 | 2775 | ||
| 3107 | /** | 2776 | /** |
| 3108 | * css_advance_task_iter - advance a task itererator to the next css_set | 2777 | * css_advance_task_iter - advance a task itererator to the next css_set |
| @@ -3125,9 +2794,14 @@ static void css_advance_task_iter(struct css_task_iter *it) | |||
| 3125 | } | 2794 | } |
| 3126 | link = list_entry(l, struct cgrp_cset_link, cset_link); | 2795 | link = list_entry(l, struct cgrp_cset_link, cset_link); |
| 3127 | cset = link->cset; | 2796 | cset = link->cset; |
| 3128 | } while (list_empty(&cset->tasks)); | 2797 | } while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks)); |
| 2798 | |||
| 3129 | it->cset_link = l; | 2799 | it->cset_link = l; |
| 3130 | it->task = cset->tasks.next; | 2800 | |
| 2801 | if (!list_empty(&cset->tasks)) | ||
| 2802 | it->task = cset->tasks.next; | ||
| 2803 | else | ||
| 2804 | it->task = cset->mg_tasks.next; | ||
| 3131 | } | 2805 | } |
| 3132 | 2806 | ||
| 3133 | /** | 2807 | /** |
| @@ -3146,17 +2820,12 @@ static void css_advance_task_iter(struct css_task_iter *it) | |||
| 3146 | */ | 2820 | */ |
| 3147 | void css_task_iter_start(struct cgroup_subsys_state *css, | 2821 | void css_task_iter_start(struct cgroup_subsys_state *css, |
| 3148 | struct css_task_iter *it) | 2822 | struct css_task_iter *it) |
| 3149 | __acquires(css_set_lock) | 2823 | __acquires(css_set_rwsem) |
| 3150 | { | 2824 | { |
| 3151 | /* | 2825 | /* no one should try to iterate before mounting cgroups */ |
| 3152 | * The first time anyone tries to iterate across a css, we need to | 2826 | WARN_ON_ONCE(!use_task_css_set_links); |
| 3153 | * enable the list linking each css_set to its tasks, and fix up | ||
| 3154 | * all existing tasks. | ||
| 3155 | */ | ||
| 3156 | if (!use_task_css_set_links) | ||
| 3157 | cgroup_enable_task_cg_lists(); | ||
| 3158 | 2827 | ||
| 3159 | read_lock(&css_set_lock); | 2828 | down_read(&css_set_rwsem); |
| 3160 | 2829 | ||
| 3161 | it->origin_css = css; | 2830 | it->origin_css = css; |
| 3162 | it->cset_link = &css->cgroup->cset_links; | 2831 | it->cset_link = &css->cgroup->cset_links; |
| @@ -3176,24 +2845,29 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it) | |||
| 3176 | { | 2845 | { |
| 3177 | struct task_struct *res; | 2846 | struct task_struct *res; |
| 3178 | struct list_head *l = it->task; | 2847 | struct list_head *l = it->task; |
| 3179 | struct cgrp_cset_link *link; | 2848 | struct cgrp_cset_link *link = list_entry(it->cset_link, |
| 2849 | struct cgrp_cset_link, cset_link); | ||
| 3180 | 2850 | ||
| 3181 | /* If the iterator cg is NULL, we have no tasks */ | 2851 | /* If the iterator cg is NULL, we have no tasks */ |
| 3182 | if (!it->cset_link) | 2852 | if (!it->cset_link) |
| 3183 | return NULL; | 2853 | return NULL; |
| 3184 | res = list_entry(l, struct task_struct, cg_list); | 2854 | res = list_entry(l, struct task_struct, cg_list); |
| 3185 | /* Advance iterator to find next entry */ | 2855 | |
| 2856 | /* | ||
| 2857 | * Advance iterator to find next entry. cset->tasks is consumed | ||
| 2858 | * first and then ->mg_tasks. After ->mg_tasks, we move onto the | ||
| 2859 | * next cset. | ||
| 2860 | */ | ||
| 3186 | l = l->next; | 2861 | l = l->next; |
| 3187 | link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link); | 2862 | |
| 3188 | if (l == &link->cset->tasks) { | 2863 | if (l == &link->cset->tasks) |
| 3189 | /* | 2864 | l = link->cset->mg_tasks.next; |
| 3190 | * We reached the end of this task list - move on to the | 2865 | |
| 3191 | * next cgrp_cset_link. | 2866 | if (l == &link->cset->mg_tasks) |
| 3192 | */ | ||
| 3193 | css_advance_task_iter(it); | 2867 | css_advance_task_iter(it); |
| 3194 | } else { | 2868 | else |
| 3195 | it->task = l; | 2869 | it->task = l; |
| 3196 | } | 2870 | |
| 3197 | return res; | 2871 | return res; |
| 3198 | } | 2872 | } |
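
css_task_iter_next() above consumes cset->tasks first and then cset->mg_tasks before advancing to the next css_set, so tasks parked on the migration list are not skipped. A minimal userspace sketch of an iterator that drains one list and then a second (names invented for the example):

#include <stdio.h>

struct titer {
    const char **tasks;         /* NULL-terminated */
    const char **mg_tasks;      /* NULL-terminated */
    const char **pos;
    int on_mg;
};

static void titer_start(struct titer *it,
                        const char **tasks, const char **mg_tasks)
{
    it->tasks = tasks;
    it->mg_tasks = mg_tasks;
    it->pos = tasks;
    it->on_mg = 0;
}

static const char *titer_next(struct titer *it)
{
    if (!it->on_mg && !*it->pos) {      /* first list exhausted */
        it->pos = it->mg_tasks;
        it->on_mg = 1;
    }
    return *it->pos ? *it->pos++ : NULL;
}

int main(void)
{
    const char *tasks[] = { "t1", "t2", NULL };
    const char *mg_tasks[] = { "m1", NULL };
    struct titer it;
    const char *name;

    titer_start(&it, tasks, mg_tasks);
    while ((name = titer_next(&it)))
        printf("%s\n", name);
    return 0;
}
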
| 3199 | 2873 | ||
| @@ -3204,191 +2878,62 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it) | |||
| 3204 | * Finish task iteration started by css_task_iter_start(). | 2878 | * Finish task iteration started by css_task_iter_start(). |
| 3205 | */ | 2879 | */ |
| 3206 | void css_task_iter_end(struct css_task_iter *it) | 2880 | void css_task_iter_end(struct css_task_iter *it) |
| 3207 | __releases(css_set_lock) | 2881 | __releases(css_set_rwsem) |
| 3208 | { | ||
| 3209 | read_unlock(&css_set_lock); | ||
| 3210 | } | ||
| 3211 | |||
| 3212 | static inline int started_after_time(struct task_struct *t1, | ||
| 3213 | struct timespec *time, | ||
| 3214 | struct task_struct *t2) | ||
| 3215 | { | ||
| 3216 | int start_diff = timespec_compare(&t1->start_time, time); | ||
| 3217 | if (start_diff > 0) { | ||
| 3218 | return 1; | ||
| 3219 | } else if (start_diff < 0) { | ||
| 3220 | return 0; | ||
| 3221 | } else { | ||
| 3222 | /* | ||
| 3223 | * Arbitrarily, if two processes started at the same | ||
| 3224 | * time, we'll say that the lower pointer value | ||
| 3225 | * started first. Note that t2 may have exited by now | ||
| 3226 | * so this may not be a valid pointer any longer, but | ||
| 3227 | * that's fine - it still serves to distinguish | ||
| 3228 | * between two tasks started (effectively) simultaneously. | ||
| 3229 | */ | ||
| 3230 | return t1 > t2; | ||
| 3231 | } | ||
| 3232 | } | ||
| 3233 | |||
| 3234 | /* | ||
| 3235 | * This function is a callback from heap_insert() and is used to order | ||
| 3236 | * the heap. | ||
| 3237 | * In this case we order the heap in descending task start time. | ||
| 3238 | */ | ||
| 3239 | static inline int started_after(void *p1, void *p2) | ||
| 3240 | { | 2882 | { |
| 3241 | struct task_struct *t1 = p1; | 2883 | up_read(&css_set_rwsem); |
| 3242 | struct task_struct *t2 = p2; | ||
| 3243 | return started_after_time(t1, &t2->start_time, t2); | ||
| 3244 | } | 2884 | } |
| 3245 | 2885 | ||
| 3246 | /** | 2886 | /** |
| 3247 | * css_scan_tasks - iterate through all the tasks in a css | 2887 | * cgroup_transfer_tasks - move tasks from one cgroup to another |
| 3248 | * @css: the css to iterate tasks of | 2888 | * @to: cgroup to which the tasks will be moved |
| 3249 | * @test: optional test callback | 2889 | * @from: cgroup in which the tasks currently reside |
| 3250 | * @process: process callback | ||
| 3251 | * @data: data passed to @test and @process | ||
| 3252 | * @heap: optional pre-allocated heap used for task iteration | ||
| 3253 | * | ||
| 3254 | * Iterate through all the tasks in @css, calling @test for each, and if it | ||
| 3255 | * returns %true, call @process for it also. | ||
| 3256 | * | ||
| 3257 | * @test may be NULL, meaning always true (select all tasks), which | ||
| 3258 | * effectively duplicates css_task_iter_{start,next,end}() but does not | ||
| 3259 | * lock css_set_lock for the call to @process. | ||
| 3260 | * | ||
| 3261 | * It is guaranteed that @process will act on every task that is a member | ||
| 3262 | * of @css for the duration of this call. This function may or may not | ||
| 3263 | * call @process for tasks that exit or move to a different css during the | ||
| 3264 | * call, or are forked or move into the css during the call. | ||
| 3265 | * | ||
| 3266 | * Note that @test may be called with locks held, and may in some | ||
| 3267 | * situations be called multiple times for the same task, so it should be | ||
| 3268 | * cheap. | ||
| 3269 | * | 2890 | * |
| 3270 | * If @heap is non-NULL, a heap has been pre-allocated and will be used for | 2891 | * Locking rules between cgroup_post_fork() and the migration path |
| 3271 | * heap operations (and its "gt" member will be overwritten), else a | 2892 | * guarantee that, if a task is forking while being migrated, the new child |
| 3272 | * temporary heap will be used (allocation of which may cause this function | 2893 | * is guaranteed to be either visible in the source cgroup after the |
| 3273 | * to fail). | 2894 | * parent's migration is complete or put into the target cgroup. No task |
| 2895 | * can slip out of migration through forking. | ||
| 3274 | */ | 2896 | */ |
| 3275 | int css_scan_tasks(struct cgroup_subsys_state *css, | 2897 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) |
| 3276 | bool (*test)(struct task_struct *, void *), | ||
| 3277 | void (*process)(struct task_struct *, void *), | ||
| 3278 | void *data, struct ptr_heap *heap) | ||
| 3279 | { | 2898 | { |
| 3280 | int retval, i; | 2899 | LIST_HEAD(preloaded_csets); |
| 2900 | struct cgrp_cset_link *link; | ||
| 3281 | struct css_task_iter it; | 2901 | struct css_task_iter it; |
| 3282 | struct task_struct *p, *dropped; | 2902 | struct task_struct *task; |
| 3283 | /* Never dereference latest_task, since it's not refcounted */ | 2903 | int ret; |
| 3284 | struct task_struct *latest_task = NULL; | ||
| 3285 | struct ptr_heap tmp_heap; | ||
| 3286 | struct timespec latest_time = { 0, 0 }; | ||
| 3287 | |||
| 3288 | if (heap) { | ||
| 3289 | /* The caller supplied our heap and pre-allocated its memory */ | ||
| 3290 | heap->gt = &started_after; | ||
| 3291 | } else { | ||
| 3292 | /* We need to allocate our own heap memory */ | ||
| 3293 | heap = &tmp_heap; | ||
| 3294 | retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after); | ||
| 3295 | if (retval) | ||
| 3296 | /* cannot allocate the heap */ | ||
| 3297 | return retval; | ||
| 3298 | } | ||
| 3299 | 2904 | ||
| 3300 | again: | 2905 | mutex_lock(&cgroup_mutex); |
| 3301 | /* | ||
| 3302 | * Scan tasks in the css, using the @test callback to determine | ||
| 3303 | * which are of interest, and invoking @process callback on the | ||
| 3304 | * ones which need an update. Since we don't want to hold any | ||
| 3305 | * locks during the task updates, gather tasks to be processed in a | ||
| 3306 | * heap structure. The heap is sorted by descending task start | ||
| 3307 | * time. If the statically-sized heap fills up, we overflow tasks | ||
| 3308 | * that started later, and in future iterations only consider tasks | ||
| 3309 | * that started after the latest task in the previous pass. This | ||
| 3310 | * guarantees forward progress and that we don't miss any tasks. | ||
| 3311 | */ | ||
| 3312 | heap->size = 0; | ||
| 3313 | css_task_iter_start(css, &it); | ||
| 3314 | while ((p = css_task_iter_next(&it))) { | ||
| 3315 | /* | ||
| 3316 | * Only affect tasks that qualify per the caller's callback, | ||
| 3317 | * if he provided one | ||
| 3318 | */ | ||
| 3319 | if (test && !test(p, data)) | ||
| 3320 | continue; | ||
| 3321 | /* | ||
| 3322 | * Only process tasks that started after the last task | ||
| 3323 | * we processed | ||
| 3324 | */ | ||
| 3325 | if (!started_after_time(p, &latest_time, latest_task)) | ||
| 3326 | continue; | ||
| 3327 | dropped = heap_insert(heap, p); | ||
| 3328 | if (dropped == NULL) { | ||
| 3329 | /* | ||
| 3330 | * The new task was inserted; the heap wasn't | ||
| 3331 | * previously full | ||
| 3332 | */ | ||
| 3333 | get_task_struct(p); | ||
| 3334 | } else if (dropped != p) { | ||
| 3335 | /* | ||
| 3336 | * The new task was inserted, and pushed out a | ||
| 3337 | * different task | ||
| 3338 | */ | ||
| 3339 | get_task_struct(p); | ||
| 3340 | put_task_struct(dropped); | ||
| 3341 | } | ||
| 3342 | /* | ||
| 3343 | * Else the new task was newer than anything already in | ||
| 3344 | * the heap and wasn't inserted | ||
| 3345 | */ | ||
| 3346 | } | ||
| 3347 | css_task_iter_end(&it); | ||
| 3348 | 2906 | ||
| 3349 | if (heap->size) { | 2907 | /* all tasks in @from are being moved, all csets are source */ |
| 3350 | for (i = 0; i < heap->size; i++) { | 2908 | down_read(&css_set_rwsem); |
| 3351 | struct task_struct *q = heap->ptrs[i]; | 2909 | list_for_each_entry(link, &from->cset_links, cset_link) |
| 3352 | if (i == 0) { | 2910 | cgroup_migrate_add_src(link->cset, to, &preloaded_csets); |
| 3353 | latest_time = q->start_time; | 2911 | up_read(&css_set_rwsem); |
| 3354 | latest_task = q; | ||
| 3355 | } | ||
| 3356 | /* Process the task per the caller's callback */ | ||
| 3357 | process(q, data); | ||
| 3358 | put_task_struct(q); | ||
| 3359 | } | ||
| 3360 | /* | ||
| 3361 | * If we had to process any tasks at all, scan again | ||
| 3362 | * in case some of them were in the middle of forking | ||
| 3363 | * children that didn't get processed. | ||
| 3364 | * Not the most efficient way to do it, but it avoids | ||
| 3365 | * having to take callback_mutex in the fork path | ||
| 3366 | */ | ||
| 3367 | goto again; | ||
| 3368 | } | ||
| 3369 | if (heap == &tmp_heap) | ||
| 3370 | heap_free(&tmp_heap); | ||
| 3371 | return 0; | ||
| 3372 | } | ||
| 3373 | 2912 | ||
| 3374 | static void cgroup_transfer_one_task(struct task_struct *task, void *data) | 2913 | ret = cgroup_migrate_prepare_dst(to, &preloaded_csets); |
| 3375 | { | 2914 | if (ret) |
| 3376 | struct cgroup *new_cgroup = data; | 2915 | goto out_err; |
| 3377 | 2916 | ||
| 3378 | mutex_lock(&cgroup_mutex); | 2917 | /* |
| 3379 | cgroup_attach_task(new_cgroup, task, false); | 2918 | * Migrate tasks one-by-one until @from is empty. This fails iff |
| 2919 | * ->can_attach() fails. | ||
| 2920 | */ | ||
| 2921 | do { | ||
| 2922 | css_task_iter_start(&from->dummy_css, &it); | ||
| 2923 | task = css_task_iter_next(&it); | ||
| 2924 | if (task) | ||
| 2925 | get_task_struct(task); | ||
| 2926 | css_task_iter_end(&it); | ||
| 2927 | |||
| 2928 | if (task) { | ||
| 2929 | ret = cgroup_migrate(to, task, false); | ||
| 2930 | put_task_struct(task); | ||
| 2931 | } | ||
| 2932 | } while (task && !ret); | ||
| 2933 | out_err: | ||
| 2934 | cgroup_migrate_finish(&preloaded_csets); | ||
| 3380 | mutex_unlock(&cgroup_mutex); | 2935 | mutex_unlock(&cgroup_mutex); |
| 3381 | } | 2936 | return ret; |
| 3382 | |||
| 3383 | /** | ||
| 3384 | * cgroup_transfer_tasks - move tasks from one cgroup to another | ||
| 3385 | * @to: cgroup to which the tasks will be moved | ||
| 3386 | * @from: cgroup in which the tasks currently reside | ||
| 3387 | */ | ||
| 3388 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) | ||
| 3389 | { | ||
| 3390 | return css_scan_tasks(&from->dummy_css, NULL, cgroup_transfer_one_task, | ||
| 3391 | to, NULL); | ||
| 3392 | } | 2937 | } |
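The rewritten cgroup_transfer_tasks() above keeps its original prototype, so existing callers (cpuset's hotplug handling is the in-tree user) need no changes. A minimal, hypothetical caller sketch; the helper name is made up and error handling is reduced to a warning:

	/* Drain every task out of an emptied cgroup into its parent. */
	static void drain_into_parent(struct cgroup *cgrp)
	{
		int ret;

		ret = cgroup_transfer_tasks(cgrp->parent, cgrp);
		if (ret)
			pr_warning("cgroup: draining %s failed (%d)\n",
				   cgrp->kn->name, ret);
	}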
| 3393 | 2938 | ||
| 3394 | /* | 2939 | /* |
| @@ -3687,21 +3232,31 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, | |||
| 3687 | */ | 3232 | */ |
| 3688 | int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) | 3233 | int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) |
| 3689 | { | 3234 | { |
| 3690 | int ret = -EINVAL; | 3235 | struct kernfs_node *kn = kernfs_node_from_dentry(dentry); |
| 3691 | struct cgroup *cgrp; | 3236 | struct cgroup *cgrp; |
| 3692 | struct css_task_iter it; | 3237 | struct css_task_iter it; |
| 3693 | struct task_struct *tsk; | 3238 | struct task_struct *tsk; |
| 3694 | 3239 | ||
| 3240 | /* it should be kernfs_node belonging to cgroupfs and is a directory */ | ||
| 3241 | if (dentry->d_sb->s_type != &cgroup_fs_type || !kn || | ||
| 3242 | kernfs_type(kn) != KERNFS_DIR) | ||
| 3243 | return -EINVAL; | ||
| 3244 | |||
| 3245 | mutex_lock(&cgroup_mutex); | ||
| 3246 | |||
| 3695 | /* | 3247 | /* |
| 3696 | * Validate dentry by checking the superblock operations, | 3248 | * We aren't being called from kernfs and there's no guarantee on |
| 3697 | * and make sure it's a directory. | 3249 | * @kn->priv's validity. For this and css_tryget_from_dir(), |
| 3250 | * @kn->priv is RCU safe. Let's do the RCU dancing. | ||
| 3698 | */ | 3251 | */ |
| 3699 | if (dentry->d_sb->s_op != &cgroup_ops || | 3252 | rcu_read_lock(); |
| 3700 | !S_ISDIR(dentry->d_inode->i_mode)) | 3253 | cgrp = rcu_dereference(kn->priv); |
| 3701 | goto err; | 3254 | if (!cgrp || cgroup_is_dead(cgrp)) { |
| 3702 | 3255 | rcu_read_unlock(); | |
| 3703 | ret = 0; | 3256 | mutex_unlock(&cgroup_mutex); |
| 3704 | cgrp = dentry->d_fsdata; | 3257 | return -ENOENT; |
| 3258 | } | ||
| 3259 | rcu_read_unlock(); | ||
| 3705 | 3260 | ||
| 3706 | css_task_iter_start(&cgrp->dummy_css, &it); | 3261 | css_task_iter_start(&cgrp->dummy_css, &it); |
| 3707 | while ((tsk = css_task_iter_next(&it))) { | 3262 | while ((tsk = css_task_iter_next(&it))) { |
| @@ -3726,8 +3281,8 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) | |||
| 3726 | } | 3281 | } |
| 3727 | css_task_iter_end(&it); | 3282 | css_task_iter_end(&it); |
| 3728 | 3283 | ||
| 3729 | err: | 3284 | mutex_unlock(&cgroup_mutex); |
| 3730 | return ret; | 3285 | return 0; |
| 3731 | } | 3286 | } |
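cgroupstats_build() is reached from the taskstats netlink path with a dentry resolved from a userspace file descriptor; the kernfs_node_from_dentry() check above is what now rejects anything that is not a cgroupfs directory. A hedged sketch of such a caller (the helper name is illustrative, not the actual taskstats code):

	static int fill_stats_for_fd(int fd, struct cgroupstats *stats)
	{
		struct fd f = fdget(fd);
		int ret = -EBADF;

		if (f.file) {
			ret = cgroupstats_build(stats, f.file->f_path.dentry);
			fdput(f);
		}
		return ret;
	}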
| 3732 | 3287 | ||
| 3733 | 3288 | ||
| @@ -3745,7 +3300,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) | |||
| 3745 | * after a seek to the start). Use a binary-search to find the | 3300 | * after a seek to the start). Use a binary-search to find the |
| 3746 | * next pid to display, if any | 3301 | * next pid to display, if any |
| 3747 | */ | 3302 | */ |
| 3748 | struct cgroup_open_file *of = s->private; | 3303 | struct kernfs_open_file *of = s->private; |
| 3749 | struct cgroup *cgrp = seq_css(s)->cgroup; | 3304 | struct cgroup *cgrp = seq_css(s)->cgroup; |
| 3750 | struct cgroup_pidlist *l; | 3305 | struct cgroup_pidlist *l; |
| 3751 | enum cgroup_filetype type = seq_cft(s)->private; | 3306 | enum cgroup_filetype type = seq_cft(s)->private; |
| @@ -3800,7 +3355,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) | |||
| 3800 | 3355 | ||
| 3801 | static void cgroup_pidlist_stop(struct seq_file *s, void *v) | 3356 | static void cgroup_pidlist_stop(struct seq_file *s, void *v) |
| 3802 | { | 3357 | { |
| 3803 | struct cgroup_open_file *of = s->private; | 3358 | struct kernfs_open_file *of = s->private; |
| 3804 | struct cgroup_pidlist *l = of->priv; | 3359 | struct cgroup_pidlist *l = of->priv; |
| 3805 | 3360 | ||
| 3806 | if (l) | 3361 | if (l) |
| @@ -3811,7 +3366,7 @@ static void cgroup_pidlist_stop(struct seq_file *s, void *v) | |||
| 3811 | 3366 | ||
| 3812 | static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) | 3367 | static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) |
| 3813 | { | 3368 | { |
| 3814 | struct cgroup_open_file *of = s->private; | 3369 | struct kernfs_open_file *of = s->private; |
| 3815 | struct cgroup_pidlist *l = of->priv; | 3370 | struct cgroup_pidlist *l = of->priv; |
| 3816 | pid_t *p = v; | 3371 | pid_t *p = v; |
| 3817 | pid_t *end = l->list + l->length; | 3372 | pid_t *end = l->list + l->length; |
| @@ -3861,23 +3416,6 @@ static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css, | |||
| 3861 | return 0; | 3416 | return 0; |
| 3862 | } | 3417 | } |
| 3863 | 3418 | ||
| 3864 | /* | ||
| 3865 | * When dput() is called asynchronously, if umount has been done and | ||
| 3866 | * then deactivate_super() in cgroup_free_fn() kills the superblock, | ||
| 3867 | * there's a small window that vfs will see the root dentry with non-zero | ||
| 3868 | * refcnt and trigger BUG(). | ||
| 3869 | * | ||
| 3870 | * That's why we hold a reference before dput() and drop it right after. | ||
| 3871 | */ | ||
| 3872 | static void cgroup_dput(struct cgroup *cgrp) | ||
| 3873 | { | ||
| 3874 | struct super_block *sb = cgrp->root->sb; | ||
| 3875 | |||
| 3876 | atomic_inc(&sb->s_active); | ||
| 3877 | dput(cgrp->dentry); | ||
| 3878 | deactivate_super(sb); | ||
| 3879 | } | ||
| 3880 | |||
| 3881 | static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css, | 3419 | static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css, |
| 3882 | struct cftype *cft) | 3420 | struct cftype *cft) |
| 3883 | { | 3421 | { |
| @@ -3944,7 +3482,7 @@ static struct cftype cgroup_base_files[] = { | |||
| 3944 | .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT, | 3482 | .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT, |
| 3945 | .seq_show = cgroup_release_agent_show, | 3483 | .seq_show = cgroup_release_agent_show, |
| 3946 | .write_string = cgroup_release_agent_write, | 3484 | .write_string = cgroup_release_agent_write, |
| 3947 | .max_write_len = PATH_MAX, | 3485 | .max_write_len = PATH_MAX - 1, |
| 3948 | }, | 3486 | }, |
| 3949 | { } /* terminate */ | 3487 | { } /* terminate */ |
| 3950 | }; | 3488 | }; |
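The base files above use the seq_file-based cftype interface. For reference, a minimal hypothetical controller file in the same style (the name, flag choice and value are made up):

	static int example_show(struct seq_file *sf, void *v)
	{
		seq_printf(sf, "%d\n", 42);
		return 0;
	}

	static struct cftype example_files[] = {
		{
			.name = "example.value",
			.flags = CFTYPE_ONLY_ON_ROOT,
			.seq_show = example_show,
		},
		{ }	/* terminate */
	};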
| @@ -3963,13 +3501,13 @@ static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask) | |||
| 3963 | 3501 | ||
| 3964 | /* process cftsets of each subsystem */ | 3502 | /* process cftsets of each subsystem */ |
| 3965 | for_each_subsys(ss, i) { | 3503 | for_each_subsys(ss, i) { |
| 3966 | struct cftype_set *set; | 3504 | struct cftype *cfts; |
| 3967 | 3505 | ||
| 3968 | if (!test_bit(i, &subsys_mask)) | 3506 | if (!test_bit(i, &subsys_mask)) |
| 3969 | continue; | 3507 | continue; |
| 3970 | 3508 | ||
| 3971 | list_for_each_entry(set, &ss->cftsets, node) { | 3509 | list_for_each_entry(cfts, &ss->cfts, node) { |
| 3972 | ret = cgroup_addrm_files(cgrp, set->cfts, true); | 3510 | ret = cgroup_addrm_files(cgrp, cfts, true); |
| 3973 | if (ret < 0) | 3511 | if (ret < 0) |
| 3974 | goto err; | 3512 | goto err; |
| 3975 | } | 3513 | } |
| @@ -4012,7 +3550,7 @@ static void css_free_work_fn(struct work_struct *work) | |||
| 4012 | css_put(css->parent); | 3550 | css_put(css->parent); |
| 4013 | 3551 | ||
| 4014 | css->ss->css_free(css); | 3552 | css->ss->css_free(css); |
| 4015 | cgroup_dput(cgrp); | 3553 | cgroup_put(cgrp); |
| 4016 | } | 3554 | } |
| 4017 | 3555 | ||
| 4018 | static void css_free_rcu_fn(struct rcu_head *rcu_head) | 3556 | static void css_free_rcu_fn(struct rcu_head *rcu_head) |
| @@ -4020,10 +3558,6 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head) | |||
| 4020 | struct cgroup_subsys_state *css = | 3558 | struct cgroup_subsys_state *css = |
| 4021 | container_of(rcu_head, struct cgroup_subsys_state, rcu_head); | 3559 | container_of(rcu_head, struct cgroup_subsys_state, rcu_head); |
| 4022 | 3560 | ||
| 4023 | /* | ||
| 4024 | * css holds an extra ref to @cgrp->dentry which is put on the last | ||
| 4025 | * css_put(). dput() requires process context which we don't have. | ||
| 4026 | */ | ||
| 4027 | INIT_WORK(&css->destroy_work, css_free_work_fn); | 3561 | INIT_WORK(&css->destroy_work, css_free_work_fn); |
| 4028 | queue_work(cgroup_destroy_wq, &css->destroy_work); | 3562 | queue_work(cgroup_destroy_wq, &css->destroy_work); |
| 4029 | } | 3563 | } |
| @@ -4033,7 +3567,7 @@ static void css_release(struct percpu_ref *ref) | |||
| 4033 | struct cgroup_subsys_state *css = | 3567 | struct cgroup_subsys_state *css = |
| 4034 | container_of(ref, struct cgroup_subsys_state, refcnt); | 3568 | container_of(ref, struct cgroup_subsys_state, refcnt); |
| 4035 | 3569 | ||
| 4036 | rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL); | 3570 | RCU_INIT_POINTER(css->cgroup->subsys[css->ss->id], NULL); |
| 4037 | call_rcu(&css->rcu_head, css_free_rcu_fn); | 3571 | call_rcu(&css->rcu_head, css_free_rcu_fn); |
| 4038 | } | 3572 | } |
| 4039 | 3573 | ||
| @@ -4058,6 +3592,7 @@ static int online_css(struct cgroup_subsys_state *css) | |||
| 4058 | struct cgroup_subsys *ss = css->ss; | 3592 | struct cgroup_subsys *ss = css->ss; |
| 4059 | int ret = 0; | 3593 | int ret = 0; |
| 4060 | 3594 | ||
| 3595 | lockdep_assert_held(&cgroup_tree_mutex); | ||
| 4061 | lockdep_assert_held(&cgroup_mutex); | 3596 | lockdep_assert_held(&cgroup_mutex); |
| 4062 | 3597 | ||
| 4063 | if (ss->css_online) | 3598 | if (ss->css_online) |
| @@ -4065,7 +3600,7 @@ static int online_css(struct cgroup_subsys_state *css) | |||
| 4065 | if (!ret) { | 3600 | if (!ret) { |
| 4066 | css->flags |= CSS_ONLINE; | 3601 | css->flags |= CSS_ONLINE; |
| 4067 | css->cgroup->nr_css++; | 3602 | css->cgroup->nr_css++; |
| 4068 | rcu_assign_pointer(css->cgroup->subsys[ss->subsys_id], css); | 3603 | rcu_assign_pointer(css->cgroup->subsys[ss->id], css); |
| 4069 | } | 3604 | } |
| 4070 | return ret; | 3605 | return ret; |
| 4071 | } | 3606 | } |
| @@ -4075,6 +3610,7 @@ static void offline_css(struct cgroup_subsys_state *css) | |||
| 4075 | { | 3610 | { |
| 4076 | struct cgroup_subsys *ss = css->ss; | 3611 | struct cgroup_subsys *ss = css->ss; |
| 4077 | 3612 | ||
| 3613 | lockdep_assert_held(&cgroup_tree_mutex); | ||
| 4078 | lockdep_assert_held(&cgroup_mutex); | 3614 | lockdep_assert_held(&cgroup_mutex); |
| 4079 | 3615 | ||
| 4080 | if (!(css->flags & CSS_ONLINE)) | 3616 | if (!(css->flags & CSS_ONLINE)) |
| @@ -4085,7 +3621,7 @@ static void offline_css(struct cgroup_subsys_state *css) | |||
| 4085 | 3621 | ||
| 4086 | css->flags &= ~CSS_ONLINE; | 3622 | css->flags &= ~CSS_ONLINE; |
| 4087 | css->cgroup->nr_css--; | 3623 | css->cgroup->nr_css--; |
| 4088 | RCU_INIT_POINTER(css->cgroup->subsys[ss->subsys_id], css); | 3624 | RCU_INIT_POINTER(css->cgroup->subsys[ss->id], css); |
| 4089 | } | 3625 | } |
| 4090 | 3626 | ||
| 4091 | /** | 3627 | /** |
| @@ -4103,7 +3639,6 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) | |||
| 4103 | struct cgroup_subsys_state *css; | 3639 | struct cgroup_subsys_state *css; |
| 4104 | int err; | 3640 | int err; |
| 4105 | 3641 | ||
| 4106 | lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); | ||
| 4107 | lockdep_assert_held(&cgroup_mutex); | 3642 | lockdep_assert_held(&cgroup_mutex); |
| 4108 | 3643 | ||
| 4109 | css = ss->css_alloc(cgroup_css(parent, ss)); | 3644 | css = ss->css_alloc(cgroup_css(parent, ss)); |
| @@ -4116,7 +3651,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) | |||
| 4116 | 3651 | ||
| 4117 | init_css(css, ss, cgrp); | 3652 | init_css(css, ss, cgrp); |
| 4118 | 3653 | ||
| 4119 | err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id); | 3654 | err = cgroup_populate_dir(cgrp, 1 << ss->id); |
| 4120 | if (err) | 3655 | if (err) |
| 4121 | goto err_free_percpu_ref; | 3656 | goto err_free_percpu_ref; |
| 4122 | 3657 | ||
| @@ -4124,9 +3659,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) | |||
| 4124 | if (err) | 3659 | if (err) |
| 4125 | goto err_clear_dir; | 3660 | goto err_clear_dir; |
| 4126 | 3661 | ||
| 4127 | dget(cgrp->dentry); | 3662 | cgroup_get(cgrp); |
| 4128 | css_get(css->parent); | 3663 | css_get(css->parent); |
| 4129 | 3664 | ||
| 3665 | cgrp->subsys_mask |= 1 << ss->id; | ||
| 3666 | |||
| 4130 | if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && | 3667 | if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && |
| 4131 | parent->parent) { | 3668 | parent->parent) { |
| 4132 | pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", | 3669 | pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", |
| @@ -4139,7 +3676,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) | |||
| 4139 | return 0; | 3676 | return 0; |
| 4140 | 3677 | ||
| 4141 | err_clear_dir: | 3678 | err_clear_dir: |
| 4142 | cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id); | 3679 | cgroup_clear_dir(css->cgroup, 1 << css->ss->id); |
| 4143 | err_free_percpu_ref: | 3680 | err_free_percpu_ref: |
| 4144 | percpu_ref_cancel_init(&css->refcnt); | 3681 | percpu_ref_cancel_init(&css->refcnt); |
| 4145 | err_free_css: | 3682 | err_free_css: |
| @@ -4147,35 +3684,34 @@ err_free_css: | |||
| 4147 | return err; | 3684 | return err; |
| 4148 | } | 3685 | } |
| 4149 | 3686 | ||
| 4150 | /* | 3687 | /** |
| 4151 | * cgroup_create - create a cgroup | 3688 | * cgroup_create - create a cgroup |
| 4152 | * @parent: cgroup that will be parent of the new cgroup | 3689 | * @parent: cgroup that will be parent of the new cgroup |
| 4153 | * @dentry: dentry of the new cgroup | 3690 | * @name: name of the new cgroup |
| 4154 | * @mode: mode to set on new inode | 3691 | * @mode: mode to set on new cgroup |
| 4155 | * | ||
| 4156 | * Must be called with the mutex on the parent inode held | ||
| 4157 | */ | 3692 | */ |
| 4158 | static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | 3693 | static long cgroup_create(struct cgroup *parent, const char *name, |
| 4159 | umode_t mode) | 3694 | umode_t mode) |
| 4160 | { | 3695 | { |
| 4161 | struct cgroup *cgrp; | 3696 | struct cgroup *cgrp; |
| 4162 | struct cgroup_name *name; | 3697 | struct cgroup_root *root = parent->root; |
| 4163 | struct cgroupfs_root *root = parent->root; | ||
| 4164 | int ssid, err; | 3698 | int ssid, err; |
| 4165 | struct cgroup_subsys *ss; | 3699 | struct cgroup_subsys *ss; |
| 4166 | struct super_block *sb = root->sb; | 3700 | struct kernfs_node *kn; |
| 3701 | |||
| 3702 | /* | ||
| 3703 | * XXX: The default hierarchy isn't fully implemented yet. Block | ||
| 3704 | * !root cgroup creation on it for now. | ||
| 3705 | */ | ||
| 3706 | if (root == &cgrp_dfl_root) | ||
| 3707 | return -EINVAL; | ||
| 4167 | 3708 | ||
| 4168 | /* allocate the cgroup and its ID, 0 is reserved for the root */ | 3709 | /* allocate the cgroup and its ID, 0 is reserved for the root */ |
| 4169 | cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL); | 3710 | cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL); |
| 4170 | if (!cgrp) | 3711 | if (!cgrp) |
| 4171 | return -ENOMEM; | 3712 | return -ENOMEM; |
| 4172 | 3713 | ||
| 4173 | name = cgroup_alloc_name(dentry); | 3714 | mutex_lock(&cgroup_tree_mutex); |
| 4174 | if (!name) { | ||
| 4175 | err = -ENOMEM; | ||
| 4176 | goto err_free_cgrp; | ||
| 4177 | } | ||
| 4178 | rcu_assign_pointer(cgrp->name, name); | ||
| 4179 | 3715 | ||
| 4180 | /* | 3716 | /* |
| 4181 | * Only live parents can have children. Note that the liveliness | 3717 | * Only live parents can have children. Note that the liveliness |
| @@ -4186,7 +3722,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
| 4186 | */ | 3722 | */ |
| 4187 | if (!cgroup_lock_live_group(parent)) { | 3723 | if (!cgroup_lock_live_group(parent)) { |
| 4188 | err = -ENODEV; | 3724 | err = -ENODEV; |
| 4189 | goto err_free_name; | 3725 | goto err_unlock_tree; |
| 4190 | } | 3726 | } |
| 4191 | 3727 | ||
| 4192 | /* | 3728 | /* |
| @@ -4199,18 +3735,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
| 4199 | goto err_unlock; | 3735 | goto err_unlock; |
| 4200 | } | 3736 | } |
| 4201 | 3737 | ||
| 4202 | /* Grab a reference on the superblock so the hierarchy doesn't | ||
| 4203 | * get deleted on unmount if there are child cgroups. This | ||
| 4204 | * can be done outside cgroup_mutex, since the sb can't | ||
| 4205 | * disappear while someone has an open control file on the | ||
| 4206 | * fs */ | ||
| 4207 | atomic_inc(&sb->s_active); | ||
| 4208 | |||
| 4209 | init_cgroup_housekeeping(cgrp); | 3738 | init_cgroup_housekeeping(cgrp); |
| 4210 | 3739 | ||
| 4211 | dentry->d_fsdata = cgrp; | ||
| 4212 | cgrp->dentry = dentry; | ||
| 4213 | |||
| 4214 | cgrp->parent = parent; | 3740 | cgrp->parent = parent; |
| 4215 | cgrp->dummy_css.parent = &parent->dummy_css; | 3741 | cgrp->dummy_css.parent = &parent->dummy_css; |
| 4216 | cgrp->root = parent->root; | 3742 | cgrp->root = parent->root; |
| @@ -4221,24 +3747,26 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
| 4221 | if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags)) | 3747 | if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags)) |
| 4222 | set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); | 3748 | set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); |
| 4223 | 3749 | ||
| 3750 | /* create the directory */ | ||
| 3751 | kn = kernfs_create_dir(parent->kn, name, mode, cgrp); | ||
| 3752 | if (IS_ERR(kn)) { | ||
| 3753 | err = PTR_ERR(kn); | ||
| 3754 | goto err_free_id; | ||
| 3755 | } | ||
| 3756 | cgrp->kn = kn; | ||
| 3757 | |||
| 4224 | /* | 3758 | /* |
| 4225 | * Create directory. cgroup_create_file() returns with the new | 3759 | * This extra ref will be put in cgroup_free_fn() and guarantees |
| 4226 | * directory locked on success so that it can be populated without | 3760 | * that @cgrp->kn is always accessible. |
| 4227 | * dropping cgroup_mutex. | ||
| 4228 | */ | 3761 | */ |
| 4229 | err = cgroup_create_file(dentry, S_IFDIR | mode, sb); | 3762 | kernfs_get(kn); |
| 4230 | if (err < 0) | ||
| 4231 | goto err_free_id; | ||
| 4232 | lockdep_assert_held(&dentry->d_inode->i_mutex); | ||
| 4233 | 3763 | ||
| 4234 | cgrp->serial_nr = cgroup_serial_nr_next++; | 3764 | cgrp->serial_nr = cgroup_serial_nr_next++; |
| 4235 | 3765 | ||
| 4236 | /* allocation complete, commit to creation */ | 3766 | /* allocation complete, commit to creation */ |
| 4237 | list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); | 3767 | list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); |
| 4238 | root->number_of_cgroups++; | 3768 | atomic_inc(&root->nr_cgrps); |
| 4239 | 3769 | cgroup_get(parent); | |
| 4240 | /* hold a ref to the parent's dentry */ | ||
| 4241 | dget(parent->dentry); | ||
| 4242 | 3770 | ||
| 4243 | /* | 3771 | /* |
| 4244 | * @cgrp is now fully operational. If something fails after this | 3772 | * @cgrp is now fully operational. If something fails after this |
| @@ -4246,49 +3774,66 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
| 4246 | */ | 3774 | */ |
| 4247 | idr_replace(&root->cgroup_idr, cgrp, cgrp->id); | 3775 | idr_replace(&root->cgroup_idr, cgrp, cgrp->id); |
| 4248 | 3776 | ||
| 3777 | err = cgroup_kn_set_ugid(kn); | ||
| 3778 | if (err) | ||
| 3779 | goto err_destroy; | ||
| 3780 | |||
| 4249 | err = cgroup_addrm_files(cgrp, cgroup_base_files, true); | 3781 | err = cgroup_addrm_files(cgrp, cgroup_base_files, true); |
| 4250 | if (err) | 3782 | if (err) |
| 4251 | goto err_destroy; | 3783 | goto err_destroy; |
| 4252 | 3784 | ||
| 4253 | /* let's create and online css's */ | 3785 | /* let's create and online css's */ |
| 4254 | for_each_subsys(ss, ssid) { | 3786 | for_each_subsys(ss, ssid) { |
| 4255 | if (root->subsys_mask & (1 << ssid)) { | 3787 | if (root->cgrp.subsys_mask & (1 << ssid)) { |
| 4256 | err = create_css(cgrp, ss); | 3788 | err = create_css(cgrp, ss); |
| 4257 | if (err) | 3789 | if (err) |
| 4258 | goto err_destroy; | 3790 | goto err_destroy; |
| 4259 | } | 3791 | } |
| 4260 | } | 3792 | } |
| 4261 | 3793 | ||
| 3794 | kernfs_activate(kn); | ||
| 3795 | |||
| 4262 | mutex_unlock(&cgroup_mutex); | 3796 | mutex_unlock(&cgroup_mutex); |
| 4263 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | 3797 | mutex_unlock(&cgroup_tree_mutex); |
| 4264 | 3798 | ||
| 4265 | return 0; | 3799 | return 0; |
| 4266 | 3800 | ||
| 4267 | err_free_id: | 3801 | err_free_id: |
| 4268 | idr_remove(&root->cgroup_idr, cgrp->id); | 3802 | idr_remove(&root->cgroup_idr, cgrp->id); |
| 4269 | /* Release the reference count that we took on the superblock */ | ||
| 4270 | deactivate_super(sb); | ||
| 4271 | err_unlock: | 3803 | err_unlock: |
| 4272 | mutex_unlock(&cgroup_mutex); | 3804 | mutex_unlock(&cgroup_mutex); |
| 4273 | err_free_name: | 3805 | err_unlock_tree: |
| 4274 | kfree(rcu_dereference_raw(cgrp->name)); | 3806 | mutex_unlock(&cgroup_tree_mutex); |
| 4275 | err_free_cgrp: | ||
| 4276 | kfree(cgrp); | 3807 | kfree(cgrp); |
| 4277 | return err; | 3808 | return err; |
| 4278 | 3809 | ||
| 4279 | err_destroy: | 3810 | err_destroy: |
| 4280 | cgroup_destroy_locked(cgrp); | 3811 | cgroup_destroy_locked(cgrp); |
| 4281 | mutex_unlock(&cgroup_mutex); | 3812 | mutex_unlock(&cgroup_mutex); |
| 4282 | mutex_unlock(&dentry->d_inode->i_mutex); | 3813 | mutex_unlock(&cgroup_tree_mutex); |
| 4283 | return err; | 3814 | return err; |
| 4284 | } | 3815 | } |
| 4285 | 3816 | ||
| 4286 | static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | 3817 | static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, |
| 3818 | umode_t mode) | ||
| 4287 | { | 3819 | { |
| 4288 | struct cgroup *c_parent = dentry->d_parent->d_fsdata; | 3820 | struct cgroup *parent = parent_kn->priv; |
| 3821 | int ret; | ||
| 3822 | |||
| 3823 | /* | ||
| 3824 | * cgroup_create() grabs cgroup_tree_mutex which nests outside | ||
| 3825 | * kernfs active_ref and cgroup_create() already synchronizes | ||
| 3826 | * properly against removal through cgroup_lock_live_group(). | ||
| 3827 | * Break it before calling cgroup_create(). | ||
| 3828 | */ | ||
| 3829 | cgroup_get(parent); | ||
| 3830 | kernfs_break_active_protection(parent_kn); | ||
| 3831 | |||
| 3832 | ret = cgroup_create(parent, name, mode); | ||
| 4289 | 3833 | ||
| 4290 | /* the vfs holds inode->i_mutex already */ | 3834 | kernfs_unbreak_active_protection(parent_kn); |
| 4291 | return cgroup_create(c_parent, dentry, mode | S_IFDIR); | 3835 | cgroup_put(parent); |
| 3836 | return ret; | ||
| 4292 | } | 3837 | } |
| 4293 | 3838 | ||
| 4294 | /* | 3839 | /* |
| @@ -4301,6 +3846,7 @@ static void css_killed_work_fn(struct work_struct *work) | |||
| 4301 | container_of(work, struct cgroup_subsys_state, destroy_work); | 3846 | container_of(work, struct cgroup_subsys_state, destroy_work); |
| 4302 | struct cgroup *cgrp = css->cgroup; | 3847 | struct cgroup *cgrp = css->cgroup; |
| 4303 | 3848 | ||
| 3849 | mutex_lock(&cgroup_tree_mutex); | ||
| 4304 | mutex_lock(&cgroup_mutex); | 3850 | mutex_lock(&cgroup_mutex); |
| 4305 | 3851 | ||
| 4306 | /* | 3852 | /* |
| @@ -4318,6 +3864,7 @@ static void css_killed_work_fn(struct work_struct *work) | |||
| 4318 | cgroup_destroy_css_killed(cgrp); | 3864 | cgroup_destroy_css_killed(cgrp); |
| 4319 | 3865 | ||
| 4320 | mutex_unlock(&cgroup_mutex); | 3866 | mutex_unlock(&cgroup_mutex); |
| 3867 | mutex_unlock(&cgroup_tree_mutex); | ||
| 4321 | 3868 | ||
| 4322 | /* | 3869 | /* |
| 4323 | * Put the css refs from kill_css(). Each css holds an extra | 3870 | * Put the css refs from kill_css(). Each css holds an extra |
| @@ -4339,18 +3886,15 @@ static void css_killed_ref_fn(struct percpu_ref *ref) | |||
| 4339 | queue_work(cgroup_destroy_wq, &css->destroy_work); | 3886 | queue_work(cgroup_destroy_wq, &css->destroy_work); |
| 4340 | } | 3887 | } |
| 4341 | 3888 | ||
| 4342 | /** | 3889 | static void __kill_css(struct cgroup_subsys_state *css) |
| 4343 | * kill_css - destroy a css | ||
| 4344 | * @css: css to destroy | ||
| 4345 | * | ||
| 4346 | * This function initiates destruction of @css by removing cgroup interface | ||
| 4347 | * files and putting its base reference. ->css_offline() will be invoked | ||
| 4348 | * asynchronously once css_tryget() is guaranteed to fail and when the | ||
| 4349 | * reference count reaches zero, @css will be released. | ||
| 4350 | */ | ||
| 4351 | static void kill_css(struct cgroup_subsys_state *css) | ||
| 4352 | { | 3890 | { |
| 4353 | cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id); | 3891 | lockdep_assert_held(&cgroup_tree_mutex); |
| 3892 | |||
| 3893 | /* | ||
| 3894 | * This must happen before css is disassociated with its cgroup. | ||
| 3895 | * See seq_css() for details. | ||
| 3896 | */ | ||
| 3897 | cgroup_clear_dir(css->cgroup, 1 << css->ss->id); | ||
| 4354 | 3898 | ||
| 4355 | /* | 3899 | /* |
| 4356 | * Killing would put the base ref, but we need to keep it alive | 3900 | * Killing would put the base ref, but we need to keep it alive |
| @@ -4372,6 +3916,28 @@ static void kill_css(struct cgroup_subsys_state *css) | |||
| 4372 | } | 3916 | } |
| 4373 | 3917 | ||
| 4374 | /** | 3918 | /** |
| 3919 | * kill_css - destroy a css | ||
| 3920 | * @css: css to destroy | ||
| 3921 | * | ||
| 3922 | * This function initiates destruction of @css by removing cgroup interface | ||
| 3923 | * files and putting its base reference. ->css_offline() will be invoked | ||
| 3924 | * asynchronously once css_tryget() is guaranteed to fail and when the | ||
| 3925 | * reference count reaches zero, @css will be released. | ||
| 3926 | */ | ||
| 3927 | static void kill_css(struct cgroup_subsys_state *css) | ||
| 3928 | { | ||
| 3929 | struct cgroup *cgrp = css->cgroup; | ||
| 3930 | |||
| 3931 | lockdep_assert_held(&cgroup_tree_mutex); | ||
| 3932 | |||
| 3933 | /* if already killed, noop */ | ||
| 3934 | if (cgrp->subsys_mask & (1 << css->ss->id)) { | ||
| 3935 | cgrp->subsys_mask &= ~(1 << css->ss->id); | ||
| 3936 | __kill_css(css); | ||
| 3937 | } | ||
| 3938 | } | ||
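The kill path above hinges on percpu_ref_kill_and_confirm(): css_tryget() starts failing immediately, and the confirm callback runs only once that is guaranteed on all CPUs. A stripped-down sketch of the same shape, using a hypothetical object type and a hypothetical workqueue (my_destroy_wq) rather than the css specifics:

	struct my_obj {
		struct percpu_ref	refcnt;
		struct work_struct	destroy_work;
	};

	static void my_obj_confirm_killed(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, refcnt);

		/* no new tryget can succeed past this point */
		queue_work(my_destroy_wq, &obj->destroy_work);
	}

	static void my_obj_kill(struct my_obj *obj)
	{
		/* keep @obj alive across the confirmation and offlining */
		percpu_ref_get(&obj->refcnt);
		percpu_ref_kill_and_confirm(&obj->refcnt, my_obj_confirm_killed);
	}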
| 3939 | |||
| 3940 | /** | ||
| 4375 | * cgroup_destroy_locked - the first stage of cgroup destruction | 3941 | * cgroup_destroy_locked - the first stage of cgroup destruction |
| 4376 | * @cgrp: cgroup to be destroyed | 3942 | * @cgrp: cgroup to be destroyed |
| 4377 | * | 3943 | * |
| @@ -4398,22 +3964,21 @@ static void kill_css(struct cgroup_subsys_state *css) | |||
| 4398 | static int cgroup_destroy_locked(struct cgroup *cgrp) | 3964 | static int cgroup_destroy_locked(struct cgroup *cgrp) |
| 4399 | __releases(&cgroup_mutex) __acquires(&cgroup_mutex) | 3965 | __releases(&cgroup_mutex) __acquires(&cgroup_mutex) |
| 4400 | { | 3966 | { |
| 4401 | struct dentry *d = cgrp->dentry; | ||
| 4402 | struct cgroup_subsys_state *css; | ||
| 4403 | struct cgroup *child; | 3967 | struct cgroup *child; |
| 3968 | struct cgroup_subsys_state *css; | ||
| 4404 | bool empty; | 3969 | bool empty; |
| 4405 | int ssid; | 3970 | int ssid; |
| 4406 | 3971 | ||
| 4407 | lockdep_assert_held(&d->d_inode->i_mutex); | 3972 | lockdep_assert_held(&cgroup_tree_mutex); |
| 4408 | lockdep_assert_held(&cgroup_mutex); | 3973 | lockdep_assert_held(&cgroup_mutex); |
| 4409 | 3974 | ||
| 4410 | /* | 3975 | /* |
| 4411 | * css_set_lock synchronizes access to ->cset_links and prevents | 3976 | * css_set_rwsem synchronizes access to ->cset_links and prevents |
| 4412 | * @cgrp from being removed while __put_css_set() is in progress. | 3977 | * @cgrp from being removed while put_css_set() is in progress. |
| 4413 | */ | 3978 | */ |
| 4414 | read_lock(&css_set_lock); | 3979 | down_read(&css_set_rwsem); |
| 4415 | empty = list_empty(&cgrp->cset_links); | 3980 | empty = list_empty(&cgrp->cset_links); |
| 4416 | read_unlock(&css_set_lock); | 3981 | up_read(&css_set_rwsem); |
| 4417 | if (!empty) | 3982 | if (!empty) |
| 4418 | return -EBUSY; | 3983 | return -EBUSY; |
| 4419 | 3984 | ||
| @@ -4434,14 +3999,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
| 4434 | return -EBUSY; | 3999 | return -EBUSY; |
| 4435 | 4000 | ||
| 4436 | /* | 4001 | /* |
| 4437 | * Initiate massacre of all css's. cgroup_destroy_css_killed() | ||
| 4438 | * will be invoked to perform the rest of destruction once the | ||
| 4439 | * percpu refs of all css's are confirmed to be killed. | ||
| 4440 | */ | ||
| 4441 | for_each_css(css, ssid, cgrp) | ||
| 4442 | kill_css(css); | ||
| 4443 | |||
| 4444 | /* | ||
| 4445 | * Mark @cgrp dead. This prevents further task migration and child | 4002 | * Mark @cgrp dead. This prevents further task migration and child |
| 4446 | * creation by disabling cgroup_lock_live_group(). Note that | 4003 | * creation by disabling cgroup_lock_live_group(). Note that |
| 4447 | * CGRP_DEAD assertion is depended upon by css_next_child() to | 4004 | * CGRP_DEAD assertion is depended upon by css_next_child() to |
| @@ -4450,6 +4007,17 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
| 4450 | */ | 4007 | */ |
| 4451 | set_bit(CGRP_DEAD, &cgrp->flags); | 4008 | set_bit(CGRP_DEAD, &cgrp->flags); |
| 4452 | 4009 | ||
| 4010 | /* | ||
| 4011 | * Initiate massacre of all css's. cgroup_destroy_css_killed() | ||
| 4012 | * will be invoked to perform the rest of destruction once the | ||
| 4013 | * percpu refs of all css's are confirmed to be killed. This | ||
| 4014 | * involves removing the subsystem's files, so cgroup_mutex is dropped across the kill_css() calls below. | ||
| 4015 | */ | ||
| 4016 | mutex_unlock(&cgroup_mutex); | ||
| 4017 | for_each_css(css, ssid, cgrp) | ||
| 4018 | kill_css(css); | ||
| 4019 | mutex_lock(&cgroup_mutex); | ||
| 4020 | |||
| 4453 | /* CGRP_DEAD is set, remove from ->release_list for the last time */ | 4021 | /* CGRP_DEAD is set, remove from ->release_list for the last time */ |
| 4454 | raw_spin_lock(&release_list_lock); | 4022 | raw_spin_lock(&release_list_lock); |
| 4455 | if (!list_empty(&cgrp->release_list)) | 4023 | if (!list_empty(&cgrp->release_list)) |
| @@ -4465,14 +4033,20 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
| 4465 | if (!cgrp->nr_css) | 4033 | if (!cgrp->nr_css) |
| 4466 | cgroup_destroy_css_killed(cgrp); | 4034 | cgroup_destroy_css_killed(cgrp); |
| 4467 | 4035 | ||
| 4036 | /* remove @cgrp directory along with the base files */ | ||
| 4037 | mutex_unlock(&cgroup_mutex); | ||
| 4038 | |||
| 4468 | /* | 4039 | /* |
| 4469 | * Clear the base files and remove @cgrp directory. The removal | 4040 | * There are two control paths which try to determine cgroup from |
| 4470 | * puts the base ref but we aren't quite done with @cgrp yet, so | 4041 | * dentry without going through kernfs - cgroupstats_build() and |
| 4471 | * hold onto it. | 4042 | * css_tryget_from_dir(). Those are supported by RCU protecting |
| 4043 | * clearing of cgrp->kn->priv backpointer, which should happen | ||
| 4044 | * after all files under it have been removed. | ||
| 4472 | */ | 4045 | */ |
| 4473 | cgroup_addrm_files(cgrp, cgroup_base_files, false); | 4046 | kernfs_remove(cgrp->kn); /* @cgrp has an extra ref on its kn */ |
| 4474 | dget(d); | 4047 | RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL); |
| 4475 | cgroup_d_remove_dir(d); | 4048 | |
| 4049 | mutex_lock(&cgroup_mutex); | ||
| 4476 | 4050 | ||
| 4477 | return 0; | 4051 | return 0; |
| 4478 | }; | 4052 | }; |
| @@ -4489,72 +4063,82 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) | |||
| 4489 | static void cgroup_destroy_css_killed(struct cgroup *cgrp) | 4063 | static void cgroup_destroy_css_killed(struct cgroup *cgrp) |
| 4490 | { | 4064 | { |
| 4491 | struct cgroup *parent = cgrp->parent; | 4065 | struct cgroup *parent = cgrp->parent; |
| 4492 | struct dentry *d = cgrp->dentry; | ||
| 4493 | 4066 | ||
| 4067 | lockdep_assert_held(&cgroup_tree_mutex); | ||
| 4494 | lockdep_assert_held(&cgroup_mutex); | 4068 | lockdep_assert_held(&cgroup_mutex); |
| 4495 | 4069 | ||
| 4496 | /* delete this cgroup from parent->children */ | 4070 | /* delete this cgroup from parent->children */ |
| 4497 | list_del_rcu(&cgrp->sibling); | 4071 | list_del_rcu(&cgrp->sibling); |
| 4498 | 4072 | ||
| 4499 | dput(d); | 4073 | cgroup_put(cgrp); |
| 4500 | 4074 | ||
| 4501 | set_bit(CGRP_RELEASABLE, &parent->flags); | 4075 | set_bit(CGRP_RELEASABLE, &parent->flags); |
| 4502 | check_for_release(parent); | 4076 | check_for_release(parent); |
| 4503 | } | 4077 | } |
| 4504 | 4078 | ||
| 4505 | static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) | 4079 | static int cgroup_rmdir(struct kernfs_node *kn) |
| 4506 | { | 4080 | { |
| 4507 | int ret; | 4081 | struct cgroup *cgrp = kn->priv; |
| 4508 | 4082 | int ret = 0; | |
| 4509 | mutex_lock(&cgroup_mutex); | ||
| 4510 | ret = cgroup_destroy_locked(dentry->d_fsdata); | ||
| 4511 | mutex_unlock(&cgroup_mutex); | ||
| 4512 | 4083 | ||
| 4513 | return ret; | 4084 | /* |
| 4514 | } | 4085 | * This is self-destruction but @kn can't be removed while this |
| 4086 | * callback is in progress. Let's break active protection. Once | ||
| 4087 | * the protection is broken, @cgrp can be destroyed at any point. | ||
| 4088 | * Pin it so that it stays accessible. | ||
| 4089 | */ | ||
| 4090 | cgroup_get(cgrp); | ||
| 4091 | kernfs_break_active_protection(kn); | ||
| 4515 | 4092 | ||
| 4516 | static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss) | 4093 | mutex_lock(&cgroup_tree_mutex); |
| 4517 | { | 4094 | mutex_lock(&cgroup_mutex); |
| 4518 | INIT_LIST_HEAD(&ss->cftsets); | ||
| 4519 | 4095 | ||
| 4520 | /* | 4096 | /* |
| 4521 | * base_cftset is embedded in subsys itself, no need to worry about | 4097 | * @cgrp might already have been destroyed while we're trying to |
| 4522 | * deregistration. | 4098 | * grab the mutexes. |
| 4523 | */ | 4099 | */ |
| 4524 | if (ss->base_cftypes) { | 4100 | if (!cgroup_is_dead(cgrp)) |
| 4525 | struct cftype *cft; | 4101 | ret = cgroup_destroy_locked(cgrp); |
| 4526 | 4102 | ||
| 4527 | for (cft = ss->base_cftypes; cft->name[0] != '\0'; cft++) | 4103 | mutex_unlock(&cgroup_mutex); |
| 4528 | cft->ss = ss; | 4104 | mutex_unlock(&cgroup_tree_mutex); |
| 4529 | 4105 | ||
| 4530 | ss->base_cftset.cfts = ss->base_cftypes; | 4106 | kernfs_unbreak_active_protection(kn); |
| 4531 | list_add_tail(&ss->base_cftset.node, &ss->cftsets); | 4107 | cgroup_put(cgrp); |
| 4532 | } | 4108 | return ret; |
| 4533 | } | 4109 | } |
| 4534 | 4110 | ||
| 4111 | static struct kernfs_syscall_ops cgroup_kf_syscall_ops = { | ||
| 4112 | .remount_fs = cgroup_remount, | ||
| 4113 | .show_options = cgroup_show_options, | ||
| 4114 | .mkdir = cgroup_mkdir, | ||
| 4115 | .rmdir = cgroup_rmdir, | ||
| 4116 | .rename = cgroup_rename, | ||
| 4117 | }; | ||
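These syscall ops are handed to kernfs when a hierarchy's kernfs root is created; that call is not part of this hunk, so the sketch below is an assumption about how cgroup_setup_root() wires it up (the deactivated-creation flag matches the explicit kernfs_activate() in cgroup_create() above):

	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root))
		return PTR_ERR(root->kf_root);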
| 4118 | |||
| 4535 | static void __init cgroup_init_subsys(struct cgroup_subsys *ss) | 4119 | static void __init cgroup_init_subsys(struct cgroup_subsys *ss) |
| 4536 | { | 4120 | { |
| 4537 | struct cgroup_subsys_state *css; | 4121 | struct cgroup_subsys_state *css; |
| 4538 | 4122 | ||
| 4539 | printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); | 4123 | printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); |
| 4540 | 4124 | ||
| 4125 | mutex_lock(&cgroup_tree_mutex); | ||
| 4541 | mutex_lock(&cgroup_mutex); | 4126 | mutex_lock(&cgroup_mutex); |
| 4542 | 4127 | ||
| 4543 | /* init base cftset */ | 4128 | INIT_LIST_HEAD(&ss->cfts); |
| 4544 | cgroup_init_cftsets(ss); | ||
| 4545 | 4129 | ||
| 4546 | /* Create the top cgroup state for this subsystem */ | 4130 | /* Create the root cgroup state for this subsystem */ |
| 4547 | ss->root = &cgroup_dummy_root; | 4131 | ss->root = &cgrp_dfl_root; |
| 4548 | css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss)); | 4132 | css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss)); |
| 4549 | /* We don't handle early failures gracefully */ | 4133 | /* We don't handle early failures gracefully */ |
| 4550 | BUG_ON(IS_ERR(css)); | 4134 | BUG_ON(IS_ERR(css)); |
| 4551 | init_css(css, ss, cgroup_dummy_top); | 4135 | init_css(css, ss, &cgrp_dfl_root.cgrp); |
| 4552 | 4136 | ||
| 4553 | /* Update the init_css_set to contain a subsys | 4137 | /* Update the init_css_set to contain a subsys |
| 4554 | * pointer to this state - since the subsystem is | 4138 | * pointer to this state - since the subsystem is |
| 4555 | * newly registered, all tasks and hence the | 4139 | * newly registered, all tasks and hence the |
| 4556 | * init_css_set is in the subsystem's top cgroup. */ | 4140 | * init_css_set is in the subsystem's root cgroup. */ |
| 4557 | init_css_set.subsys[ss->subsys_id] = css; | 4141 | init_css_set.subsys[ss->id] = css; |
| 4558 | 4142 | ||
| 4559 | need_forkexit_callback |= ss->fork || ss->exit; | 4143 | need_forkexit_callback |= ss->fork || ss->exit; |
| 4560 | 4144 | ||
| @@ -4565,185 +4149,11 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) | |||
| 4565 | 4149 | ||
| 4566 | BUG_ON(online_css(css)); | 4150 | BUG_ON(online_css(css)); |
| 4567 | 4151 | ||
| 4568 | mutex_unlock(&cgroup_mutex); | 4152 | cgrp_dfl_root.cgrp.subsys_mask |= 1 << ss->id; |
| 4569 | |||
| 4570 | /* this function shouldn't be used with modular subsystems, since they | ||
| 4571 | * need to register a subsys_id, among other things */ | ||
| 4572 | BUG_ON(ss->module); | ||
| 4573 | } | ||
| 4574 | |||
| 4575 | /** | ||
| 4576 | * cgroup_load_subsys: load and register a modular subsystem at runtime | ||
| 4577 | * @ss: the subsystem to load | ||
| 4578 | * | ||
| 4579 | * This function should be called in a modular subsystem's initcall. If the | ||
| 4580 | * subsystem is built as a module, it will be assigned a new subsys_id and set | ||
| 4581 | * up for use. If the subsystem is built-in anyway, work is delegated to the | ||
| 4582 | * simpler cgroup_init_subsys. | ||
| 4583 | */ | ||
| 4584 | int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss) | ||
| 4585 | { | ||
| 4586 | struct cgroup_subsys_state *css; | ||
| 4587 | int i, ret; | ||
| 4588 | struct hlist_node *tmp; | ||
| 4589 | struct css_set *cset; | ||
| 4590 | unsigned long key; | ||
| 4591 | |||
| 4592 | /* check name and function validity */ | ||
| 4593 | if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN || | ||
| 4594 | ss->css_alloc == NULL || ss->css_free == NULL) | ||
| 4595 | return -EINVAL; | ||
| 4596 | |||
| 4597 | /* | ||
| 4598 | * we don't support callbacks in modular subsystems. this check is | ||
| 4599 | * before the ss->module check for consistency; a subsystem that could | ||
| 4600 | * be a module should still have no callbacks even if the user isn't | ||
| 4601 | * compiling it as one. | ||
| 4602 | */ | ||
| 4603 | if (ss->fork || ss->exit) | ||
| 4604 | return -EINVAL; | ||
| 4605 | |||
| 4606 | /* | ||
| 4607 | * an optionally modular subsystem is built-in: we want to do nothing, | ||
| 4608 | * since cgroup_init_subsys will have already taken care of it. | ||
| 4609 | */ | ||
| 4610 | if (ss->module == NULL) { | ||
| 4611 | /* a sanity check */ | ||
| 4612 | BUG_ON(cgroup_subsys[ss->subsys_id] != ss); | ||
| 4613 | return 0; | ||
| 4614 | } | ||
| 4615 | |||
| 4616 | /* init base cftset */ | ||
| 4617 | cgroup_init_cftsets(ss); | ||
| 4618 | |||
| 4619 | mutex_lock(&cgroup_mutex); | ||
| 4620 | mutex_lock(&cgroup_root_mutex); | ||
| 4621 | cgroup_subsys[ss->subsys_id] = ss; | ||
| 4622 | |||
| 4623 | /* | ||
| 4624 | * no ss->css_alloc seems to need anything important in the ss | ||
| 4625 | * struct, so this can happen first (i.e. before the dummy root | ||
| 4626 | * attachment). | ||
| 4627 | */ | ||
| 4628 | css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss)); | ||
| 4629 | if (IS_ERR(css)) { | ||
| 4630 | /* failure case - need to deassign the cgroup_subsys[] slot. */ | ||
| 4631 | cgroup_subsys[ss->subsys_id] = NULL; | ||
| 4632 | mutex_unlock(&cgroup_root_mutex); | ||
| 4633 | mutex_unlock(&cgroup_mutex); | ||
| 4634 | return PTR_ERR(css); | ||
| 4635 | } | ||
| 4636 | |||
| 4637 | ss->root = &cgroup_dummy_root; | ||
| 4638 | |||
| 4639 | /* our new subsystem will be attached to the dummy hierarchy. */ | ||
| 4640 | init_css(css, ss, cgroup_dummy_top); | ||
| 4641 | |||
| 4642 | /* | ||
| 4643 | * Now we need to entangle the css into the existing css_sets. unlike | ||
| 4644 | * in cgroup_init_subsys, there are now multiple css_sets, so each one | ||
| 4645 | * will need a new pointer to it; done by iterating the css_set_table. | ||
| 4646 | * furthermore, modifying the existing css_sets will corrupt the hash | ||
| 4647 | * table state, so each changed css_set will need its hash recomputed. | ||
| 4648 | * this is all done under the css_set_lock. | ||
| 4649 | */ | ||
| 4650 | write_lock(&css_set_lock); | ||
| 4651 | hash_for_each_safe(css_set_table, i, tmp, cset, hlist) { | ||
| 4652 | /* skip entries that we already rehashed */ | ||
| 4653 | if (cset->subsys[ss->subsys_id]) | ||
| 4654 | continue; | ||
| 4655 | /* remove existing entry */ | ||
| 4656 | hash_del(&cset->hlist); | ||
| 4657 | /* set new value */ | ||
| 4658 | cset->subsys[ss->subsys_id] = css; | ||
| 4659 | /* recompute hash and restore entry */ | ||
| 4660 | key = css_set_hash(cset->subsys); | ||
| 4661 | hash_add(css_set_table, &cset->hlist, key); | ||
| 4662 | } | ||
| 4663 | write_unlock(&css_set_lock); | ||
| 4664 | |||
| 4665 | ret = online_css(css); | ||
| 4666 | if (ret) { | ||
| 4667 | ss->css_free(css); | ||
| 4668 | goto err_unload; | ||
| 4669 | } | ||
| 4670 | |||
| 4671 | /* success! */ | ||
| 4672 | mutex_unlock(&cgroup_root_mutex); | ||
| 4673 | mutex_unlock(&cgroup_mutex); | ||
| 4674 | return 0; | ||
| 4675 | |||
| 4676 | err_unload: | ||
| 4677 | mutex_unlock(&cgroup_root_mutex); | ||
| 4678 | mutex_unlock(&cgroup_mutex); | ||
| 4679 | /* @ss can't be mounted here as try_module_get() would fail */ | ||
| 4680 | cgroup_unload_subsys(ss); | ||
| 4681 | return ret; | ||
| 4682 | } | ||
| 4683 | EXPORT_SYMBOL_GPL(cgroup_load_subsys); | ||
| 4684 | |||
| 4685 | /** | ||
| 4686 | * cgroup_unload_subsys: unload a modular subsystem | ||
| 4687 | * @ss: the subsystem to unload | ||
| 4688 | * | ||
| 4689 | * This function should be called in a modular subsystem's exitcall. When this | ||
| 4690 | * function is invoked, the refcount on the subsystem's module will be 0, so | ||
| 4691 | * the subsystem will not be attached to any hierarchy. | ||
| 4692 | */ | ||
| 4693 | void cgroup_unload_subsys(struct cgroup_subsys *ss) | ||
| 4694 | { | ||
| 4695 | struct cgrp_cset_link *link; | ||
| 4696 | struct cgroup_subsys_state *css; | ||
| 4697 | |||
| 4698 | BUG_ON(ss->module == NULL); | ||
| 4699 | |||
| 4700 | /* | ||
| 4701 | * we shouldn't be called if the subsystem is in use, and the use of | ||
| 4702 | * try_module_get() in rebind_subsystems() should ensure that it | ||
| 4703 | * doesn't start being used while we're killing it off. | ||
| 4704 | */ | ||
| 4705 | BUG_ON(ss->root != &cgroup_dummy_root); | ||
| 4706 | |||
| 4707 | mutex_lock(&cgroup_mutex); | ||
| 4708 | mutex_lock(&cgroup_root_mutex); | ||
| 4709 | |||
| 4710 | css = cgroup_css(cgroup_dummy_top, ss); | ||
| 4711 | if (css) | ||
| 4712 | offline_css(css); | ||
| 4713 | 4153 | ||
| 4714 | /* deassign the subsys_id */ | ||
| 4715 | cgroup_subsys[ss->subsys_id] = NULL; | ||
| 4716 | |||
| 4717 | /* | ||
| 4718 | * disentangle the css from all css_sets attached to the dummy | ||
| 4719 | * top. as in loading, we need to pay our respects to the hashtable | ||
| 4720 | * gods. | ||
| 4721 | */ | ||
| 4722 | write_lock(&css_set_lock); | ||
| 4723 | list_for_each_entry(link, &cgroup_dummy_top->cset_links, cset_link) { | ||
| 4724 | struct css_set *cset = link->cset; | ||
| 4725 | unsigned long key; | ||
| 4726 | |||
| 4727 | hash_del(&cset->hlist); | ||
| 4728 | cset->subsys[ss->subsys_id] = NULL; | ||
| 4729 | key = css_set_hash(cset->subsys); | ||
| 4730 | hash_add(css_set_table, &cset->hlist, key); | ||
| 4731 | } | ||
| 4732 | write_unlock(&css_set_lock); | ||
| 4733 | |||
| 4734 | /* | ||
| 4735 | * remove subsystem's css from the cgroup_dummy_top and free it - | ||
| 4736 | * need to free before marking as null because ss->css_free needs | ||
| 4737 | * the cgrp->subsys pointer to find their state. | ||
| 4738 | */ | ||
| 4739 | if (css) | ||
| 4740 | ss->css_free(css); | ||
| 4741 | RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL); | ||
| 4742 | |||
| 4743 | mutex_unlock(&cgroup_root_mutex); | ||
| 4744 | mutex_unlock(&cgroup_mutex); | 4154 | mutex_unlock(&cgroup_mutex); |
| 4155 | mutex_unlock(&cgroup_tree_mutex); | ||
| 4745 | } | 4156 | } |
| 4746 | EXPORT_SYMBOL_GPL(cgroup_unload_subsys); | ||
| 4747 | 4157 | ||
| 4748 | /** | 4158 | /** |
| 4749 | * cgroup_init_early - cgroup initialization at system boot | 4159 | * cgroup_init_early - cgroup initialization at system boot |
| @@ -4753,34 +4163,24 @@ EXPORT_SYMBOL_GPL(cgroup_unload_subsys); | |||
| 4753 | */ | 4163 | */ |
| 4754 | int __init cgroup_init_early(void) | 4164 | int __init cgroup_init_early(void) |
| 4755 | { | 4165 | { |
| 4166 | static struct cgroup_sb_opts __initdata opts = | ||
| 4167 | { .flags = CGRP_ROOT_SANE_BEHAVIOR }; | ||
| 4756 | struct cgroup_subsys *ss; | 4168 | struct cgroup_subsys *ss; |
| 4757 | int i; | 4169 | int i; |
| 4758 | 4170 | ||
| 4759 | atomic_set(&init_css_set.refcount, 1); | 4171 | init_cgroup_root(&cgrp_dfl_root, &opts); |
| 4760 | INIT_LIST_HEAD(&init_css_set.cgrp_links); | ||
| 4761 | INIT_LIST_HEAD(&init_css_set.tasks); | ||
| 4762 | INIT_HLIST_NODE(&init_css_set.hlist); | ||
| 4763 | css_set_count = 1; | ||
| 4764 | init_cgroup_root(&cgroup_dummy_root); | ||
| 4765 | cgroup_root_count = 1; | ||
| 4766 | RCU_INIT_POINTER(init_task.cgroups, &init_css_set); | 4172 | RCU_INIT_POINTER(init_task.cgroups, &init_css_set); |
| 4767 | 4173 | ||
| 4768 | init_cgrp_cset_link.cset = &init_css_set; | 4174 | for_each_subsys(ss, i) { |
| 4769 | init_cgrp_cset_link.cgrp = cgroup_dummy_top; | 4175 | WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id, |
| 4770 | list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links); | 4176 | "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n", |
| 4771 | list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); | 4177 | i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, |
| 4772 | 4178 | ss->id, ss->name); | |
| 4773 | /* at bootup time, we don't worry about modular subsystems */ | 4179 | WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN, |
| 4774 | for_each_builtin_subsys(ss, i) { | 4180 | "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]); |
| 4775 | BUG_ON(!ss->name); | 4181 | |
| 4776 | BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN); | 4182 | ss->id = i; |
| 4777 | BUG_ON(!ss->css_alloc); | 4183 | ss->name = cgroup_subsys_name[i]; |
| 4778 | BUG_ON(!ss->css_free); | ||
| 4779 | if (ss->subsys_id != i) { | ||
| 4780 | printk(KERN_ERR "cgroup: Subsys %s id == %d\n", | ||
| 4781 | ss->name, ss->subsys_id); | ||
| 4782 | BUG(); | ||
| 4783 | } | ||
| 4784 | 4184 | ||
| 4785 | if (ss->early_init) | 4185 | if (ss->early_init) |
| 4786 | cgroup_init_subsys(ss); | 4186 | cgroup_init_subsys(ss); |
| @@ -4798,53 +4198,46 @@ int __init cgroup_init(void) | |||
| 4798 | { | 4198 | { |
| 4799 | struct cgroup_subsys *ss; | 4199 | struct cgroup_subsys *ss; |
| 4800 | unsigned long key; | 4200 | unsigned long key; |
| 4801 | int i, err; | 4201 | int ssid, err; |
| 4802 | 4202 | ||
| 4803 | err = bdi_init(&cgroup_backing_dev_info); | 4203 | BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files)); |
| 4804 | if (err) | ||
| 4805 | return err; | ||
| 4806 | 4204 | ||
| 4807 | for_each_builtin_subsys(ss, i) { | 4205 | mutex_lock(&cgroup_tree_mutex); |
| 4808 | if (!ss->early_init) | ||
| 4809 | cgroup_init_subsys(ss); | ||
| 4810 | } | ||
| 4811 | |||
| 4812 | /* allocate id for the dummy hierarchy */ | ||
| 4813 | mutex_lock(&cgroup_mutex); | 4206 | mutex_lock(&cgroup_mutex); |
| 4814 | mutex_lock(&cgroup_root_mutex); | ||
| 4815 | 4207 | ||
| 4816 | /* Add init_css_set to the hash table */ | 4208 | /* Add init_css_set to the hash table */ |
| 4817 | key = css_set_hash(init_css_set.subsys); | 4209 | key = css_set_hash(init_css_set.subsys); |
| 4818 | hash_add(css_set_table, &init_css_set.hlist, key); | 4210 | hash_add(css_set_table, &init_css_set.hlist, key); |
| 4819 | 4211 | ||
| 4820 | BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1)); | 4212 | BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0)); |
| 4821 | 4213 | ||
| 4822 | err = idr_alloc(&cgroup_dummy_root.cgroup_idr, cgroup_dummy_top, | ||
| 4823 | 0, 1, GFP_KERNEL); | ||
| 4824 | BUG_ON(err < 0); | ||
| 4825 | |||
| 4826 | mutex_unlock(&cgroup_root_mutex); | ||
| 4827 | mutex_unlock(&cgroup_mutex); | 4214 | mutex_unlock(&cgroup_mutex); |
| 4215 | mutex_unlock(&cgroup_tree_mutex); | ||
| 4828 | 4216 | ||
| 4829 | cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); | 4217 | for_each_subsys(ss, ssid) { |
| 4830 | if (!cgroup_kobj) { | 4218 | if (!ss->early_init) |
| 4831 | err = -ENOMEM; | 4219 | cgroup_init_subsys(ss); |
| 4832 | goto out; | 4220 | |
| 4221 | /* | ||
| 4222 | * cftype registration needs kmalloc and can't be done | ||
| 4223 | * during early_init. Register base cftypes separately. | ||
| 4224 | */ | ||
| 4225 | if (ss->base_cftypes) | ||
| 4226 | WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes)); | ||
| 4833 | } | 4227 | } |
| 4834 | 4228 | ||
| 4229 | cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); | ||
| 4230 | if (!cgroup_kobj) | ||
| 4231 | return -ENOMEM; | ||
| 4232 | |||
| 4835 | err = register_filesystem(&cgroup_fs_type); | 4233 | err = register_filesystem(&cgroup_fs_type); |
| 4836 | if (err < 0) { | 4234 | if (err < 0) { |
| 4837 | kobject_put(cgroup_kobj); | 4235 | kobject_put(cgroup_kobj); |
| 4838 | goto out; | 4236 | return err; |
| 4839 | } | 4237 | } |
| 4840 | 4238 | ||
| 4841 | proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations); | 4239 | proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations); |
| 4842 | 4240 | return 0; | |
| 4843 | out: | ||
| 4844 | if (err) | ||
| 4845 | bdi_destroy(&cgroup_backing_dev_info); | ||
| 4846 | |||
| 4847 | return err; | ||
| 4848 | } | 4241 | } |
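With modular subsystems gone, a controller is a statically known <name>_cgrp_subsys generated from include/linux/cgroup_subsys.h, and its base_cftypes are registered by cgroup_init() above. A bare-bones hypothetical controller skeleton; the example_* names, including the referenced cftype array, are made up:

	static struct cgroup_subsys_state *
	example_css_alloc(struct cgroup_subsys_state *parent_css)
	{
		struct cgroup_subsys_state *css;

		css = kzalloc(sizeof(*css), GFP_KERNEL);
		return css ?: ERR_PTR(-ENOMEM);
	}

	static void example_css_free(struct cgroup_subsys_state *css)
	{
		kfree(css);
	}

	struct cgroup_subsys example_cgrp_subsys = {
		.css_alloc	= example_css_alloc,
		.css_free	= example_css_free,
		.base_cftypes	= example_files,	/* hypothetical cftype array */
	};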
| 4849 | 4242 | ||
| 4850 | static int __init cgroup_wq_init(void) | 4243 | static int __init cgroup_wq_init(void) |
| @@ -4876,12 +4269,6 @@ core_initcall(cgroup_wq_init); | |||
| 4876 | * proc_cgroup_show() | 4269 | * proc_cgroup_show() |
| 4877 | * - Print task's cgroup paths into seq_file, one line for each hierarchy | 4270 | * - Print task's cgroup paths into seq_file, one line for each hierarchy |
| 4878 | * - Used for /proc/<pid>/cgroup. | 4271 | * - Used for /proc/<pid>/cgroup. |
| 4879 | * - No need to task_lock(tsk) on this tsk->cgroup reference, as it | ||
| 4880 | * doesn't really matter if tsk->cgroup changes after we read it, | ||
| 4881 | * and we take cgroup_mutex, keeping cgroup_attach_task() from changing it | ||
| 4882 | * anyway. No need to check that tsk->cgroup != NULL, thanks to | ||
| 4883 | * the_top_cgroup_hack in cgroup_exit(), which sets an exiting tasks | ||
| 4884 | * cgroup to top_cgroup. | ||
| 4885 | */ | 4272 | */ |
| 4886 | 4273 | ||
| 4887 | /* TODO: Use a proper seq_file iterator */ | 4274 | /* TODO: Use a proper seq_file iterator */ |
| @@ -4889,12 +4276,12 @@ int proc_cgroup_show(struct seq_file *m, void *v) | |||
| 4889 | { | 4276 | { |
| 4890 | struct pid *pid; | 4277 | struct pid *pid; |
| 4891 | struct task_struct *tsk; | 4278 | struct task_struct *tsk; |
| 4892 | char *buf; | 4279 | char *buf, *path; |
| 4893 | int retval; | 4280 | int retval; |
| 4894 | struct cgroupfs_root *root; | 4281 | struct cgroup_root *root; |
| 4895 | 4282 | ||
| 4896 | retval = -ENOMEM; | 4283 | retval = -ENOMEM; |
| 4897 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 4284 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
| 4898 | if (!buf) | 4285 | if (!buf) |
| 4899 | goto out; | 4286 | goto out; |
| 4900 | 4287 | ||
| @@ -4907,29 +4294,36 @@ int proc_cgroup_show(struct seq_file *m, void *v) | |||
| 4907 | retval = 0; | 4294 | retval = 0; |
| 4908 | 4295 | ||
| 4909 | mutex_lock(&cgroup_mutex); | 4296 | mutex_lock(&cgroup_mutex); |
| 4297 | down_read(&css_set_rwsem); | ||
| 4910 | 4298 | ||
| 4911 | for_each_active_root(root) { | 4299 | for_each_root(root) { |
| 4912 | struct cgroup_subsys *ss; | 4300 | struct cgroup_subsys *ss; |
| 4913 | struct cgroup *cgrp; | 4301 | struct cgroup *cgrp; |
| 4914 | int ssid, count = 0; | 4302 | int ssid, count = 0; |
| 4915 | 4303 | ||
| 4304 | if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible) | ||
| 4305 | continue; | ||
| 4306 | |||
| 4916 | seq_printf(m, "%d:", root->hierarchy_id); | 4307 | seq_printf(m, "%d:", root->hierarchy_id); |
| 4917 | for_each_subsys(ss, ssid) | 4308 | for_each_subsys(ss, ssid) |
| 4918 | if (root->subsys_mask & (1 << ssid)) | 4309 | if (root->cgrp.subsys_mask & (1 << ssid)) |
| 4919 | seq_printf(m, "%s%s", count++ ? "," : "", ss->name); | 4310 | seq_printf(m, "%s%s", count++ ? "," : "", ss->name); |
| 4920 | if (strlen(root->name)) | 4311 | if (strlen(root->name)) |
| 4921 | seq_printf(m, "%sname=%s", count ? "," : "", | 4312 | seq_printf(m, "%sname=%s", count ? "," : "", |
| 4922 | root->name); | 4313 | root->name); |
| 4923 | seq_putc(m, ':'); | 4314 | seq_putc(m, ':'); |
| 4924 | cgrp = task_cgroup_from_root(tsk, root); | 4315 | cgrp = task_cgroup_from_root(tsk, root); |
| 4925 | retval = cgroup_path(cgrp, buf, PAGE_SIZE); | 4316 | path = cgroup_path(cgrp, buf, PATH_MAX); |
| 4926 | if (retval < 0) | 4317 | if (!path) { |
| 4318 | retval = -ENAMETOOLONG; | ||
| 4927 | goto out_unlock; | 4319 | goto out_unlock; |
| 4928 | seq_puts(m, buf); | 4320 | } |
| 4321 | seq_puts(m, path); | ||
| 4929 | seq_putc(m, '\n'); | 4322 | seq_putc(m, '\n'); |
| 4930 | } | 4323 | } |
| 4931 | 4324 | ||
| 4932 | out_unlock: | 4325 | out_unlock: |
| 4326 | up_read(&css_set_rwsem); | ||
| 4933 | mutex_unlock(&cgroup_mutex); | 4327 | mutex_unlock(&cgroup_mutex); |
| 4934 | put_task_struct(tsk); | 4328 | put_task_struct(tsk); |
| 4935 | out_free: | 4329 | out_free: |
| @@ -4955,7 +4349,7 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v) | |||
| 4955 | for_each_subsys(ss, i) | 4349 | for_each_subsys(ss, i) |
| 4956 | seq_printf(m, "%s\t%d\t%d\t%d\n", | 4350 | seq_printf(m, "%s\t%d\t%d\t%d\n", |
| 4957 | ss->name, ss->root->hierarchy_id, | 4351 | ss->name, ss->root->hierarchy_id, |
| 4958 | ss->root->number_of_cgroups, !ss->disabled); | 4352 | atomic_read(&ss->root->nr_cgrps), !ss->disabled); |
| 4959 | 4353 | ||
| 4960 | mutex_unlock(&cgroup_mutex); | 4354 | mutex_unlock(&cgroup_mutex); |
| 4961 | return 0; | 4355 | return 0; |
| @@ -4974,27 +4368,16 @@ static const struct file_operations proc_cgroupstats_operations = { | |||
| 4974 | }; | 4368 | }; |
| 4975 | 4369 | ||
| 4976 | /** | 4370 | /** |
| 4977 | * cgroup_fork - attach newly forked task to its parents cgroup. | 4371 | * cgroup_fork - initialize cgroup related fields during copy_process() |
| 4978 | * @child: pointer to task_struct of forking parent process. | 4372 | * @child: pointer to task_struct of forking parent process. |
| 4979 | * | 4373 | * |
| 4980 | * Description: A task inherits its parent's cgroup at fork(). | 4374 | * A task is associated with the init_css_set until cgroup_post_fork() |
| 4981 | * | 4375 | * attaches it to the parent's css_set. Empty cg_list indicates that |
| 4982 | * A pointer to the shared css_set was automatically copied in | 4376 | * @child isn't holding reference to its css_set. |
| 4983 | * fork.c by dup_task_struct(). However, we ignore that copy, since | ||
| 4984 | * it was not made under the protection of RCU or cgroup_mutex, so | ||
| 4985 | * might no longer be a valid cgroup pointer. cgroup_attach_task() might | ||
| 4986 | * have already changed current->cgroups, allowing the previously | ||
| 4987 | * referenced cgroup group to be removed and freed. | ||
| 4988 | * | ||
| 4989 | * At the point that cgroup_fork() is called, 'current' is the parent | ||
| 4990 | * task, and the passed argument 'child' points to the child task. | ||
| 4991 | */ | 4377 | */ |
| 4992 | void cgroup_fork(struct task_struct *child) | 4378 | void cgroup_fork(struct task_struct *child) |
| 4993 | { | 4379 | { |
| 4994 | task_lock(current); | 4380 | RCU_INIT_POINTER(child->cgroups, &init_css_set); |
| 4995 | get_css_set(task_css_set(current)); | ||
| 4996 | child->cgroups = current->cgroups; | ||
| 4997 | task_unlock(current); | ||
| 4998 | INIT_LIST_HEAD(&child->cg_list); | 4381 | INIT_LIST_HEAD(&child->cg_list); |
| 4999 | } | 4382 | } |
| 5000 | 4383 | ||
| @@ -5014,23 +4397,37 @@ void cgroup_post_fork(struct task_struct *child) | |||
| 5014 | int i; | 4397 | int i; |
| 5015 | 4398 | ||
| 5016 | /* | 4399 | /* |
| 5017 | * use_task_css_set_links is set to 1 before we walk the tasklist | 4400 | * This may race against cgroup_enable_task_cg_links(). As that |
| 5018 | * under the tasklist_lock and we read it here after we added the child | 4401 | * function sets use_task_css_set_links before grabbing |
| 5019 | * to the tasklist under the tasklist_lock as well. If the child wasn't | 4402 | * tasklist_lock and we just went through tasklist_lock to add |
| 5020 | * yet in the tasklist when we walked through it from | 4403 | * @child, it's guaranteed that either we see the set |
| 5021 | * cgroup_enable_task_cg_lists(), then use_task_css_set_links value | 4404 | * use_task_css_set_links or cgroup_enable_task_cg_lists() sees |
| 5022 | * should be visible now due to the paired locking and barriers implied | 4405 | * @child during its iteration. |
| 5023 | * by LOCK/UNLOCK: it is written before the tasklist_lock unlock | 4406 | * |
| 5024 | * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock | 4407 | * If we won the race, @child is associated with %current's |
| 5025 | * lock on fork. | 4408 | * css_set. Grabbing css_set_rwsem guarantees both that the |
| 4409 | * association is stable, and, on completion of the parent's | ||
| 4410 | * migration, @child is visible in the source of migration or | ||
| 4411 | * already in the destination cgroup. This guarantee is necessary | ||
| 4412 | * when implementing operations which need to migrate all tasks of | ||
| 4413 | * a cgroup to another. | ||
| 4414 | * | ||
| 4415 | * Note that if we lose to cgroup_enable_task_cg_links(), @child | ||
| 4416 | * will remain in init_css_set. This is safe because all tasks are | ||
| 4417 | * in the init_css_set before cg_links is enabled and there's no | ||
| 4418 | * operation which transfers all tasks out of init_css_set. | ||
| 5026 | */ | 4419 | */ |
| 5027 | if (use_task_css_set_links) { | 4420 | if (use_task_css_set_links) { |
| 5028 | write_lock(&css_set_lock); | 4421 | struct css_set *cset; |
| 5029 | task_lock(child); | 4422 | |
| 5030 | if (list_empty(&child->cg_list)) | 4423 | down_write(&css_set_rwsem); |
| 5031 | list_add(&child->cg_list, &task_css_set(child)->tasks); | 4424 | cset = task_css_set(current); |
| 5032 | task_unlock(child); | 4425 | if (list_empty(&child->cg_list)) { |
| 5033 | write_unlock(&css_set_lock); | 4426 | rcu_assign_pointer(child->cgroups, cset); |
| 4427 | list_add(&child->cg_list, &cset->tasks); | ||
| 4428 | get_css_set(cset); | ||
| 4429 | } | ||
| 4430 | up_write(&css_set_rwsem); | ||
| 5034 | } | 4431 | } |
| 5035 | 4432 | ||
| 5036 | /* | 4433 | /* |
| @@ -5039,15 +4436,7 @@ void cgroup_post_fork(struct task_struct *child) | |||
| 5039 | * and addition to css_set. | 4436 | * and addition to css_set. |
| 5040 | */ | 4437 | */ |
| 5041 | if (need_forkexit_callback) { | 4438 | if (need_forkexit_callback) { |
| 5042 | /* | 4439 | for_each_subsys(ss, i) |
| 5043 | * fork/exit callbacks are supported only for builtin | ||
| 5044 | * subsystems, and the builtin section of the subsys | ||
| 5045 | * array is immutable, so we don't need to lock the | ||
| 5046 | * subsys array here. On the other hand, modular section | ||
| 5047 | * of the array can be freed at module unload, so we | ||
| 5048 | * can't touch that. | ||
| 5049 | */ | ||
| 5050 | for_each_builtin_subsys(ss, i) | ||
| 5051 | if (ss->fork) | 4440 | if (ss->fork) |
| 5052 | ss->fork(child); | 4441 | ss->fork(child); |
| 5053 | } | 4442 | } |
| @@ -5056,7 +4445,6 @@ void cgroup_post_fork(struct task_struct *child) | |||
| 5056 | /** | 4445 | /** |
| 5057 | * cgroup_exit - detach cgroup from exiting task | 4446 | * cgroup_exit - detach cgroup from exiting task |
| 5058 | * @tsk: pointer to task_struct of exiting process | 4447 | * @tsk: pointer to task_struct of exiting process |
| 5059 | * @run_callback: run exit callbacks? | ||
| 5060 | * | 4448 | * |
| 5061 | * Description: Detach cgroup from @tsk and release it. | 4449 | * Description: Detach cgroup from @tsk and release it. |
| 5062 | * | 4450 | * |
| @@ -5066,57 +4454,38 @@ void cgroup_post_fork(struct task_struct *child) | |||
| 5066 | * use notify_on_release cgroups where very high task exit scaling | 4454 | * use notify_on_release cgroups where very high task exit scaling |
| 5067 | * is required on large systems. | 4455 | * is required on large systems. |
| 5068 | * | 4456 | * |
| 5069 | * the_top_cgroup_hack: | 4457 | * We set the exiting tasks cgroup to the root cgroup (top_cgroup). We |
| 5070 | * | 4458 | * call cgroup_exit() while the task is still competent to handle |
| 5071 | * Set the exiting tasks cgroup to the root cgroup (top_cgroup). | 4459 | * notify_on_release(), then leave the task attached to the root cgroup in |
| 5072 | * | 4460 | * each hierarchy for the remainder of its exit. No need to bother with |
| 5073 | * We call cgroup_exit() while the task is still competent to | 4461 | * init_css_set refcnting. init_css_set never goes away and we can't race |
| 5074 | * handle notify_on_release(), then leave the task attached to the | 4462 | * with migration path - PF_EXITING is visible to migration path. |
| 5075 | * root cgroup in each hierarchy for the remainder of its exit. | ||
| 5076 | * | ||
| 5077 | * To do this properly, we would increment the reference count on | ||
| 5078 | * top_cgroup, and near the very end of the kernel/exit.c do_exit() | ||
| 5079 | * code we would add a second cgroup function call, to drop that | ||
| 5080 | * reference. This would just create an unnecessary hot spot on | ||
| 5081 | * the top_cgroup reference count, to no avail. | ||
| 5082 | * | ||
| 5083 | * Normally, holding a reference to a cgroup without bumping its | ||
| 5084 | * count is unsafe. The cgroup could go away, or someone could | ||
| 5085 | * attach us to a different cgroup, decrementing the count on | ||
| 5086 | * the first cgroup that we never incremented. But in this case, | ||
| 5087 | * top_cgroup isn't going away, and either task has PF_EXITING set, | ||
| 5088 | * which wards off any cgroup_attach_task() attempts, or task is a failed | ||
| 5089 | * fork, never visible to cgroup_attach_task. | ||
| 5090 | */ | 4463 | */ |
| 5091 | void cgroup_exit(struct task_struct *tsk, int run_callbacks) | 4464 | void cgroup_exit(struct task_struct *tsk) |
| 5092 | { | 4465 | { |
| 5093 | struct cgroup_subsys *ss; | 4466 | struct cgroup_subsys *ss; |
| 5094 | struct css_set *cset; | 4467 | struct css_set *cset; |
| 4468 | bool put_cset = false; | ||
| 5095 | int i; | 4469 | int i; |
| 5096 | 4470 | ||
| 5097 | /* | 4471 | /* |
| 5098 | * Unlink from the css_set task list if necessary. | 4472 | * Unlink from @tsk from its css_set. As migration path can't race |
| 5099 | * Optimistically check cg_list before taking | 4473 | * with us, we can check cg_list without grabbing css_set_rwsem. |
| 5100 | * css_set_lock | ||
| 5101 | */ | 4474 | */ |
| 5102 | if (!list_empty(&tsk->cg_list)) { | 4475 | if (!list_empty(&tsk->cg_list)) { |
| 5103 | write_lock(&css_set_lock); | 4476 | down_write(&css_set_rwsem); |
| 5104 | if (!list_empty(&tsk->cg_list)) | 4477 | list_del_init(&tsk->cg_list); |
| 5105 | list_del_init(&tsk->cg_list); | 4478 | up_write(&css_set_rwsem); |
| 5106 | write_unlock(&css_set_lock); | 4479 | put_cset = true; |
| 5107 | } | 4480 | } |
| 5108 | 4481 | ||
| 5109 | /* Reassign the task to the init_css_set. */ | 4482 | /* Reassign the task to the init_css_set. */ |
| 5110 | task_lock(tsk); | ||
| 5111 | cset = task_css_set(tsk); | 4483 | cset = task_css_set(tsk); |
| 5112 | RCU_INIT_POINTER(tsk->cgroups, &init_css_set); | 4484 | RCU_INIT_POINTER(tsk->cgroups, &init_css_set); |
| 5113 | 4485 | ||
| 5114 | if (run_callbacks && need_forkexit_callback) { | 4486 | if (need_forkexit_callback) { |
| 5115 | /* | 4487 | /* see cgroup_post_fork() for details */ |
| 5116 | * fork/exit callbacks are supported only for builtin | 4488 | for_each_subsys(ss, i) { |
| 5117 | * subsystems, see cgroup_post_fork() for details. | ||
| 5118 | */ | ||
| 5119 | for_each_builtin_subsys(ss, i) { | ||
| 5120 | if (ss->exit) { | 4489 | if (ss->exit) { |
| 5121 | struct cgroup_subsys_state *old_css = cset->subsys[i]; | 4490 | struct cgroup_subsys_state *old_css = cset->subsys[i]; |
| 5122 | struct cgroup_subsys_state *css = task_css(tsk, i); | 4491 | struct cgroup_subsys_state *css = task_css(tsk, i); |
| @@ -5125,9 +4494,9 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) | |||
| 5125 | } | 4494 | } |
| 5126 | } | 4495 | } |
| 5127 | } | 4496 | } |
| 5128 | task_unlock(tsk); | ||
| 5129 | 4497 | ||
| 5130 | put_css_set_taskexit(cset); | 4498 | if (put_cset) |
| 4499 | put_css_set(cset, true); | ||
| 5131 | } | 4500 | } |
| 5132 | 4501 | ||
| 5133 | static void check_for_release(struct cgroup *cgrp) | 4502 | static void check_for_release(struct cgroup *cgrp) |
| @@ -5184,16 +4553,17 @@ static void cgroup_release_agent(struct work_struct *work) | |||
| 5184 | while (!list_empty(&release_list)) { | 4553 | while (!list_empty(&release_list)) { |
| 5185 | char *argv[3], *envp[3]; | 4554 | char *argv[3], *envp[3]; |
| 5186 | int i; | 4555 | int i; |
| 5187 | char *pathbuf = NULL, *agentbuf = NULL; | 4556 | char *pathbuf = NULL, *agentbuf = NULL, *path; |
| 5188 | struct cgroup *cgrp = list_entry(release_list.next, | 4557 | struct cgroup *cgrp = list_entry(release_list.next, |
| 5189 | struct cgroup, | 4558 | struct cgroup, |
| 5190 | release_list); | 4559 | release_list); |
| 5191 | list_del_init(&cgrp->release_list); | 4560 | list_del_init(&cgrp->release_list); |
| 5192 | raw_spin_unlock(&release_list_lock); | 4561 | raw_spin_unlock(&release_list_lock); |
| 5193 | pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 4562 | pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); |
| 5194 | if (!pathbuf) | 4563 | if (!pathbuf) |
| 5195 | goto continue_free; | 4564 | goto continue_free; |
| 5196 | if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) | 4565 | path = cgroup_path(cgrp, pathbuf, PATH_MAX); |
| 4566 | if (!path) | ||
| 5197 | goto continue_free; | 4567 | goto continue_free; |
| 5198 | agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); | 4568 | agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); |
| 5199 | if (!agentbuf) | 4569 | if (!agentbuf) |
| @@ -5201,7 +4571,7 @@ static void cgroup_release_agent(struct work_struct *work) | |||
| 5201 | 4571 | ||
| 5202 | i = 0; | 4572 | i = 0; |
| 5203 | argv[i++] = agentbuf; | 4573 | argv[i++] = agentbuf; |
| 5204 | argv[i++] = pathbuf; | 4574 | argv[i++] = path; |
| 5205 | argv[i] = NULL; | 4575 | argv[i] = NULL; |
| 5206 | 4576 | ||
| 5207 | i = 0; | 4577 | i = 0; |
| @@ -5235,11 +4605,7 @@ static int __init cgroup_disable(char *str) | |||
| 5235 | if (!*token) | 4605 | if (!*token) |
| 5236 | continue; | 4606 | continue; |
| 5237 | 4607 | ||
| 5238 | /* | 4608 | for_each_subsys(ss, i) { |
| 5239 | * cgroup_disable, being at boot time, can't know about | ||
| 5240 | * module subsystems, so we don't worry about them. | ||
| 5241 | */ | ||
| 5242 | for_each_builtin_subsys(ss, i) { | ||
| 5243 | if (!strcmp(token, ss->name)) { | 4609 | if (!strcmp(token, ss->name)) { |
| 5244 | ss->disabled = 1; | 4610 | ss->disabled = 1; |
| 5245 | printk(KERN_INFO "Disabling %s control group" | 4611 | printk(KERN_INFO "Disabling %s control group" |
| @@ -5253,28 +4619,42 @@ static int __init cgroup_disable(char *str) | |||
| 5253 | __setup("cgroup_disable=", cgroup_disable); | 4619 | __setup("cgroup_disable=", cgroup_disable); |
| 5254 | 4620 | ||
| 5255 | /** | 4621 | /** |
| 5256 | * css_from_dir - get corresponding css from the dentry of a cgroup dir | 4622 | * css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir |
| 5257 | * @dentry: directory dentry of interest | 4623 | * @dentry: directory dentry of interest |
| 5258 | * @ss: subsystem of interest | 4624 | * @ss: subsystem of interest |
| 5259 | * | 4625 | * |
| 5260 | * Must be called under cgroup_mutex or RCU read lock. The caller is | 4626 | * If @dentry is a directory for a cgroup which has @ss enabled on it, try |
| 5261 | * responsible for pinning the returned css if it needs to be accessed | 4627 | * to get the corresponding css and return it. If such css doesn't exist |
| 5262 | * outside the critical section. | 4628 | * or can't be pinned, an ERR_PTR value is returned. |
| 5263 | */ | 4629 | */ |
| 5264 | struct cgroup_subsys_state *css_from_dir(struct dentry *dentry, | 4630 | struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry, |
| 5265 | struct cgroup_subsys *ss) | 4631 | struct cgroup_subsys *ss) |
| 5266 | { | 4632 | { |
| 4633 | struct kernfs_node *kn = kernfs_node_from_dentry(dentry); | ||
| 4634 | struct cgroup_subsys_state *css = NULL; | ||
| 5267 | struct cgroup *cgrp; | 4635 | struct cgroup *cgrp; |
| 5268 | 4636 | ||
| 5269 | cgroup_assert_mutex_or_rcu_locked(); | ||
| 5270 | |||
| 5271 | /* is @dentry a cgroup dir? */ | 4637 | /* is @dentry a cgroup dir? */ |
| 5272 | if (!dentry->d_inode || | 4638 | if (dentry->d_sb->s_type != &cgroup_fs_type || !kn || |
| 5273 | dentry->d_inode->i_op != &cgroup_dir_inode_operations) | 4639 | kernfs_type(kn) != KERNFS_DIR) |
| 5274 | return ERR_PTR(-EBADF); | 4640 | return ERR_PTR(-EBADF); |
| 5275 | 4641 | ||
| 5276 | cgrp = __d_cgrp(dentry); | 4642 | rcu_read_lock(); |
| 5277 | return cgroup_css(cgrp, ss) ?: ERR_PTR(-ENOENT); | 4643 | |
| 4644 | /* | ||
| 4645 | * This path doesn't originate from kernfs and @kn could already | ||
| 4646 | * have been or be removed at any point. @kn->priv is RCU | ||
| 4647 | * protected for this access. See destroy_locked() for details. | ||
| 4648 | */ | ||
| 4649 | cgrp = rcu_dereference(kn->priv); | ||
| 4650 | if (cgrp) | ||
| 4651 | css = cgroup_css(cgrp, ss); | ||
| 4652 | |||
| 4653 | if (!css || !css_tryget(css)) | ||
| 4654 | css = ERR_PTR(-ENOENT); | ||
| 4655 | |||
| 4656 | rcu_read_unlock(); | ||
| 4657 | return css; | ||
| 5278 | } | 4658 | } |
| 5279 | 4659 | ||
| 5280 | /** | 4660 | /** |
| @@ -5289,7 +4669,7 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss) | |||
| 5289 | { | 4669 | { |
| 5290 | struct cgroup *cgrp; | 4670 | struct cgroup *cgrp; |
| 5291 | 4671 | ||
| 5292 | cgroup_assert_mutex_or_rcu_locked(); | 4672 | cgroup_assert_mutexes_or_rcu_locked(); |
| 5293 | 4673 | ||
| 5294 | cgrp = idr_find(&ss->root->cgroup_idr, id); | 4674 | cgrp = idr_find(&ss->root->cgroup_idr, id); |
| 5295 | if (cgrp) | 4675 | if (cgrp) |
| @@ -5341,23 +4721,25 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v) | |||
| 5341 | { | 4721 | { |
| 5342 | struct cgrp_cset_link *link; | 4722 | struct cgrp_cset_link *link; |
| 5343 | struct css_set *cset; | 4723 | struct css_set *cset; |
| 4724 | char *name_buf; | ||
| 5344 | 4725 | ||
| 5345 | read_lock(&css_set_lock); | 4726 | name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL); |
| 4727 | if (!name_buf) | ||
| 4728 | return -ENOMEM; | ||
| 4729 | |||
| 4730 | down_read(&css_set_rwsem); | ||
| 5346 | rcu_read_lock(); | 4731 | rcu_read_lock(); |
| 5347 | cset = rcu_dereference(current->cgroups); | 4732 | cset = rcu_dereference(current->cgroups); |
| 5348 | list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { | 4733 | list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { |
| 5349 | struct cgroup *c = link->cgrp; | 4734 | struct cgroup *c = link->cgrp; |
| 5350 | const char *name; | ||
| 5351 | 4735 | ||
| 5352 | if (c->dentry) | 4736 | cgroup_name(c, name_buf, NAME_MAX + 1); |
| 5353 | name = c->dentry->d_name.name; | ||
| 5354 | else | ||
| 5355 | name = "?"; | ||
| 5356 | seq_printf(seq, "Root %d group %s\n", | 4737 | seq_printf(seq, "Root %d group %s\n", |
| 5357 | c->root->hierarchy_id, name); | 4738 | c->root->hierarchy_id, name_buf); |
| 5358 | } | 4739 | } |
| 5359 | rcu_read_unlock(); | 4740 | rcu_read_unlock(); |
| 5360 | read_unlock(&css_set_lock); | 4741 | up_read(&css_set_rwsem); |
| 4742 | kfree(name_buf); | ||
| 5361 | return 0; | 4743 | return 0; |
| 5362 | } | 4744 | } |
| 5363 | 4745 | ||
| @@ -5367,23 +4749,30 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v) | |||
| 5367 | struct cgroup_subsys_state *css = seq_css(seq); | 4749 | struct cgroup_subsys_state *css = seq_css(seq); |
| 5368 | struct cgrp_cset_link *link; | 4750 | struct cgrp_cset_link *link; |
| 5369 | 4751 | ||
| 5370 | read_lock(&css_set_lock); | 4752 | down_read(&css_set_rwsem); |
| 5371 | list_for_each_entry(link, &css->cgroup->cset_links, cset_link) { | 4753 | list_for_each_entry(link, &css->cgroup->cset_links, cset_link) { |
| 5372 | struct css_set *cset = link->cset; | 4754 | struct css_set *cset = link->cset; |
| 5373 | struct task_struct *task; | 4755 | struct task_struct *task; |
| 5374 | int count = 0; | 4756 | int count = 0; |
| 4757 | |||
| 5375 | seq_printf(seq, "css_set %p\n", cset); | 4758 | seq_printf(seq, "css_set %p\n", cset); |
| 4759 | |||
| 5376 | list_for_each_entry(task, &cset->tasks, cg_list) { | 4760 | list_for_each_entry(task, &cset->tasks, cg_list) { |
| 5377 | if (count++ > MAX_TASKS_SHOWN_PER_CSS) { | 4761 | if (count++ > MAX_TASKS_SHOWN_PER_CSS) |
| 5378 | seq_puts(seq, " ...\n"); | 4762 | goto overflow; |
| 5379 | break; | 4763 | seq_printf(seq, " task %d\n", task_pid_vnr(task)); |
| 5380 | } else { | 4764 | } |
| 5381 | seq_printf(seq, " task %d\n", | 4765 | |
| 5382 | task_pid_vnr(task)); | 4766 | list_for_each_entry(task, &cset->mg_tasks, cg_list) { |
| 5383 | } | 4767 | if (count++ > MAX_TASKS_SHOWN_PER_CSS) |
| 4768 | goto overflow; | ||
| 4769 | seq_printf(seq, " task %d\n", task_pid_vnr(task)); | ||
| 5384 | } | 4770 | } |
| 4771 | continue; | ||
| 4772 | overflow: | ||
| 4773 | seq_puts(seq, " ...\n"); | ||
| 5385 | } | 4774 | } |
| 5386 | read_unlock(&css_set_lock); | 4775 | up_read(&css_set_rwsem); |
| 5387 | return 0; | 4776 | return 0; |
| 5388 | } | 4777 | } |
| 5389 | 4778 | ||
| @@ -5426,11 +4815,9 @@ static struct cftype debug_files[] = { | |||
| 5426 | { } /* terminate */ | 4815 | { } /* terminate */ |
| 5427 | }; | 4816 | }; |
| 5428 | 4817 | ||
| 5429 | struct cgroup_subsys debug_subsys = { | 4818 | struct cgroup_subsys debug_cgrp_subsys = { |
| 5430 | .name = "debug", | ||
| 5431 | .css_alloc = debug_css_alloc, | 4819 | .css_alloc = debug_css_alloc, |
| 5432 | .css_free = debug_css_free, | 4820 | .css_free = debug_css_free, |
| 5433 | .subsys_id = debug_subsys_id, | ||
| 5434 | .base_cftypes = debug_files, | 4821 | .base_cftypes = debug_files, |
| 5435 | }; | 4822 | }; |
| 5436 | #endif /* CONFIG_CGROUP_DEBUG */ | 4823 | #endif /* CONFIG_CGROUP_DEBUG */ |
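For reference, the calling convention the kernel/cgroup.c hunks above switch to: callers allocate a PATH_MAX buffer (rather than PAGE_SIZE) and cgroup_path() now hands back a pointer into that buffer, returning NULL on overflow instead of a negative length. A hedged sketch of a caller in that style follows; show_cgrp_path() is a made-up helper for illustration, not part of the patch.

#include <linux/cgroup.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/limits.h>

/* Hypothetical helper mirroring proc_cgroup_show()'s new pattern above:
 * allocate PATH_MAX and treat a NULL return from cgroup_path() as
 * "name too long". Not a kernel function. */
static int show_cgrp_path(struct seq_file *m, struct cgroup *cgrp)
{
	char *buf, *path;
	int ret = -ENOMEM;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return ret;

	path = cgroup_path(cgrp, buf, PATH_MAX);
	if (!path) {
		ret = -ENAMETOOLONG;
		goto out_free;
	}

	seq_puts(m, path);
	seq_putc(m, '\n');
	ret = 0;
out_free:
	kfree(buf);
	return ret;
}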
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 6c3154e477f6..2bc4a2256444 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c | |||
| @@ -52,7 +52,7 @@ static inline struct freezer *css_freezer(struct cgroup_subsys_state *css) | |||
| 52 | 52 | ||
| 53 | static inline struct freezer *task_freezer(struct task_struct *task) | 53 | static inline struct freezer *task_freezer(struct task_struct *task) |
| 54 | { | 54 | { |
| 55 | return css_freezer(task_css(task, freezer_subsys_id)); | 55 | return css_freezer(task_css(task, freezer_cgrp_id)); |
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | static struct freezer *parent_freezer(struct freezer *freezer) | 58 | static struct freezer *parent_freezer(struct freezer *freezer) |
| @@ -84,8 +84,6 @@ static const char *freezer_state_strs(unsigned int state) | |||
| 84 | return "THAWED"; | 84 | return "THAWED"; |
| 85 | }; | 85 | }; |
| 86 | 86 | ||
| 87 | struct cgroup_subsys freezer_subsys; | ||
| 88 | |||
| 89 | static struct cgroup_subsys_state * | 87 | static struct cgroup_subsys_state * |
| 90 | freezer_css_alloc(struct cgroup_subsys_state *parent_css) | 88 | freezer_css_alloc(struct cgroup_subsys_state *parent_css) |
| 91 | { | 89 | { |
| @@ -189,7 +187,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css, | |||
| 189 | * current state before executing the following - !frozen tasks may | 187 | * current state before executing the following - !frozen tasks may |
| 190 | * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. | 188 | * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. |
| 191 | */ | 189 | */ |
| 192 | cgroup_taskset_for_each(task, new_css, tset) { | 190 | cgroup_taskset_for_each(task, tset) { |
| 193 | if (!(freezer->state & CGROUP_FREEZING)) { | 191 | if (!(freezer->state & CGROUP_FREEZING)) { |
| 194 | __thaw_task(task); | 192 | __thaw_task(task); |
| 195 | } else { | 193 | } else { |
| @@ -216,6 +214,16 @@ static void freezer_attach(struct cgroup_subsys_state *new_css, | |||
| 216 | } | 214 | } |
| 217 | } | 215 | } |
| 218 | 216 | ||
| 217 | /** | ||
| 218 | * freezer_fork - cgroup post fork callback | ||
| 219 | * @task: a task which has just been forked | ||
| 220 | * | ||
| 221 | * @task has just been created and should conform to the current state of | ||
| 222 | * the cgroup_freezer it belongs to. This function may race against | ||
| 223 | * freezer_attach(). Losing to freezer_attach() means that we don't have | ||
| 224 | * to do anything as freezer_attach() will put @task into the appropriate | ||
| 225 | * state. | ||
| 226 | */ | ||
| 219 | static void freezer_fork(struct task_struct *task) | 227 | static void freezer_fork(struct task_struct *task) |
| 220 | { | 228 | { |
| 221 | struct freezer *freezer; | 229 | struct freezer *freezer; |
| @@ -224,14 +232,26 @@ static void freezer_fork(struct task_struct *task) | |||
| 224 | freezer = task_freezer(task); | 232 | freezer = task_freezer(task); |
| 225 | 233 | ||
| 226 | /* | 234 | /* |
| 227 | * The root cgroup is non-freezable, so we can skip the | 235 | * The root cgroup is non-freezable, so we can skip locking the |
| 228 | * following check. | 236 | * freezer. This is safe regardless of race with task migration. |
| 237 | * If we didn't race or won, skipping is obviously the right thing | ||
| 238 | * to do. If we lost and root is the new cgroup, noop is still the | ||
| 239 | * right thing to do. | ||
| 229 | */ | 240 | */ |
| 230 | if (!parent_freezer(freezer)) | 241 | if (!parent_freezer(freezer)) |
| 231 | goto out; | 242 | goto out; |
| 232 | 243 | ||
| 244 | /* | ||
| 245 | * Grab @freezer->lock and freeze @task after verifying @task still | ||
| 246 | * belongs to @freezer and it's freezing. The former is for the | ||
| 247 | * case where we have raced against task migration and lost and | ||
| 248 | * @task is already in a different cgroup which may not be frozen. | ||
| 249 | * This isn't strictly necessary as freeze_task() is allowed to be | ||
| 250 | * called spuriously but let's do it anyway for, if nothing else, | ||
| 251 | * documentation. | ||
| 252 | */ | ||
| 233 | spin_lock_irq(&freezer->lock); | 253 | spin_lock_irq(&freezer->lock); |
| 234 | if (freezer->state & CGROUP_FREEZING) | 254 | if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING)) |
| 235 | freeze_task(task); | 255 | freeze_task(task); |
| 236 | spin_unlock_irq(&freezer->lock); | 256 | spin_unlock_irq(&freezer->lock); |
| 237 | out: | 257 | out: |
| @@ -422,7 +442,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze) | |||
| 422 | } | 442 | } |
| 423 | 443 | ||
| 424 | static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft, | 444 | static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft, |
| 425 | const char *buffer) | 445 | char *buffer) |
| 426 | { | 446 | { |
| 427 | bool freeze; | 447 | bool freeze; |
| 428 | 448 | ||
| @@ -473,13 +493,11 @@ static struct cftype files[] = { | |||
| 473 | { } /* terminate */ | 493 | { } /* terminate */ |
| 474 | }; | 494 | }; |
| 475 | 495 | ||
| 476 | struct cgroup_subsys freezer_subsys = { | 496 | struct cgroup_subsys freezer_cgrp_subsys = { |
| 477 | .name = "freezer", | ||
| 478 | .css_alloc = freezer_css_alloc, | 497 | .css_alloc = freezer_css_alloc, |
| 479 | .css_online = freezer_css_online, | 498 | .css_online = freezer_css_online, |
| 480 | .css_offline = freezer_css_offline, | 499 | .css_offline = freezer_css_offline, |
| 481 | .css_free = freezer_css_free, | 500 | .css_free = freezer_css_free, |
| 482 | .subsys_id = freezer_subsys_id, | ||
| 483 | .attach = freezer_attach, | 501 | .attach = freezer_attach, |
| 484 | .fork = freezer_fork, | 502 | .fork = freezer_fork, |
| 485 | .base_cftypes = files, | 503 | .base_cftypes = files, |
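The freezer conversion above also shows the new controller declaration style: the .name and .subsys_id fields disappear because the <name>_cgrp_subsys symbol and <name>_cgrp_id enum are generated from the subsystem list, and cgroup_taskset_for_each() drops its css argument. A hedged before/after sketch for an invented "foo" controller (a real one would be listed in include/linux/cgroup_subsys.h):

#include <linux/cgroup.h>
#include <linux/sched.h>

/* Old style, kept only as a comment for contrast:
 *
 *	struct cgroup_subsys foo_subsys = {
 *		.name		= "foo",
 *		.subsys_id	= foo_subsys_id,
 *		.attach		= foo_attach,
 *	};
 */
static void foo_attach(struct cgroup_subsys_state *css,
		       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* two-argument form of the iterator after the conversion above */
	cgroup_taskset_for_each(task, tset) {
		/* per-task work would go here */
	}
}

/* New style: the symbol name encodes the controller, no .name/.subsys_id. */
struct cgroup_subsys foo_cgrp_subsys = {
	.attach	= foo_attach,
};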
diff --git a/kernel/compat.c b/kernel/compat.c index 488ff8c4cf48..e40b0430b562 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
| @@ -30,28 +30,6 @@ | |||
| 30 | 30 | ||
| 31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
| 32 | 32 | ||
| 33 | /* | ||
| 34 | * Get/set struct timeval with struct timespec on the native side | ||
| 35 | */ | ||
| 36 | static int compat_get_timeval_convert(struct timespec *o, | ||
| 37 | struct compat_timeval __user *i) | ||
| 38 | { | ||
| 39 | long usec; | ||
| 40 | |||
| 41 | if (get_user(o->tv_sec, &i->tv_sec) || | ||
| 42 | get_user(usec, &i->tv_usec)) | ||
| 43 | return -EFAULT; | ||
| 44 | o->tv_nsec = usec * 1000; | ||
| 45 | return 0; | ||
| 46 | } | ||
| 47 | |||
| 48 | static int compat_put_timeval_convert(struct compat_timeval __user *o, | ||
| 49 | struct timeval *i) | ||
| 50 | { | ||
| 51 | return (put_user(i->tv_sec, &o->tv_sec) || | ||
| 52 | put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp) | 33 | static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp) |
| 56 | { | 34 | { |
| 57 | memset(txc, 0, sizeof(struct timex)); | 35 | memset(txc, 0, sizeof(struct timex)); |
| @@ -116,7 +94,7 @@ COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv, | |||
| 116 | if (tv) { | 94 | if (tv) { |
| 117 | struct timeval ktv; | 95 | struct timeval ktv; |
| 118 | do_gettimeofday(&ktv); | 96 | do_gettimeofday(&ktv); |
| 119 | if (compat_put_timeval_convert(tv, &ktv)) | 97 | if (compat_put_timeval(&ktv, tv)) |
| 120 | return -EFAULT; | 98 | return -EFAULT; |
| 121 | } | 99 | } |
| 122 | if (tz) { | 100 | if (tz) { |
| @@ -130,59 +108,58 @@ COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv, | |||
| 130 | COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv, | 108 | COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv, |
| 131 | struct timezone __user *, tz) | 109 | struct timezone __user *, tz) |
| 132 | { | 110 | { |
| 133 | struct timespec kts; | 111 | struct timeval user_tv; |
| 134 | struct timezone ktz; | 112 | struct timespec new_ts; |
| 113 | struct timezone new_tz; | ||
| 135 | 114 | ||
| 136 | if (tv) { | 115 | if (tv) { |
| 137 | if (compat_get_timeval_convert(&kts, tv)) | 116 | if (compat_get_timeval(&user_tv, tv)) |
| 138 | return -EFAULT; | 117 | return -EFAULT; |
| 118 | new_ts.tv_sec = user_tv.tv_sec; | ||
| 119 | new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC; | ||
| 139 | } | 120 | } |
| 140 | if (tz) { | 121 | if (tz) { |
| 141 | if (copy_from_user(&ktz, tz, sizeof(ktz))) | 122 | if (copy_from_user(&new_tz, tz, sizeof(*tz))) |
| 142 | return -EFAULT; | 123 | return -EFAULT; |
| 143 | } | 124 | } |
| 144 | 125 | ||
| 145 | return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); | 126 | return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL); |
| 146 | } | 127 | } |
| 147 | 128 | ||
| 148 | int get_compat_timeval(struct timeval *tv, const struct compat_timeval __user *ctv) | 129 | static int __compat_get_timeval(struct timeval *tv, const struct compat_timeval __user *ctv) |
| 149 | { | 130 | { |
| 150 | return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) || | 131 | return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) || |
| 151 | __get_user(tv->tv_sec, &ctv->tv_sec) || | 132 | __get_user(tv->tv_sec, &ctv->tv_sec) || |
| 152 | __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; | 133 | __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; |
| 153 | } | 134 | } |
| 154 | EXPORT_SYMBOL_GPL(get_compat_timeval); | ||
| 155 | 135 | ||
| 156 | int put_compat_timeval(const struct timeval *tv, struct compat_timeval __user *ctv) | 136 | static int __compat_put_timeval(const struct timeval *tv, struct compat_timeval __user *ctv) |
| 157 | { | 137 | { |
| 158 | return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) || | 138 | return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) || |
| 159 | __put_user(tv->tv_sec, &ctv->tv_sec) || | 139 | __put_user(tv->tv_sec, &ctv->tv_sec) || |
| 160 | __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; | 140 | __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; |
| 161 | } | 141 | } |
| 162 | EXPORT_SYMBOL_GPL(put_compat_timeval); | ||
| 163 | 142 | ||
| 164 | int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts) | 143 | static int __compat_get_timespec(struct timespec *ts, const struct compat_timespec __user *cts) |
| 165 | { | 144 | { |
| 166 | return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || | 145 | return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || |
| 167 | __get_user(ts->tv_sec, &cts->tv_sec) || | 146 | __get_user(ts->tv_sec, &cts->tv_sec) || |
| 168 | __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; | 147 | __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; |
| 169 | } | 148 | } |
| 170 | EXPORT_SYMBOL_GPL(get_compat_timespec); | ||
| 171 | 149 | ||
| 172 | int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts) | 150 | static int __compat_put_timespec(const struct timespec *ts, struct compat_timespec __user *cts) |
| 173 | { | 151 | { |
| 174 | return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || | 152 | return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || |
| 175 | __put_user(ts->tv_sec, &cts->tv_sec) || | 153 | __put_user(ts->tv_sec, &cts->tv_sec) || |
| 176 | __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; | 154 | __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; |
| 177 | } | 155 | } |
| 178 | EXPORT_SYMBOL_GPL(put_compat_timespec); | ||
| 179 | 156 | ||
| 180 | int compat_get_timeval(struct timeval *tv, const void __user *utv) | 157 | int compat_get_timeval(struct timeval *tv, const void __user *utv) |
| 181 | { | 158 | { |
| 182 | if (COMPAT_USE_64BIT_TIME) | 159 | if (COMPAT_USE_64BIT_TIME) |
| 183 | return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0; | 160 | return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0; |
| 184 | else | 161 | else |
| 185 | return get_compat_timeval(tv, utv); | 162 | return __compat_get_timeval(tv, utv); |
| 186 | } | 163 | } |
| 187 | EXPORT_SYMBOL_GPL(compat_get_timeval); | 164 | EXPORT_SYMBOL_GPL(compat_get_timeval); |
| 188 | 165 | ||
| @@ -191,7 +168,7 @@ int compat_put_timeval(const struct timeval *tv, void __user *utv) | |||
| 191 | if (COMPAT_USE_64BIT_TIME) | 168 | if (COMPAT_USE_64BIT_TIME) |
| 192 | return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0; | 169 | return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0; |
| 193 | else | 170 | else |
| 194 | return put_compat_timeval(tv, utv); | 171 | return __compat_put_timeval(tv, utv); |
| 195 | } | 172 | } |
| 196 | EXPORT_SYMBOL_GPL(compat_put_timeval); | 173 | EXPORT_SYMBOL_GPL(compat_put_timeval); |
| 197 | 174 | ||
| @@ -200,7 +177,7 @@ int compat_get_timespec(struct timespec *ts, const void __user *uts) | |||
| 200 | if (COMPAT_USE_64BIT_TIME) | 177 | if (COMPAT_USE_64BIT_TIME) |
| 201 | return copy_from_user(ts, uts, sizeof *ts) ? -EFAULT : 0; | 178 | return copy_from_user(ts, uts, sizeof *ts) ? -EFAULT : 0; |
| 202 | else | 179 | else |
| 203 | return get_compat_timespec(ts, uts); | 180 | return __compat_get_timespec(ts, uts); |
| 204 | } | 181 | } |
| 205 | EXPORT_SYMBOL_GPL(compat_get_timespec); | 182 | EXPORT_SYMBOL_GPL(compat_get_timespec); |
| 206 | 183 | ||
| @@ -209,10 +186,33 @@ int compat_put_timespec(const struct timespec *ts, void __user *uts) | |||
| 209 | if (COMPAT_USE_64BIT_TIME) | 186 | if (COMPAT_USE_64BIT_TIME) |
| 210 | return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0; | 187 | return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0; |
| 211 | else | 188 | else |
| 212 | return put_compat_timespec(ts, uts); | 189 | return __compat_put_timespec(ts, uts); |
| 213 | } | 190 | } |
| 214 | EXPORT_SYMBOL_GPL(compat_put_timespec); | 191 | EXPORT_SYMBOL_GPL(compat_put_timespec); |
| 215 | 192 | ||
| 193 | int compat_convert_timespec(struct timespec __user **kts, | ||
| 194 | const void __user *cts) | ||
| 195 | { | ||
| 196 | struct timespec ts; | ||
| 197 | struct timespec __user *uts; | ||
| 198 | |||
| 199 | if (!cts || COMPAT_USE_64BIT_TIME) { | ||
| 200 | *kts = (struct timespec __user *)cts; | ||
| 201 | return 0; | ||
| 202 | } | ||
| 203 | |||
| 204 | uts = compat_alloc_user_space(sizeof(ts)); | ||
| 205 | if (!uts) | ||
| 206 | return -EFAULT; | ||
| 207 | if (compat_get_timespec(&ts, cts)) | ||
| 208 | return -EFAULT; | ||
| 209 | if (copy_to_user(uts, &ts, sizeof(ts))) | ||
| 210 | return -EFAULT; | ||
| 211 | |||
| 212 | *kts = uts; | ||
| 213 | return 0; | ||
| 214 | } | ||
| 215 | |||
| 216 | static long compat_nanosleep_restart(struct restart_block *restart) | 216 | static long compat_nanosleep_restart(struct restart_block *restart) |
| 217 | { | 217 | { |
| 218 | struct compat_timespec __user *rmtp; | 218 | struct compat_timespec __user *rmtp; |
| @@ -229,7 +229,7 @@ static long compat_nanosleep_restart(struct restart_block *restart) | |||
| 229 | if (ret) { | 229 | if (ret) { |
| 230 | rmtp = restart->nanosleep.compat_rmtp; | 230 | rmtp = restart->nanosleep.compat_rmtp; |
| 231 | 231 | ||
| 232 | if (rmtp && put_compat_timespec(&rmt, rmtp)) | 232 | if (rmtp && compat_put_timespec(&rmt, rmtp)) |
| 233 | return -EFAULT; | 233 | return -EFAULT; |
| 234 | } | 234 | } |
| 235 | 235 | ||
| @@ -243,7 +243,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, | |||
| 243 | mm_segment_t oldfs; | 243 | mm_segment_t oldfs; |
| 244 | long ret; | 244 | long ret; |
| 245 | 245 | ||
| 246 | if (get_compat_timespec(&tu, rqtp)) | 246 | if (compat_get_timespec(&tu, rqtp)) |
| 247 | return -EFAULT; | 247 | return -EFAULT; |
| 248 | 248 | ||
| 249 | if (!timespec_valid(&tu)) | 249 | if (!timespec_valid(&tu)) |
| @@ -263,7 +263,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, | |||
| 263 | restart->fn = compat_nanosleep_restart; | 263 | restart->fn = compat_nanosleep_restart; |
| 264 | restart->nanosleep.compat_rmtp = rmtp; | 264 | restart->nanosleep.compat_rmtp = rmtp; |
| 265 | 265 | ||
| 266 | if (rmtp && put_compat_timespec(&rmt, rmtp)) | 266 | if (rmtp && compat_put_timespec(&rmt, rmtp)) |
| 267 | return -EFAULT; | 267 | return -EFAULT; |
| 268 | } | 268 | } |
| 269 | 269 | ||
| @@ -451,7 +451,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, | |||
| 451 | mm_segment_t old_fs = get_fs(); | 451 | mm_segment_t old_fs = get_fs(); |
| 452 | 452 | ||
| 453 | set_fs(KERNEL_DS); | 453 | set_fs(KERNEL_DS); |
| 454 | ret = sys_old_getrlimit(resource, &r); | 454 | ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r); |
| 455 | set_fs(old_fs); | 455 | set_fs(old_fs); |
| 456 | 456 | ||
| 457 | if (!ret) { | 457 | if (!ret) { |
| @@ -647,8 +647,8 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len, | |||
| 647 | int get_compat_itimerspec(struct itimerspec *dst, | 647 | int get_compat_itimerspec(struct itimerspec *dst, |
| 648 | const struct compat_itimerspec __user *src) | 648 | const struct compat_itimerspec __user *src) |
| 649 | { | 649 | { |
| 650 | if (get_compat_timespec(&dst->it_interval, &src->it_interval) || | 650 | if (__compat_get_timespec(&dst->it_interval, &src->it_interval) || |
| 651 | get_compat_timespec(&dst->it_value, &src->it_value)) | 651 | __compat_get_timespec(&dst->it_value, &src->it_value)) |
| 652 | return -EFAULT; | 652 | return -EFAULT; |
| 653 | return 0; | 653 | return 0; |
| 654 | } | 654 | } |
| @@ -656,8 +656,8 @@ int get_compat_itimerspec(struct itimerspec *dst, | |||
| 656 | int put_compat_itimerspec(struct compat_itimerspec __user *dst, | 656 | int put_compat_itimerspec(struct compat_itimerspec __user *dst, |
| 657 | const struct itimerspec *src) | 657 | const struct itimerspec *src) |
| 658 | { | 658 | { |
| 659 | if (put_compat_timespec(&src->it_interval, &dst->it_interval) || | 659 | if (__compat_put_timespec(&src->it_interval, &dst->it_interval) || |
| 660 | put_compat_timespec(&src->it_value, &dst->it_value)) | 660 | __compat_put_timespec(&src->it_value, &dst->it_value)) |
| 661 | return -EFAULT; | 661 | return -EFAULT; |
| 662 | return 0; | 662 | return 0; |
| 663 | } | 663 | } |
| @@ -727,7 +727,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock, | |||
| 727 | mm_segment_t oldfs; | 727 | mm_segment_t oldfs; |
| 728 | struct timespec ts; | 728 | struct timespec ts; |
| 729 | 729 | ||
| 730 | if (get_compat_timespec(&ts, tp)) | 730 | if (compat_get_timespec(&ts, tp)) |
| 731 | return -EFAULT; | 731 | return -EFAULT; |
| 732 | oldfs = get_fs(); | 732 | oldfs = get_fs(); |
| 733 | set_fs(KERNEL_DS); | 733 | set_fs(KERNEL_DS); |
| @@ -749,7 +749,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock, | |||
| 749 | err = sys_clock_gettime(which_clock, | 749 | err = sys_clock_gettime(which_clock, |
| 750 | (struct timespec __user *) &ts); | 750 | (struct timespec __user *) &ts); |
| 751 | set_fs(oldfs); | 751 | set_fs(oldfs); |
| 752 | if (!err && put_compat_timespec(&ts, tp)) | 752 | if (!err && compat_put_timespec(&ts, tp)) |
| 753 | return -EFAULT; | 753 | return -EFAULT; |
| 754 | return err; | 754 | return err; |
| 755 | } | 755 | } |
| @@ -789,7 +789,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock, | |||
| 789 | err = sys_clock_getres(which_clock, | 789 | err = sys_clock_getres(which_clock, |
| 790 | (struct timespec __user *) &ts); | 790 | (struct timespec __user *) &ts); |
| 791 | set_fs(oldfs); | 791 | set_fs(oldfs); |
| 792 | if (!err && tp && put_compat_timespec(&ts, tp)) | 792 | if (!err && tp && compat_put_timespec(&ts, tp)) |
| 793 | return -EFAULT; | 793 | return -EFAULT; |
| 794 | return err; | 794 | return err; |
| 795 | } | 795 | } |
| @@ -799,7 +799,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart) | |||
| 799 | long err; | 799 | long err; |
| 800 | mm_segment_t oldfs; | 800 | mm_segment_t oldfs; |
| 801 | struct timespec tu; | 801 | struct timespec tu; |
| 802 | struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp; | 802 | struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp; |
| 803 | 803 | ||
| 804 | restart->nanosleep.rmtp = (struct timespec __user *) &tu; | 804 | restart->nanosleep.rmtp = (struct timespec __user *) &tu; |
| 805 | oldfs = get_fs(); | 805 | oldfs = get_fs(); |
| @@ -808,7 +808,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart) | |||
| 808 | set_fs(oldfs); | 808 | set_fs(oldfs); |
| 809 | 809 | ||
| 810 | if ((err == -ERESTART_RESTARTBLOCK) && rmtp && | 810 | if ((err == -ERESTART_RESTARTBLOCK) && rmtp && |
| 811 | put_compat_timespec(&tu, rmtp)) | 811 | compat_put_timespec(&tu, rmtp)) |
| 812 | return -EFAULT; | 812 | return -EFAULT; |
| 813 | 813 | ||
| 814 | if (err == -ERESTART_RESTARTBLOCK) { | 814 | if (err == -ERESTART_RESTARTBLOCK) { |
| @@ -827,7 +827,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags, | |||
| 827 | struct timespec in, out; | 827 | struct timespec in, out; |
| 828 | struct restart_block *restart; | 828 | struct restart_block *restart; |
| 829 | 829 | ||
| 830 | if (get_compat_timespec(&in, rqtp)) | 830 | if (compat_get_timespec(&in, rqtp)) |
| 831 | return -EFAULT; | 831 | return -EFAULT; |
| 832 | 832 | ||
| 833 | oldfs = get_fs(); | 833 | oldfs = get_fs(); |
| @@ -838,7 +838,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags, | |||
| 838 | set_fs(oldfs); | 838 | set_fs(oldfs); |
| 839 | 839 | ||
| 840 | if ((err == -ERESTART_RESTARTBLOCK) && rmtp && | 840 | if ((err == -ERESTART_RESTARTBLOCK) && rmtp && |
| 841 | put_compat_timespec(&out, rmtp)) | 841 | compat_put_timespec(&out, rmtp)) |
| 842 | return -EFAULT; | 842 | return -EFAULT; |
| 843 | 843 | ||
| 844 | if (err == -ERESTART_RESTARTBLOCK) { | 844 | if (err == -ERESTART_RESTARTBLOCK) { |
| @@ -1130,7 +1130,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval, | |||
| 1130 | set_fs(KERNEL_DS); | 1130 | set_fs(KERNEL_DS); |
| 1131 | ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); | 1131 | ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); |
| 1132 | set_fs(old_fs); | 1132 | set_fs(old_fs); |
| 1133 | if (put_compat_timespec(&t, interval)) | 1133 | if (compat_put_timespec(&t, interval)) |
| 1134 | return -EFAULT; | 1134 | return -EFAULT; |
| 1135 | return ret; | 1135 | return ret; |
| 1136 | } | 1136 | } |
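To make the kernel/compat.c consolidation concrete: the un-prefixed get/put_compat_timespec() helpers become static __compat_* internals, and callers use compat_get_timespec()/compat_put_timespec(), which also handle the COMPAT_USE_64BIT_TIME case by copying a native timespec directly. A hedged sketch of the resulting caller pattern (the function itself is illustrative, not part of the patch):

#include <linux/compat.h>
#include <linux/time.h>
#include <linux/errno.h>

/* Illustrative caller following the pattern used in the hunks above. */
static long example_compat_sleep(struct compat_timespec __user *rqtp,
				 struct compat_timespec __user *rmtp)
{
	struct timespec rqt, rmt;

	/* copies a native timespec directly when COMPAT_USE_64BIT_TIME */
	if (compat_get_timespec(&rqt, rqtp))
		return -EFAULT;

	if (!timespec_valid(&rqt))
		return -EINVAL;

	rmt = rqt;	/* stand-in for the real work and remaining time */

	if (rmtp && compat_put_timespec(&rmt, rmtp))
		return -EFAULT;
	return 0;
}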
diff --git a/kernel/cpu.c b/kernel/cpu.c index deff2e693766..a9e710eef0e2 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
| 20 | #include <linux/gfp.h> | 20 | #include <linux/gfp.h> |
| 21 | #include <linux/suspend.h> | 21 | #include <linux/suspend.h> |
| 22 | #include <linux/lockdep.h> | ||
| 22 | 23 | ||
| 23 | #include "smpboot.h" | 24 | #include "smpboot.h" |
| 24 | 25 | ||
| @@ -27,18 +28,23 @@ | |||
| 27 | static DEFINE_MUTEX(cpu_add_remove_lock); | 28 | static DEFINE_MUTEX(cpu_add_remove_lock); |
| 28 | 29 | ||
| 29 | /* | 30 | /* |
| 30 | * The following two API's must be used when attempting | 31 | * The following two APIs (cpu_maps_update_begin/done) must be used when |
| 31 | * to serialize the updates to cpu_online_mask, cpu_present_mask. | 32 | * attempting to serialize the updates to cpu_online_mask & cpu_present_mask. |
| 33 | * The APIs cpu_notifier_register_begin/done() must be used to protect CPU | ||
| 34 | * hotplug callback (un)registration performed using __register_cpu_notifier() | ||
| 35 | * or __unregister_cpu_notifier(). | ||
| 32 | */ | 36 | */ |
| 33 | void cpu_maps_update_begin(void) | 37 | void cpu_maps_update_begin(void) |
| 34 | { | 38 | { |
| 35 | mutex_lock(&cpu_add_remove_lock); | 39 | mutex_lock(&cpu_add_remove_lock); |
| 36 | } | 40 | } |
| 41 | EXPORT_SYMBOL(cpu_notifier_register_begin); | ||
| 37 | 42 | ||
| 38 | void cpu_maps_update_done(void) | 43 | void cpu_maps_update_done(void) |
| 39 | { | 44 | { |
| 40 | mutex_unlock(&cpu_add_remove_lock); | 45 | mutex_unlock(&cpu_add_remove_lock); |
| 41 | } | 46 | } |
| 47 | EXPORT_SYMBOL(cpu_notifier_register_done); | ||
| 42 | 48 | ||
| 43 | static RAW_NOTIFIER_HEAD(cpu_chain); | 49 | static RAW_NOTIFIER_HEAD(cpu_chain); |
| 44 | 50 | ||
| @@ -57,17 +63,30 @@ static struct { | |||
| 57 | * an ongoing cpu hotplug operation. | 63 | * an ongoing cpu hotplug operation. |
| 58 | */ | 64 | */ |
| 59 | int refcount; | 65 | int refcount; |
| 66 | |||
| 67 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 68 | struct lockdep_map dep_map; | ||
| 69 | #endif | ||
| 60 | } cpu_hotplug = { | 70 | } cpu_hotplug = { |
| 61 | .active_writer = NULL, | 71 | .active_writer = NULL, |
| 62 | .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), | 72 | .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), |
| 63 | .refcount = 0, | 73 | .refcount = 0, |
| 74 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 75 | .dep_map = {.name = "cpu_hotplug.lock" }, | ||
| 76 | #endif | ||
| 64 | }; | 77 | }; |
| 65 | 78 | ||
| 79 | /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */ | ||
| 80 | #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map) | ||
| 81 | #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) | ||
| 82 | #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) | ||
| 83 | |||
| 66 | void get_online_cpus(void) | 84 | void get_online_cpus(void) |
| 67 | { | 85 | { |
| 68 | might_sleep(); | 86 | might_sleep(); |
| 69 | if (cpu_hotplug.active_writer == current) | 87 | if (cpu_hotplug.active_writer == current) |
| 70 | return; | 88 | return; |
| 89 | cpuhp_lock_acquire_read(); | ||
| 71 | mutex_lock(&cpu_hotplug.lock); | 90 | mutex_lock(&cpu_hotplug.lock); |
| 72 | cpu_hotplug.refcount++; | 91 | cpu_hotplug.refcount++; |
| 73 | mutex_unlock(&cpu_hotplug.lock); | 92 | mutex_unlock(&cpu_hotplug.lock); |
| @@ -87,6 +106,7 @@ void put_online_cpus(void) | |||
| 87 | if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) | 106 | if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) |
| 88 | wake_up_process(cpu_hotplug.active_writer); | 107 | wake_up_process(cpu_hotplug.active_writer); |
| 89 | mutex_unlock(&cpu_hotplug.lock); | 108 | mutex_unlock(&cpu_hotplug.lock); |
| 109 | cpuhp_lock_release(); | ||
| 90 | 110 | ||
| 91 | } | 111 | } |
| 92 | EXPORT_SYMBOL_GPL(put_online_cpus); | 112 | EXPORT_SYMBOL_GPL(put_online_cpus); |
| @@ -117,6 +137,7 @@ void cpu_hotplug_begin(void) | |||
| 117 | { | 137 | { |
| 118 | cpu_hotplug.active_writer = current; | 138 | cpu_hotplug.active_writer = current; |
| 119 | 139 | ||
| 140 | cpuhp_lock_acquire(); | ||
| 120 | for (;;) { | 141 | for (;;) { |
| 121 | mutex_lock(&cpu_hotplug.lock); | 142 | mutex_lock(&cpu_hotplug.lock); |
| 122 | if (likely(!cpu_hotplug.refcount)) | 143 | if (likely(!cpu_hotplug.refcount)) |
| @@ -131,6 +152,7 @@ void cpu_hotplug_done(void) | |||
| 131 | { | 152 | { |
| 132 | cpu_hotplug.active_writer = NULL; | 153 | cpu_hotplug.active_writer = NULL; |
| 133 | mutex_unlock(&cpu_hotplug.lock); | 154 | mutex_unlock(&cpu_hotplug.lock); |
| 155 | cpuhp_lock_release(); | ||
| 134 | } | 156 | } |
| 135 | 157 | ||
| 136 | /* | 158 | /* |
| @@ -166,6 +188,11 @@ int __ref register_cpu_notifier(struct notifier_block *nb) | |||
| 166 | return ret; | 188 | return ret; |
| 167 | } | 189 | } |
| 168 | 190 | ||
| 191 | int __ref __register_cpu_notifier(struct notifier_block *nb) | ||
| 192 | { | ||
| 193 | return raw_notifier_chain_register(&cpu_chain, nb); | ||
| 194 | } | ||
| 195 | |||
| 169 | static int __cpu_notify(unsigned long val, void *v, int nr_to_call, | 196 | static int __cpu_notify(unsigned long val, void *v, int nr_to_call, |
| 170 | int *nr_calls) | 197 | int *nr_calls) |
| 171 | { | 198 | { |
| @@ -189,6 +216,7 @@ static void cpu_notify_nofail(unsigned long val, void *v) | |||
| 189 | BUG_ON(cpu_notify(val, v)); | 216 | BUG_ON(cpu_notify(val, v)); |
| 190 | } | 217 | } |
| 191 | EXPORT_SYMBOL(register_cpu_notifier); | 218 | EXPORT_SYMBOL(register_cpu_notifier); |
| 219 | EXPORT_SYMBOL(__register_cpu_notifier); | ||
| 192 | 220 | ||
| 193 | void __ref unregister_cpu_notifier(struct notifier_block *nb) | 221 | void __ref unregister_cpu_notifier(struct notifier_block *nb) |
| 194 | { | 222 | { |
| @@ -198,6 +226,12 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb) | |||
| 198 | } | 226 | } |
| 199 | EXPORT_SYMBOL(unregister_cpu_notifier); | 227 | EXPORT_SYMBOL(unregister_cpu_notifier); |
| 200 | 228 | ||
| 229 | void __ref __unregister_cpu_notifier(struct notifier_block *nb) | ||
| 230 | { | ||
| 231 | raw_notifier_chain_unregister(&cpu_chain, nb); | ||
| 232 | } | ||
| 233 | EXPORT_SYMBOL(__unregister_cpu_notifier); | ||
| 234 | |||
| 201 | /** | 235 | /** |
| 202 | * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU | 236 | * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU |
| 203 | * @cpu: a CPU id | 237 | * @cpu: a CPU id |
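The kernel/cpu.c hunk introduces __register_cpu_notifier()/__unregister_cpu_notifier() together with cpu_notifier_register_begin()/done() so a caller can initialize the already-online CPUs and register its callback as one atomic step with respect to hotplug. A hedged sketch of the intended usage (the foobar_* names are invented for illustration):

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/init.h>

static int foobar_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	/* react to CPU_ONLINE, CPU_DEAD, ... here */
	return NOTIFY_OK;
}

static struct notifier_block foobar_cpu_notifier = {
	.notifier_call = foobar_cpu_callback,
};

static int __init foobar_init(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		/* set up per-CPU state for CPUs that are already online */
	}

	/* the double-underscore variant assumes the lock is already held */
	__register_cpu_notifier(&foobar_cpu_notifier);

	cpu_notifier_register_done();
	return 0;
}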
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index e6b1b66afe52..3d54c418bd06 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
| @@ -119,7 +119,7 @@ static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) | |||
| 119 | /* Retrieve the cpuset for a task */ | 119 | /* Retrieve the cpuset for a task */ |
| 120 | static inline struct cpuset *task_cs(struct task_struct *task) | 120 | static inline struct cpuset *task_cs(struct task_struct *task) |
| 121 | { | 121 | { |
| 122 | return css_cs(task_css(task, cpuset_subsys_id)); | 122 | return css_cs(task_css(task, cpuset_cgrp_id)); |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static inline struct cpuset *parent_cs(struct cpuset *cs) | 125 | static inline struct cpuset *parent_cs(struct cpuset *cs) |
| @@ -467,7 +467,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) | |||
| 467 | * be changed to have empty cpus_allowed or mems_allowed. | 467 | * be changed to have empty cpus_allowed or mems_allowed. |
| 468 | */ | 468 | */ |
| 469 | ret = -ENOSPC; | 469 | ret = -ENOSPC; |
| 470 | if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) { | 470 | if ((cgroup_has_tasks(cur->css.cgroup) || cur->attach_in_progress)) { |
| 471 | if (!cpumask_empty(cur->cpus_allowed) && | 471 | if (!cpumask_empty(cur->cpus_allowed) && |
| 472 | cpumask_empty(trial->cpus_allowed)) | 472 | cpumask_empty(trial->cpus_allowed)) |
| 473 | goto out; | 473 | goto out; |
| @@ -829,55 +829,36 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs) | |||
| 829 | } | 829 | } |
| 830 | 830 | ||
| 831 | /** | 831 | /** |
| 832 | * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's | ||
| 833 | * @tsk: task to test | ||
| 834 | * @data: cpuset to @tsk belongs to | ||
| 835 | * | ||
| 836 | * Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed | ||
| 837 | * mask needs to be changed. | ||
| 838 | * | ||
| 839 | * We don't need to re-check for the cgroup/cpuset membership, since we're | ||
| 840 | * holding cpuset_mutex at this point. | ||
| 841 | */ | ||
| 842 | static void cpuset_change_cpumask(struct task_struct *tsk, void *data) | ||
| 843 | { | ||
| 844 | struct cpuset *cs = data; | ||
| 845 | struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); | ||
| 846 | |||
| 847 | set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed); | ||
| 848 | } | ||
| 849 | |||
| 850 | /** | ||
| 851 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. | 832 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. |
| 852 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed | 833 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed |
| 853 | * @heap: if NULL, defer allocating heap memory to css_scan_tasks() | ||
| 854 | * | ||
| 855 | * Called with cpuset_mutex held | ||
| 856 | * | 834 | * |
| 857 | * The css_scan_tasks() function will scan all the tasks in a cgroup, | 835 | * Iterate through each task of @cs updating its cpus_allowed to the |
| 858 | * calling callback functions for each. | 836 | * effective cpuset's. As this function is called with cpuset_mutex held, |
| 859 | * | 837 | * cpuset membership stays stable. |
| 860 | * No return value. It's guaranteed that css_scan_tasks() always returns 0 | ||
| 861 | * if @heap != NULL. | ||
| 862 | */ | 838 | */ |
| 863 | static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap) | 839 | static void update_tasks_cpumask(struct cpuset *cs) |
| 864 | { | 840 | { |
| 865 | css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap); | 841 | struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); |
| 842 | struct css_task_iter it; | ||
| 843 | struct task_struct *task; | ||
| 844 | |||
| 845 | css_task_iter_start(&cs->css, &it); | ||
| 846 | while ((task = css_task_iter_next(&it))) | ||
| 847 | set_cpus_allowed_ptr(task, cpus_cs->cpus_allowed); | ||
| 848 | css_task_iter_end(&it); | ||
| 866 | } | 849 | } |
| 867 | 850 | ||
| 868 | /* | 851 | /* |
| 869 | * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy. | 852 | * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy. |
| 870 | * @root_cs: the root cpuset of the hierarchy | 853 | * @root_cs: the root cpuset of the hierarchy |
| 871 | * @update_root: update root cpuset or not? | 854 | * @update_root: update root cpuset or not? |
| 872 | * @heap: the heap used by css_scan_tasks() | ||
| 873 | * | 855 | * |
| 874 | * This will update cpumasks of tasks in @root_cs and all other empty cpusets | 856 | * This will update cpumasks of tasks in @root_cs and all other empty cpusets |
| 875 | * which take on cpumask of @root_cs. | 857 | * which take on cpumask of @root_cs. |
| 876 | * | 858 | * |
| 877 | * Called with cpuset_mutex held | 859 | * Called with cpuset_mutex held |
| 878 | */ | 860 | */ |
| 879 | static void update_tasks_cpumask_hier(struct cpuset *root_cs, | 861 | static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root) |
| 880 | bool update_root, struct ptr_heap *heap) | ||
| 881 | { | 862 | { |
| 882 | struct cpuset *cp; | 863 | struct cpuset *cp; |
| 883 | struct cgroup_subsys_state *pos_css; | 864 | struct cgroup_subsys_state *pos_css; |
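The hunk above replaces the css_scan_tasks() callback machinery with an open-coded iteration. As a hedged illustration, the generic shape of the css_task_iter loop that update_tasks_cpumask() (and, below, update_tasks_nodemask()) now relies on; the wrapper function is invented for the example:

#include <linux/cgroup.h>
#include <linux/sched.h>

/* Illustrative only: walk every task attached to @css, as the converted
 * cpuset helpers do. Membership is kept stable there by cpuset_mutex. */
static void for_each_task_in_css(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it))) {
		/* per-task update would go here */
	}
	css_task_iter_end(&it);
}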
| @@ -898,7 +879,7 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs, | |||
| 898 | continue; | 879 | continue; |
| 899 | rcu_read_unlock(); | 880 | rcu_read_unlock(); |
| 900 | 881 | ||
| 901 | update_tasks_cpumask(cp, heap); | 882 | update_tasks_cpumask(cp); |
| 902 | 883 | ||
| 903 | rcu_read_lock(); | 884 | rcu_read_lock(); |
| 904 | css_put(&cp->css); | 885 | css_put(&cp->css); |
| @@ -914,7 +895,6 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs, | |||
| 914 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, | 895 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, |
| 915 | const char *buf) | 896 | const char *buf) |
| 916 | { | 897 | { |
| 917 | struct ptr_heap heap; | ||
| 918 | int retval; | 898 | int retval; |
| 919 | int is_load_balanced; | 899 | int is_load_balanced; |
| 920 | 900 | ||
| @@ -947,19 +927,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, | |||
| 947 | if (retval < 0) | 927 | if (retval < 0) |
| 948 | return retval; | 928 | return retval; |
| 949 | 929 | ||
| 950 | retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); | ||
| 951 | if (retval) | ||
| 952 | return retval; | ||
| 953 | |||
| 954 | is_load_balanced = is_sched_load_balance(trialcs); | 930 | is_load_balanced = is_sched_load_balance(trialcs); |
| 955 | 931 | ||
| 956 | mutex_lock(&callback_mutex); | 932 | mutex_lock(&callback_mutex); |
| 957 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); | 933 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); |
| 958 | mutex_unlock(&callback_mutex); | 934 | mutex_unlock(&callback_mutex); |
| 959 | 935 | ||
| 960 | update_tasks_cpumask_hier(cs, true, &heap); | 936 | update_tasks_cpumask_hier(cs, true); |
| 961 | |||
| 962 | heap_free(&heap); | ||
| 963 | 937 | ||
| 964 | if (is_load_balanced) | 938 | if (is_load_balanced) |
| 965 | rebuild_sched_domains_locked(); | 939 | rebuild_sched_domains_locked(); |
| @@ -1022,7 +996,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk, | |||
| 1022 | task_lock(tsk); | 996 | task_lock(tsk); |
| 1023 | /* | 997 | /* |
| 1024 | * Determine if a loop is necessary if another thread is doing | 998 | * Determine if a loop is necessary if another thread is doing |
| 1025 | * get_mems_allowed(). If at least one node remains unchanged and | 999 | * read_mems_allowed_begin(). If at least one node remains unchanged and |
| 1026 | * tsk does not have a mempolicy, then an empty nodemask will not be | 1000 | * tsk does not have a mempolicy, then an empty nodemask will not be |
| 1027 | * possible when mems_allowed is larger than a word. | 1001 | * possible when mems_allowed is larger than a word. |
| 1028 | */ | 1002 | */ |
| @@ -1048,53 +1022,22 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk, | |||
| 1048 | task_unlock(tsk); | 1022 | task_unlock(tsk); |
| 1049 | } | 1023 | } |
| 1050 | 1024 | ||
| 1051 | struct cpuset_change_nodemask_arg { | ||
| 1052 | struct cpuset *cs; | ||
| 1053 | nodemask_t *newmems; | ||
| 1054 | }; | ||
| 1055 | |||
| 1056 | /* | ||
| 1057 | * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy | ||
| 1058 | * of it to cpuset's new mems_allowed, and migrate pages to new nodes if | ||
| 1059 | * memory_migrate flag is set. Called with cpuset_mutex held. | ||
| 1060 | */ | ||
| 1061 | static void cpuset_change_nodemask(struct task_struct *p, void *data) | ||
| 1062 | { | ||
| 1063 | struct cpuset_change_nodemask_arg *arg = data; | ||
| 1064 | struct cpuset *cs = arg->cs; | ||
| 1065 | struct mm_struct *mm; | ||
| 1066 | int migrate; | ||
| 1067 | |||
| 1068 | cpuset_change_task_nodemask(p, arg->newmems); | ||
| 1069 | |||
| 1070 | mm = get_task_mm(p); | ||
| 1071 | if (!mm) | ||
| 1072 | return; | ||
| 1073 | |||
| 1074 | migrate = is_memory_migrate(cs); | ||
| 1075 | |||
| 1076 | mpol_rebind_mm(mm, &cs->mems_allowed); | ||
| 1077 | if (migrate) | ||
| 1078 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, arg->newmems); | ||
| 1079 | mmput(mm); | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | static void *cpuset_being_rebound; | 1025 | static void *cpuset_being_rebound; |
| 1083 | 1026 | ||
| 1084 | /** | 1027 | /** |
| 1085 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. | 1028 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. |
| 1086 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed | 1029 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed |
| 1087 | * @heap: if NULL, defer allocating heap memory to css_scan_tasks() | ||
| 1088 | * | 1030 | * |
| 1089 | * Called with cpuset_mutex held. No return value. It's guaranteed that | 1031 | * Iterate through each task of @cs updating its mems_allowed to the |
| 1090 | * css_scan_tasks() always returns 0 if @heap != NULL. | 1032 | * effective cpuset's. As this function is called with cpuset_mutex held, |
| 1033 | * cpuset membership stays stable. | ||
| 1091 | */ | 1034 | */ |
| 1092 | static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) | 1035 | static void update_tasks_nodemask(struct cpuset *cs) |
| 1093 | { | 1036 | { |
| 1094 | static nodemask_t newmems; /* protected by cpuset_mutex */ | 1037 | static nodemask_t newmems; /* protected by cpuset_mutex */ |
| 1095 | struct cpuset *mems_cs = effective_nodemask_cpuset(cs); | 1038 | struct cpuset *mems_cs = effective_nodemask_cpuset(cs); |
| 1096 | struct cpuset_change_nodemask_arg arg = { .cs = cs, | 1039 | struct css_task_iter it; |
| 1097 | .newmems = &newmems }; | 1040 | struct task_struct *task; |
| 1098 | 1041 | ||
| 1099 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ | 1042 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
| 1100 | 1043 | ||
| @@ -1110,7 +1053,25 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) | |||
| 1110 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() | 1053 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
| 1111 | * is idempotent. Also migrate pages in each mm to new nodes. | 1054 | * is idempotent. Also migrate pages in each mm to new nodes. |
| 1112 | */ | 1055 | */ |
| 1113 | css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap); | 1056 | css_task_iter_start(&cs->css, &it); |
| 1057 | while ((task = css_task_iter_next(&it))) { | ||
| 1058 | struct mm_struct *mm; | ||
| 1059 | bool migrate; | ||
| 1060 | |||
| 1061 | cpuset_change_task_nodemask(task, &newmems); | ||
| 1062 | |||
| 1063 | mm = get_task_mm(task); | ||
| 1064 | if (!mm) | ||
| 1065 | continue; | ||
| 1066 | |||
| 1067 | migrate = is_memory_migrate(cs); | ||
| 1068 | |||
| 1069 | mpol_rebind_mm(mm, &cs->mems_allowed); | ||
| 1070 | if (migrate) | ||
| 1071 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); | ||
| 1072 | mmput(mm); | ||
| 1073 | } | ||
| 1074 | css_task_iter_end(&it); | ||
| 1114 | 1075 | ||
| 1115 | /* | 1076 | /* |
| 1116 | * All the tasks' nodemasks have been updated, update | 1077 | * All the tasks' nodemasks have been updated, update |
| @@ -1126,15 +1087,13 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) | |||
| 1126 | * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy. | 1087 | * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy. |
| 1127 | * @cs: the root cpuset of the hierarchy | 1088 | * @cs: the root cpuset of the hierarchy |
| 1128 | * @update_root: update the root cpuset or not? | 1089 | * @update_root: update the root cpuset or not? |
| 1129 | * @heap: the heap used by css_scan_tasks() | ||
| 1130 | * | 1090 | * |
| 1131 | * This will update nodemasks of tasks in @root_cs and all other empty cpusets | 1091 | * This will update nodemasks of tasks in @root_cs and all other empty cpusets |
| 1132 | * which take on nodemask of @root_cs. | 1092 | * which take on nodemask of @root_cs. |
| 1133 | * | 1093 | * |
| 1134 | * Called with cpuset_mutex held | 1094 | * Called with cpuset_mutex held |
| 1135 | */ | 1095 | */ |
| 1136 | static void update_tasks_nodemask_hier(struct cpuset *root_cs, | 1096 | static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root) |
| 1137 | bool update_root, struct ptr_heap *heap) | ||
| 1138 | { | 1097 | { |
| 1139 | struct cpuset *cp; | 1098 | struct cpuset *cp; |
| 1140 | struct cgroup_subsys_state *pos_css; | 1099 | struct cgroup_subsys_state *pos_css; |
| @@ -1155,7 +1114,7 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs, | |||
| 1155 | continue; | 1114 | continue; |
| 1156 | rcu_read_unlock(); | 1115 | rcu_read_unlock(); |
| 1157 | 1116 | ||
| 1158 | update_tasks_nodemask(cp, heap); | 1117 | update_tasks_nodemask(cp); |
| 1159 | 1118 | ||
| 1160 | rcu_read_lock(); | 1119 | rcu_read_lock(); |
| 1161 | css_put(&cp->css); | 1120 | css_put(&cp->css); |
| @@ -1180,7 +1139,6 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, | |||
| 1180 | const char *buf) | 1139 | const char *buf) |
| 1181 | { | 1140 | { |
| 1182 | int retval; | 1141 | int retval; |
| 1183 | struct ptr_heap heap; | ||
| 1184 | 1142 | ||
| 1185 | /* | 1143 | /* |
| 1186 | * top_cpuset.mems_allowed tracks node_stats[N_MEMORY]; | 1144 | * top_cpuset.mems_allowed tracks node_stats[N_MEMORY]; |
| @@ -1219,17 +1177,11 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, | |||
| 1219 | if (retval < 0) | 1177 | if (retval < 0) |
| 1220 | goto done; | 1178 | goto done; |
| 1221 | 1179 | ||
| 1222 | retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); | ||
| 1223 | if (retval < 0) | ||
| 1224 | goto done; | ||
| 1225 | |||
| 1226 | mutex_lock(&callback_mutex); | 1180 | mutex_lock(&callback_mutex); |
| 1227 | cs->mems_allowed = trialcs->mems_allowed; | 1181 | cs->mems_allowed = trialcs->mems_allowed; |
| 1228 | mutex_unlock(&callback_mutex); | 1182 | mutex_unlock(&callback_mutex); |
| 1229 | 1183 | ||
| 1230 | update_tasks_nodemask_hier(cs, true, &heap); | 1184 | update_tasks_nodemask_hier(cs, true); |
| 1231 | |||
| 1232 | heap_free(&heap); | ||
| 1233 | done: | 1185 | done: |
| 1234 | return retval; | 1186 | return retval; |
| 1235 | } | 1187 | } |
| @@ -1257,38 +1209,22 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) | |||
| 1257 | } | 1209 | } |
| 1258 | 1210 | ||
| 1259 | /** | 1211 | /** |
| 1260 | * cpuset_change_flag - make a task's spread flags the same as its cpuset's | ||
| 1261 | * @tsk: task to be updated | ||
| 1262 | * @data: cpuset to @tsk belongs to | ||
| 1263 | * | ||
| 1264 | * Called by css_scan_tasks() for each task in a cgroup. | ||
| 1265 | * | ||
| 1266 | * We don't need to re-check for the cgroup/cpuset membership, since we're | ||
| 1267 | * holding cpuset_mutex at this point. | ||
| 1268 | */ | ||
| 1269 | static void cpuset_change_flag(struct task_struct *tsk, void *data) | ||
| 1270 | { | ||
| 1271 | struct cpuset *cs = data; | ||
| 1272 | |||
| 1273 | cpuset_update_task_spread_flag(cs, tsk); | ||
| 1274 | } | ||
| 1275 | |||
| 1276 | /** | ||
| 1277 | * update_tasks_flags - update the spread flags of tasks in the cpuset. | 1212 | * update_tasks_flags - update the spread flags of tasks in the cpuset. |
| 1278 | * @cs: the cpuset in which each task's spread flags needs to be changed | 1213 | * @cs: the cpuset in which each task's spread flags needs to be changed |
| 1279 | * @heap: if NULL, defer allocating heap memory to css_scan_tasks() | ||
| 1280 | * | ||
| 1281 | * Called with cpuset_mutex held | ||
| 1282 | * | 1214 | * |
| 1283 | * The css_scan_tasks() function will scan all the tasks in a cgroup, | 1215 | * Iterate through each task of @cs updating its spread flags. As this |
| 1284 | * calling callback functions for each. | 1216 | * function is called with cpuset_mutex held, cpuset membership stays |
| 1285 | * | 1217 | * stable. |
| 1286 | * No return value. It's guaranteed that css_scan_tasks() always returns 0 | ||
| 1287 | * if @heap != NULL. | ||
| 1288 | */ | 1218 | */ |
| 1289 | static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap) | 1219 | static void update_tasks_flags(struct cpuset *cs) |
| 1290 | { | 1220 | { |
| 1291 | css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap); | 1221 | struct css_task_iter it; |
| 1222 | struct task_struct *task; | ||
| 1223 | |||
| 1224 | css_task_iter_start(&cs->css, &it); | ||
| 1225 | while ((task = css_task_iter_next(&it))) | ||
| 1226 | cpuset_update_task_spread_flag(cs, task); | ||
| 1227 | css_task_iter_end(&it); | ||
| 1292 | } | 1228 | } |
| 1293 | 1229 | ||
| 1294 | /* | 1230 | /* |
| @@ -1306,7 +1242,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, | |||
| 1306 | struct cpuset *trialcs; | 1242 | struct cpuset *trialcs; |
| 1307 | int balance_flag_changed; | 1243 | int balance_flag_changed; |
| 1308 | int spread_flag_changed; | 1244 | int spread_flag_changed; |
| 1309 | struct ptr_heap heap; | ||
| 1310 | int err; | 1245 | int err; |
| 1311 | 1246 | ||
| 1312 | trialcs = alloc_trial_cpuset(cs); | 1247 | trialcs = alloc_trial_cpuset(cs); |
| @@ -1322,10 +1257,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, | |||
| 1322 | if (err < 0) | 1257 | if (err < 0) |
| 1323 | goto out; | 1258 | goto out; |
| 1324 | 1259 | ||
| 1325 | err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); | ||
| 1326 | if (err < 0) | ||
| 1327 | goto out; | ||
| 1328 | |||
| 1329 | balance_flag_changed = (is_sched_load_balance(cs) != | 1260 | balance_flag_changed = (is_sched_load_balance(cs) != |
| 1330 | is_sched_load_balance(trialcs)); | 1261 | is_sched_load_balance(trialcs)); |
| 1331 | 1262 | ||
| @@ -1340,8 +1271,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, | |||
| 1340 | rebuild_sched_domains_locked(); | 1271 | rebuild_sched_domains_locked(); |
| 1341 | 1272 | ||
| 1342 | if (spread_flag_changed) | 1273 | if (spread_flag_changed) |
| 1343 | update_tasks_flags(cs, &heap); | 1274 | update_tasks_flags(cs); |
| 1344 | heap_free(&heap); | ||
| 1345 | out: | 1275 | out: |
| 1346 | free_trial_cpuset(trialcs); | 1276 | free_trial_cpuset(trialcs); |
| 1347 | return err; | 1277 | return err; |
| @@ -1445,6 +1375,8 @@ static int fmeter_getrate(struct fmeter *fmp) | |||
| 1445 | return val; | 1375 | return val; |
| 1446 | } | 1376 | } |
| 1447 | 1377 | ||
| 1378 | static struct cpuset *cpuset_attach_old_cs; | ||
| 1379 | |||
| 1448 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ | 1380 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ |
| 1449 | static int cpuset_can_attach(struct cgroup_subsys_state *css, | 1381 | static int cpuset_can_attach(struct cgroup_subsys_state *css, |
| 1450 | struct cgroup_taskset *tset) | 1382 | struct cgroup_taskset *tset) |
| @@ -1453,6 +1385,9 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css, | |||
| 1453 | struct task_struct *task; | 1385 | struct task_struct *task; |
| 1454 | int ret; | 1386 | int ret; |
| 1455 | 1387 | ||
| 1388 | /* used later by cpuset_attach() */ | ||
| 1389 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset)); | ||
| 1390 | |||
| 1456 | mutex_lock(&cpuset_mutex); | 1391 | mutex_lock(&cpuset_mutex); |
| 1457 | 1392 | ||
| 1458 | /* | 1393 | /* |
| @@ -1464,7 +1399,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css, | |||
| 1464 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) | 1399 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) |
| 1465 | goto out_unlock; | 1400 | goto out_unlock; |
| 1466 | 1401 | ||
| 1467 | cgroup_taskset_for_each(task, css, tset) { | 1402 | cgroup_taskset_for_each(task, tset) { |
| 1468 | /* | 1403 | /* |
| 1469 | * Kthreads which disallow setaffinity shouldn't be moved | 1404 | * Kthreads which disallow setaffinity shouldn't be moved |
| 1470 | * to a new cpuset; we don't want to change their cpu | 1405 | * to a new cpuset; we don't want to change their cpu |
| @@ -1516,10 +1451,8 @@ static void cpuset_attach(struct cgroup_subsys_state *css, | |||
| 1516 | struct mm_struct *mm; | 1451 | struct mm_struct *mm; |
| 1517 | struct task_struct *task; | 1452 | struct task_struct *task; |
| 1518 | struct task_struct *leader = cgroup_taskset_first(tset); | 1453 | struct task_struct *leader = cgroup_taskset_first(tset); |
| 1519 | struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset, | ||
| 1520 | cpuset_subsys_id); | ||
| 1521 | struct cpuset *cs = css_cs(css); | 1454 | struct cpuset *cs = css_cs(css); |
| 1522 | struct cpuset *oldcs = css_cs(oldcss); | 1455 | struct cpuset *oldcs = cpuset_attach_old_cs; |
| 1523 | struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); | 1456 | struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); |
| 1524 | struct cpuset *mems_cs = effective_nodemask_cpuset(cs); | 1457 | struct cpuset *mems_cs = effective_nodemask_cpuset(cs); |
| 1525 | 1458 | ||
| @@ -1533,7 +1466,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css, | |||
| 1533 | 1466 | ||
| 1534 | guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to); | 1467 | guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to); |
| 1535 | 1468 | ||
| 1536 | cgroup_taskset_for_each(task, css, tset) { | 1469 | cgroup_taskset_for_each(task, tset) { |
| 1537 | /* | 1470 | /* |
| 1538 | * can_attach beforehand should guarantee that this doesn't | 1471 | * can_attach beforehand should guarantee that this doesn't |
| 1539 | * fail. TODO: have a better way to handle failure here | 1472 | * fail. TODO: have a better way to handle failure here |
| @@ -1673,7 +1606,7 @@ out_unlock: | |||
| 1673 | * Common handling for a write to a "cpus" or "mems" file. | 1606 | * Common handling for a write to a "cpus" or "mems" file. |
| 1674 | */ | 1607 | */ |
| 1675 | static int cpuset_write_resmask(struct cgroup_subsys_state *css, | 1608 | static int cpuset_write_resmask(struct cgroup_subsys_state *css, |
| 1676 | struct cftype *cft, const char *buf) | 1609 | struct cftype *cft, char *buf) |
| 1677 | { | 1610 | { |
| 1678 | struct cpuset *cs = css_cs(css); | 1611 | struct cpuset *cs = css_cs(css); |
| 1679 | struct cpuset *trialcs; | 1612 | struct cpuset *trialcs; |
| @@ -2020,8 +1953,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) | |||
| 2020 | kfree(cs); | 1953 | kfree(cs); |
| 2021 | } | 1954 | } |
| 2022 | 1955 | ||
| 2023 | struct cgroup_subsys cpuset_subsys = { | 1956 | struct cgroup_subsys cpuset_cgrp_subsys = { |
| 2024 | .name = "cpuset", | ||
| 2025 | .css_alloc = cpuset_css_alloc, | 1957 | .css_alloc = cpuset_css_alloc, |
| 2026 | .css_online = cpuset_css_online, | 1958 | .css_online = cpuset_css_online, |
| 2027 | .css_offline = cpuset_css_offline, | 1959 | .css_offline = cpuset_css_offline, |
| @@ -2029,7 +1961,6 @@ struct cgroup_subsys cpuset_subsys = { | |||
| 2029 | .can_attach = cpuset_can_attach, | 1961 | .can_attach = cpuset_can_attach, |
| 2030 | .cancel_attach = cpuset_cancel_attach, | 1962 | .cancel_attach = cpuset_cancel_attach, |
| 2031 | .attach = cpuset_attach, | 1963 | .attach = cpuset_attach, |
| 2032 | .subsys_id = cpuset_subsys_id, | ||
| 2033 | .base_cftypes = files, | 1964 | .base_cftypes = files, |
| 2034 | .early_init = 1, | 1965 | .early_init = 1, |
| 2035 | }; | 1966 | }; |
| @@ -2086,10 +2017,9 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs) | |||
| 2086 | parent = parent_cs(parent); | 2017 | parent = parent_cs(parent); |
| 2087 | 2018 | ||
| 2088 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { | 2019 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { |
| 2089 | rcu_read_lock(); | 2020 | printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset "); |
| 2090 | printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n", | 2021 | pr_cont_cgroup_name(cs->css.cgroup); |
| 2091 | cgroup_name(cs->css.cgroup)); | 2022 | pr_cont("\n"); |
| 2092 | rcu_read_unlock(); | ||
| 2093 | } | 2023 | } |
| 2094 | } | 2024 | } |
| 2095 | 2025 | ||
| @@ -2137,7 +2067,7 @@ retry: | |||
| 2137 | */ | 2067 | */ |
| 2138 | if ((sane && cpumask_empty(cs->cpus_allowed)) || | 2068 | if ((sane && cpumask_empty(cs->cpus_allowed)) || |
| 2139 | (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed))) | 2069 | (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed))) |
| 2140 | update_tasks_cpumask(cs, NULL); | 2070 | update_tasks_cpumask(cs); |
| 2141 | 2071 | ||
| 2142 | mutex_lock(&callback_mutex); | 2072 | mutex_lock(&callback_mutex); |
| 2143 | nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems); | 2073 | nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems); |
| @@ -2151,7 +2081,7 @@ retry: | |||
| 2151 | */ | 2081 | */ |
| 2152 | if ((sane && nodes_empty(cs->mems_allowed)) || | 2082 | if ((sane && nodes_empty(cs->mems_allowed)) || |
| 2153 | (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed))) | 2083 | (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed))) |
| 2154 | update_tasks_nodemask(cs, NULL); | 2084 | update_tasks_nodemask(cs); |
| 2155 | 2085 | ||
| 2156 | is_empty = cpumask_empty(cs->cpus_allowed) || | 2086 | is_empty = cpumask_empty(cs->cpus_allowed) || |
| 2157 | nodes_empty(cs->mems_allowed); | 2087 | nodes_empty(cs->mems_allowed); |
| @@ -2213,7 +2143,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) | |||
| 2213 | mutex_lock(&callback_mutex); | 2143 | mutex_lock(&callback_mutex); |
| 2214 | top_cpuset.mems_allowed = new_mems; | 2144 | top_cpuset.mems_allowed = new_mems; |
| 2215 | mutex_unlock(&callback_mutex); | 2145 | mutex_unlock(&callback_mutex); |
| 2216 | update_tasks_nodemask(&top_cpuset, NULL); | 2146 | update_tasks_nodemask(&top_cpuset); |
| 2217 | } | 2147 | } |
| 2218 | 2148 | ||
| 2219 | mutex_unlock(&cpuset_mutex); | 2149 | mutex_unlock(&cpuset_mutex); |
| @@ -2305,10 +2235,10 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) | |||
| 2305 | struct cpuset *cpus_cs; | 2235 | struct cpuset *cpus_cs; |
| 2306 | 2236 | ||
| 2307 | mutex_lock(&callback_mutex); | 2237 | mutex_lock(&callback_mutex); |
| 2308 | task_lock(tsk); | 2238 | rcu_read_lock(); |
| 2309 | cpus_cs = effective_cpumask_cpuset(task_cs(tsk)); | 2239 | cpus_cs = effective_cpumask_cpuset(task_cs(tsk)); |
| 2310 | guarantee_online_cpus(cpus_cs, pmask); | 2240 | guarantee_online_cpus(cpus_cs, pmask); |
| 2311 | task_unlock(tsk); | 2241 | rcu_read_unlock(); |
| 2312 | mutex_unlock(&callback_mutex); | 2242 | mutex_unlock(&callback_mutex); |
| 2313 | } | 2243 | } |
| 2314 | 2244 | ||
| @@ -2361,10 +2291,10 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) | |||
| 2361 | nodemask_t mask; | 2291 | nodemask_t mask; |
| 2362 | 2292 | ||
| 2363 | mutex_lock(&callback_mutex); | 2293 | mutex_lock(&callback_mutex); |
| 2364 | task_lock(tsk); | 2294 | rcu_read_lock(); |
| 2365 | mems_cs = effective_nodemask_cpuset(task_cs(tsk)); | 2295 | mems_cs = effective_nodemask_cpuset(task_cs(tsk)); |
| 2366 | guarantee_online_mems(mems_cs, &mask); | 2296 | guarantee_online_mems(mems_cs, &mask); |
| 2367 | task_unlock(tsk); | 2297 | rcu_read_unlock(); |
| 2368 | mutex_unlock(&callback_mutex); | 2298 | mutex_unlock(&callback_mutex); |
| 2369 | 2299 | ||
| 2370 | return mask; | 2300 | return mask; |
| @@ -2480,10 +2410,10 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | |||
| 2480 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ | 2410 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
| 2481 | mutex_lock(&callback_mutex); | 2411 | mutex_lock(&callback_mutex); |
| 2482 | 2412 | ||
| 2483 | task_lock(current); | 2413 | rcu_read_lock(); |
| 2484 | cs = nearest_hardwall_ancestor(task_cs(current)); | 2414 | cs = nearest_hardwall_ancestor(task_cs(current)); |
| 2485 | allowed = node_isset(node, cs->mems_allowed); | 2415 | allowed = node_isset(node, cs->mems_allowed); |
| 2486 | task_unlock(current); | 2416 | rcu_read_unlock(); |
| 2487 | 2417 | ||
| 2488 | mutex_unlock(&callback_mutex); | 2418 | mutex_unlock(&callback_mutex); |
| 2489 | return allowed; | 2419 | return allowed; |
| @@ -2609,27 +2539,27 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, | |||
| 2609 | * @task: pointer to task_struct of some task. | 2539 | * @task: pointer to task_struct of some task. |
| 2610 | * | 2540 | * |
| 2611 | * Description: Prints @task's name, cpuset name, and cached copy of its | 2541 | * Description: Prints @task's name, cpuset name, and cached copy of its |
| 2612 | * mems_allowed to the kernel log. Must hold task_lock(task) to allow | 2542 | * mems_allowed to the kernel log. |
| 2613 | * dereferencing task_cs(task). | ||
| 2614 | */ | 2543 | */ |
| 2615 | void cpuset_print_task_mems_allowed(struct task_struct *tsk) | 2544 | void cpuset_print_task_mems_allowed(struct task_struct *tsk) |
| 2616 | { | 2545 | { |
| 2617 | /* Statically allocated to prevent using excess stack. */ | 2546 | /* Statically allocated to prevent using excess stack. */ |
| 2618 | static char cpuset_nodelist[CPUSET_NODELIST_LEN]; | 2547 | static char cpuset_nodelist[CPUSET_NODELIST_LEN]; |
| 2619 | static DEFINE_SPINLOCK(cpuset_buffer_lock); | 2548 | static DEFINE_SPINLOCK(cpuset_buffer_lock); |
| 2549 | struct cgroup *cgrp; | ||
| 2620 | 2550 | ||
| 2621 | struct cgroup *cgrp = task_cs(tsk)->css.cgroup; | ||
| 2622 | |||
| 2623 | rcu_read_lock(); | ||
| 2624 | spin_lock(&cpuset_buffer_lock); | 2551 | spin_lock(&cpuset_buffer_lock); |
| 2552 | rcu_read_lock(); | ||
| 2625 | 2553 | ||
| 2554 | cgrp = task_cs(tsk)->css.cgroup; | ||
| 2626 | nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, | 2555 | nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, |
| 2627 | tsk->mems_allowed); | 2556 | tsk->mems_allowed); |
| 2628 | printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n", | 2557 | printk(KERN_INFO "%s cpuset=", tsk->comm); |
| 2629 | tsk->comm, cgroup_name(cgrp), cpuset_nodelist); | 2558 | pr_cont_cgroup_name(cgrp); |
| 2559 | pr_cont(" mems_allowed=%s\n", cpuset_nodelist); | ||
| 2630 | 2560 | ||
| 2631 | spin_unlock(&cpuset_buffer_lock); | ||
| 2632 | rcu_read_unlock(); | 2561 | rcu_read_unlock(); |
| 2562 | spin_unlock(&cpuset_buffer_lock); | ||
| 2633 | } | 2563 | } |
| 2634 | 2564 | ||
| 2635 | /* | 2565 | /* |
| @@ -2660,9 +2590,9 @@ int cpuset_memory_pressure_enabled __read_mostly; | |||
| 2660 | 2590 | ||
| 2661 | void __cpuset_memory_pressure_bump(void) | 2591 | void __cpuset_memory_pressure_bump(void) |
| 2662 | { | 2592 | { |
| 2663 | task_lock(current); | 2593 | rcu_read_lock(); |
| 2664 | fmeter_markevent(&task_cs(current)->fmeter); | 2594 | fmeter_markevent(&task_cs(current)->fmeter); |
| 2665 | task_unlock(current); | 2595 | rcu_read_unlock(); |
| 2666 | } | 2596 | } |
| 2667 | 2597 | ||
| 2668 | #ifdef CONFIG_PROC_PID_CPUSET | 2598 | #ifdef CONFIG_PROC_PID_CPUSET |
| @@ -2679,12 +2609,12 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v) | |||
| 2679 | { | 2609 | { |
| 2680 | struct pid *pid; | 2610 | struct pid *pid; |
| 2681 | struct task_struct *tsk; | 2611 | struct task_struct *tsk; |
| 2682 | char *buf; | 2612 | char *buf, *p; |
| 2683 | struct cgroup_subsys_state *css; | 2613 | struct cgroup_subsys_state *css; |
| 2684 | int retval; | 2614 | int retval; |
| 2685 | 2615 | ||
| 2686 | retval = -ENOMEM; | 2616 | retval = -ENOMEM; |
| 2687 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 2617 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
| 2688 | if (!buf) | 2618 | if (!buf) |
| 2689 | goto out; | 2619 | goto out; |
| 2690 | 2620 | ||
| @@ -2694,14 +2624,16 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v) | |||
| 2694 | if (!tsk) | 2624 | if (!tsk) |
| 2695 | goto out_free; | 2625 | goto out_free; |
| 2696 | 2626 | ||
| 2627 | retval = -ENAMETOOLONG; | ||
| 2697 | rcu_read_lock(); | 2628 | rcu_read_lock(); |
| 2698 | css = task_css(tsk, cpuset_subsys_id); | 2629 | css = task_css(tsk, cpuset_cgrp_id); |
| 2699 | retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); | 2630 | p = cgroup_path(css->cgroup, buf, PATH_MAX); |
| 2700 | rcu_read_unlock(); | 2631 | rcu_read_unlock(); |
| 2701 | if (retval < 0) | 2632 | if (!p) |
| 2702 | goto out_put_task; | 2633 | goto out_put_task; |
| 2703 | seq_puts(m, buf); | 2634 | seq_puts(m, p); |
| 2704 | seq_putc(m, '\n'); | 2635 | seq_putc(m, '\n'); |
| 2636 | retval = 0; | ||
| 2705 | out_put_task: | 2637 | out_put_task: |
| 2706 | put_task_struct(tsk); | 2638 | put_task_struct(tsk); |
| 2707 | out_free: | 2639 | out_free: |
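The cpuset.c hunks above repeatedly swap css_scan_tasks() plus a per-task callback (and its preallocated ptr_heap) for an open-coded walk of the css's tasks. A minimal sketch of that pattern, using only the iterator calls that appear in the diff (css_task_iter_start/next/end); update_one_task() is a hypothetical stand-in for the per-task work, not a real kernel symbol:

	#include <linux/cgroup.h>
	#include <linux/sched.h>

	static void update_one_task(struct task_struct *task);	/* hypothetical */

	/* Walk every task attached to @css; callers hold cpuset_mutex,
	 * so cpuset membership cannot change under us. */
	static void update_tasks_in_css(struct cgroup_subsys_state *css)
	{
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(css, &it);
		while ((task = css_task_iter_next(&it)))
			update_one_task(task);
		css_task_iter_end(&it);
	}

Because the iterator needs no temporary storage, update_cpumask(), update_nodemask() and update_flag() can drop their heap_init()/heap_free() bracketing and struct ptr_heap locals, which accounts for most of the deletions in this part of the diff.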
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 99982a70ddad..2956c8da1605 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c | |||
| @@ -49,6 +49,7 @@ | |||
| 49 | #include <linux/pid.h> | 49 | #include <linux/pid.h> |
| 50 | #include <linux/smp.h> | 50 | #include <linux/smp.h> |
| 51 | #include <linux/mm.h> | 51 | #include <linux/mm.h> |
| 52 | #include <linux/vmacache.h> | ||
| 52 | #include <linux/rcupdate.h> | 53 | #include <linux/rcupdate.h> |
| 53 | 54 | ||
| 54 | #include <asm/cacheflush.h> | 55 | #include <asm/cacheflush.h> |
| @@ -224,10 +225,17 @@ static void kgdb_flush_swbreak_addr(unsigned long addr) | |||
| 224 | if (!CACHE_FLUSH_IS_SAFE) | 225 | if (!CACHE_FLUSH_IS_SAFE) |
| 225 | return; | 226 | return; |
| 226 | 227 | ||
| 227 | if (current->mm && current->mm->mmap_cache) { | 228 | if (current->mm) { |
| 228 | flush_cache_range(current->mm->mmap_cache, | 229 | int i; |
| 229 | addr, addr + BREAK_INSTR_SIZE); | 230 | |
| 231 | for (i = 0; i < VMACACHE_SIZE; i++) { | ||
| 232 | if (!current->vmacache[i]) | ||
| 233 | continue; | ||
| 234 | flush_cache_range(current->vmacache[i], | ||
| 235 | addr, addr + BREAK_INSTR_SIZE); | ||
| 236 | } | ||
| 230 | } | 237 | } |
| 238 | |||
| 231 | /* Force flush instruction cache if it was outside the mm */ | 239 | /* Force flush instruction cache if it was outside the mm */ |
| 232 | flush_icache_range(addr, addr + BREAK_INSTR_SIZE); | 240 | flush_icache_range(addr, addr + BREAK_INSTR_SIZE); |
| 233 | } | 241 | } |
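The kgdb change above is a consumer-side fixup for the per-task VMA cache introduced elsewhere in this series. As a hedged sketch of the layout it relies on, inferred only from the uses visible in this diff (current->vmacache[i], VMACACHE_SIZE, and mm->vmacache_seqnum in the fork.c hunk below), not from the mm patches themselves:

	#include <linux/mm_types.h>

	#define VMACACHE_SIZE	4	/* the actual size is an assumption here */

	/* assumed per-task fields, shown only for illustration */
	struct vmacache_fields {
		unsigned long		seqnum;	/* checked against mm->vmacache_seqnum */
		struct vm_area_struct	*vmas[VMACACHE_SIZE];
	};

With several cached VMAs per task instead of one mm->mmap_cache per mm, kgdb_flush_swbreak_addr() has to walk all VMACACHE_SIZE slots rather than flushing a single cached range.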
diff --git a/kernel/events/core.c b/kernel/events/core.c index 661951ab8ae7..f83a71a3e46d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -361,7 +361,7 @@ struct perf_cgroup { | |||
| 361 | static inline struct perf_cgroup * | 361 | static inline struct perf_cgroup * |
| 362 | perf_cgroup_from_task(struct task_struct *task) | 362 | perf_cgroup_from_task(struct task_struct *task) |
| 363 | { | 363 | { |
| 364 | return container_of(task_css(task, perf_subsys_id), | 364 | return container_of(task_css(task, perf_event_cgrp_id), |
| 365 | struct perf_cgroup, css); | 365 | struct perf_cgroup, css); |
| 366 | } | 366 | } |
| 367 | 367 | ||
| @@ -389,11 +389,6 @@ perf_cgroup_match(struct perf_event *event) | |||
| 389 | event->cgrp->css.cgroup); | 389 | event->cgrp->css.cgroup); |
| 390 | } | 390 | } |
| 391 | 391 | ||
| 392 | static inline bool perf_tryget_cgroup(struct perf_event *event) | ||
| 393 | { | ||
| 394 | return css_tryget(&event->cgrp->css); | ||
| 395 | } | ||
| 396 | |||
| 397 | static inline void perf_put_cgroup(struct perf_event *event) | 392 | static inline void perf_put_cgroup(struct perf_event *event) |
| 398 | { | 393 | { |
| 399 | css_put(&event->cgrp->css); | 394 | css_put(&event->cgrp->css); |
| @@ -612,9 +607,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, | |||
| 612 | if (!f.file) | 607 | if (!f.file) |
| 613 | return -EBADF; | 608 | return -EBADF; |
| 614 | 609 | ||
| 615 | rcu_read_lock(); | 610 | css = css_tryget_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys); |
| 616 | |||
| 617 | css = css_from_dir(f.file->f_dentry, &perf_subsys); | ||
| 618 | if (IS_ERR(css)) { | 611 | if (IS_ERR(css)) { |
| 619 | ret = PTR_ERR(css); | 612 | ret = PTR_ERR(css); |
| 620 | goto out; | 613 | goto out; |
| @@ -623,13 +616,6 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, | |||
| 623 | cgrp = container_of(css, struct perf_cgroup, css); | 616 | cgrp = container_of(css, struct perf_cgroup, css); |
| 624 | event->cgrp = cgrp; | 617 | event->cgrp = cgrp; |
| 625 | 618 | ||
| 626 | /* must be done before we fput() the file */ | ||
| 627 | if (!perf_tryget_cgroup(event)) { | ||
| 628 | event->cgrp = NULL; | ||
| 629 | ret = -ENOENT; | ||
| 630 | goto out; | ||
| 631 | } | ||
| 632 | |||
| 633 | /* | 619 | /* |
| 634 | * all events in a group must monitor | 620 | * all events in a group must monitor |
| 635 | * the same cgroup because a task belongs | 621 | * the same cgroup because a task belongs |
| @@ -640,7 +626,6 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, | |||
| 640 | ret = -EINVAL; | 626 | ret = -EINVAL; |
| 641 | } | 627 | } |
| 642 | out: | 628 | out: |
| 643 | rcu_read_unlock(); | ||
| 644 | fdput(f); | 629 | fdput(f); |
| 645 | return ret; | 630 | return ret; |
| 646 | } | 631 | } |
| @@ -8053,7 +8038,7 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css, | |||
| 8053 | { | 8038 | { |
| 8054 | struct task_struct *task; | 8039 | struct task_struct *task; |
| 8055 | 8040 | ||
| 8056 | cgroup_taskset_for_each(task, css, tset) | 8041 | cgroup_taskset_for_each(task, tset) |
| 8057 | task_function_call(task, __perf_cgroup_move, task); | 8042 | task_function_call(task, __perf_cgroup_move, task); |
| 8058 | } | 8043 | } |
| 8059 | 8044 | ||
| @@ -8072,9 +8057,7 @@ static void perf_cgroup_exit(struct cgroup_subsys_state *css, | |||
| 8072 | task_function_call(task, __perf_cgroup_move, task); | 8057 | task_function_call(task, __perf_cgroup_move, task); |
| 8073 | } | 8058 | } |
| 8074 | 8059 | ||
| 8075 | struct cgroup_subsys perf_subsys = { | 8060 | struct cgroup_subsys perf_event_cgrp_subsys = { |
| 8076 | .name = "perf_event", | ||
| 8077 | .subsys_id = perf_subsys_id, | ||
| 8078 | .css_alloc = perf_cgroup_css_alloc, | 8061 | .css_alloc = perf_cgroup_css_alloc, |
| 8079 | .css_free = perf_cgroup_css_free, | 8062 | .css_free = perf_cgroup_css_free, |
| 8080 | .exit = perf_cgroup_exit, | 8063 | .exit = perf_cgroup_exit, |
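In the perf_cgroup_connect() hunk, css_tryget_from_dir() is assumed to return an already-referenced css on success, which is what lets the patch drop both the rcu_read_lock()/rcu_read_unlock() pair and the separate perf_tryget_cgroup() step. A condensed, hedged sketch of the resulting flow (example_connect() is an illustrative wrapper, and struct perf_cgroup is the file-local type from core.c):

	#include <linux/cgroup.h>
	#include <linux/err.h>
	#include <linux/file.h>

	static int example_connect(struct fd f, struct perf_event *event)
	{
		struct cgroup_subsys_state *css;

		css = css_tryget_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys);
		if (IS_ERR(css))
			return PTR_ERR(css);

		/* the reference taken here is dropped later via perf_put_cgroup() */
		event->cgrp = container_of(css, struct perf_cgroup, css);
		return 0;
	}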
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 307d87c0991a..04709b66369d 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
| @@ -1804,6 +1804,11 @@ static bool handle_trampoline(struct pt_regs *regs) | |||
| 1804 | return true; | 1804 | return true; |
| 1805 | } | 1805 | } |
| 1806 | 1806 | ||
| 1807 | bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) | ||
| 1808 | { | ||
| 1809 | return false; | ||
| 1810 | } | ||
| 1811 | |||
| 1807 | /* | 1812 | /* |
| 1808 | * Run handler and ask thread to singlestep. | 1813 | * Run handler and ask thread to singlestep. |
| 1809 | * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. | 1814 | * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. |
| @@ -1858,7 +1863,11 @@ static void handle_swbp(struct pt_regs *regs) | |||
| 1858 | if (!get_utask()) | 1863 | if (!get_utask()) |
| 1859 | goto out; | 1864 | goto out; |
| 1860 | 1865 | ||
| 1866 | if (arch_uprobe_ignore(&uprobe->arch, regs)) | ||
| 1867 | goto out; | ||
| 1868 | |||
| 1861 | handler_chain(uprobe, regs); | 1869 | handler_chain(uprobe, regs); |
| 1870 | |||
| 1862 | if (can_skip_sstep(uprobe, regs)) | 1871 | if (can_skip_sstep(uprobe, regs)) |
| 1863 | goto out; | 1872 | goto out; |
| 1864 | 1873 | ||
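The new __weak arch_uprobe_ignore() gives an architecture a chance to veto a probe hit before handler_chain() runs; when it returns true, handle_swbp() skips both the consumer handlers and the single-step setup for that hit. Purely as a hypothetical illustration (no architecture code is part of this hunk, and arch_condition_passes() is a made-up helper), an override might look like:

	#include <linux/uprobes.h>
	#include <linux/ptrace.h>

	static bool arch_condition_passes(struct arch_uprobe *aup,
					  struct pt_regs *regs);	/* hypothetical */

	bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
	{
		/* skip the probe when a conditional instruction would not execute */
		return !arch_condition_passes(aup, regs);
	}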
diff --git a/kernel/exit.c b/kernel/exit.c index 1e77fc645317..6ed6a1d552b5 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -570,7 +570,7 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p, | |||
| 570 | if (same_thread_group(p->real_parent, father)) | 570 | if (same_thread_group(p->real_parent, father)) |
| 571 | return; | 571 | return; |
| 572 | 572 | ||
| 573 | /* We don't want people slaying init. */ | 573 | /* We don't want people slaying init. */ |
| 574 | p->exit_signal = SIGCHLD; | 574 | p->exit_signal = SIGCHLD; |
| 575 | 575 | ||
| 576 | /* If it has exited notify the new parent about this child's death. */ | 576 | /* If it has exited notify the new parent about this child's death. */ |
| @@ -784,9 +784,10 @@ void do_exit(long code) | |||
| 784 | exit_shm(tsk); | 784 | exit_shm(tsk); |
| 785 | exit_files(tsk); | 785 | exit_files(tsk); |
| 786 | exit_fs(tsk); | 786 | exit_fs(tsk); |
| 787 | if (group_dead) | ||
| 788 | disassociate_ctty(1); | ||
| 787 | exit_task_namespaces(tsk); | 789 | exit_task_namespaces(tsk); |
| 788 | exit_task_work(tsk); | 790 | exit_task_work(tsk); |
| 789 | check_stack_usage(); | ||
| 790 | exit_thread(); | 791 | exit_thread(); |
| 791 | 792 | ||
| 792 | /* | 793 | /* |
| @@ -797,21 +798,17 @@ void do_exit(long code) | |||
| 797 | */ | 798 | */ |
| 798 | perf_event_exit_task(tsk); | 799 | perf_event_exit_task(tsk); |
| 799 | 800 | ||
| 800 | cgroup_exit(tsk, 1); | 801 | cgroup_exit(tsk); |
| 801 | |||
| 802 | if (group_dead) | ||
| 803 | disassociate_ctty(1); | ||
| 804 | 802 | ||
| 805 | module_put(task_thread_info(tsk)->exec_domain->module); | 803 | module_put(task_thread_info(tsk)->exec_domain->module); |
| 806 | 804 | ||
| 807 | proc_exit_connector(tsk); | ||
| 808 | |||
| 809 | /* | 805 | /* |
| 810 | * FIXME: do that only when needed, using sched_exit tracepoint | 806 | * FIXME: do that only when needed, using sched_exit tracepoint |
| 811 | */ | 807 | */ |
| 812 | flush_ptrace_hw_breakpoint(tsk); | 808 | flush_ptrace_hw_breakpoint(tsk); |
| 813 | 809 | ||
| 814 | exit_notify(tsk, group_dead); | 810 | exit_notify(tsk, group_dead); |
| 811 | proc_exit_connector(tsk); | ||
| 815 | #ifdef CONFIG_NUMA | 812 | #ifdef CONFIG_NUMA |
| 816 | task_lock(tsk); | 813 | task_lock(tsk); |
| 817 | mpol_put(tsk->mempolicy); | 814 | mpol_put(tsk->mempolicy); |
| @@ -844,6 +841,7 @@ void do_exit(long code) | |||
| 844 | 841 | ||
| 845 | validate_creds_for_do_exit(tsk); | 842 | validate_creds_for_do_exit(tsk); |
| 846 | 843 | ||
| 844 | check_stack_usage(); | ||
| 847 | preempt_disable(); | 845 | preempt_disable(); |
| 848 | if (tsk->nr_dirtied) | 846 | if (tsk->nr_dirtied) |
| 849 | __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); | 847 | __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); |
| @@ -1038,17 +1036,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
| 1038 | return wait_noreap_copyout(wo, p, pid, uid, why, status); | 1036 | return wait_noreap_copyout(wo, p, pid, uid, why, status); |
| 1039 | } | 1037 | } |
| 1040 | 1038 | ||
| 1039 | traced = ptrace_reparented(p); | ||
| 1041 | /* | 1040 | /* |
| 1042 | * Try to move the task's state to DEAD | 1041 | * Move the task's state to DEAD/TRACE, only one thread can do this. |
| 1043 | * only one thread is allowed to do this: | ||
| 1044 | */ | 1042 | */ |
| 1045 | state = xchg(&p->exit_state, EXIT_DEAD); | 1043 | state = traced && thread_group_leader(p) ? EXIT_TRACE : EXIT_DEAD; |
| 1046 | if (state != EXIT_ZOMBIE) { | 1044 | if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE) |
| 1047 | BUG_ON(state != EXIT_DEAD); | ||
| 1048 | return 0; | 1045 | return 0; |
| 1049 | } | ||
| 1050 | |||
| 1051 | traced = ptrace_reparented(p); | ||
| 1052 | /* | 1046 | /* |
| 1053 | * It can be ptraced but not reparented, check | 1047 | * It can be ptraced but not reparented, check |
| 1054 | * thread_group_leader() to filter out sub-threads. | 1048 | * thread_group_leader() to filter out sub-threads. |
| @@ -1109,7 +1103,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
| 1109 | 1103 | ||
| 1110 | /* | 1104 | /* |
| 1111 | * Now we are sure this task is interesting, and no other | 1105 | * Now we are sure this task is interesting, and no other |
| 1112 | * thread can reap it because we set its state to EXIT_DEAD. | 1106 | * thread can reap it because its state == DEAD/TRACE. |
| 1113 | */ | 1107 | */ |
| 1114 | read_unlock(&tasklist_lock); | 1108 | read_unlock(&tasklist_lock); |
| 1115 | 1109 | ||
| @@ -1146,22 +1140,19 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | |||
| 1146 | if (!retval) | 1140 | if (!retval) |
| 1147 | retval = pid; | 1141 | retval = pid; |
| 1148 | 1142 | ||
| 1149 | if (traced) { | 1143 | if (state == EXIT_TRACE) { |
| 1150 | write_lock_irq(&tasklist_lock); | 1144 | write_lock_irq(&tasklist_lock); |
| 1151 | /* We dropped tasklist, ptracer could die and untrace */ | 1145 | /* We dropped tasklist, ptracer could die and untrace */ |
| 1152 | ptrace_unlink(p); | 1146 | ptrace_unlink(p); |
| 1153 | /* | 1147 | |
| 1154 | * If this is not a sub-thread, notify the parent. | 1148 | /* If parent wants a zombie, don't release it now */ |
| 1155 | * If parent wants a zombie, don't release it now. | 1149 | state = EXIT_ZOMBIE; |
| 1156 | */ | 1150 | if (do_notify_parent(p, p->exit_signal)) |
| 1157 | if (thread_group_leader(p) && | 1151 | state = EXIT_DEAD; |
| 1158 | !do_notify_parent(p, p->exit_signal)) { | 1152 | p->exit_state = state; |
| 1159 | p->exit_state = EXIT_ZOMBIE; | ||
| 1160 | p = NULL; | ||
| 1161 | } | ||
| 1162 | write_unlock_irq(&tasklist_lock); | 1153 | write_unlock_irq(&tasklist_lock); |
| 1163 | } | 1154 | } |
| 1164 | if (p != NULL) | 1155 | if (state == EXIT_DEAD) |
| 1165 | release_task(p); | 1156 | release_task(p); |
| 1166 | 1157 | ||
| 1167 | return retval; | 1158 | return retval; |
| @@ -1338,7 +1329,12 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) | |||
| 1338 | static int wait_consider_task(struct wait_opts *wo, int ptrace, | 1329 | static int wait_consider_task(struct wait_opts *wo, int ptrace, |
| 1339 | struct task_struct *p) | 1330 | struct task_struct *p) |
| 1340 | { | 1331 | { |
| 1341 | int ret = eligible_child(wo, p); | 1332 | int ret; |
| 1333 | |||
| 1334 | if (unlikely(p->exit_state == EXIT_DEAD)) | ||
| 1335 | return 0; | ||
| 1336 | |||
| 1337 | ret = eligible_child(wo, p); | ||
| 1342 | if (!ret) | 1338 | if (!ret) |
| 1343 | return ret; | 1339 | return ret; |
| 1344 | 1340 | ||
| @@ -1356,33 +1352,44 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, | |||
| 1356 | return 0; | 1352 | return 0; |
| 1357 | } | 1353 | } |
| 1358 | 1354 | ||
| 1359 | /* dead body doesn't have much to contribute */ | 1355 | if (unlikely(p->exit_state == EXIT_TRACE)) { |
| 1360 | if (unlikely(p->exit_state == EXIT_DEAD)) { | ||
| 1361 | /* | 1356 | /* |
| 1362 | * But do not ignore this task until the tracer does | 1357 | * ptrace == 0 means we are the natural parent. In this case |
| 1363 | * wait_task_zombie()->do_notify_parent(). | 1358 | * we should clear notask_error, debugger will notify us. |
| 1364 | */ | 1359 | */ |
| 1365 | if (likely(!ptrace) && unlikely(ptrace_reparented(p))) | 1360 | if (likely(!ptrace)) |
| 1366 | wo->notask_error = 0; | 1361 | wo->notask_error = 0; |
| 1367 | return 0; | 1362 | return 0; |
| 1368 | } | 1363 | } |
| 1369 | 1364 | ||
| 1370 | /* slay zombie? */ | 1365 | if (likely(!ptrace) && unlikely(p->ptrace)) { |
| 1371 | if (p->exit_state == EXIT_ZOMBIE) { | ||
| 1372 | /* | 1366 | /* |
| 1373 | * A zombie ptracee is only visible to its ptracer. | 1367 | * If it is traced by its real parent's group, just pretend |
| 1374 | * Notification and reaping will be cascaded to the real | 1368 | * the caller is ptrace_do_wait() and reap this child if it |
| 1375 | * parent when the ptracer detaches. | 1369 | * is zombie. |
| 1370 | * | ||
| 1371 | * This also hides group stop state from real parent; otherwise | ||
| 1372 | * a single stop can be reported twice as group and ptrace stop. | ||
| 1373 | * If a ptracer wants to distinguish these two events for its | ||
| 1374 | * own children it should create a separate process which takes | ||
| 1375 | * the role of real parent. | ||
| 1376 | */ | 1376 | */ |
| 1377 | if (likely(!ptrace) && unlikely(p->ptrace)) { | 1377 | if (!ptrace_reparented(p)) |
| 1378 | /* it will become visible, clear notask_error */ | 1378 | ptrace = 1; |
| 1379 | wo->notask_error = 0; | 1379 | } |
| 1380 | return 0; | ||
| 1381 | } | ||
| 1382 | 1380 | ||
| 1381 | /* slay zombie? */ | ||
| 1382 | if (p->exit_state == EXIT_ZOMBIE) { | ||
| 1383 | /* we don't reap group leaders with subthreads */ | 1383 | /* we don't reap group leaders with subthreads */ |
| 1384 | if (!delay_group_leader(p)) | 1384 | if (!delay_group_leader(p)) { |
| 1385 | return wait_task_zombie(wo, p); | 1385 | /* |
| 1386 | * A zombie ptracee is only visible to its ptracer. | ||
| 1387 | * Notification and reaping will be cascaded to the | ||
| 1388 | * real parent when the ptracer detaches. | ||
| 1389 | */ | ||
| 1390 | if (unlikely(ptrace) || likely(!p->ptrace)) | ||
| 1391 | return wait_task_zombie(wo, p); | ||
| 1392 | } | ||
| 1386 | 1393 | ||
| 1387 | /* | 1394 | /* |
| 1388 | * Allow access to stopped/continued state via zombie by | 1395 | * Allow access to stopped/continued state via zombie by |
| @@ -1408,19 +1415,6 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, | |||
| 1408 | wo->notask_error = 0; | 1415 | wo->notask_error = 0; |
| 1409 | } else { | 1416 | } else { |
| 1410 | /* | 1417 | /* |
| 1411 | * If @p is ptraced by a task in its real parent's group, | ||
| 1412 | * hide group stop/continued state when looking at @p as | ||
| 1413 | * the real parent; otherwise, a single stop can be | ||
| 1414 | * reported twice as group and ptrace stops. | ||
| 1415 | * | ||
| 1416 | * If a ptracer wants to distinguish the two events for its | ||
| 1417 | * own children, it should create a separate process which | ||
| 1418 | * takes the role of real parent. | ||
| 1419 | */ | ||
| 1420 | if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p)) | ||
| 1421 | return 0; | ||
| 1422 | |||
| 1423 | /* | ||
| 1424 | * @p is alive and it's gonna stop, continue or exit, so | 1418 | * @p is alive and it's gonna stop, continue or exit, so |
| 1425 | * there always is something to wait for. | 1419 | * there always is something to wait for. |
| 1426 | */ | 1420 | */ |
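The wait_task_zombie() rework turns the old xchg() to EXIT_DEAD into a cmpxchg() that atomically claims the zombie and decides up front whether the reaping path is EXIT_TRACE (a ptrace-reparented group leader) or EXIT_DEAD. The claim step, isolated here for clarity (claim_zombie() is just a restatement of the lines in the hunk, not a new function in the patch):

	#include <linux/sched.h>

	static int claim_zombie(struct task_struct *p, bool traced)
	{
		int state;

		/* exactly one waiter wins the transition out of EXIT_ZOMBIE */
		state = traced && thread_group_leader(p) ? EXIT_TRACE : EXIT_DEAD;
		if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
			return 0;	/* someone else is already reaping @p */

		return state;	/* EXIT_DEAD or EXIT_TRACE for the winner */
	}

Everything after the claim can then test state == EXIT_TRACE or state == EXIT_DEAD instead of carrying the old traced and p != NULL bookkeeping, which is why the tails of wait_task_zombie() and wait_consider_task() simplify the way they do above.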
diff --git a/kernel/fork.c b/kernel/fork.c index 332688e5e7b4..54a8d26f612f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -28,6 +28,8 @@ | |||
| 28 | #include <linux/mman.h> | 28 | #include <linux/mman.h> |
| 29 | #include <linux/mmu_notifier.h> | 29 | #include <linux/mmu_notifier.h> |
| 30 | #include <linux/fs.h> | 30 | #include <linux/fs.h> |
| 31 | #include <linux/mm.h> | ||
| 32 | #include <linux/vmacache.h> | ||
| 31 | #include <linux/nsproxy.h> | 33 | #include <linux/nsproxy.h> |
| 32 | #include <linux/capability.h> | 34 | #include <linux/capability.h> |
| 33 | #include <linux/cpu.h> | 35 | #include <linux/cpu.h> |
| @@ -71,6 +73,7 @@ | |||
| 71 | #include <linux/signalfd.h> | 73 | #include <linux/signalfd.h> |
| 72 | #include <linux/uprobes.h> | 74 | #include <linux/uprobes.h> |
| 73 | #include <linux/aio.h> | 75 | #include <linux/aio.h> |
| 76 | #include <linux/compiler.h> | ||
| 74 | 77 | ||
| 75 | #include <asm/pgtable.h> | 78 | #include <asm/pgtable.h> |
| 76 | #include <asm/pgalloc.h> | 79 | #include <asm/pgalloc.h> |
| @@ -284,7 +287,7 @@ void __init fork_init(unsigned long mempages) | |||
| 284 | init_task.signal->rlim[RLIMIT_NPROC]; | 287 | init_task.signal->rlim[RLIMIT_NPROC]; |
| 285 | } | 288 | } |
| 286 | 289 | ||
| 287 | int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst, | 290 | int __weak arch_dup_task_struct(struct task_struct *dst, |
| 288 | struct task_struct *src) | 291 | struct task_struct *src) |
| 289 | { | 292 | { |
| 290 | *dst = *src; | 293 | *dst = *src; |
| @@ -364,7 +367,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
| 364 | 367 | ||
| 365 | mm->locked_vm = 0; | 368 | mm->locked_vm = 0; |
| 366 | mm->mmap = NULL; | 369 | mm->mmap = NULL; |
| 367 | mm->mmap_cache = NULL; | 370 | mm->vmacache_seqnum = 0; |
| 368 | mm->map_count = 0; | 371 | mm->map_count = 0; |
| 369 | cpumask_clear(mm_cpumask(mm)); | 372 | cpumask_clear(mm_cpumask(mm)); |
| 370 | mm->mm_rb = RB_ROOT; | 373 | mm->mm_rb = RB_ROOT; |
| @@ -530,8 +533,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) | |||
| 530 | atomic_set(&mm->mm_count, 1); | 533 | atomic_set(&mm->mm_count, 1); |
| 531 | init_rwsem(&mm->mmap_sem); | 534 | init_rwsem(&mm->mmap_sem); |
| 532 | INIT_LIST_HEAD(&mm->mmlist); | 535 | INIT_LIST_HEAD(&mm->mmlist); |
| 533 | mm->flags = (current->mm) ? | ||
| 534 | (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; | ||
| 535 | mm->core_state = NULL; | 536 | mm->core_state = NULL; |
| 536 | atomic_long_set(&mm->nr_ptes, 0); | 537 | atomic_long_set(&mm->nr_ptes, 0); |
| 537 | memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); | 538 | memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); |
| @@ -540,8 +541,15 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) | |||
| 540 | mm_init_owner(mm, p); | 541 | mm_init_owner(mm, p); |
| 541 | clear_tlb_flush_pending(mm); | 542 | clear_tlb_flush_pending(mm); |
| 542 | 543 | ||
| 543 | if (likely(!mm_alloc_pgd(mm))) { | 544 | if (current->mm) { |
| 545 | mm->flags = current->mm->flags & MMF_INIT_MASK; | ||
| 546 | mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; | ||
| 547 | } else { | ||
| 548 | mm->flags = default_dump_filter; | ||
| 544 | mm->def_flags = 0; | 549 | mm->def_flags = 0; |
| 550 | } | ||
| 551 | |||
| 552 | if (likely(!mm_alloc_pgd(mm))) { | ||
| 545 | mmu_notifier_mm_init(mm); | 553 | mmu_notifier_mm_init(mm); |
| 546 | return mm; | 554 | return mm; |
| 547 | } | 555 | } |
| @@ -877,6 +885,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) | |||
| 877 | if (!oldmm) | 885 | if (!oldmm) |
| 878 | return 0; | 886 | return 0; |
| 879 | 887 | ||
| 888 | /* initialize the new vmacache entries */ | ||
| 889 | vmacache_flush(tsk); | ||
| 890 | |||
| 880 | if (clone_flags & CLONE_VM) { | 891 | if (clone_flags & CLONE_VM) { |
| 881 | atomic_inc(&oldmm->mm_users); | 892 | atomic_inc(&oldmm->mm_users); |
| 882 | mm = oldmm; | 893 | mm = oldmm; |
| @@ -1070,15 +1081,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
| 1070 | return 0; | 1081 | return 0; |
| 1071 | } | 1082 | } |
| 1072 | 1083 | ||
| 1073 | static void copy_flags(unsigned long clone_flags, struct task_struct *p) | ||
| 1074 | { | ||
| 1075 | unsigned long new_flags = p->flags; | ||
| 1076 | |||
| 1077 | new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); | ||
| 1078 | new_flags |= PF_FORKNOEXEC; | ||
| 1079 | p->flags = new_flags; | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) | 1084 | SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) |
| 1083 | { | 1085 | { |
| 1084 | current->clear_child_tid = tidptr; | 1086 | current->clear_child_tid = tidptr; |
| @@ -1228,7 +1230,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1228 | goto bad_fork_cleanup_count; | 1230 | goto bad_fork_cleanup_count; |
| 1229 | 1231 | ||
| 1230 | delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ | 1232 | delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ |
| 1231 | copy_flags(clone_flags, p); | 1233 | p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); |
| 1234 | p->flags |= PF_FORKNOEXEC; | ||
| 1232 | INIT_LIST_HEAD(&p->children); | 1235 | INIT_LIST_HEAD(&p->children); |
| 1233 | INIT_LIST_HEAD(&p->sibling); | 1236 | INIT_LIST_HEAD(&p->sibling); |
| 1234 | rcu_copy_process(p); | 1237 | rcu_copy_process(p); |
| @@ -1272,9 +1275,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1272 | if (IS_ERR(p->mempolicy)) { | 1275 | if (IS_ERR(p->mempolicy)) { |
| 1273 | retval = PTR_ERR(p->mempolicy); | 1276 | retval = PTR_ERR(p->mempolicy); |
| 1274 | p->mempolicy = NULL; | 1277 | p->mempolicy = NULL; |
| 1275 | goto bad_fork_cleanup_cgroup; | 1278 | goto bad_fork_cleanup_threadgroup_lock; |
| 1276 | } | 1279 | } |
| 1277 | mpol_fix_fork_child_flag(p); | ||
| 1278 | #endif | 1280 | #endif |
| 1279 | #ifdef CONFIG_CPUSETS | 1281 | #ifdef CONFIG_CPUSETS |
| 1280 | p->cpuset_mem_spread_rotor = NUMA_NO_NODE; | 1282 | p->cpuset_mem_spread_rotor = NUMA_NO_NODE; |
| @@ -1525,11 +1527,10 @@ bad_fork_cleanup_policy: | |||
| 1525 | perf_event_free_task(p); | 1527 | perf_event_free_task(p); |
| 1526 | #ifdef CONFIG_NUMA | 1528 | #ifdef CONFIG_NUMA |
| 1527 | mpol_put(p->mempolicy); | 1529 | mpol_put(p->mempolicy); |
| 1528 | bad_fork_cleanup_cgroup: | 1530 | bad_fork_cleanup_threadgroup_lock: |
| 1529 | #endif | 1531 | #endif |
| 1530 | if (clone_flags & CLONE_THREAD) | 1532 | if (clone_flags & CLONE_THREAD) |
| 1531 | threadgroup_change_end(current); | 1533 | threadgroup_change_end(current); |
| 1532 | cgroup_exit(p, 0); | ||
| 1533 | delayacct_tsk_free(p); | 1534 | delayacct_tsk_free(p); |
| 1534 | module_put(task_thread_info(p)->exec_domain->module); | 1535 | module_put(task_thread_info(p)->exec_domain->module); |
| 1535 | bad_fork_cleanup_count: | 1536 | bad_fork_cleanup_count: |
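copy_mm() now calls vmacache_flush(tsk) so a new task never starts with VMA cache slots inherited from its parent, and dup_mmap() resets mm->vmacache_seqnum instead of the old single mm->mmap_cache pointer. The helper itself is defined outside this diff; a hedged guess at its shape, consistent with the fields this series touches but an assumption nonetheless:

	#include <linux/sched.h>
	#include <linux/string.h>

	static inline void vmacache_flush(struct task_struct *tsk)
	{
		/* drop any cached VMA pointers carried over from the parent */
		memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
	}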
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index f9f44fd4d34d..55c8c9349cfe 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
| @@ -183,7 +183,7 @@ COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, | |||
| 183 | if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || | 183 | if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || |
| 184 | cmd == FUTEX_WAIT_BITSET || | 184 | cmd == FUTEX_WAIT_BITSET || |
| 185 | cmd == FUTEX_WAIT_REQUEUE_PI)) { | 185 | cmd == FUTEX_WAIT_REQUEUE_PI)) { |
| 186 | if (get_compat_timespec(&ts, utime)) | 186 | if (compat_get_timespec(&ts, utime)) |
| 187 | return -EFAULT; | 187 | return -EFAULT; |
| 188 | if (!timespec_valid(&ts)) | 188 | if (!timespec_valid(&ts)) |
| 189 | return -EINVAL; | 189 | return -EINVAL; |
diff --git a/kernel/groups.c b/kernel/groups.c index 90cf1c38c8ea..451698f86cfa 100644 --- a/kernel/groups.c +++ b/kernel/groups.c | |||
| @@ -157,17 +157,13 @@ int groups_search(const struct group_info *group_info, kgid_t grp) | |||
| 157 | * set_groups - Change a group subscription in a set of credentials | 157 | * set_groups - Change a group subscription in a set of credentials |
| 158 | * @new: The newly prepared set of credentials to alter | 158 | * @new: The newly prepared set of credentials to alter |
| 159 | * @group_info: The group list to install | 159 | * @group_info: The group list to install |
| 160 | * | ||
| 161 | * Validate a group subscription and, if valid, insert it into a set | ||
| 162 | * of credentials. | ||
| 163 | */ | 160 | */ |
| 164 | int set_groups(struct cred *new, struct group_info *group_info) | 161 | void set_groups(struct cred *new, struct group_info *group_info) |
| 165 | { | 162 | { |
| 166 | put_group_info(new->group_info); | 163 | put_group_info(new->group_info); |
| 167 | groups_sort(group_info); | 164 | groups_sort(group_info); |
| 168 | get_group_info(group_info); | 165 | get_group_info(group_info); |
| 169 | new->group_info = group_info; | 166 | new->group_info = group_info; |
| 170 | return 0; | ||
| 171 | } | 167 | } |
| 172 | 168 | ||
| 173 | EXPORT_SYMBOL(set_groups); | 169 | EXPORT_SYMBOL(set_groups); |
| @@ -182,18 +178,12 @@ EXPORT_SYMBOL(set_groups); | |||
| 182 | int set_current_groups(struct group_info *group_info) | 178 | int set_current_groups(struct group_info *group_info) |
| 183 | { | 179 | { |
| 184 | struct cred *new; | 180 | struct cred *new; |
| 185 | int ret; | ||
| 186 | 181 | ||
| 187 | new = prepare_creds(); | 182 | new = prepare_creds(); |
| 188 | if (!new) | 183 | if (!new) |
| 189 | return -ENOMEM; | 184 | return -ENOMEM; |
| 190 | 185 | ||
| 191 | ret = set_groups(new, group_info); | 186 | set_groups(new, group_info); |
| 192 | if (ret < 0) { | ||
| 193 | abort_creds(new); | ||
| 194 | return ret; | ||
| 195 | } | ||
| 196 | |||
| 197 | return commit_creds(new); | 187 | return commit_creds(new); |
| 198 | } | 188 | } |
| 199 | 189 | ||
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 09094361dce5..d55092ceee29 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -168,19 +168,6 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, | |||
| 168 | } | 168 | } |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | |||
| 172 | /* | ||
| 173 | * Get the preferred target CPU for NOHZ | ||
| 174 | */ | ||
| 175 | static int hrtimer_get_target(int this_cpu, int pinned) | ||
| 176 | { | ||
| 177 | #ifdef CONFIG_NO_HZ_COMMON | ||
| 178 | if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) | ||
| 179 | return get_nohz_timer_target(); | ||
| 180 | #endif | ||
| 181 | return this_cpu; | ||
| 182 | } | ||
| 183 | |||
| 184 | /* | 171 | /* |
| 185 | * With HIGHRES=y we do not migrate the timer when it is expiring | 172 | * With HIGHRES=y we do not migrate the timer when it is expiring |
| 186 | * before the next event on the target cpu because we cannot reprogram | 173 | * before the next event on the target cpu because we cannot reprogram |
| @@ -214,7 +201,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, | |||
| 214 | struct hrtimer_clock_base *new_base; | 201 | struct hrtimer_clock_base *new_base; |
| 215 | struct hrtimer_cpu_base *new_cpu_base; | 202 | struct hrtimer_cpu_base *new_cpu_base; |
| 216 | int this_cpu = smp_processor_id(); | 203 | int this_cpu = smp_processor_id(); |
| 217 | int cpu = hrtimer_get_target(this_cpu, pinned); | 204 | int cpu = get_nohz_timer_target(pinned); |
| 218 | int basenum = base->index; | 205 | int basenum = base->index; |
| 219 | 206 | ||
| 220 | again: | 207 | again: |
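switch_hrtimer_base() now asks get_nohz_timer_target(pinned) directly instead of going through the deleted hrtimer_get_target() wrapper, so the pinned and idle-CPU policy is presumably folded into the scheduler helper. A hedged sketch of what that helper is assumed to do (its real body lives outside this diff, and pick_nonidle_cpu() is only a placeholder for the actual CPU selection):

	static int pick_nonidle_cpu(void);	/* placeholder, not a real kernel symbol */

	int get_nohz_timer_target(int pinned)
	{
		int cpu = smp_processor_id();

		/* keep the timer local unless migrating it off an idle CPU makes sense */
		if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
			return cpu;

		return pick_nonidle_cpu();
	}

The condition mirrors the removed CONFIG_NO_HZ_COMMON block earlier in the hunk, just with the pinned check pushed into the callee.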
diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 0b9c169d577f..06bb1417b063 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c | |||
| @@ -246,5 +246,4 @@ static int __init hung_task_init(void) | |||
| 246 | 246 | ||
| 247 | return 0; | 247 | return 0; |
| 248 | } | 248 | } |
| 249 | 249 | subsys_initcall(hung_task_init); | |
| 250 | module_init(hung_task_init); | ||
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index dc04c166c54d..6397df2d6945 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -281,6 +281,19 @@ void unmask_irq(struct irq_desc *desc) | |||
| 281 | } | 281 | } |
| 282 | } | 282 | } |
| 283 | 283 | ||
| 284 | void unmask_threaded_irq(struct irq_desc *desc) | ||
| 285 | { | ||
| 286 | struct irq_chip *chip = desc->irq_data.chip; | ||
| 287 | |||
| 288 | if (chip->flags & IRQCHIP_EOI_THREADED) | ||
| 289 | chip->irq_eoi(&desc->irq_data); | ||
| 290 | |||
| 291 | if (chip->irq_unmask) { | ||
| 292 | chip->irq_unmask(&desc->irq_data); | ||
| 293 | irq_state_clr_masked(desc); | ||
| 294 | } | ||
| 295 | } | ||
| 296 | |||
| 284 | /* | 297 | /* |
| 285 | * handle_nested_irq - Handle a nested irq from a irq thread | 298 | * handle_nested_irq - Handle a nested irq from a irq thread |
| 286 | * @irq: the interrupt number | 299 | * @irq: the interrupt number |
| @@ -435,6 +448,27 @@ static inline void preflow_handler(struct irq_desc *desc) | |||
| 435 | static inline void preflow_handler(struct irq_desc *desc) { } | 448 | static inline void preflow_handler(struct irq_desc *desc) { } |
| 436 | #endif | 449 | #endif |
| 437 | 450 | ||
| 451 | static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) | ||
| 452 | { | ||
| 453 | if (!(desc->istate & IRQS_ONESHOT)) { | ||
| 454 | chip->irq_eoi(&desc->irq_data); | ||
| 455 | return; | ||
| 456 | } | ||
| 457 | /* | ||
| 458 | * We need to unmask in the following cases: | ||
| 459 | * - Oneshot irq which did not wake the thread (caused by a | ||
| 460 | * spurious interrupt or a primary handler handling it | ||
| 461 | * completely). | ||
| 462 | */ | ||
| 463 | if (!irqd_irq_disabled(&desc->irq_data) && | ||
| 464 | irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { | ||
| 465 | chip->irq_eoi(&desc->irq_data); | ||
| 466 | unmask_irq(desc); | ||
| 467 | } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { | ||
| 468 | chip->irq_eoi(&desc->irq_data); | ||
| 469 | } | ||
| 470 | } | ||
| 471 | |||
| 438 | /** | 472 | /** |
| 439 | * handle_fasteoi_irq - irq handler for transparent controllers | 473 | * handle_fasteoi_irq - irq handler for transparent controllers |
| 440 | * @irq: the interrupt number | 474 | * @irq: the interrupt number |
| @@ -448,6 +482,8 @@ static inline void preflow_handler(struct irq_desc *desc) { } | |||
| 448 | void | 482 | void |
| 449 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | 483 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) |
| 450 | { | 484 | { |
| 485 | struct irq_chip *chip = desc->irq_data.chip; | ||
| 486 | |||
| 451 | raw_spin_lock(&desc->lock); | 487 | raw_spin_lock(&desc->lock); |
| 452 | 488 | ||
| 453 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 489 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
| @@ -473,18 +509,14 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
| 473 | preflow_handler(desc); | 509 | preflow_handler(desc); |
| 474 | handle_irq_event(desc); | 510 | handle_irq_event(desc); |
| 475 | 511 | ||
| 476 | if (desc->istate & IRQS_ONESHOT) | 512 | cond_unmask_eoi_irq(desc, chip); |
| 477 | cond_unmask_irq(desc); | ||
| 478 | 513 | ||
| 479 | out_eoi: | ||
| 480 | desc->irq_data.chip->irq_eoi(&desc->irq_data); | ||
| 481 | out_unlock: | ||
| 482 | raw_spin_unlock(&desc->lock); | 514 | raw_spin_unlock(&desc->lock); |
| 483 | return; | 515 | return; |
| 484 | out: | 516 | out: |
| 485 | if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED)) | 517 | if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) |
| 486 | goto out_eoi; | 518 | chip->irq_eoi(&desc->irq_data); |
| 487 | goto out_unlock; | 519 | raw_spin_unlock(&desc->lock); |
| 488 | } | 520 | } |
| 489 | 521 | ||
| 490 | /** | 522 | /** |
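The new IRQCHIP_EOI_THREADED handling is driven entirely by a chip flag. A hedged sketch of how an irqchip driver might opt in; the callback names are invented, only the flag and the struct fields come from the kernel API:

#include <linux/irq.h>

/* Hypothetical hardware accessors for an EOI-based controller. */
static void demo_irq_mask(struct irq_data *d)   { /* mask the line in hardware */ }
static void demo_irq_unmask(struct irq_data *d) { /* unmask the line in hardware */ }
static void demo_irq_eoi(struct irq_data *d)    { /* signal end-of-interrupt */ }

static struct irq_chip demo_eoi_chip = {
	.name		= "demo-eoi",
	.irq_mask	= demo_irq_mask,
	.irq_unmask	= demo_irq_unmask,
	.irq_eoi	= demo_irq_eoi,
	/*
	 * With IRQCHIP_EOI_THREADED, cond_unmask_eoi_irq() defers the EOI of a
	 * oneshot interrupt until the threaded handler has finished, and
	 * unmask_threaded_irq() issues the EOI together with the unmask.
	 */
	.flags		= IRQCHIP_EOI_THREADED,
};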
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 131ca176b497..635480270858 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -41,6 +41,7 @@ irqreturn_t no_action(int cpl, void *dev_id) | |||
| 41 | { | 41 | { |
| 42 | return IRQ_NONE; | 42 | return IRQ_NONE; |
| 43 | } | 43 | } |
| 44 | EXPORT_SYMBOL_GPL(no_action); | ||
| 44 | 45 | ||
| 45 | static void warn_no_thread(unsigned int irq, struct irqaction *action) | 46 | static void warn_no_thread(unsigned int irq, struct irqaction *action) |
| 46 | { | 47 | { |
| @@ -51,7 +52,7 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action) | |||
| 51 | "but no thread function available.", irq, action->name); | 52 | "but no thread function available.", irq, action->name); |
| 52 | } | 53 | } |
| 53 | 54 | ||
| 54 | static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action) | 55 | void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action) |
| 55 | { | 56 | { |
| 56 | /* | 57 | /* |
| 57 | * In case the thread crashed and was killed we just pretend that | 58 | * In case the thread crashed and was killed we just pretend that |
| @@ -157,7 +158,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) | |||
| 157 | break; | 158 | break; |
| 158 | } | 159 | } |
| 159 | 160 | ||
| 160 | irq_wake_thread(desc, action); | 161 | __irq_wake_thread(desc, action); |
| 161 | 162 | ||
| 162 | /* Fall through to add to randomness */ | 163 | /* Fall through to add to randomness */ |
| 163 | case IRQ_HANDLED: | 164 | case IRQ_HANDLED: |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 001fa5bab490..ddf1ffeb79f1 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | * of this file for your non core code. | 6 | * of this file for your non core code. |
| 7 | */ | 7 | */ |
| 8 | #include <linux/irqdesc.h> | 8 | #include <linux/irqdesc.h> |
| 9 | #include <linux/kernel_stat.h> | ||
| 9 | 10 | ||
| 10 | #ifdef CONFIG_SPARSE_IRQ | 11 | #ifdef CONFIG_SPARSE_IRQ |
| 11 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) | 12 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) |
| @@ -73,6 +74,7 @@ extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu); | |||
| 73 | extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); | 74 | extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); |
| 74 | extern void mask_irq(struct irq_desc *desc); | 75 | extern void mask_irq(struct irq_desc *desc); |
| 75 | extern void unmask_irq(struct irq_desc *desc); | 76 | extern void unmask_irq(struct irq_desc *desc); |
| 77 | extern void unmask_threaded_irq(struct irq_desc *desc); | ||
| 76 | 78 | ||
| 77 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); | 79 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); |
| 78 | 80 | ||
| @@ -82,6 +84,7 @@ irqreturn_t handle_irq_event(struct irq_desc *desc); | |||
| 82 | /* Resending of interrupts :*/ | 84 | /* Resending of interrupts :*/ |
| 83 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); | 85 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); |
| 84 | bool irq_wait_for_poll(struct irq_desc *desc); | 86 | bool irq_wait_for_poll(struct irq_desc *desc); |
| 87 | void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); | ||
| 85 | 88 | ||
| 86 | #ifdef CONFIG_PROC_FS | 89 | #ifdef CONFIG_PROC_FS |
| 87 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); | 90 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); |
| @@ -179,3 +182,9 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) | |||
| 179 | { | 182 | { |
| 180 | return d->state_use_accessors & mask; | 183 | return d->state_use_accessors & mask; |
| 181 | } | 184 | } |
| 185 | |||
| 186 | static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc) | ||
| 187 | { | ||
| 188 | __this_cpu_inc(*desc->kstat_irqs); | ||
| 189 | __this_cpu_inc(kstat.irqs_sum); | ||
| 190 | } | ||
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 8ab8e9390297..a7174617616b 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
| @@ -489,6 +489,11 @@ void dynamic_irq_cleanup(unsigned int irq) | |||
| 489 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 489 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 490 | } | 490 | } |
| 491 | 491 | ||
| 492 | void kstat_incr_irq_this_cpu(unsigned int irq) | ||
| 493 | { | ||
| 494 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); | ||
| 495 | } | ||
| 496 | |||
| 492 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | 497 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) |
| 493 | { | 498 | { |
| 494 | struct irq_desc *desc = irq_to_desc(irq); | 499 | struct irq_desc *desc = irq_to_desc(irq); |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index d3bf660cb57f..2486a4c1a710 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -32,24 +32,10 @@ static int __init setup_forced_irqthreads(char *arg) | |||
| 32 | early_param("threadirqs", setup_forced_irqthreads); | 32 | early_param("threadirqs", setup_forced_irqthreads); |
| 33 | #endif | 33 | #endif |
| 34 | 34 | ||
| 35 | /** | 35 | static void __synchronize_hardirq(struct irq_desc *desc) |
| 36 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | ||
| 37 | * @irq: interrupt number to wait for | ||
| 38 | * | ||
| 39 | * This function waits for any pending IRQ handlers for this interrupt | ||
| 40 | * to complete before returning. If you use this function while | ||
| 41 | * holding a resource the IRQ handler may need you will deadlock. | ||
| 42 | * | ||
| 43 | * This function may be called - with care - from IRQ context. | ||
| 44 | */ | ||
| 45 | void synchronize_irq(unsigned int irq) | ||
| 46 | { | 36 | { |
| 47 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 48 | bool inprogress; | 37 | bool inprogress; |
| 49 | 38 | ||
| 50 | if (!desc) | ||
| 51 | return; | ||
| 52 | |||
| 53 | do { | 39 | do { |
| 54 | unsigned long flags; | 40 | unsigned long flags; |
| 55 | 41 | ||
| @@ -67,12 +53,56 @@ void synchronize_irq(unsigned int irq) | |||
| 67 | 53 | ||
| 68 | /* Oops, that failed? */ | 54 | /* Oops, that failed? */ |
| 69 | } while (inprogress); | 55 | } while (inprogress); |
| 56 | } | ||
| 70 | 57 | ||
| 71 | /* | 58 | /** |
| 72 | * We made sure that no hardirq handler is running. Now verify | 59 | * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) |
| 73 | * that no threaded handlers are active. | 60 | * @irq: interrupt number to wait for |
| 74 | */ | 61 | * |
| 75 | wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); | 62 | * This function waits for any pending hard IRQ handlers for this |
| 63 | * interrupt to complete before returning. If you use this | ||
| 64 | * function while holding a resource the IRQ handler may need you | ||
| 65 | * will deadlock. It does not take associated threaded handlers | ||
| 66 | * into account. | ||
| 67 | * | ||
| 68 | * Do not use this for shutdown scenarios where you must be sure | ||
| 69 | * that all parts (hardirq and threaded handler) have completed. | ||
| 70 | * | ||
| 71 | * This function may be called - with care - from IRQ context. | ||
| 72 | */ | ||
| 73 | void synchronize_hardirq(unsigned int irq) | ||
| 74 | { | ||
| 75 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 76 | |||
| 77 | if (desc) | ||
| 78 | __synchronize_hardirq(desc); | ||
| 79 | } | ||
| 80 | EXPORT_SYMBOL(synchronize_hardirq); | ||
| 81 | |||
| 82 | /** | ||
| 83 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | ||
| 84 | * @irq: interrupt number to wait for | ||
| 85 | * | ||
| 86 | * This function waits for any pending IRQ handlers for this interrupt | ||
| 87 | * to complete before returning. If you use this function while | ||
| 88 | * holding a resource the IRQ handler may need you will deadlock. | ||
| 89 | * | ||
| 90 | * This function may be called - with care - from IRQ context. | ||
| 91 | */ | ||
| 92 | void synchronize_irq(unsigned int irq) | ||
| 93 | { | ||
| 94 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 95 | |||
| 96 | if (desc) { | ||
| 97 | __synchronize_hardirq(desc); | ||
| 98 | /* | ||
| 99 | * We made sure that no hardirq handler is | ||
| 100 | * running. Now verify that no threaded handlers are | ||
| 101 | * active. | ||
| 102 | */ | ||
| 103 | wait_event(desc->wait_for_threads, | ||
| 104 | !atomic_read(&desc->threads_active)); | ||
| 105 | } | ||
| 76 | } | 106 | } |
| 77 | EXPORT_SYMBOL(synchronize_irq); | 107 | EXPORT_SYMBOL(synchronize_irq); |
| 78 | 108 | ||
| @@ -718,7 +748,7 @@ again: | |||
| 718 | 748 | ||
| 719 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && | 749 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && |
| 720 | irqd_irq_masked(&desc->irq_data)) | 750 | irqd_irq_masked(&desc->irq_data)) |
| 721 | unmask_irq(desc); | 751 | unmask_threaded_irq(desc); |
| 722 | 752 | ||
| 723 | out_unlock: | 753 | out_unlock: |
| 724 | raw_spin_unlock_irq(&desc->lock); | 754 | raw_spin_unlock_irq(&desc->lock); |
| @@ -727,7 +757,7 @@ out_unlock: | |||
| 727 | 757 | ||
| 728 | #ifdef CONFIG_SMP | 758 | #ifdef CONFIG_SMP |
| 729 | /* | 759 | /* |
| 730 | * Check whether we need to chasnge the affinity of the interrupt thread. | 760 | * Check whether we need to change the affinity of the interrupt thread. |
| 731 | */ | 761 | */ |
| 732 | static void | 762 | static void |
| 733 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | 763 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) |
| @@ -880,6 +910,33 @@ static int irq_thread(void *data) | |||
| 880 | return 0; | 910 | return 0; |
| 881 | } | 911 | } |
| 882 | 912 | ||
| 913 | /** | ||
| 914 | * irq_wake_thread - wake the irq thread for the action identified by dev_id | ||
| 915 | * @irq: Interrupt line | ||
| 916 | * @dev_id: Device identity for which the thread should be woken | ||
| 917 | * | ||
| 918 | */ | ||
| 919 | void irq_wake_thread(unsigned int irq, void *dev_id) | ||
| 920 | { | ||
| 921 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 922 | struct irqaction *action; | ||
| 923 | unsigned long flags; | ||
| 924 | |||
| 925 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) | ||
| 926 | return; | ||
| 927 | |||
| 928 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 929 | for (action = desc->action; action; action = action->next) { | ||
| 930 | if (action->dev_id == dev_id) { | ||
| 931 | if (action->thread) | ||
| 932 | __irq_wake_thread(desc, action); | ||
| 933 | break; | ||
| 934 | } | ||
| 935 | } | ||
| 936 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 937 | } | ||
| 938 | EXPORT_SYMBOL_GPL(irq_wake_thread); | ||
| 939 | |||
| 883 | static void irq_setup_forced_threading(struct irqaction *new) | 940 | static void irq_setup_forced_threading(struct irqaction *new) |
| 884 | { | 941 | { |
| 885 | if (!force_irqthreads) | 942 | if (!force_irqthreads) |
| @@ -896,6 +953,23 @@ static void irq_setup_forced_threading(struct irqaction *new) | |||
| 896 | } | 953 | } |
| 897 | } | 954 | } |
| 898 | 955 | ||
| 956 | static int irq_request_resources(struct irq_desc *desc) | ||
| 957 | { | ||
| 958 | struct irq_data *d = &desc->irq_data; | ||
| 959 | struct irq_chip *c = d->chip; | ||
| 960 | |||
| 961 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; | ||
| 962 | } | ||
| 963 | |||
| 964 | static void irq_release_resources(struct irq_desc *desc) | ||
| 965 | { | ||
| 966 | struct irq_data *d = &desc->irq_data; | ||
| 967 | struct irq_chip *c = d->chip; | ||
| 968 | |||
| 969 | if (c->irq_release_resources) | ||
| 970 | c->irq_release_resources(d); | ||
| 971 | } | ||
| 972 | |||
| 899 | /* | 973 | /* |
| 900 | * Internal function to register an irqaction - typically used to | 974 | * Internal function to register an irqaction - typically used to |
| 901 | * allocate special interrupts that are part of the architecture. | 975 | * allocate special interrupts that are part of the architecture. |
| @@ -1091,6 +1165,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 1091 | } | 1165 | } |
| 1092 | 1166 | ||
| 1093 | if (!shared) { | 1167 | if (!shared) { |
| 1168 | ret = irq_request_resources(desc); | ||
| 1169 | if (ret) { | ||
| 1170 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", | ||
| 1171 | new->name, irq, desc->irq_data.chip->name); | ||
| 1172 | goto out_mask; | ||
| 1173 | } | ||
| 1174 | |||
| 1094 | init_waitqueue_head(&desc->wait_for_threads); | 1175 | init_waitqueue_head(&desc->wait_for_threads); |
| 1095 | 1176 | ||
| 1096 | /* Setup the type (level, edge polarity) if configured: */ | 1177 | /* Setup the type (level, edge polarity) if configured: */ |
| @@ -1261,8 +1342,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
| 1261 | *action_ptr = action->next; | 1342 | *action_ptr = action->next; |
| 1262 | 1343 | ||
| 1263 | /* If this was the last handler, shut down the IRQ line: */ | 1344 | /* If this was the last handler, shut down the IRQ line: */ |
| 1264 | if (!desc->action) | 1345 | if (!desc->action) { |
| 1265 | irq_shutdown(desc); | 1346 | irq_shutdown(desc); |
| 1347 | irq_release_resources(desc); | ||
| 1348 | } | ||
| 1266 | 1349 | ||
| 1267 | #ifdef CONFIG_SMP | 1350 | #ifdef CONFIG_SMP |
| 1268 | /* make sure affinity_hint is cleaned up */ | 1351 | /* make sure affinity_hint is cleaned up */ |
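Two of the additions above are driver-facing: irq_wake_thread() kicks the threaded handler for a given dev_id without a hardware interrupt, and synchronize_hardirq() waits only for the hard-IRQ part. A hedged usage sketch; the handlers and the polling scenario are hypothetical:

#include <linux/interrupt.h>

static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	/* Acknowledge the hardware, then defer the real work. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
	/* Heavy lifting runs in process context here. */
	return IRQ_HANDLED;
}

static int demo_probe_irq(unsigned int irq, void *dev)
{
	int ret = request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
				       0, "demo", dev);
	if (ret)
		return ret;

	/* Polling fallback: wake the thread for this dev_id by hand. */
	irq_wake_thread(irq, dev);

	/*
	 * Wait for any in-flight hard IRQ handler only; unlike
	 * synchronize_irq(), this does not wait for the thread, so it
	 * must not be used as a final shutdown step.
	 */
	synchronize_hardirq(irq);
	return 0;
}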
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 36f6ee181b0c..ac1ba2f11032 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
| @@ -324,15 +324,15 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
| 324 | 324 | ||
| 325 | #ifdef CONFIG_SMP | 325 | #ifdef CONFIG_SMP |
| 326 | /* create /proc/irq/<irq>/smp_affinity */ | 326 | /* create /proc/irq/<irq>/smp_affinity */ |
| 327 | proc_create_data("smp_affinity", 0600, desc->dir, | 327 | proc_create_data("smp_affinity", 0644, desc->dir, |
| 328 | &irq_affinity_proc_fops, (void *)(long)irq); | 328 | &irq_affinity_proc_fops, (void *)(long)irq); |
| 329 | 329 | ||
| 330 | /* create /proc/irq/<irq>/affinity_hint */ | 330 | /* create /proc/irq/<irq>/affinity_hint */ |
| 331 | proc_create_data("affinity_hint", 0400, desc->dir, | 331 | proc_create_data("affinity_hint", 0444, desc->dir, |
| 332 | &irq_affinity_hint_proc_fops, (void *)(long)irq); | 332 | &irq_affinity_hint_proc_fops, (void *)(long)irq); |
| 333 | 333 | ||
| 334 | /* create /proc/irq/<irq>/smp_affinity_list */ | 334 | /* create /proc/irq/<irq>/smp_affinity_list */ |
| 335 | proc_create_data("smp_affinity_list", 0600, desc->dir, | 335 | proc_create_data("smp_affinity_list", 0644, desc->dir, |
| 336 | &irq_affinity_list_proc_fops, (void *)(long)irq); | 336 | &irq_affinity_list_proc_fops, (void *)(long)irq); |
| 337 | 337 | ||
| 338 | proc_create_data("node", 0444, desc->dir, | 338 | proc_create_data("node", 0444, desc->dir, |
| @@ -372,7 +372,7 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action) | |||
| 372 | static void register_default_affinity_proc(void) | 372 | static void register_default_affinity_proc(void) |
| 373 | { | 373 | { |
| 374 | #ifdef CONFIG_SMP | 374 | #ifdef CONFIG_SMP |
| 375 | proc_create("irq/default_smp_affinity", 0600, NULL, | 375 | proc_create("irq/default_smp_affinity", 0644, NULL, |
| 376 | &default_affinity_proc_fops); | 376 | &default_affinity_proc_fops); |
| 377 | #endif | 377 | #endif |
| 378 | } | 378 | } |
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 3127ad52cdb2..cb0cf37dac3a 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
| 24 | #include <linux/ctype.h> | 24 | #include <linux/ctype.h> |
| 25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 26 | #include <linux/compiler.h> | ||
| 26 | 27 | ||
| 27 | #include <asm/sections.h> | 28 | #include <asm/sections.h> |
| 28 | 29 | ||
| @@ -36,8 +37,8 @@ | |||
| 36 | * These will be re-linked against their real values | 37 | * These will be re-linked against their real values |
| 37 | * during the second link stage. | 38 | * during the second link stage. |
| 38 | */ | 39 | */ |
| 39 | extern const unsigned long kallsyms_addresses[] __attribute__((weak)); | 40 | extern const unsigned long kallsyms_addresses[] __weak; |
| 40 | extern const u8 kallsyms_names[] __attribute__((weak)); | 41 | extern const u8 kallsyms_names[] __weak; |
| 41 | 42 | ||
| 42 | /* | 43 | /* |
| 43 | * Tell the compiler that the count isn't in the small data section if the arch | 44 | * Tell the compiler that the count isn't in the small data section if the arch |
| @@ -46,10 +47,10 @@ extern const u8 kallsyms_names[] __attribute__((weak)); | |||
| 46 | extern const unsigned long kallsyms_num_syms | 47 | extern const unsigned long kallsyms_num_syms |
| 47 | __attribute__((weak, section(".rodata"))); | 48 | __attribute__((weak, section(".rodata"))); |
| 48 | 49 | ||
| 49 | extern const u8 kallsyms_token_table[] __attribute__((weak)); | 50 | extern const u8 kallsyms_token_table[] __weak; |
| 50 | extern const u16 kallsyms_token_index[] __attribute__((weak)); | 51 | extern const u16 kallsyms_token_index[] __weak; |
| 51 | 52 | ||
| 52 | extern const unsigned long kallsyms_markers[] __attribute__((weak)); | 53 | extern const unsigned long kallsyms_markers[] __weak; |
| 53 | 54 | ||
| 54 | static inline int is_kernel_inittext(unsigned long addr) | 55 | static inline int is_kernel_inittext(unsigned long addr) |
| 55 | { | 56 | { |
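The __weak conversions here (and in kexec.c, ksysfs.c, power/suspend.c and sched/clock.c below) are purely cosmetic: __weak from <linux/compiler.h> expands to __attribute__((weak)). As a reminder of the semantics, a small sketch with an invented function name:

#include <linux/compiler.h>

/*
 * Weak default: any translation unit (typically architecture code) that
 * provides a strong demo_platform_quirk() definition overrides this one
 * at link time.
 */
int __weak demo_platform_quirk(void)
{
	return 0;	/* no quirk by default */
}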
diff --git a/kernel/kexec.c b/kernel/kexec.c index 45601cf41bee..c8380ad203bc 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/vmalloc.h> | 32 | #include <linux/vmalloc.h> |
| 33 | #include <linux/swap.h> | 33 | #include <linux/swap.h> |
| 34 | #include <linux/syscore_ops.h> | 34 | #include <linux/syscore_ops.h> |
| 35 | #include <linux/compiler.h> | ||
| 35 | 36 | ||
| 36 | #include <asm/page.h> | 37 | #include <asm/page.h> |
| 37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
| @@ -1235,7 +1236,7 @@ static int __init crash_notes_memory_init(void) | |||
| 1235 | } | 1236 | } |
| 1236 | return 0; | 1237 | return 0; |
| 1237 | } | 1238 | } |
| 1238 | module_init(crash_notes_memory_init) | 1239 | subsys_initcall(crash_notes_memory_init); |
| 1239 | 1240 | ||
| 1240 | 1241 | ||
| 1241 | /* | 1242 | /* |
| @@ -1551,10 +1552,10 @@ void vmcoreinfo_append_str(const char *fmt, ...) | |||
| 1551 | * provide an empty default implementation here -- architecture | 1552 | * provide an empty default implementation here -- architecture |
| 1552 | * code may override this | 1553 | * code may override this |
| 1553 | */ | 1554 | */ |
| 1554 | void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void) | 1555 | void __weak arch_crash_save_vmcoreinfo(void) |
| 1555 | {} | 1556 | {} |
| 1556 | 1557 | ||
| 1557 | unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void) | 1558 | unsigned long __weak paddr_vmcoreinfo_note(void) |
| 1558 | { | 1559 | { |
| 1559 | return __pa((unsigned long)(char *)&vmcoreinfo_note); | 1560 | return __pa((unsigned long)(char *)&vmcoreinfo_note); |
| 1560 | } | 1561 | } |
| @@ -1629,7 +1630,7 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
| 1629 | return 0; | 1630 | return 0; |
| 1630 | } | 1631 | } |
| 1631 | 1632 | ||
| 1632 | module_init(crash_save_vmcoreinfo_init) | 1633 | subsys_initcall(crash_save_vmcoreinfo_init); |
| 1633 | 1634 | ||
| 1634 | /* | 1635 | /* |
| 1635 | * Move into place and start executing a preloaded standalone | 1636 | * Move into place and start executing a preloaded standalone |
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index e660964086e2..2495a9b14ac8 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/stat.h> | 18 | #include <linux/stat.h> |
| 19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
| 20 | #include <linux/capability.h> | 20 | #include <linux/capability.h> |
| 21 | #include <linux/compiler.h> | ||
| 21 | 22 | ||
| 22 | #include <linux/rcupdate.h> /* rcu_expedited */ | 23 | #include <linux/rcupdate.h> /* rcu_expedited */ |
| 23 | 24 | ||
| @@ -162,8 +163,8 @@ KERNEL_ATTR_RW(rcu_expedited); | |||
| 162 | /* | 163 | /* |
| 163 | * Make /sys/kernel/notes give the raw contents of our kernel .notes section. | 164 | * Make /sys/kernel/notes give the raw contents of our kernel .notes section. |
| 164 | */ | 165 | */ |
| 165 | extern const void __start_notes __attribute__((weak)); | 166 | extern const void __start_notes __weak; |
| 166 | extern const void __stop_notes __attribute__((weak)); | 167 | extern const void __stop_notes __weak; |
| 167 | #define notes_size (&__stop_notes - &__start_notes) | 168 | #define notes_size (&__stop_notes - &__start_notes) |
| 168 | 169 | ||
| 169 | static ssize_t notes_read(struct file *filp, struct kobject *kobj, | 170 | static ssize_t notes_read(struct file *filp, struct kobject *kobj, |
diff --git a/kernel/kthread.c b/kernel/kthread.c index b5ae3ee860a9..9a130ec06f7a 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
| @@ -217,7 +217,7 @@ int tsk_fork_get_node(struct task_struct *tsk) | |||
| 217 | if (tsk == kthreadd_task) | 217 | if (tsk == kthreadd_task) |
| 218 | return tsk->pref_node_fork; | 218 | return tsk->pref_node_fork; |
| 219 | #endif | 219 | #endif |
| 220 | return numa_node_id(); | 220 | return NUMA_NO_NODE; |
| 221 | } | 221 | } |
| 222 | 222 | ||
| 223 | static void create_kthread(struct kthread_create_info *create) | 223 | static void create_kthread(struct kthread_create_info *create) |
| @@ -369,7 +369,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), | |||
| 369 | { | 369 | { |
| 370 | struct task_struct *p; | 370 | struct task_struct *p; |
| 371 | 371 | ||
| 372 | p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt, | 372 | p = kthread_create_on_node(threadfn, data, cpu_to_mem(cpu), namefmt, |
| 373 | cpu); | 373 | cpu); |
| 374 | if (IS_ERR(p)) | 374 | if (IS_ERR(p)) |
| 375 | return p; | 375 | return p; |
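For callers, the practical effect is that an unspecified NUMA preference is now expressed as NUMA_NO_NODE rather than the creator's current node. A hedged sketch of creating a kthread without a node preference; the thread function and name are illustrative:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/numa.h>

static int demo_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

static struct task_struct *demo_start_worker(void)
{
	struct task_struct *t;

	/* NUMA_NO_NODE: let the kernel pick the node, matching the new
	 * tsk_fork_get_node() default. */
	t = kthread_create_on_node(demo_thread_fn, NULL, NUMA_NO_NODE,
				   "demo_worker");
	if (!IS_ERR(t))
		wake_up_process(t);
	return t;
}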
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 306a76b51e0f..b8bdcd4785b7 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | 1 | ||
| 2 | obj-y += mutex.o semaphore.o rwsem.o lglock.o mcs_spinlock.o | 2 | obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o |
| 3 | 3 | ||
| 4 | ifdef CONFIG_FUNCTION_TRACER | 4 | ifdef CONFIG_FUNCTION_TRACER |
| 5 | CFLAGS_REMOVE_lockdep.o = -pg | 5 | CFLAGS_REMOVE_lockdep.o = -pg |
| @@ -14,6 +14,7 @@ ifeq ($(CONFIG_PROC_FS),y) | |||
| 14 | obj-$(CONFIG_LOCKDEP) += lockdep_proc.o | 14 | obj-$(CONFIG_LOCKDEP) += lockdep_proc.o |
| 15 | endif | 15 | endif |
| 16 | obj-$(CONFIG_SMP) += spinlock.o | 16 | obj-$(CONFIG_SMP) += spinlock.o |
| 17 | obj-$(CONFIG_SMP) += lglock.o | ||
| 17 | obj-$(CONFIG_PROVE_LOCKING) += spinlock.o | 18 | obj-$(CONFIG_PROVE_LOCKING) += spinlock.o |
| 18 | obj-$(CONFIG_RT_MUTEXES) += rtmutex.o | 19 | obj-$(CONFIG_RT_MUTEXES) += rtmutex.o |
| 19 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o | 20 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o |
diff --git a/kernel/module.c b/kernel/module.c index 8dc7f5e80dd8..11869408f79b 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -640,7 +640,7 @@ static int module_unload_init(struct module *mod) | |||
| 640 | INIT_LIST_HEAD(&mod->target_list); | 640 | INIT_LIST_HEAD(&mod->target_list); |
| 641 | 641 | ||
| 642 | /* Hold reference count during initialization. */ | 642 | /* Hold reference count during initialization. */ |
| 643 | __this_cpu_write(mod->refptr->incs, 1); | 643 | raw_cpu_write(mod->refptr->incs, 1); |
| 644 | 644 | ||
| 645 | return 0; | 645 | return 0; |
| 646 | } | 646 | } |
| @@ -1013,6 +1013,8 @@ static size_t module_flags_taint(struct module *mod, char *buf) | |||
| 1013 | buf[l++] = 'F'; | 1013 | buf[l++] = 'F'; |
| 1014 | if (mod->taints & (1 << TAINT_CRAP)) | 1014 | if (mod->taints & (1 << TAINT_CRAP)) |
| 1015 | buf[l++] = 'C'; | 1015 | buf[l++] = 'C'; |
| 1016 | if (mod->taints & (1 << TAINT_UNSIGNED_MODULE)) | ||
| 1017 | buf[l++] = 'E'; | ||
| 1016 | /* | 1018 | /* |
| 1017 | * TAINT_FORCED_RMMOD: could be added. | 1019 | * TAINT_FORCED_RMMOD: could be added. |
| 1018 | * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't | 1020 | * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't |
| @@ -3218,7 +3220,7 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
| 3218 | pr_notice_once("%s: module verification failed: signature " | 3220 | pr_notice_once("%s: module verification failed: signature " |
| 3219 | "and/or required key missing - tainting " | 3221 | "and/or required key missing - tainting " |
| 3220 | "kernel\n", mod->name); | 3222 | "kernel\n", mod->name); |
| 3221 | add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_STILL_OK); | 3223 | add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); |
| 3222 | } | 3224 | } |
| 3223 | #endif | 3225 | #endif |
| 3224 | 3226 | ||
| @@ -3813,12 +3815,12 @@ void print_modules(void) | |||
| 3813 | list_for_each_entry_rcu(mod, &modules, list) { | 3815 | list_for_each_entry_rcu(mod, &modules, list) { |
| 3814 | if (mod->state == MODULE_STATE_UNFORMED) | 3816 | if (mod->state == MODULE_STATE_UNFORMED) |
| 3815 | continue; | 3817 | continue; |
| 3816 | printk(" %s%s", mod->name, module_flags(mod, buf)); | 3818 | pr_cont(" %s%s", mod->name, module_flags(mod, buf)); |
| 3817 | } | 3819 | } |
| 3818 | preempt_enable(); | 3820 | preempt_enable(); |
| 3819 | if (last_unloaded_module[0]) | 3821 | if (last_unloaded_module[0]) |
| 3820 | printk(" [last unloaded: %s]", last_unloaded_module); | 3822 | pr_cont(" [last unloaded: %s]", last_unloaded_module); |
| 3821 | printk("\n"); | 3823 | pr_cont("\n"); |
| 3822 | } | 3824 | } |
| 3823 | 3825 | ||
| 3824 | #ifdef CONFIG_MODVERSIONS | 3826 | #ifdef CONFIG_MODVERSIONS |
diff --git a/kernel/panic.c b/kernel/panic.c index cca8a913ae7c..d02fa9fef46a 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
| @@ -100,7 +100,7 @@ void panic(const char *fmt, ...) | |||
| 100 | va_start(args, fmt); | 100 | va_start(args, fmt); |
| 101 | vsnprintf(buf, sizeof(buf), fmt, args); | 101 | vsnprintf(buf, sizeof(buf), fmt, args); |
| 102 | va_end(args); | 102 | va_end(args); |
| 103 | printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); | 103 | pr_emerg("Kernel panic - not syncing: %s\n", buf); |
| 104 | #ifdef CONFIG_DEBUG_BUGVERBOSE | 104 | #ifdef CONFIG_DEBUG_BUGVERBOSE |
| 105 | /* | 105 | /* |
| 106 | * Avoid nested stack-dumping if a panic occurs during oops processing | 106 | * Avoid nested stack-dumping if a panic occurs during oops processing |
| @@ -141,7 +141,7 @@ void panic(const char *fmt, ...) | |||
| 141 | * Delay timeout seconds before rebooting the machine. | 141 | * Delay timeout seconds before rebooting the machine. |
| 142 | * We can't use the "normal" timers since we just panicked. | 142 | * We can't use the "normal" timers since we just panicked. |
| 143 | */ | 143 | */ |
| 144 | printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout); | 144 | pr_emerg("Rebooting in %d seconds..", panic_timeout); |
| 145 | 145 | ||
| 146 | for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { | 146 | for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { |
| 147 | touch_nmi_watchdog(); | 147 | touch_nmi_watchdog(); |
| @@ -165,7 +165,7 @@ void panic(const char *fmt, ...) | |||
| 165 | extern int stop_a_enabled; | 165 | extern int stop_a_enabled; |
| 166 | /* Make sure the user can actually press Stop-A (L1-A) */ | 166 | /* Make sure the user can actually press Stop-A (L1-A) */ |
| 167 | stop_a_enabled = 1; | 167 | stop_a_enabled = 1; |
| 168 | printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n"); | 168 | pr_emerg("Press Stop-A (L1-A) to return to the boot prom\n"); |
| 169 | } | 169 | } |
| 170 | #endif | 170 | #endif |
| 171 | #if defined(CONFIG_S390) | 171 | #if defined(CONFIG_S390) |
| @@ -176,6 +176,7 @@ void panic(const char *fmt, ...) | |||
| 176 | disabled_wait(caller); | 176 | disabled_wait(caller); |
| 177 | } | 177 | } |
| 178 | #endif | 178 | #endif |
| 179 | pr_emerg("---[ end Kernel panic - not syncing: %s\n", buf); | ||
| 179 | local_irq_enable(); | 180 | local_irq_enable(); |
| 180 | for (i = 0; ; i += PANIC_TIMER_STEP) { | 181 | for (i = 0; ; i += PANIC_TIMER_STEP) { |
| 181 | touch_softlockup_watchdog(); | 182 | touch_softlockup_watchdog(); |
| @@ -210,6 +211,7 @@ static const struct tnt tnts[] = { | |||
| 210 | { TAINT_CRAP, 'C', ' ' }, | 211 | { TAINT_CRAP, 'C', ' ' }, |
| 211 | { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' }, | 212 | { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' }, |
| 212 | { TAINT_OOT_MODULE, 'O', ' ' }, | 213 | { TAINT_OOT_MODULE, 'O', ' ' }, |
| 214 | { TAINT_UNSIGNED_MODULE, 'E', ' ' }, | ||
| 213 | }; | 215 | }; |
| 214 | 216 | ||
| 215 | /** | 217 | /** |
| @@ -228,6 +230,7 @@ static const struct tnt tnts[] = { | |||
| 228 | * 'C' - modules from drivers/staging are loaded. | 230 | * 'C' - modules from drivers/staging are loaded. |
| 229 | * 'I' - Working around severe firmware bug. | 231 | * 'I' - Working around severe firmware bug. |
| 230 | * 'O' - Out-of-tree module has been loaded. | 232 | * 'O' - Out-of-tree module has been loaded. |
| 233 | * 'E' - Unsigned module has been loaded. | ||
| 231 | * | 234 | * |
| 232 | * The string is overwritten by the next call to print_tainted(). | 235 | * The string is overwritten by the next call to print_tainted(). |
| 233 | */ | 236 | */ |
| @@ -274,8 +277,7 @@ unsigned long get_taint(void) | |||
| 274 | void add_taint(unsigned flag, enum lockdep_ok lockdep_ok) | 277 | void add_taint(unsigned flag, enum lockdep_ok lockdep_ok) |
| 275 | { | 278 | { |
| 276 | if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off()) | 279 | if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off()) |
| 277 | printk(KERN_WARNING | 280 | pr_warn("Disabling lock debugging due to kernel taint\n"); |
| 278 | "Disabling lock debugging due to kernel taint\n"); | ||
| 279 | 281 | ||
| 280 | set_bit(flag, &tainted_mask); | 282 | set_bit(flag, &tainted_mask); |
| 281 | } | 283 | } |
| @@ -380,8 +382,7 @@ late_initcall(init_oops_id); | |||
| 380 | void print_oops_end_marker(void) | 382 | void print_oops_end_marker(void) |
| 381 | { | 383 | { |
| 382 | init_oops_id(); | 384 | init_oops_id(); |
| 383 | printk(KERN_WARNING "---[ end trace %016llx ]---\n", | 385 | pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id); |
| 384 | (unsigned long long)oops_id); | ||
| 385 | } | 386 | } |
| 386 | 387 | ||
| 387 | /* | 388 | /* |
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 06c62de9c711..db95d8eb761b 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
| @@ -318,7 +318,9 @@ static void *pidns_get(struct task_struct *task) | |||
| 318 | struct pid_namespace *ns; | 318 | struct pid_namespace *ns; |
| 319 | 319 | ||
| 320 | rcu_read_lock(); | 320 | rcu_read_lock(); |
| 321 | ns = get_pid_ns(task_active_pid_ns(task)); | 321 | ns = task_active_pid_ns(task); |
| 322 | if (ns) | ||
| 323 | get_pid_ns(ns); | ||
| 322 | rcu_read_unlock(); | 324 | rcu_read_unlock(); |
| 323 | 325 | ||
| 324 | return ns; | 326 | return ns; |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 37170d4dd9a6..f4f2073711d3 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
| @@ -973,16 +973,20 @@ static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
| 973 | static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, | 973 | static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, |
| 974 | const char *buf, size_t n) | 974 | const char *buf, size_t n) |
| 975 | { | 975 | { |
| 976 | unsigned int maj, min; | ||
| 977 | dev_t res; | 976 | dev_t res; |
| 978 | int ret = -EINVAL; | 977 | int len = n; |
| 978 | char *name; | ||
| 979 | 979 | ||
| 980 | if (sscanf(buf, "%u:%u", &maj, &min) != 2) | 980 | if (len && buf[len-1] == '\n') |
| 981 | goto out; | 981 | len--; |
| 982 | name = kstrndup(buf, len, GFP_KERNEL); | ||
| 983 | if (!name) | ||
| 984 | return -ENOMEM; | ||
| 982 | 985 | ||
| 983 | res = MKDEV(maj,min); | 986 | res = name_to_dev_t(name); |
| 984 | if (maj != MAJOR(res) || min != MINOR(res)) | 987 | kfree(name); |
| 985 | goto out; | 988 | if (!res) |
| 989 | return -EINVAL; | ||
| 986 | 990 | ||
| 987 | lock_system_sleep(); | 991 | lock_system_sleep(); |
| 988 | swsusp_resume_device = res; | 992 | swsusp_resume_device = res; |
| @@ -990,9 +994,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
| 990 | printk(KERN_INFO "PM: Starting manual resume from disk\n"); | 994 | printk(KERN_INFO "PM: Starting manual resume from disk\n"); |
| 991 | noresume = 0; | 995 | noresume = 0; |
| 992 | software_resume(); | 996 | software_resume(); |
| 993 | ret = n; | 997 | return n; |
| 994 | out: | ||
| 995 | return ret; | ||
| 996 | } | 998 | } |
| 997 | 999 | ||
| 998 | power_attr(resume); | 1000 | power_attr(resume); |
diff --git a/kernel/power/main.c b/kernel/power/main.c index 1d1bf630e6e9..6271bc4073ef 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
| @@ -282,8 +282,8 @@ struct kobject *power_kobj; | |||
| 282 | * state - control system power state. | 282 | * state - control system power state. |
| 283 | * | 283 | * |
| 284 | * show() returns what states are supported, which is hard-coded to | 284 | * show() returns what states are supported, which is hard-coded to |
| 285 | * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and | 285 | * 'freeze' (Low-Power Idle), 'standby' (Power-On Suspend), |
| 286 | * 'disk' (Suspend-to-Disk). | 286 | * 'mem' (Suspend-to-RAM), and 'disk' (Suspend-to-Disk). |
| 287 | * | 287 | * |
| 288 | * store() accepts one of those strings, translates it into the | 288 | * store() accepts one of those strings, translates it into the |
| 289 | * proper enumerated value, and initiates a suspend transition. | 289 | * proper enumerated value, and initiates a suspend transition. |
diff --git a/kernel/power/power.h b/kernel/power/power.h index 7d4b7ffb3c1d..15f37ea08719 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #include <linux/suspend_ioctls.h> | 2 | #include <linux/suspend_ioctls.h> |
| 3 | #include <linux/utsname.h> | 3 | #include <linux/utsname.h> |
| 4 | #include <linux/freezer.h> | 4 | #include <linux/freezer.h> |
| 5 | #include <linux/compiler.h> | ||
| 5 | 6 | ||
| 6 | struct swsusp_info { | 7 | struct swsusp_info { |
| 7 | struct new_utsname uts; | 8 | struct new_utsname uts; |
| @@ -11,7 +12,7 @@ struct swsusp_info { | |||
| 11 | unsigned long image_pages; | 12 | unsigned long image_pages; |
| 12 | unsigned long pages; | 13 | unsigned long pages; |
| 13 | unsigned long size; | 14 | unsigned long size; |
| 14 | } __attribute__((aligned(PAGE_SIZE))); | 15 | } __aligned(PAGE_SIZE); |
| 15 | 16 | ||
| 16 | #ifdef CONFIG_HIBERNATION | 17 | #ifdef CONFIG_HIBERNATION |
| 17 | /* kernel/power/snapshot.c */ | 18 | /* kernel/power/snapshot.c */ |
| @@ -49,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info) | |||
| 49 | */ | 50 | */ |
| 50 | #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) | 51 | #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) |
| 51 | 52 | ||
| 53 | asmlinkage int swsusp_save(void); | ||
| 54 | |||
| 52 | /* kernel/power/hibernate.c */ | 55 | /* kernel/power/hibernate.c */ |
| 53 | extern bool freezer_test_done; | 56 | extern bool freezer_test_done; |
| 54 | 57 | ||
diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 8dff9b48075a..884b77058864 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c | |||
| @@ -66,6 +66,7 @@ static struct pm_qos_constraints cpu_dma_constraints = { | |||
| 66 | .list = PLIST_HEAD_INIT(cpu_dma_constraints.list), | 66 | .list = PLIST_HEAD_INIT(cpu_dma_constraints.list), |
| 67 | .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, | 67 | .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, |
| 68 | .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, | 68 | .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, |
| 69 | .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, | ||
| 69 | .type = PM_QOS_MIN, | 70 | .type = PM_QOS_MIN, |
| 70 | .notifiers = &cpu_dma_lat_notifier, | 71 | .notifiers = &cpu_dma_lat_notifier, |
| 71 | }; | 72 | }; |
| @@ -79,6 +80,7 @@ static struct pm_qos_constraints network_lat_constraints = { | |||
| 79 | .list = PLIST_HEAD_INIT(network_lat_constraints.list), | 80 | .list = PLIST_HEAD_INIT(network_lat_constraints.list), |
| 80 | .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, | 81 | .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, |
| 81 | .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, | 82 | .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, |
| 83 | .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, | ||
| 82 | .type = PM_QOS_MIN, | 84 | .type = PM_QOS_MIN, |
| 83 | .notifiers = &network_lat_notifier, | 85 | .notifiers = &network_lat_notifier, |
| 84 | }; | 86 | }; |
| @@ -93,6 +95,7 @@ static struct pm_qos_constraints network_tput_constraints = { | |||
| 93 | .list = PLIST_HEAD_INIT(network_tput_constraints.list), | 95 | .list = PLIST_HEAD_INIT(network_tput_constraints.list), |
| 94 | .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, | 96 | .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, |
| 95 | .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, | 97 | .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, |
| 98 | .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, | ||
| 96 | .type = PM_QOS_MAX, | 99 | .type = PM_QOS_MAX, |
| 97 | .notifiers = &network_throughput_notifier, | 100 | .notifiers = &network_throughput_notifier, |
| 98 | }; | 101 | }; |
| @@ -128,7 +131,7 @@ static const struct file_operations pm_qos_power_fops = { | |||
| 128 | static inline int pm_qos_get_value(struct pm_qos_constraints *c) | 131 | static inline int pm_qos_get_value(struct pm_qos_constraints *c) |
| 129 | { | 132 | { |
| 130 | if (plist_head_empty(&c->list)) | 133 | if (plist_head_empty(&c->list)) |
| 131 | return c->default_value; | 134 | return c->no_constraint_value; |
| 132 | 135 | ||
| 133 | switch (c->type) { | 136 | switch (c->type) { |
| 134 | case PM_QOS_MIN: | 137 | case PM_QOS_MIN: |
| @@ -170,6 +173,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | |||
| 170 | { | 173 | { |
| 171 | unsigned long flags; | 174 | unsigned long flags; |
| 172 | int prev_value, curr_value, new_value; | 175 | int prev_value, curr_value, new_value; |
| 176 | int ret; | ||
| 173 | 177 | ||
| 174 | spin_lock_irqsave(&pm_qos_lock, flags); | 178 | spin_lock_irqsave(&pm_qos_lock, flags); |
| 175 | prev_value = pm_qos_get_value(c); | 179 | prev_value = pm_qos_get_value(c); |
| @@ -205,13 +209,15 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | |||
| 205 | 209 | ||
| 206 | trace_pm_qos_update_target(action, prev_value, curr_value); | 210 | trace_pm_qos_update_target(action, prev_value, curr_value); |
| 207 | if (prev_value != curr_value) { | 211 | if (prev_value != curr_value) { |
| 208 | blocking_notifier_call_chain(c->notifiers, | 212 | ret = 1; |
| 209 | (unsigned long)curr_value, | 213 | if (c->notifiers) |
| 210 | NULL); | 214 | blocking_notifier_call_chain(c->notifiers, |
| 211 | return 1; | 215 | (unsigned long)curr_value, |
| 216 | NULL); | ||
| 212 | } else { | 217 | } else { |
| 213 | return 0; | 218 | ret = 0; |
| 214 | } | 219 | } |
| 220 | return ret; | ||
| 215 | } | 221 | } |
| 216 | 222 | ||
| 217 | /** | 223 | /** |
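The new no_constraint_value is what pm_qos_request() reports once the last request for a class has been removed. A hedged sketch of the request lifecycle from a driver's perspective; the request object and the 20 usec figure are made up:

#include <linux/pm_qos.h>

static struct pm_qos_request demo_qos_req;

static void demo_enter_low_latency(void)
{
	/* Cap CPU DMA latency at 20 usec while the burst is in flight. */
	pm_qos_add_request(&demo_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void demo_exit_low_latency(void)
{
	/*
	 * Once the last request is gone, pm_qos_request(PM_QOS_CPU_DMA_LATENCY)
	 * falls back to the class's no_constraint_value.
	 */
	pm_qos_remove_request(&demo_qos_req);
}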
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index d9f61a145802..18fb7a2fb14b 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/highmem.h> | 27 | #include <linux/highmem.h> |
| 28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
| 29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
| 30 | #include <linux/compiler.h> | ||
| 30 | 31 | ||
| 31 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
| 32 | #include <asm/mmu_context.h> | 33 | #include <asm/mmu_context.h> |
| @@ -155,7 +156,7 @@ static inline void free_image_page(void *addr, int clear_nosave_free) | |||
| 155 | struct linked_page { | 156 | struct linked_page { |
| 156 | struct linked_page *next; | 157 | struct linked_page *next; |
| 157 | char data[LINKED_PAGE_DATA_SIZE]; | 158 | char data[LINKED_PAGE_DATA_SIZE]; |
| 158 | } __attribute__((packed)); | 159 | } __packed; |
| 159 | 160 | ||
| 160 | static inline void | 161 | static inline void |
| 161 | free_list_of_pages(struct linked_page *list, int clear_page_nosave) | 162 | free_list_of_pages(struct linked_page *list, int clear_page_nosave) |
| @@ -1268,7 +1269,7 @@ static void free_unnecessary_pages(void) | |||
| 1268 | * [number of saveable pages] - [number of pages that can be freed in theory] | 1269 | * [number of saveable pages] - [number of pages that can be freed in theory] |
| 1269 | * | 1270 | * |
| 1270 | * where the second term is the sum of (1) reclaimable slab pages, (2) active | 1271 | * where the second term is the sum of (1) reclaimable slab pages, (2) active |
| 1271 | * and (3) inactive anonymouns pages, (4) active and (5) inactive file pages, | 1272 | * and (3) inactive anonymous pages, (4) active and (5) inactive file pages, |
| 1272 | * minus mapped file pages. | 1273 | * minus mapped file pages. |
| 1273 | */ | 1274 | */ |
| 1274 | static unsigned long minimum_image_size(unsigned long saveable) | 1275 | static unsigned long minimum_image_size(unsigned long saveable) |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 62ee437b5c7e..c3ad9cafe930 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/syscore_ops.h> | 26 | #include <linux/syscore_ops.h> |
| 27 | #include <linux/ftrace.h> | 27 | #include <linux/ftrace.h> |
| 28 | #include <trace/events/power.h> | 28 | #include <trace/events/power.h> |
| 29 | #include <linux/compiler.h> | ||
| 29 | 30 | ||
| 30 | #include "power.h" | 31 | #include "power.h" |
| 31 | 32 | ||
| @@ -39,7 +40,7 @@ static const struct platform_suspend_ops *suspend_ops; | |||
| 39 | 40 | ||
| 40 | static bool need_suspend_ops(suspend_state_t state) | 41 | static bool need_suspend_ops(suspend_state_t state) |
| 41 | { | 42 | { |
| 42 | return !!(state > PM_SUSPEND_FREEZE); | 43 | return state > PM_SUSPEND_FREEZE; |
| 43 | } | 44 | } |
| 44 | 45 | ||
| 45 | static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); | 46 | static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); |
| @@ -156,13 +157,13 @@ static int suspend_prepare(suspend_state_t state) | |||
| 156 | } | 157 | } |
| 157 | 158 | ||
| 158 | /* default implementation */ | 159 | /* default implementation */ |
| 159 | void __attribute__ ((weak)) arch_suspend_disable_irqs(void) | 160 | void __weak arch_suspend_disable_irqs(void) |
| 160 | { | 161 | { |
| 161 | local_irq_disable(); | 162 | local_irq_disable(); |
| 162 | } | 163 | } |
| 163 | 164 | ||
| 164 | /* default implementation */ | 165 | /* default implementation */ |
| 165 | void __attribute__ ((weak)) arch_suspend_enable_irqs(void) | 166 | void __weak arch_suspend_enable_irqs(void) |
| 166 | { | 167 | { |
| 167 | local_irq_enable(); | 168 | local_irq_enable(); |
| 168 | } | 169 | } |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 7c33ed200410..8c9a4819f798 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
| @@ -101,7 +101,7 @@ struct swsusp_header { | |||
| 101 | unsigned int flags; /* Flags to pass to the "boot" kernel */ | 101 | unsigned int flags; /* Flags to pass to the "boot" kernel */ |
| 102 | char orig_sig[10]; | 102 | char orig_sig[10]; |
| 103 | char sig[10]; | 103 | char sig[10]; |
| 104 | } __attribute__((packed)); | 104 | } __packed; |
| 105 | 105 | ||
| 106 | static struct swsusp_header *swsusp_header; | 106 | static struct swsusp_header *swsusp_header; |
| 107 | 107 | ||
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 8f50de394d22..019069c84ff6 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c | |||
| @@ -18,6 +18,8 @@ | |||
| 18 | #include <linux/rbtree.h> | 18 | #include <linux/rbtree.h> |
| 19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | 20 | ||
| 21 | #include "power.h" | ||
| 22 | |||
| 21 | static DEFINE_MUTEX(wakelocks_lock); | 23 | static DEFINE_MUTEX(wakelocks_lock); |
| 22 | 24 | ||
| 23 | struct wakelock { | 25 | struct wakelock { |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 4dae9cbe9259..a45b50962295 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
| @@ -319,7 +319,7 @@ static void log_store(int facility, int level, | |||
| 319 | else | 319 | else |
| 320 | free = log_first_idx - log_next_idx; | 320 | free = log_first_idx - log_next_idx; |
| 321 | 321 | ||
| 322 | if (free > size + sizeof(struct printk_log)) | 322 | if (free >= size + sizeof(struct printk_log)) |
| 323 | break; | 323 | break; |
| 324 | 324 | ||
| 325 | /* drop old messages until we have enough contiuous space */ | 325 | /* drop old messages until we have enough contiuous space */ |
| @@ -327,7 +327,7 @@ static void log_store(int facility, int level, | |||
| 327 | log_first_seq++; | 327 | log_first_seq++; |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) { | 330 | if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) { |
| 331 | /* | 331 | /* |
| 332 | * This message + an additional empty header does not fit | 332 | * This message + an additional empty header does not fit |
| 333 | * at the end of the buffer. Add an empty header with len == 0 | 333 | * at the end of the buffer. Add an empty header with len == 0 |
| @@ -351,7 +351,7 @@ static void log_store(int facility, int level, | |||
| 351 | else | 351 | else |
| 352 | msg->ts_nsec = local_clock(); | 352 | msg->ts_nsec = local_clock(); |
| 353 | memset(log_dict(msg) + dict_len, 0, pad_len); | 353 | memset(log_dict(msg) + dict_len, 0, pad_len); |
| 354 | msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len; | 354 | msg->len = size; |
| 355 | 355 | ||
| 356 | /* insert message */ | 356 | /* insert message */ |
| 357 | log_next_idx += msg->len; | 357 | log_next_idx += msg->len; |
| @@ -1560,9 +1560,12 @@ asmlinkage int vprintk_emit(int facility, int level, | |||
| 1560 | level = kern_level - '0'; | 1560 | level = kern_level - '0'; |
| 1561 | case 'd': /* KERN_DEFAULT */ | 1561 | case 'd': /* KERN_DEFAULT */ |
| 1562 | lflags |= LOG_PREFIX; | 1562 | lflags |= LOG_PREFIX; |
| 1563 | case 'c': /* KERN_CONT */ | ||
| 1564 | break; | ||
| 1565 | } | 1563 | } |
| 1564 | /* | ||
| 1565 | * No need to check length here because vscnprintf | ||
| 1566 | * put '\0' at the end of the string. Only valid and | ||
| 1567 | * newly printed level is detected. | ||
| 1568 | */ | ||
| 1566 | text_len -= end_of_header - text; | 1569 | text_len -= end_of_header - text; |
| 1567 | text = (char *)end_of_header; | 1570 | text = (char *)end_of_header; |
| 1568 | } | 1571 | } |
| @@ -1880,6 +1883,7 @@ void suspend_console(void) | |||
| 1880 | console_lock(); | 1883 | console_lock(); |
| 1881 | console_suspended = 1; | 1884 | console_suspended = 1; |
| 1882 | up(&console_sem); | 1885 | up(&console_sem); |
| 1886 | mutex_release(&console_lock_dep_map, 1, _RET_IP_); | ||
| 1883 | } | 1887 | } |
| 1884 | 1888 | ||
| 1885 | void resume_console(void) | 1889 | void resume_console(void) |
| @@ -1887,6 +1891,7 @@ void resume_console(void) | |||
| 1887 | if (!console_suspend_enabled) | 1891 | if (!console_suspend_enabled) |
| 1888 | return; | 1892 | return; |
| 1889 | down(&console_sem); | 1893 | down(&console_sem); |
| 1894 | mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_); | ||
| 1890 | console_suspended = 0; | 1895 | console_suspended = 0; |
| 1891 | console_unlock(); | 1896 | console_unlock(); |
| 1892 | } | 1897 | } |
diff --git a/kernel/profile.c b/kernel/profile.c index ebdd9c1a86b4..cb980f0c731b 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
| @@ -591,18 +591,28 @@ out_cleanup: | |||
| 591 | int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ | 591 | int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ |
| 592 | { | 592 | { |
| 593 | struct proc_dir_entry *entry; | 593 | struct proc_dir_entry *entry; |
| 594 | int err = 0; | ||
| 594 | 595 | ||
| 595 | if (!prof_on) | 596 | if (!prof_on) |
| 596 | return 0; | 597 | return 0; |
| 597 | if (create_hash_tables()) | 598 | |
| 598 | return -ENOMEM; | 599 | cpu_notifier_register_begin(); |
| 600 | |||
| 601 | if (create_hash_tables()) { | ||
| 602 | err = -ENOMEM; | ||
| 603 | goto out; | ||
| 604 | } | ||
| 605 | |||
| 599 | entry = proc_create("profile", S_IWUSR | S_IRUGO, | 606 | entry = proc_create("profile", S_IWUSR | S_IRUGO, |
| 600 | NULL, &proc_profile_operations); | 607 | NULL, &proc_profile_operations); |
| 601 | if (!entry) | 608 | if (!entry) |
| 602 | return 0; | 609 | goto out; |
| 603 | proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); | 610 | proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); |
| 604 | hotcpu_notifier(profile_cpu_callback, 0); | 611 | __hotcpu_notifier(profile_cpu_callback, 0); |
| 605 | return 0; | 612 | |
| 613 | out: | ||
| 614 | cpu_notifier_register_done(); | ||
| 615 | return err; | ||
| 606 | } | 616 | } |
| 607 | module_init(create_proc_profile); | 617 | subsys_initcall(create_proc_profile); |
| 608 | #endif /* CONFIG_PROC_FS */ | 618 | #endif /* CONFIG_PROC_FS */ |
diff --git a/kernel/relay.c b/kernel/relay.c index 5001c9887db1..52d6a6f56261 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
| @@ -227,7 +227,7 @@ static void relay_destroy_buf(struct rchan_buf *buf) | |||
| 227 | * relay_remove_buf - remove a channel buffer | 227 | * relay_remove_buf - remove a channel buffer |
| 228 | * @kref: target kernel reference that contains the relay buffer | 228 | * @kref: target kernel reference that contains the relay buffer |
| 229 | * | 229 | * |
| 230 | * Removes the file from the fileystem, which also frees the | 230 | * Removes the file from the filesystem, which also frees the |
| 231 | * rchan_buf_struct and the channel buffer. Should only be called from | 231 | * rchan_buf_struct and the channel buffer. Should only be called from |
| 232 | * kref_put(). | 232 | * kref_put(). |
| 233 | */ | 233 | */ |
diff --git a/kernel/res_counter.c b/kernel/res_counter.c index 4aa8a305aede..51dbac6a3633 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c | |||
| @@ -22,8 +22,18 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent) | |||
| 22 | counter->parent = parent; | 22 | counter->parent = parent; |
| 23 | } | 23 | } |
| 24 | 24 | ||
| 25 | int res_counter_charge_locked(struct res_counter *counter, unsigned long val, | 25 | static u64 res_counter_uncharge_locked(struct res_counter *counter, |
| 26 | bool force) | 26 | unsigned long val) |
| 27 | { | ||
| 28 | if (WARN_ON(counter->usage < val)) | ||
| 29 | val = counter->usage; | ||
| 30 | |||
| 31 | counter->usage -= val; | ||
| 32 | return counter->usage; | ||
| 33 | } | ||
| 34 | |||
| 35 | static int res_counter_charge_locked(struct res_counter *counter, | ||
| 36 | unsigned long val, bool force) | ||
| 27 | { | 37 | { |
| 28 | int ret = 0; | 38 | int ret = 0; |
| 29 | 39 | ||
| @@ -86,15 +96,6 @@ int res_counter_charge_nofail(struct res_counter *counter, unsigned long val, | |||
| 86 | return __res_counter_charge(counter, val, limit_fail_at, true); | 96 | return __res_counter_charge(counter, val, limit_fail_at, true); |
| 87 | } | 97 | } |
| 88 | 98 | ||
| 89 | u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) | ||
| 90 | { | ||
| 91 | if (WARN_ON(counter->usage < val)) | ||
| 92 | val = counter->usage; | ||
| 93 | |||
| 94 | counter->usage -= val; | ||
| 95 | return counter->usage; | ||
| 96 | } | ||
| 97 | |||
| 98 | u64 res_counter_uncharge_until(struct res_counter *counter, | 99 | u64 res_counter_uncharge_until(struct res_counter *counter, |
| 99 | struct res_counter *top, | 100 | struct res_counter *top, |
| 100 | unsigned long val) | 101 | unsigned long val) |
diff --git a/kernel/resource.c b/kernel/resource.c index 3f285dce9347..8957d686e29b 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
| @@ -432,11 +432,6 @@ static void resource_clip(struct resource *res, resource_size_t min, | |||
| 432 | res->end = max; | 432 | res->end = max; |
| 433 | } | 433 | } |
| 434 | 434 | ||
| 435 | static bool resource_contains(struct resource *res1, struct resource *res2) | ||
| 436 | { | ||
| 437 | return res1->start <= res2->start && res1->end >= res2->end; | ||
| 438 | } | ||
| 439 | |||
| 440 | /* | 435 | /* |
| 441 | * Find empty slot in the resource tree with the given range and | 436 | * Find empty slot in the resource tree with the given range and |
| 442 | * alignment constraints | 437 | * alignment constraints |
| @@ -471,10 +466,11 @@ static int __find_resource(struct resource *root, struct resource *old, | |||
| 471 | arch_remove_reservations(&tmp); | 466 | arch_remove_reservations(&tmp); |
| 472 | 467 | ||
| 473 | /* Check for overflow after ALIGN() */ | 468 | /* Check for overflow after ALIGN() */ |
| 474 | avail = *new; | ||
| 475 | avail.start = ALIGN(tmp.start, constraint->align); | 469 | avail.start = ALIGN(tmp.start, constraint->align); |
| 476 | avail.end = tmp.end; | 470 | avail.end = tmp.end; |
| 471 | avail.flags = new->flags & ~IORESOURCE_UNSET; | ||
| 477 | if (avail.start >= tmp.start) { | 472 | if (avail.start >= tmp.start) { |
| 473 | alloc.flags = avail.flags; | ||
| 478 | alloc.start = constraint->alignf(constraint->alignf_data, &avail, | 474 | alloc.start = constraint->alignf(constraint->alignf_data, &avail, |
| 479 | size, constraint->align); | 475 | size, constraint->align); |
| 480 | alloc.end = alloc.start + size - 1; | 476 | alloc.end = alloc.start + size - 1; |
| @@ -515,7 +511,7 @@ static int find_resource(struct resource *root, struct resource *new, | |||
| 515 | * @newsize: new size of the resource descriptor | 511 | * @newsize: new size of the resource descriptor |
| 516 | * @constraint: the size and alignment constraints to be met. | 512 | * @constraint: the size and alignment constraints to be met. |
| 517 | */ | 513 | */ |
| 518 | int reallocate_resource(struct resource *root, struct resource *old, | 514 | static int reallocate_resource(struct resource *root, struct resource *old, |
| 519 | resource_size_t newsize, | 515 | resource_size_t newsize, |
| 520 | struct resource_constraint *constraint) | 516 | struct resource_constraint *constraint) |
| 521 | { | 517 | { |
| @@ -949,8 +945,8 @@ struct resource * __request_region(struct resource *parent, | |||
| 949 | res->name = name; | 945 | res->name = name; |
| 950 | res->start = start; | 946 | res->start = start; |
| 951 | res->end = start + n - 1; | 947 | res->end = start + n - 1; |
| 952 | res->flags = IORESOURCE_BUSY; | 948 | res->flags = resource_type(parent); |
| 953 | res->flags |= flags; | 949 | res->flags |= IORESOURCE_BUSY | flags; |
| 954 | 950 | ||
| 955 | write_lock(&resource_lock); | 951 | write_lock(&resource_lock); |
| 956 | 952 | ||
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index b30a2924ef14..3ef6451e972e 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c | |||
| @@ -60,13 +60,14 @@ | |||
| 60 | #include <linux/sched.h> | 60 | #include <linux/sched.h> |
| 61 | #include <linux/static_key.h> | 61 | #include <linux/static_key.h> |
| 62 | #include <linux/workqueue.h> | 62 | #include <linux/workqueue.h> |
| 63 | #include <linux/compiler.h> | ||
| 63 | 64 | ||
| 64 | /* | 65 | /* |
| 65 | * Scheduler clock - returns current time in nanosec units. | 66 | * Scheduler clock - returns current time in nanosec units. |
| 66 | * This is default implementation. | 67 | * This is default implementation. |
| 67 | * Architectures and sub-architectures can override this. | 68 | * Architectures and sub-architectures can override this. |
| 68 | */ | 69 | */ |
| 69 | unsigned long long __attribute__((weak)) sched_clock(void) | 70 | unsigned long long __weak sched_clock(void) |
| 70 | { | 71 | { |
| 71 | return (unsigned long long)(jiffies - INITIAL_JIFFIES) | 72 | return (unsigned long long)(jiffies - INITIAL_JIFFIES) |
| 72 | * (NSEC_PER_SEC / HZ); | 73 | * (NSEC_PER_SEC / HZ); |
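The sched_clock() change above (and the matching ones later in sched/core.c and signal.c) replaces the open-coded __attribute__((weak)) with the __weak shorthand, which is why <linux/compiler.h> is now included. A minimal sketch of the idiom, with a hypothetical function name:

#include <linux/compiler.h>

/* Generic fallback; an architecture can override it simply by providing a
 * non-weak definition of the same symbol. */
unsigned long long __weak my_plat_clock(void)
{
	return 0;
}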
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a47902c687ae..268a45ea238c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -73,6 +73,7 @@ | |||
| 73 | #include <linux/init_task.h> | 73 | #include <linux/init_task.h> |
| 74 | #include <linux/binfmts.h> | 74 | #include <linux/binfmts.h> |
| 75 | #include <linux/context_tracking.h> | 75 | #include <linux/context_tracking.h> |
| 76 | #include <linux/compiler.h> | ||
| 76 | 77 | ||
| 77 | #include <asm/switch_to.h> | 78 | #include <asm/switch_to.h> |
| 78 | #include <asm/tlb.h> | 79 | #include <asm/tlb.h> |
| @@ -432,7 +433,7 @@ void hrtick_start(struct rq *rq, u64 delay) | |||
| 432 | if (rq == this_rq()) { | 433 | if (rq == this_rq()) { |
| 433 | __hrtick_restart(rq); | 434 | __hrtick_restart(rq); |
| 434 | } else if (!rq->hrtick_csd_pending) { | 435 | } else if (!rq->hrtick_csd_pending) { |
| 435 | __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0); | 436 | smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); |
| 436 | rq->hrtick_csd_pending = 1; | 437 | rq->hrtick_csd_pending = 1; |
| 437 | } | 438 | } |
| 438 | } | 439 | } |
| @@ -555,12 +556,15 @@ void resched_cpu(int cpu) | |||
| 555 | * selecting an idle cpu will add more delays to the timers than intended | 556 | * selecting an idle cpu will add more delays to the timers than intended |
| 556 | * (as that cpu's timer base may not be uptodate wrt jiffies etc). | 557 | * (as that cpu's timer base may not be uptodate wrt jiffies etc). |
| 557 | */ | 558 | */ |
| 558 | int get_nohz_timer_target(void) | 559 | int get_nohz_timer_target(int pinned) |
| 559 | { | 560 | { |
| 560 | int cpu = smp_processor_id(); | 561 | int cpu = smp_processor_id(); |
| 561 | int i; | 562 | int i; |
| 562 | struct sched_domain *sd; | 563 | struct sched_domain *sd; |
| 563 | 564 | ||
| 565 | if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu)) | ||
| 566 | return cpu; | ||
| 567 | |||
| 564 | rcu_read_lock(); | 568 | rcu_read_lock(); |
| 565 | for_each_domain(cpu, sd) { | 569 | for_each_domain(cpu, sd) { |
| 566 | for_each_cpu(i, sched_domain_span(sd)) { | 570 | for_each_cpu(i, sched_domain_span(sd)) { |
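get_nohz_timer_target() now takes a pinned argument and bails out early, returning the local CPU when the timer is pinned, timer migration is disabled, or the CPU is not idle; only the remaining case searches the sched domains for a better target. A hedged sketch of a call site; the wrapper name is hypothetical and the real timer-core callers are not shown in this diff:

#include <linux/sched.h>

static int choose_timer_cpu(int pinned)
{
	/* Pinned timers and busy CPUs short-circuit to the current CPU. */
	return get_nohz_timer_target(pinned);
}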
| @@ -823,19 +827,13 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) | |||
| 823 | #endif | 827 | #endif |
| 824 | #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING | 828 | #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING |
| 825 | if (static_key_false((¶virt_steal_rq_enabled))) { | 829 | if (static_key_false((¶virt_steal_rq_enabled))) { |
| 826 | u64 st; | ||
| 827 | |||
| 828 | steal = paravirt_steal_clock(cpu_of(rq)); | 830 | steal = paravirt_steal_clock(cpu_of(rq)); |
| 829 | steal -= rq->prev_steal_time_rq; | 831 | steal -= rq->prev_steal_time_rq; |
| 830 | 832 | ||
| 831 | if (unlikely(steal > delta)) | 833 | if (unlikely(steal > delta)) |
| 832 | steal = delta; | 834 | steal = delta; |
| 833 | 835 | ||
| 834 | st = steal_ticks(steal); | ||
| 835 | steal = st * TICK_NSEC; | ||
| 836 | |||
| 837 | rq->prev_steal_time_rq += steal; | 836 | rq->prev_steal_time_rq += steal; |
| 838 | |||
| 839 | delta -= steal; | 837 | delta -= steal; |
| 840 | } | 838 | } |
| 841 | #endif | 839 | #endif |
| @@ -2848,52 +2846,6 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, | |||
| 2848 | } | 2846 | } |
| 2849 | EXPORT_SYMBOL(default_wake_function); | 2847 | EXPORT_SYMBOL(default_wake_function); |
| 2850 | 2848 | ||
| 2851 | static long __sched | ||
| 2852 | sleep_on_common(wait_queue_head_t *q, int state, long timeout) | ||
| 2853 | { | ||
| 2854 | unsigned long flags; | ||
| 2855 | wait_queue_t wait; | ||
| 2856 | |||
| 2857 | init_waitqueue_entry(&wait, current); | ||
| 2858 | |||
| 2859 | __set_current_state(state); | ||
| 2860 | |||
| 2861 | spin_lock_irqsave(&q->lock, flags); | ||
| 2862 | __add_wait_queue(q, &wait); | ||
| 2863 | spin_unlock(&q->lock); | ||
| 2864 | timeout = schedule_timeout(timeout); | ||
| 2865 | spin_lock_irq(&q->lock); | ||
| 2866 | __remove_wait_queue(q, &wait); | ||
| 2867 | spin_unlock_irqrestore(&q->lock, flags); | ||
| 2868 | |||
| 2869 | return timeout; | ||
| 2870 | } | ||
| 2871 | |||
| 2872 | void __sched interruptible_sleep_on(wait_queue_head_t *q) | ||
| 2873 | { | ||
| 2874 | sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | ||
| 2875 | } | ||
| 2876 | EXPORT_SYMBOL(interruptible_sleep_on); | ||
| 2877 | |||
| 2878 | long __sched | ||
| 2879 | interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) | ||
| 2880 | { | ||
| 2881 | return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); | ||
| 2882 | } | ||
| 2883 | EXPORT_SYMBOL(interruptible_sleep_on_timeout); | ||
| 2884 | |||
| 2885 | void __sched sleep_on(wait_queue_head_t *q) | ||
| 2886 | { | ||
| 2887 | sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | ||
| 2888 | } | ||
| 2889 | EXPORT_SYMBOL(sleep_on); | ||
| 2890 | |||
| 2891 | long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) | ||
| 2892 | { | ||
| 2893 | return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); | ||
| 2894 | } | ||
| 2895 | EXPORT_SYMBOL(sleep_on_timeout); | ||
| 2896 | |||
| 2897 | #ifdef CONFIG_RT_MUTEXES | 2849 | #ifdef CONFIG_RT_MUTEXES |
| 2898 | 2850 | ||
| 2899 | /* | 2851 | /* |
| @@ -6501,7 +6453,7 @@ static cpumask_var_t fallback_doms; | |||
| 6501 | * cpu core maps. It is supposed to return 1 if the topology changed | 6453 | * cpu core maps. It is supposed to return 1 if the topology changed |
| 6502 | * or 0 if it stayed the same. | 6454 | * or 0 if it stayed the same. |
| 6503 | */ | 6455 | */ |
| 6504 | int __attribute__((weak)) arch_update_cpu_topology(void) | 6456 | int __weak arch_update_cpu_topology(void) |
| 6505 | { | 6457 | { |
| 6506 | return 0; | 6458 | return 0; |
| 6507 | } | 6459 | } |
| @@ -7233,7 +7185,7 @@ void sched_move_task(struct task_struct *tsk) | |||
| 7233 | if (unlikely(running)) | 7185 | if (unlikely(running)) |
| 7234 | tsk->sched_class->put_prev_task(rq, tsk); | 7186 | tsk->sched_class->put_prev_task(rq, tsk); |
| 7235 | 7187 | ||
| 7236 | tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id, | 7188 | tg = container_of(task_css_check(tsk, cpu_cgrp_id, |
| 7237 | lockdep_is_held(&tsk->sighand->siglock)), | 7189 | lockdep_is_held(&tsk->sighand->siglock)), |
| 7238 | struct task_group, css); | 7190 | struct task_group, css); |
| 7239 | tg = autogroup_task_group(tsk, tg); | 7191 | tg = autogroup_task_group(tsk, tg); |
| @@ -7660,7 +7612,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, | |||
| 7660 | { | 7612 | { |
| 7661 | struct task_struct *task; | 7613 | struct task_struct *task; |
| 7662 | 7614 | ||
| 7663 | cgroup_taskset_for_each(task, css, tset) { | 7615 | cgroup_taskset_for_each(task, tset) { |
| 7664 | #ifdef CONFIG_RT_GROUP_SCHED | 7616 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7665 | if (!sched_rt_can_attach(css_tg(css), task)) | 7617 | if (!sched_rt_can_attach(css_tg(css), task)) |
| 7666 | return -EINVAL; | 7618 | return -EINVAL; |
| @@ -7678,7 +7630,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css, | |||
| 7678 | { | 7630 | { |
| 7679 | struct task_struct *task; | 7631 | struct task_struct *task; |
| 7680 | 7632 | ||
| 7681 | cgroup_taskset_for_each(task, css, tset) | 7633 | cgroup_taskset_for_each(task, tset) |
| 7682 | sched_move_task(task); | 7634 | sched_move_task(task); |
| 7683 | } | 7635 | } |
| 7684 | 7636 | ||
| @@ -8017,8 +7969,7 @@ static struct cftype cpu_files[] = { | |||
| 8017 | { } /* terminate */ | 7969 | { } /* terminate */ |
| 8018 | }; | 7970 | }; |
| 8019 | 7971 | ||
| 8020 | struct cgroup_subsys cpu_cgroup_subsys = { | 7972 | struct cgroup_subsys cpu_cgrp_subsys = { |
| 8021 | .name = "cpu", | ||
| 8022 | .css_alloc = cpu_cgroup_css_alloc, | 7973 | .css_alloc = cpu_cgroup_css_alloc, |
| 8023 | .css_free = cpu_cgroup_css_free, | 7974 | .css_free = cpu_cgroup_css_free, |
| 8024 | .css_online = cpu_cgroup_css_online, | 7975 | .css_online = cpu_cgroup_css_online, |
| @@ -8026,7 +7977,6 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
| 8026 | .can_attach = cpu_cgroup_can_attach, | 7977 | .can_attach = cpu_cgroup_can_attach, |
| 8027 | .attach = cpu_cgroup_attach, | 7978 | .attach = cpu_cgroup_attach, |
| 8028 | .exit = cpu_cgroup_exit, | 7979 | .exit = cpu_cgroup_exit, |
| 8029 | .subsys_id = cpu_cgroup_subsys_id, | ||
| 8030 | .base_cftypes = cpu_files, | 7980 | .base_cftypes = cpu_files, |
| 8031 | .early_init = 1, | 7981 | .early_init = 1, |
| 8032 | }; | 7982 | }; |
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 622e0818f905..c143ee380e3a 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c | |||
| @@ -41,7 +41,7 @@ static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) | |||
| 41 | /* return cpu accounting group to which this task belongs */ | 41 | /* return cpu accounting group to which this task belongs */ |
| 42 | static inline struct cpuacct *task_ca(struct task_struct *tsk) | 42 | static inline struct cpuacct *task_ca(struct task_struct *tsk) |
| 43 | { | 43 | { |
| 44 | return css_ca(task_css(tsk, cpuacct_subsys_id)); | 44 | return css_ca(task_css(tsk, cpuacct_cgrp_id)); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static inline struct cpuacct *parent_ca(struct cpuacct *ca) | 47 | static inline struct cpuacct *parent_ca(struct cpuacct *ca) |
| @@ -275,11 +275,9 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val) | |||
| 275 | rcu_read_unlock(); | 275 | rcu_read_unlock(); |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | struct cgroup_subsys cpuacct_subsys = { | 278 | struct cgroup_subsys cpuacct_cgrp_subsys = { |
| 279 | .name = "cpuacct", | ||
| 280 | .css_alloc = cpuacct_css_alloc, | 279 | .css_alloc = cpuacct_css_alloc, |
| 281 | .css_free = cpuacct_css_free, | 280 | .css_free = cpuacct_css_free, |
| 282 | .subsys_id = cpuacct_subsys_id, | ||
| 283 | .base_cftypes = files, | 281 | .base_cftypes = files, |
| 284 | .early_init = 1, | 282 | .early_init = 1, |
| 285 | }; | 283 | }; |
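Both the cpu and cpuacct controllers above drop their explicit .name and .subsys_id fields and are renamed to <name>_cgrp_subsys, matching the cpu_cgrp_id/cpuacct_cgrp_id identifiers used earlier in this diff. A hedged sketch of the generation pattern this relies on, where the controller list expands once into the ID enum; the macro and header layout here are assumptions:

#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>	/* SUBSYS(cpu) SUBSYS(cpuacct) ... */
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS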
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 58624a65f124..a95097cb4591 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
| @@ -258,16 +258,22 @@ static __always_inline bool steal_account_process_tick(void) | |||
| 258 | { | 258 | { |
| 259 | #ifdef CONFIG_PARAVIRT | 259 | #ifdef CONFIG_PARAVIRT |
| 260 | if (static_key_false(¶virt_steal_enabled)) { | 260 | if (static_key_false(¶virt_steal_enabled)) { |
| 261 | u64 steal, st = 0; | 261 | u64 steal; |
| 262 | cputime_t steal_ct; | ||
| 262 | 263 | ||
| 263 | steal = paravirt_steal_clock(smp_processor_id()); | 264 | steal = paravirt_steal_clock(smp_processor_id()); |
| 264 | steal -= this_rq()->prev_steal_time; | 265 | steal -= this_rq()->prev_steal_time; |
| 265 | 266 | ||
| 266 | st = steal_ticks(steal); | 267 | /* |
| 267 | this_rq()->prev_steal_time += st * TICK_NSEC; | 268 | * cputime_t may be less precise than nsecs (eg: if it's |
| 269 | * based on jiffies). Lets cast the result to cputime | ||
| 270 | * granularity and account the rest on the next rounds. | ||
| 271 | */ | ||
| 272 | steal_ct = nsecs_to_cputime(steal); | ||
| 273 | this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct); | ||
| 268 | 274 | ||
| 269 | account_steal_time(st); | 275 | account_steal_time(steal_ct); |
| 270 | return st; | 276 | return steal_ct; |
| 271 | } | 277 | } |
| 272 | #endif | 278 | #endif |
| 273 | return false; | 279 | return false; |
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index f3344c31632a..695f9773bb60 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c | |||
| @@ -111,8 +111,7 @@ static char *task_group_path(struct task_group *tg) | |||
| 111 | if (autogroup_path(tg, group_path, PATH_MAX)) | 111 | if (autogroup_path(tg, group_path, PATH_MAX)) |
| 112 | return group_path; | 112 | return group_path; |
| 113 | 113 | ||
| 114 | cgroup_path(tg->css.cgroup, group_path, PATH_MAX); | 114 | return cgroup_path(tg->css.cgroup, group_path, PATH_MAX); |
| 115 | return group_path; | ||
| 116 | } | 115 | } |
| 117 | #endif | 116 | #endif |
| 118 | 117 | ||
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index b7976a127178..8f4390a079c7 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c | |||
| @@ -63,6 +63,136 @@ void __weak arch_cpu_idle(void) | |||
| 63 | local_irq_enable(); | 63 | local_irq_enable(); |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | /** | ||
| 67 | * cpuidle_idle_call - the main idle function | ||
| 68 | * | ||
| 69 | * NOTE: no locks or semaphores should be used here | ||
| 70 | * return non-zero on failure | ||
| 71 | */ | ||
| 72 | static int cpuidle_idle_call(void) | ||
| 73 | { | ||
| 74 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | ||
| 75 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | ||
| 76 | int next_state, entered_state, ret; | ||
| 77 | bool broadcast; | ||
| 78 | |||
| 79 | /* | ||
| 80 | * Check if the idle task must be rescheduled. If it is the | ||
| 81 | * case, exit the function after re-enabling the local irq and | ||
| 82 | * set again the polling flag | ||
| 83 | */ | ||
| 84 | if (current_clr_polling_and_test()) { | ||
| 85 | local_irq_enable(); | ||
| 86 | __current_set_polling(); | ||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | /* | ||
| 91 | * During the idle period, stop measuring the disabled irqs | ||
| 92 | * critical sections latencies | ||
| 93 | */ | ||
| 94 | stop_critical_timings(); | ||
| 95 | |||
| 96 | /* | ||
| 97 | * Tell the RCU framework we are entering an idle section, | ||
| 98 | * so no more rcu read side critical sections and one more | ||
| 99 | * step to the grace period | ||
| 100 | */ | ||
| 101 | rcu_idle_enter(); | ||
| 102 | |||
| 103 | /* | ||
| 104 | * Check if the cpuidle framework is ready, otherwise fallback | ||
| 105 | * to the default arch specific idle method | ||
| 106 | */ | ||
| 107 | ret = cpuidle_enabled(drv, dev); | ||
| 108 | |||
| 109 | if (!ret) { | ||
| 110 | /* | ||
| 111 | * Ask the governor to choose an idle state it thinks | ||
| 112 | * it is convenient to go to. There is *always* a | ||
| 113 | * convenient idle state | ||
| 114 | */ | ||
| 115 | next_state = cpuidle_select(drv, dev); | ||
| 116 | |||
| 117 | /* | ||
| 118 | * The idle task must be scheduled, it is pointless to | ||
| 119 | * go to idle, just update no idle residency and get | ||
| 120 | * out of this function | ||
| 121 | */ | ||
| 122 | if (current_clr_polling_and_test()) { | ||
| 123 | dev->last_residency = 0; | ||
| 124 | entered_state = next_state; | ||
| 125 | local_irq_enable(); | ||
| 126 | } else { | ||
| 127 | broadcast = !!(drv->states[next_state].flags & | ||
| 128 | CPUIDLE_FLAG_TIMER_STOP); | ||
| 129 | |||
| 130 | if (broadcast) | ||
| 131 | /* | ||
| 132 | * Tell the time framework to switch | ||
| 133 | * to a broadcast timer because our | ||
| 134 | * local timer will be shutdown. If a | ||
| 135 | * local timer is used from another | ||
| 136 | * cpu as a broadcast timer, this call | ||
| 137 | * may fail if it is not available | ||
| 138 | */ | ||
| 139 | ret = clockevents_notify( | ||
| 140 | CLOCK_EVT_NOTIFY_BROADCAST_ENTER, | ||
| 141 | &dev->cpu); | ||
| 142 | |||
| 143 | if (!ret) { | ||
| 144 | trace_cpu_idle_rcuidle(next_state, dev->cpu); | ||
| 145 | |||
| 146 | /* | ||
| 147 | * Enter the idle state previously | ||
| 148 | * returned by the governor | ||
| 149 | * decision. This function will block | ||
| 150 | * until an interrupt occurs and will | ||
| 151 | * take care of re-enabling the local | ||
| 152 | * interrupts | ||
| 153 | */ | ||
| 154 | entered_state = cpuidle_enter(drv, dev, | ||
| 155 | next_state); | ||
| 156 | |||
| 157 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, | ||
| 158 | dev->cpu); | ||
| 159 | |||
| 160 | if (broadcast) | ||
| 161 | clockevents_notify( | ||
| 162 | CLOCK_EVT_NOTIFY_BROADCAST_EXIT, | ||
| 163 | &dev->cpu); | ||
| 164 | |||
| 165 | /* | ||
| 166 | * Give the governor an opportunity to reflect on the | ||
| 167 | * outcome | ||
| 168 | */ | ||
| 169 | cpuidle_reflect(dev, entered_state); | ||
| 170 | } | ||
| 171 | } | ||
| 172 | } | ||
| 173 | |||
| 174 | /* | ||
| 175 | * We can't use the cpuidle framework, let's use the default | ||
| 176 | * idle routine | ||
| 177 | */ | ||
| 178 | if (ret) | ||
| 179 | arch_cpu_idle(); | ||
| 180 | |||
| 181 | __current_set_polling(); | ||
| 182 | |||
| 183 | /* | ||
| 184 | * It is up to the idle functions to enable back the local | ||
| 185 | * interrupt | ||
| 186 | */ | ||
| 187 | if (WARN_ON_ONCE(irqs_disabled())) | ||
| 188 | local_irq_enable(); | ||
| 189 | |||
| 190 | rcu_idle_exit(); | ||
| 191 | start_critical_timings(); | ||
| 192 | |||
| 193 | return 0; | ||
| 194 | } | ||
| 195 | |||
| 66 | /* | 196 | /* |
| 67 | * Generic idle loop implementation | 197 | * Generic idle loop implementation |
| 68 | */ | 198 | */ |
| @@ -90,23 +220,11 @@ static void cpu_idle_loop(void) | |||
| 90 | * know that the IPI is going to arrive right | 220 | * know that the IPI is going to arrive right |
| 91 | * away | 221 | * away |
| 92 | */ | 222 | */ |
| 93 | if (cpu_idle_force_poll || tick_check_broadcast_expired()) { | 223 | if (cpu_idle_force_poll || tick_check_broadcast_expired()) |
| 94 | cpu_idle_poll(); | 224 | cpu_idle_poll(); |
| 95 | } else { | 225 | else |
| 96 | if (!current_clr_polling_and_test()) { | 226 | cpuidle_idle_call(); |
| 97 | stop_critical_timings(); | 227 | |
| 98 | rcu_idle_enter(); | ||
| 99 | if (cpuidle_idle_call()) | ||
| 100 | arch_cpu_idle(); | ||
| 101 | if (WARN_ON_ONCE(irqs_disabled())) | ||
| 102 | local_irq_enable(); | ||
| 103 | rcu_idle_exit(); | ||
| 104 | start_critical_timings(); | ||
| 105 | } else { | ||
| 106 | local_irq_enable(); | ||
| 107 | } | ||
| 108 | __current_set_polling(); | ||
| 109 | } | ||
| 110 | arch_cpu_idle_exit(); | 228 | arch_cpu_idle_exit(); |
| 111 | } | 229 | } |
| 112 | 230 | ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index f2de7a175620..c9007f28d3a2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
| @@ -1216,16 +1216,6 @@ extern void update_idle_cpu_load(struct rq *this_rq); | |||
| 1216 | 1216 | ||
| 1217 | extern void init_task_runnable_average(struct task_struct *p); | 1217 | extern void init_task_runnable_average(struct task_struct *p); |
| 1218 | 1218 | ||
| 1219 | #ifdef CONFIG_PARAVIRT | ||
| 1220 | static inline u64 steal_ticks(u64 steal) | ||
| 1221 | { | ||
| 1222 | if (unlikely(steal > NSEC_PER_SEC)) | ||
| 1223 | return div_u64(steal, TICK_NSEC); | ||
| 1224 | |||
| 1225 | return __iter_div_u64_rem(steal, TICK_NSEC, &steal); | ||
| 1226 | } | ||
| 1227 | #endif | ||
| 1228 | |||
| 1229 | static inline void inc_nr_running(struct rq *rq) | 1219 | static inline void inc_nr_running(struct rq *rq) |
| 1230 | { | 1220 | { |
| 1231 | rq->nr_running++; | 1221 | rq->nr_running++; |
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index da98af347e8b..a476bea17fbc 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c | |||
| @@ -142,4 +142,4 @@ static int __init proc_schedstat_init(void) | |||
| 142 | proc_create("schedstat", 0, NULL, &proc_schedstat_operations); | 142 | proc_create("schedstat", 0, NULL, &proc_schedstat_operations); |
| 143 | return 0; | 143 | return 0; |
| 144 | } | 144 | } |
| 145 | module_init(proc_schedstat_init); | 145 | subsys_initcall(proc_schedstat_init); |
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index b7a10048a32c..fd609bd9d6dd 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -55,60 +55,33 @@ struct seccomp_filter { | |||
| 55 | atomic_t usage; | 55 | atomic_t usage; |
| 56 | struct seccomp_filter *prev; | 56 | struct seccomp_filter *prev; |
| 57 | unsigned short len; /* Instruction count */ | 57 | unsigned short len; /* Instruction count */ |
| 58 | struct sock_filter insns[]; | 58 | struct sock_filter_int insnsi[]; |
| 59 | }; | 59 | }; |
| 60 | 60 | ||
| 61 | /* Limit any path through the tree to 256KB worth of instructions. */ | 61 | /* Limit any path through the tree to 256KB worth of instructions. */ |
| 62 | #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter)) | 62 | #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter)) |
| 63 | 63 | ||
| 64 | /** | 64 | /* |
| 65 | * get_u32 - returns a u32 offset into data | ||
| 66 | * @data: a unsigned 64 bit value | ||
| 67 | * @index: 0 or 1 to return the first or second 32-bits | ||
| 68 | * | ||
| 69 | * This inline exists to hide the length of unsigned long. If a 32-bit | ||
| 70 | * unsigned long is passed in, it will be extended and the top 32-bits will be | ||
| 71 | * 0. If it is a 64-bit unsigned long, then whatever data is resident will be | ||
| 72 | * properly returned. | ||
| 73 | * | ||
| 74 | * Endianness is explicitly ignored and left for BPF program authors to manage | 65 | * Endianness is explicitly ignored and left for BPF program authors to manage |
| 75 | * as per the specific architecture. | 66 | * as per the specific architecture. |
| 76 | */ | 67 | */ |
| 77 | static inline u32 get_u32(u64 data, int index) | 68 | static void populate_seccomp_data(struct seccomp_data *sd) |
| 78 | { | 69 | { |
| 79 | return ((u32 *)&data)[index]; | 70 | struct task_struct *task = current; |
| 80 | } | 71 | struct pt_regs *regs = task_pt_regs(task); |
| 81 | 72 | ||
| 82 | /* Helper for bpf_load below. */ | 73 | sd->nr = syscall_get_nr(task, regs); |
| 83 | #define BPF_DATA(_name) offsetof(struct seccomp_data, _name) | 74 | sd->arch = syscall_get_arch(task, regs); |
| 84 | /** | 75 | |
| 85 | * bpf_load: checks and returns a pointer to the requested offset | 76 | /* Unroll syscall_get_args to help gcc on arm. */ |
| 86 | * @off: offset into struct seccomp_data to load from | 77 | syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]); |
| 87 | * | 78 | syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]); |
| 88 | * Returns the requested 32-bits of data. | 79 | syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]); |
| 89 | * seccomp_check_filter() should assure that @off is 32-bit aligned | 80 | syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]); |
| 90 | * and not out of bounds. Failure to do so is a BUG. | 81 | syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]); |
| 91 | */ | 82 | syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]); |
| 92 | u32 seccomp_bpf_load(int off) | 83 | |
| 93 | { | 84 | sd->instruction_pointer = KSTK_EIP(task); |
| 94 | struct pt_regs *regs = task_pt_regs(current); | ||
| 95 | if (off == BPF_DATA(nr)) | ||
| 96 | return syscall_get_nr(current, regs); | ||
| 97 | if (off == BPF_DATA(arch)) | ||
| 98 | return syscall_get_arch(current, regs); | ||
| 99 | if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) { | ||
| 100 | unsigned long value; | ||
| 101 | int arg = (off - BPF_DATA(args[0])) / sizeof(u64); | ||
| 102 | int index = !!(off % sizeof(u64)); | ||
| 103 | syscall_get_arguments(current, regs, arg, 1, &value); | ||
| 104 | return get_u32(value, index); | ||
| 105 | } | ||
| 106 | if (off == BPF_DATA(instruction_pointer)) | ||
| 107 | return get_u32(KSTK_EIP(current), 0); | ||
| 108 | if (off == BPF_DATA(instruction_pointer) + sizeof(u32)) | ||
| 109 | return get_u32(KSTK_EIP(current), 1); | ||
| 110 | /* seccomp_check_filter should make this impossible. */ | ||
| 111 | BUG(); | ||
| 112 | } | 85 | } |
| 113 | 86 | ||
| 114 | /** | 87 | /** |
| @@ -133,17 +106,17 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) | |||
| 133 | 106 | ||
| 134 | switch (code) { | 107 | switch (code) { |
| 135 | case BPF_S_LD_W_ABS: | 108 | case BPF_S_LD_W_ABS: |
| 136 | ftest->code = BPF_S_ANC_SECCOMP_LD_W; | 109 | ftest->code = BPF_LDX | BPF_W | BPF_ABS; |
| 137 | /* 32-bit aligned and not out of bounds. */ | 110 | /* 32-bit aligned and not out of bounds. */ |
| 138 | if (k >= sizeof(struct seccomp_data) || k & 3) | 111 | if (k >= sizeof(struct seccomp_data) || k & 3) |
| 139 | return -EINVAL; | 112 | return -EINVAL; |
| 140 | continue; | 113 | continue; |
| 141 | case BPF_S_LD_W_LEN: | 114 | case BPF_S_LD_W_LEN: |
| 142 | ftest->code = BPF_S_LD_IMM; | 115 | ftest->code = BPF_LD | BPF_IMM; |
| 143 | ftest->k = sizeof(struct seccomp_data); | 116 | ftest->k = sizeof(struct seccomp_data); |
| 144 | continue; | 117 | continue; |
| 145 | case BPF_S_LDX_W_LEN: | 118 | case BPF_S_LDX_W_LEN: |
| 146 | ftest->code = BPF_S_LDX_IMM; | 119 | ftest->code = BPF_LDX | BPF_IMM; |
| 147 | ftest->k = sizeof(struct seccomp_data); | 120 | ftest->k = sizeof(struct seccomp_data); |
| 148 | continue; | 121 | continue; |
| 149 | /* Explicitly include allowed calls. */ | 122 | /* Explicitly include allowed calls. */ |
| @@ -185,6 +158,7 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) | |||
| 185 | case BPF_S_JMP_JGT_X: | 158 | case BPF_S_JMP_JGT_X: |
| 186 | case BPF_S_JMP_JSET_K: | 159 | case BPF_S_JMP_JSET_K: |
| 187 | case BPF_S_JMP_JSET_X: | 160 | case BPF_S_JMP_JSET_X: |
| 161 | sk_decode_filter(ftest, ftest); | ||
| 188 | continue; | 162 | continue; |
| 189 | default: | 163 | default: |
| 190 | return -EINVAL; | 164 | return -EINVAL; |
| @@ -202,18 +176,21 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) | |||
| 202 | static u32 seccomp_run_filters(int syscall) | 176 | static u32 seccomp_run_filters(int syscall) |
| 203 | { | 177 | { |
| 204 | struct seccomp_filter *f; | 178 | struct seccomp_filter *f; |
| 179 | struct seccomp_data sd; | ||
| 205 | u32 ret = SECCOMP_RET_ALLOW; | 180 | u32 ret = SECCOMP_RET_ALLOW; |
| 206 | 181 | ||
| 207 | /* Ensure unexpected behavior doesn't result in failing open. */ | 182 | /* Ensure unexpected behavior doesn't result in failing open. */ |
| 208 | if (WARN_ON(current->seccomp.filter == NULL)) | 183 | if (WARN_ON(current->seccomp.filter == NULL)) |
| 209 | return SECCOMP_RET_KILL; | 184 | return SECCOMP_RET_KILL; |
| 210 | 185 | ||
| 186 | populate_seccomp_data(&sd); | ||
| 187 | |||
| 211 | /* | 188 | /* |
| 212 | * All filters in the list are evaluated and the lowest BPF return | 189 | * All filters in the list are evaluated and the lowest BPF return |
| 213 | * value always takes priority (ignoring the DATA). | 190 | * value always takes priority (ignoring the DATA). |
| 214 | */ | 191 | */ |
| 215 | for (f = current->seccomp.filter; f; f = f->prev) { | 192 | for (f = current->seccomp.filter; f; f = f->prev) { |
| 216 | u32 cur_ret = sk_run_filter(NULL, f->insns); | 193 | u32 cur_ret = sk_run_filter_int_seccomp(&sd, f->insnsi); |
| 217 | if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) | 194 | if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) |
| 218 | ret = cur_ret; | 195 | ret = cur_ret; |
| 219 | } | 196 | } |
| @@ -231,6 +208,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) | |||
| 231 | struct seccomp_filter *filter; | 208 | struct seccomp_filter *filter; |
| 232 | unsigned long fp_size = fprog->len * sizeof(struct sock_filter); | 209 | unsigned long fp_size = fprog->len * sizeof(struct sock_filter); |
| 233 | unsigned long total_insns = fprog->len; | 210 | unsigned long total_insns = fprog->len; |
| 211 | struct sock_filter *fp; | ||
| 212 | int new_len; | ||
| 234 | long ret; | 213 | long ret; |
| 235 | 214 | ||
| 236 | if (fprog->len == 0 || fprog->len > BPF_MAXINSNS) | 215 | if (fprog->len == 0 || fprog->len > BPF_MAXINSNS) |
| @@ -252,28 +231,43 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) | |||
| 252 | CAP_SYS_ADMIN) != 0) | 231 | CAP_SYS_ADMIN) != 0) |
| 253 | return -EACCES; | 232 | return -EACCES; |
| 254 | 233 | ||
| 255 | /* Allocate a new seccomp_filter */ | 234 | fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN); |
| 256 | filter = kzalloc(sizeof(struct seccomp_filter) + fp_size, | 235 | if (!fp) |
| 257 | GFP_KERNEL|__GFP_NOWARN); | ||
| 258 | if (!filter) | ||
| 259 | return -ENOMEM; | 236 | return -ENOMEM; |
| 260 | atomic_set(&filter->usage, 1); | ||
| 261 | filter->len = fprog->len; | ||
| 262 | 237 | ||
| 263 | /* Copy the instructions from fprog. */ | 238 | /* Copy the instructions from fprog. */ |
| 264 | ret = -EFAULT; | 239 | ret = -EFAULT; |
| 265 | if (copy_from_user(filter->insns, fprog->filter, fp_size)) | 240 | if (copy_from_user(fp, fprog->filter, fp_size)) |
| 266 | goto fail; | 241 | goto free_prog; |
| 267 | 242 | ||
| 268 | /* Check and rewrite the fprog via the skb checker */ | 243 | /* Check and rewrite the fprog via the skb checker */ |
| 269 | ret = sk_chk_filter(filter->insns, filter->len); | 244 | ret = sk_chk_filter(fp, fprog->len); |
| 270 | if (ret) | 245 | if (ret) |
| 271 | goto fail; | 246 | goto free_prog; |
| 272 | 247 | ||
| 273 | /* Check and rewrite the fprog for seccomp use */ | 248 | /* Check and rewrite the fprog for seccomp use */ |
| 274 | ret = seccomp_check_filter(filter->insns, filter->len); | 249 | ret = seccomp_check_filter(fp, fprog->len); |
| 250 | if (ret) | ||
| 251 | goto free_prog; | ||
| 252 | |||
| 253 | /* Convert 'sock_filter' insns to 'sock_filter_int' insns */ | ||
| 254 | ret = sk_convert_filter(fp, fprog->len, NULL, &new_len); | ||
| 255 | if (ret) | ||
| 256 | goto free_prog; | ||
| 257 | |||
| 258 | /* Allocate a new seccomp_filter */ | ||
| 259 | filter = kzalloc(sizeof(struct seccomp_filter) + | ||
| 260 | sizeof(struct sock_filter_int) * new_len, | ||
| 261 | GFP_KERNEL|__GFP_NOWARN); | ||
| 262 | if (!filter) | ||
| 263 | goto free_prog; | ||
| 264 | |||
| 265 | ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len); | ||
| 275 | if (ret) | 266 | if (ret) |
| 276 | goto fail; | 267 | goto free_filter; |
| 268 | |||
| 269 | atomic_set(&filter->usage, 1); | ||
| 270 | filter->len = new_len; | ||
| 277 | 271 | ||
| 278 | /* | 272 | /* |
| 279 | * If there is an existing filter, make it the prev and don't drop its | 273 | * If there is an existing filter, make it the prev and don't drop its |
| @@ -282,8 +276,11 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) | |||
| 282 | filter->prev = current->seccomp.filter; | 276 | filter->prev = current->seccomp.filter; |
| 283 | current->seccomp.filter = filter; | 277 | current->seccomp.filter = filter; |
| 284 | return 0; | 278 | return 0; |
| 285 | fail: | 279 | |
| 280 | free_filter: | ||
| 286 | kfree(filter); | 281 | kfree(filter); |
| 282 | free_prog: | ||
| 283 | kfree(fp); | ||
| 287 | return ret; | 284 | return ret; |
| 288 | } | 285 | } |
| 289 | 286 | ||
| @@ -293,7 +290,7 @@ fail: | |||
| 293 | * | 290 | * |
| 294 | * Returns 0 on success and non-zero otherwise. | 291 | * Returns 0 on success and non-zero otherwise. |
| 295 | */ | 292 | */ |
| 296 | long seccomp_attach_user_filter(char __user *user_filter) | 293 | static long seccomp_attach_user_filter(char __user *user_filter) |
| 297 | { | 294 | { |
| 298 | struct sock_fprog fprog; | 295 | struct sock_fprog fprog; |
| 299 | long ret = -EFAULT; | 296 | long ret = -EFAULT; |
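seccomp_attach_filter() now validates the classic BPF program in a temporary buffer and only then allocates the seccomp_filter sized for the converted internal instructions. The two sk_convert_filter() calls are a two-pass pattern: a NULL destination computes new_len, the second pass emits into insnsi[]. A hedged, self-contained restatement of that pattern, relying on the same headers seccomp.c already includes; the wrapper name is hypothetical:

static struct seccomp_filter *convert_classic_filter(struct sock_filter *fp,
						     unsigned short len)
{
	struct seccomp_filter *filter;
	int new_len;

	/* Pass 1: a NULL destination only computes the converted length. */
	if (sk_convert_filter(fp, len, NULL, &new_len))
		return NULL;

	filter = kzalloc(sizeof(*filter) +
			 sizeof(struct sock_filter_int) * new_len,
			 GFP_KERNEL | __GFP_NOWARN);
	if (!filter)
		return NULL;

	/* Pass 2: emit the internal instructions into insnsi[]. */
	if (sk_convert_filter(fp, len, filter->insnsi, &new_len)) {
		kfree(filter);
		return NULL;
	}

	atomic_set(&filter->usage, 1);
	filter->len = new_len;
	return filter;
}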
diff --git a/kernel/signal.c b/kernel/signal.c index 52f881db1ca0..6ea13c09ae56 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -33,6 +33,8 @@ | |||
| 33 | #include <linux/uprobes.h> | 33 | #include <linux/uprobes.h> |
| 34 | #include <linux/compat.h> | 34 | #include <linux/compat.h> |
| 35 | #include <linux/cn_proc.h> | 35 | #include <linux/cn_proc.h> |
| 36 | #include <linux/compiler.h> | ||
| 37 | |||
| 36 | #define CREATE_TRACE_POINTS | 38 | #define CREATE_TRACE_POINTS |
| 37 | #include <trace/events/signal.h> | 39 | #include <trace/events/signal.h> |
| 38 | 40 | ||
| @@ -2382,7 +2384,7 @@ relock: | |||
| 2382 | * @regs: user register state | 2384 | * @regs: user register state |
| 2383 | * @stepping: nonzero if debugger single-step or block-step in use | 2385 | * @stepping: nonzero if debugger single-step or block-step in use |
| 2384 | * | 2386 | * |
| 2385 | * This function should be called when a signal has succesfully been | 2387 | * This function should be called when a signal has successfully been |
| 2386 | * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask | 2388 | * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask |
| 2387 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER | 2389 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER |
| 2388 | * is set in @ka->sa.sa_flags. Tracing is notified. | 2390 | * is set in @ka->sa.sa_flags. Tracing is notified. |
| @@ -3618,7 +3620,7 @@ SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) | |||
| 3618 | } | 3620 | } |
| 3619 | #endif | 3621 | #endif |
| 3620 | 3622 | ||
| 3621 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) | 3623 | __weak const char *arch_vma_name(struct vm_area_struct *vma) |
| 3622 | { | 3624 | { |
| 3623 | return NULL; | 3625 | return NULL; |
| 3624 | } | 3626 | } |
diff --git a/kernel/smp.c b/kernel/smp.c index ffee35bef179..06d574e42c72 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd) | |||
| 117 | csd->flags &= ~CSD_FLAG_LOCK; | 117 | csd->flags &= ~CSD_FLAG_LOCK; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | ||
| 121 | |||
| 120 | /* | 122 | /* |
| 121 | * Insert a previously allocated call_single_data element | 123 | * Insert a previously allocated call_single_data element |
| 122 | * for execution on the given CPU. data must already have | 124 | * for execution on the given CPU. data must already have |
| 123 | * ->func, ->info, and ->flags set. | 125 | * ->func, ->info, and ->flags set. |
| 124 | */ | 126 | */ |
| 125 | static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) | 127 | static int generic_exec_single(int cpu, struct call_single_data *csd, |
| 128 | smp_call_func_t func, void *info, int wait) | ||
| 126 | { | 129 | { |
| 130 | struct call_single_data csd_stack = { .flags = 0 }; | ||
| 131 | unsigned long flags; | ||
| 132 | |||
| 133 | |||
| 134 | if (cpu == smp_processor_id()) { | ||
| 135 | local_irq_save(flags); | ||
| 136 | func(info); | ||
| 137 | local_irq_restore(flags); | ||
| 138 | return 0; | ||
| 139 | } | ||
| 140 | |||
| 141 | |||
| 142 | if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) | ||
| 143 | return -ENXIO; | ||
| 144 | |||
| 145 | |||
| 146 | if (!csd) { | ||
| 147 | csd = &csd_stack; | ||
| 148 | if (!wait) | ||
| 149 | csd = &__get_cpu_var(csd_data); | ||
| 150 | } | ||
| 151 | |||
| 152 | csd_lock(csd); | ||
| 153 | |||
| 154 | csd->func = func; | ||
| 155 | csd->info = info; | ||
| 156 | |||
| 127 | if (wait) | 157 | if (wait) |
| 128 | csd->flags |= CSD_FLAG_WAIT; | 158 | csd->flags |= CSD_FLAG_WAIT; |
| 129 | 159 | ||
| @@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) | |||
| 143 | 173 | ||
| 144 | if (wait) | 174 | if (wait) |
| 145 | csd_lock_wait(csd); | 175 | csd_lock_wait(csd); |
| 176 | |||
| 177 | return 0; | ||
| 146 | } | 178 | } |
| 147 | 179 | ||
| 148 | /* | 180 | /* |
| @@ -151,7 +183,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) | |||
| 151 | */ | 183 | */ |
| 152 | void generic_smp_call_function_single_interrupt(void) | 184 | void generic_smp_call_function_single_interrupt(void) |
| 153 | { | 185 | { |
| 154 | struct llist_node *entry, *next; | 186 | struct llist_node *entry; |
| 187 | struct call_single_data *csd, *csd_next; | ||
| 155 | 188 | ||
| 156 | /* | 189 | /* |
| 157 | * Shouldn't receive this interrupt on a cpu that is not yet online. | 190 | * Shouldn't receive this interrupt on a cpu that is not yet online. |
| @@ -161,21 +194,12 @@ void generic_smp_call_function_single_interrupt(void) | |||
| 161 | entry = llist_del_all(&__get_cpu_var(call_single_queue)); | 194 | entry = llist_del_all(&__get_cpu_var(call_single_queue)); |
| 162 | entry = llist_reverse_order(entry); | 195 | entry = llist_reverse_order(entry); |
| 163 | 196 | ||
| 164 | while (entry) { | 197 | llist_for_each_entry_safe(csd, csd_next, entry, llist) { |
| 165 | struct call_single_data *csd; | ||
| 166 | |||
| 167 | next = entry->next; | ||
| 168 | |||
| 169 | csd = llist_entry(entry, struct call_single_data, llist); | ||
| 170 | csd->func(csd->info); | 198 | csd->func(csd->info); |
| 171 | csd_unlock(csd); | 199 | csd_unlock(csd); |
| 172 | |||
| 173 | entry = next; | ||
| 174 | } | 200 | } |
| 175 | } | 201 | } |
| 176 | 202 | ||
| 177 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | ||
| 178 | |||
| 179 | /* | 203 | /* |
| 180 | * smp_call_function_single - Run a function on a specific CPU | 204 | * smp_call_function_single - Run a function on a specific CPU |
| 181 | * @func: The function to run. This must be fast and non-blocking. | 205 | * @func: The function to run. This must be fast and non-blocking. |
| @@ -187,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | |||
| 187 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, | 211 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, |
| 188 | int wait) | 212 | int wait) |
| 189 | { | 213 | { |
| 190 | struct call_single_data d = { | ||
| 191 | .flags = 0, | ||
| 192 | }; | ||
| 193 | unsigned long flags; | ||
| 194 | int this_cpu; | 214 | int this_cpu; |
| 195 | int err = 0; | 215 | int err; |
| 196 | 216 | ||
| 197 | /* | 217 | /* |
| 198 | * prevent preemption and reschedule on another processor, | 218 | * prevent preemption and reschedule on another processor, |
| @@ -209,32 +229,41 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info, | |||
| 209 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() | 229 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() |
| 210 | && !oops_in_progress); | 230 | && !oops_in_progress); |
| 211 | 231 | ||
| 212 | if (cpu == this_cpu) { | 232 | err = generic_exec_single(cpu, NULL, func, info, wait); |
| 213 | local_irq_save(flags); | ||
| 214 | func(info); | ||
| 215 | local_irq_restore(flags); | ||
| 216 | } else { | ||
| 217 | if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { | ||
| 218 | struct call_single_data *csd = &d; | ||
| 219 | 233 | ||
| 220 | if (!wait) | 234 | put_cpu(); |
| 221 | csd = &__get_cpu_var(csd_data); | ||
| 222 | 235 | ||
| 223 | csd_lock(csd); | 236 | return err; |
| 237 | } | ||
| 238 | EXPORT_SYMBOL(smp_call_function_single); | ||
| 224 | 239 | ||
| 225 | csd->func = func; | 240 | /** |
| 226 | csd->info = info; | 241 | * smp_call_function_single_async(): Run an asynchronous function on a |
| 227 | generic_exec_single(cpu, csd, wait); | 242 | * specific CPU. |
| 228 | } else { | 243 | * @cpu: The CPU to run on. |
| 229 | err = -ENXIO; /* CPU not online */ | 244 | * @csd: Pre-allocated and setup data structure |
| 230 | } | 245 | * |
| 231 | } | 246 | * Like smp_call_function_single(), but the call is asynchonous and |
| 247 | * can thus be done from contexts with disabled interrupts. | ||
| 248 | * | ||
| 249 | * The caller passes his own pre-allocated data structure | ||
| 250 | * (ie: embedded in an object) and is responsible for synchronizing it | ||
| 251 | * such that the IPIs performed on the @csd are strictly serialized. | ||
| 252 | * | ||
| 253 | * NOTE: Be careful, there is unfortunately no current debugging facility to | ||
| 254 | * validate the correctness of this serialization. | ||
| 255 | */ | ||
| 256 | int smp_call_function_single_async(int cpu, struct call_single_data *csd) | ||
| 257 | { | ||
| 258 | int err = 0; | ||
| 232 | 259 | ||
| 233 | put_cpu(); | 260 | preempt_disable(); |
| 261 | err = generic_exec_single(cpu, csd, csd->func, csd->info, 0); | ||
| 262 | preempt_enable(); | ||
| 234 | 263 | ||
| 235 | return err; | 264 | return err; |
| 236 | } | 265 | } |
| 237 | EXPORT_SYMBOL(smp_call_function_single); | 266 | EXPORT_SYMBOL_GPL(smp_call_function_single_async); |
| 238 | 267 | ||
| 239 | /* | 268 | /* |
| 240 | * smp_call_function_any - Run a function on any of the given cpus | 269 | * smp_call_function_any - Run a function on any of the given cpus |
| @@ -280,44 +309,6 @@ call: | |||
| 280 | EXPORT_SYMBOL_GPL(smp_call_function_any); | 309 | EXPORT_SYMBOL_GPL(smp_call_function_any); |
| 281 | 310 | ||
| 282 | /** | 311 | /** |
| 283 | * __smp_call_function_single(): Run a function on a specific CPU | ||
| 284 | * @cpu: The CPU to run on. | ||
| 285 | * @data: Pre-allocated and setup data structure | ||
| 286 | * @wait: If true, wait until function has completed on specified CPU. | ||
| 287 | * | ||
| 288 | * Like smp_call_function_single(), but allow caller to pass in a | ||
| 289 | * pre-allocated data structure. Useful for embedding @data inside | ||
| 290 | * other structures, for instance. | ||
| 291 | */ | ||
| 292 | void __smp_call_function_single(int cpu, struct call_single_data *csd, | ||
| 293 | int wait) | ||
| 294 | { | ||
| 295 | unsigned int this_cpu; | ||
| 296 | unsigned long flags; | ||
| 297 | |||
| 298 | this_cpu = get_cpu(); | ||
| 299 | /* | ||
| 300 | * Can deadlock when called with interrupts disabled. | ||
| 301 | * We allow cpu's that are not yet online though, as no one else can | ||
| 302 | * send smp call function interrupt to this cpu and as such deadlocks | ||
| 303 | * can't happen. | ||
| 304 | */ | ||
| 305 | WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() | ||
| 306 | && !oops_in_progress); | ||
| 307 | |||
| 308 | if (cpu == this_cpu) { | ||
| 309 | local_irq_save(flags); | ||
| 310 | csd->func(csd->info); | ||
| 311 | local_irq_restore(flags); | ||
| 312 | } else { | ||
| 313 | csd_lock(csd); | ||
| 314 | generic_exec_single(cpu, csd, wait); | ||
| 315 | } | ||
| 316 | put_cpu(); | ||
| 317 | } | ||
| 318 | EXPORT_SYMBOL_GPL(__smp_call_function_single); | ||
| 319 | |||
| 320 | /** | ||
| 321 | * smp_call_function_many(): Run a function on a set of other CPUs. | 312 | * smp_call_function_many(): Run a function on a set of other CPUs. |
| 322 | * @mask: The set of cpus to run on (only runs on online subset). | 313 | * @mask: The set of cpus to run on (only runs on online subset). |
| 323 | * @func: The function to run. This must be fast and non-blocking. | 314 | * @func: The function to run. This must be fast and non-blocking. |
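__smp_call_function_single() is gone; asynchronous callers now use smp_call_function_single_async() with a caller-owned csd whose ->func and ->info are set up front, and the caller must ensure the csd is not reused before the previous IPI has completed. A hedged usage sketch; the struct and function names are hypothetical:

#include <linux/smp.h>
#include <linux/printk.h>

struct my_work {
	struct call_single_data csd;
	int value;
};

static void my_remote_func(void *info)
{
	struct my_work *w = info;

	pr_info("remote value %d\n", w->value);
}

static void kick_cpu(struct my_work *w, int cpu)
{
	w->csd.func = my_remote_func;
	w->csd.info = w;
	/* Legal with interrupts disabled; reuse of w->csd must be
	 * serialized by the caller. */
	smp_call_function_single_async(cpu, &w->csd);
}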
diff --git a/kernel/softirq.c b/kernel/softirq.c index 490fcbb1dc5b..b50990a5bea0 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
| 26 | #include <linux/smpboot.h> | 26 | #include <linux/smpboot.h> |
| 27 | #include <linux/tick.h> | 27 | #include <linux/tick.h> |
| 28 | #include <linux/irq.h> | ||
| 28 | 29 | ||
| 29 | #define CREATE_TRACE_POINTS | 30 | #define CREATE_TRACE_POINTS |
| 30 | #include <trace/events/irq.h> | 31 | #include <trace/events/irq.h> |
diff --git a/kernel/sys.c b/kernel/sys.c index adaeab6f7a87..fba0f29401ea 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -1996,6 +1996,21 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
| 1996 | if (arg2 || arg3 || arg4 || arg5) | 1996 | if (arg2 || arg3 || arg4 || arg5) |
| 1997 | return -EINVAL; | 1997 | return -EINVAL; |
| 1998 | return current->no_new_privs ? 1 : 0; | 1998 | return current->no_new_privs ? 1 : 0; |
| 1999 | case PR_GET_THP_DISABLE: | ||
| 2000 | if (arg2 || arg3 || arg4 || arg5) | ||
| 2001 | return -EINVAL; | ||
| 2002 | error = !!(me->mm->def_flags & VM_NOHUGEPAGE); | ||
| 2003 | break; | ||
| 2004 | case PR_SET_THP_DISABLE: | ||
| 2005 | if (arg3 || arg4 || arg5) | ||
| 2006 | return -EINVAL; | ||
| 2007 | down_write(&me->mm->mmap_sem); | ||
| 2008 | if (arg2) | ||
| 2009 | me->mm->def_flags |= VM_NOHUGEPAGE; | ||
| 2010 | else | ||
| 2011 | me->mm->def_flags &= ~VM_NOHUGEPAGE; | ||
| 2012 | up_write(&me->mm->mmap_sem); | ||
| 2013 | break; | ||
| 1999 | default: | 2014 | default: |
| 2000 | error = -EINVAL; | 2015 | error = -EINVAL; |
| 2001 | break; | 2016 | break; |
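The new PR_SET_THP_DISABLE/PR_GET_THP_DISABLE pair toggles VM_NOHUGEPAGE in mm->def_flags, so the setting affects mappings created after the call and is queried per process. A minimal userspace sketch, assuming a uapi <linux/prctl.h> that already defines the two constants:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* arg2 != 0 disables THP for future mappings; arg3..arg5 must be 0. */
	if (prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0))
		perror("PR_SET_THP_DISABLE");

	/* Returns 1 when THP is disabled for this process, 0 otherwise. */
	printf("THP disabled: %d\n", prctl(PR_GET_THP_DISABLE, 0, 0, 0, 0));
	return 0;
}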
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 7078052284fd..bc8d1b74a6b9 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c | |||
| @@ -146,11 +146,13 @@ cond_syscall(sys_io_destroy); | |||
| 146 | cond_syscall(sys_io_submit); | 146 | cond_syscall(sys_io_submit); |
| 147 | cond_syscall(sys_io_cancel); | 147 | cond_syscall(sys_io_cancel); |
| 148 | cond_syscall(sys_io_getevents); | 148 | cond_syscall(sys_io_getevents); |
| 149 | cond_syscall(sys_sysfs); | ||
| 149 | cond_syscall(sys_syslog); | 150 | cond_syscall(sys_syslog); |
| 150 | cond_syscall(sys_process_vm_readv); | 151 | cond_syscall(sys_process_vm_readv); |
| 151 | cond_syscall(sys_process_vm_writev); | 152 | cond_syscall(sys_process_vm_writev); |
| 152 | cond_syscall(compat_sys_process_vm_readv); | 153 | cond_syscall(compat_sys_process_vm_readv); |
| 153 | cond_syscall(compat_sys_process_vm_writev); | 154 | cond_syscall(compat_sys_process_vm_writev); |
| 155 | cond_syscall(sys_uselib); | ||
| 154 | 156 | ||
| 155 | /* arch-specific weak syscall entries */ | 157 | /* arch-specific weak syscall entries */ |
| 156 | cond_syscall(sys_pciconfig_read); | 158 | cond_syscall(sys_pciconfig_read); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 7754ff16f334..74f5b580fe34 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -112,9 +112,6 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max; | |||
| 112 | #ifndef CONFIG_MMU | 112 | #ifndef CONFIG_MMU |
| 113 | extern int sysctl_nr_trim_pages; | 113 | extern int sysctl_nr_trim_pages; |
| 114 | #endif | 114 | #endif |
| 115 | #ifdef CONFIG_BLOCK | ||
| 116 | extern int blk_iopoll_enabled; | ||
| 117 | #endif | ||
| 118 | 115 | ||
| 119 | /* Constants used for minimum and maximum */ | 116 | /* Constants used for minimum and maximum */ |
| 120 | #ifdef CONFIG_LOCKUP_DETECTOR | 117 | #ifdef CONFIG_LOCKUP_DETECTOR |
| @@ -126,7 +123,7 @@ static int __maybe_unused neg_one = -1; | |||
| 126 | static int zero; | 123 | static int zero; |
| 127 | static int __maybe_unused one = 1; | 124 | static int __maybe_unused one = 1; |
| 128 | static int __maybe_unused two = 2; | 125 | static int __maybe_unused two = 2; |
| 129 | static int __maybe_unused three = 3; | 126 | static int __maybe_unused four = 4; |
| 130 | static unsigned long one_ul = 1; | 127 | static unsigned long one_ul = 1; |
| 131 | static int one_hundred = 100; | 128 | static int one_hundred = 100; |
| 132 | #ifdef CONFIG_PRINTK | 129 | #ifdef CONFIG_PRINTK |
| @@ -144,6 +141,11 @@ static int min_percpu_pagelist_fract = 8; | |||
| 144 | static int ngroups_max = NGROUPS_MAX; | 141 | static int ngroups_max = NGROUPS_MAX; |
| 145 | static const int cap_last_cap = CAP_LAST_CAP; | 142 | static const int cap_last_cap = CAP_LAST_CAP; |
| 146 | 143 | ||
| 144 | /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */ | ||
| 145 | #ifdef CONFIG_DETECT_HUNG_TASK | ||
| 146 | static unsigned long hung_task_timeout_max = (LONG_MAX/HZ); | ||
| 147 | #endif | ||
| 148 | |||
| 147 | #ifdef CONFIG_INOTIFY_USER | 149 | #ifdef CONFIG_INOTIFY_USER |
| 148 | #include <linux/inotify.h> | 150 | #include <linux/inotify.h> |
| 149 | #endif | 151 | #endif |
| @@ -988,6 +990,7 @@ static struct ctl_table kern_table[] = { | |||
| 988 | .maxlen = sizeof(unsigned long), | 990 | .maxlen = sizeof(unsigned long), |
| 989 | .mode = 0644, | 991 | .mode = 0644, |
| 990 | .proc_handler = proc_dohung_task_timeout_secs, | 992 | .proc_handler = proc_dohung_task_timeout_secs, |
| 993 | .extra2 = &hung_task_timeout_max, | ||
| 991 | }, | 994 | }, |
| 992 | { | 995 | { |
| 993 | .procname = "hung_task_warnings", | 996 | .procname = "hung_task_warnings", |
| @@ -1087,15 +1090,6 @@ static struct ctl_table kern_table[] = { | |||
| 1087 | .proc_handler = proc_dointvec, | 1090 | .proc_handler = proc_dointvec, |
| 1088 | }, | 1091 | }, |
| 1089 | #endif | 1092 | #endif |
| 1090 | #ifdef CONFIG_BLOCK | ||
| 1091 | { | ||
| 1092 | .procname = "blk_iopoll", | ||
| 1093 | .data = &blk_iopoll_enabled, | ||
| 1094 | .maxlen = sizeof(int), | ||
| 1095 | .mode = 0644, | ||
| 1096 | .proc_handler = proc_dointvec, | ||
| 1097 | }, | ||
| 1098 | #endif | ||
| 1099 | { } | 1093 | { } |
| 1100 | }; | 1094 | }; |
| 1101 | 1095 | ||
| @@ -1276,7 +1270,7 @@ static struct ctl_table vm_table[] = { | |||
| 1276 | .mode = 0644, | 1270 | .mode = 0644, |
| 1277 | .proc_handler = drop_caches_sysctl_handler, | 1271 | .proc_handler = drop_caches_sysctl_handler, |
| 1278 | .extra1 = &one, | 1272 | .extra1 = &one, |
| 1279 | .extra2 = &three, | 1273 | .extra2 = &four, |
| 1280 | }, | 1274 | }, |
| 1281 | #ifdef CONFIG_COMPACTION | 1275 | #ifdef CONFIG_COMPACTION |
| 1282 | { | 1276 | { |
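In the sysctl changes above, hung_task_timeout_secs gains an upper bound via .extra2 and vm.drop_caches now accepts values up to 4. For the bound, extra1/extra2 are the min/max pointers consumed by proc_doulongvec_minmax(); a hedged sketch of the pattern with hypothetical names:

#include <linux/sysctl.h>

static unsigned long my_timeout_secs;
static unsigned long my_timeout_max = LONG_MAX / HZ;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_timeout_secs",
		.data		= &my_timeout_secs,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra2		= &my_timeout_max,	/* writes above this are rejected */
	},
	{ }
};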
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index 3ce6e8c5f3fc..f448513a45ed 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig | |||
| @@ -124,7 +124,7 @@ config NO_HZ_FULL | |||
| 124 | endchoice | 124 | endchoice |
| 125 | 125 | ||
| 126 | config NO_HZ_FULL_ALL | 126 | config NO_HZ_FULL_ALL |
| 127 | bool "Full dynticks system on all CPUs by default" | 127 | bool "Full dynticks system on all CPUs by default (except CPU 0)" |
| 128 | depends on NO_HZ_FULL | 128 | depends on NO_HZ_FULL |
| 129 | help | 129 | help |
| 130 | If the user doesn't pass the nohz_full boot option to | 130 | If the user doesn't pass the nohz_full boot option to |
diff --git a/kernel/time/Makefile b/kernel/time/Makefile index 9250130646f5..57a413fd0ebf 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile | |||
| @@ -3,7 +3,10 @@ obj-y += timeconv.o posix-clock.o alarmtimer.o | |||
| 3 | 3 | ||
| 4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o | 4 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o |
| 5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o | 5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o |
| 6 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o | 6 | ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y) |
| 7 | obj-y += tick-broadcast.o | ||
| 8 | obj-$(CONFIG_TICK_ONESHOT) += tick-broadcast-hrtimer.o | ||
| 9 | endif | ||
| 7 | obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o | 10 | obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o |
| 8 | obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o | 11 | obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o |
| 9 | obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o | 12 | obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o |
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 086ad6043bcb..ad362c260ef4 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
| @@ -439,6 +439,19 @@ void clockevents_config_and_register(struct clock_event_device *dev, | |||
| 439 | } | 439 | } |
| 440 | EXPORT_SYMBOL_GPL(clockevents_config_and_register); | 440 | EXPORT_SYMBOL_GPL(clockevents_config_and_register); |
| 441 | 441 | ||
| 442 | int __clockevents_update_freq(struct clock_event_device *dev, u32 freq) | ||
| 443 | { | ||
| 444 | clockevents_config(dev, freq); | ||
| 445 | |||
| 446 | if (dev->mode == CLOCK_EVT_MODE_ONESHOT) | ||
| 447 | return clockevents_program_event(dev, dev->next_event, false); | ||
| 448 | |||
| 449 | if (dev->mode == CLOCK_EVT_MODE_PERIODIC) | ||
| 450 | dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev); | ||
| 451 | |||
| 452 | return 0; | ||
| 453 | } | ||
| 454 | |||
| 442 | /** | 455 | /** |
| 443 | * clockevents_update_freq - Update frequency and reprogram a clock event device. | 456 | * clockevents_update_freq - Update frequency and reprogram a clock event device. |
| 444 | * @dev: device to modify | 457 | * @dev: device to modify |
| @@ -446,17 +459,22 @@ EXPORT_SYMBOL_GPL(clockevents_config_and_register); | |||
| 446 | * | 459 | * |
| 447 | * Reconfigure and reprogram a clock event device in oneshot | 460 | * Reconfigure and reprogram a clock event device in oneshot |
| 448 | * mode. Must be called on the cpu for which the device delivers per | 461 | * mode. Must be called on the cpu for which the device delivers per |
| 449 | * cpu timer events with interrupts disabled! Returns 0 on success, | 462 | * cpu timer events. If called for the broadcast device the core takes |
| 450 | * -ETIME when the event is in the past. | 463 | * care of serialization. |
| 464 | * | ||
| 465 | * Returns 0 on success, -ETIME when the event is in the past. | ||
| 451 | */ | 466 | */ |
| 452 | int clockevents_update_freq(struct clock_event_device *dev, u32 freq) | 467 | int clockevents_update_freq(struct clock_event_device *dev, u32 freq) |
| 453 | { | 468 | { |
| 454 | clockevents_config(dev, freq); | 469 | unsigned long flags; |
| 455 | 470 | int ret; | |
| 456 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) | ||
| 457 | return 0; | ||
| 458 | 471 | ||
| 459 | return clockevents_program_event(dev, dev->next_event, false); | 472 | local_irq_save(flags); |
| 473 | ret = tick_broadcast_update_freq(dev, freq); | ||
| 474 | if (ret == -ENODEV) | ||
| 475 | ret = __clockevents_update_freq(dev, freq); | ||
| 476 | local_irq_restore(flags); | ||
| 477 | return ret; | ||
| 460 | } | 478 | } |
| 461 | 479 | ||
| 462 | /* | 480 | /* |
| @@ -524,12 +542,13 @@ void clockevents_resume(void) | |||
| 524 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 542 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
| 525 | /** | 543 | /** |
| 526 | * clockevents_notify - notification about relevant events | 544 | * clockevents_notify - notification about relevant events |
| 545 | * Returns 0 on success, any other value on error | ||
| 527 | */ | 546 | */ |
| 528 | void clockevents_notify(unsigned long reason, void *arg) | 547 | int clockevents_notify(unsigned long reason, void *arg) |
| 529 | { | 548 | { |
| 530 | struct clock_event_device *dev, *tmp; | 549 | struct clock_event_device *dev, *tmp; |
| 531 | unsigned long flags; | 550 | unsigned long flags; |
| 532 | int cpu; | 551 | int cpu, ret = 0; |
| 533 | 552 | ||
| 534 | raw_spin_lock_irqsave(&clockevents_lock, flags); | 553 | raw_spin_lock_irqsave(&clockevents_lock, flags); |
| 535 | 554 | ||
| @@ -542,7 +561,7 @@ void clockevents_notify(unsigned long reason, void *arg) | |||
| 542 | 561 | ||
| 543 | case CLOCK_EVT_NOTIFY_BROADCAST_ENTER: | 562 | case CLOCK_EVT_NOTIFY_BROADCAST_ENTER: |
| 544 | case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: | 563 | case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: |
| 545 | tick_broadcast_oneshot_control(reason); | 564 | ret = tick_broadcast_oneshot_control(reason); |
| 546 | break; | 565 | break; |
| 547 | 566 | ||
| 548 | case CLOCK_EVT_NOTIFY_CPU_DYING: | 567 | case CLOCK_EVT_NOTIFY_CPU_DYING: |
| @@ -585,6 +604,7 @@ void clockevents_notify(unsigned long reason, void *arg) | |||
| 585 | break; | 604 | break; |
| 586 | } | 605 | } |
| 587 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); | 606 | raw_spin_unlock_irqrestore(&clockevents_lock, flags); |
| 607 | return ret; | ||
| 588 | } | 608 | } |
| 589 | EXPORT_SYMBOL_GPL(clockevents_notify); | 609 | EXPORT_SYMBOL_GPL(clockevents_notify); |
| 590 | 610 | ||
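The clockevents_update_freq() rework above tries the broadcast path first and falls back to the per-cpu path when tick_broadcast_update_freq() reports -ENODEV, all with interrupts disabled. The standalone sketch below models only that fallback pattern; the struct and helpers are userspace stand-ins that borrow the kernel names, not the kernel implementation.

#include <errno.h>
#include <stdio.h>

struct clock_event_device { int is_broadcast; unsigned int freq; };

/* Stand-in broadcast path: only handles the broadcast device. */
static int tick_broadcast_update_freq(struct clock_event_device *dev, unsigned int freq)
{
        if (!dev->is_broadcast)
                return -ENODEV;         /* not ours, let the caller fall back */
        dev->freq = freq;               /* serialized under the broadcast lock in the kernel */
        return 0;
}

/* Stand-in per-cpu path. */
static int __clockevents_update_freq(struct clock_event_device *dev, unsigned int freq)
{
        dev->freq = freq;               /* reconfigure and reprogram in the kernel */
        return 0;
}

static int clockevents_update_freq(struct clock_event_device *dev, unsigned int freq)
{
        int ret;

        /* interrupts stay disabled around both attempts in the kernel */
        ret = tick_broadcast_update_freq(dev, freq);
        if (ret == -ENODEV)
                ret = __clockevents_update_freq(dev, freq);
        return ret;
}

int main(void)
{
        struct clock_event_device local = { .is_broadcast = 0 };
        struct clock_event_device bc = { .is_broadcast = 1 };

        printf("local: %d, bc: %d\n",
               clockevents_update_freq(&local, 1000),
               clockevents_update_freq(&bc, 1000));
        return 0;
}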
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index af8d1d4f3d55..419a52cecd20 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
| @@ -514,12 +514,13 @@ static void sync_cmos_clock(struct work_struct *work) | |||
| 514 | next.tv_sec++; | 514 | next.tv_sec++; |
| 515 | next.tv_nsec -= NSEC_PER_SEC; | 515 | next.tv_nsec -= NSEC_PER_SEC; |
| 516 | } | 516 | } |
| 517 | schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next)); | 517 | queue_delayed_work(system_power_efficient_wq, |
| 518 | &sync_cmos_work, timespec_to_jiffies(&next)); | ||
| 518 | } | 519 | } |
| 519 | 520 | ||
| 520 | void ntp_notify_cmos_timer(void) | 521 | void ntp_notify_cmos_timer(void) |
| 521 | { | 522 | { |
| 522 | schedule_delayed_work(&sync_cmos_work, 0); | 523 | queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); |
| 523 | } | 524 | } |
| 524 | 525 | ||
| 525 | #else | 526 | #else |
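Functionally the ntp.c hunk only retargets the work item to the power-efficient workqueue; the rounding above it still carries an overflowing tv_nsec into tv_sec. A tiny standalone illustration of that carry (ordinary C with a hypothetical struct name, not kernel code):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct timespec_sim { long tv_sec; long tv_nsec; };

/* Carry an overflowing nanosecond field into the seconds field. */
static void normalize(struct timespec_sim *t)
{
        if (t->tv_nsec >= NSEC_PER_SEC) {
                t->tv_sec++;
                t->tv_nsec -= NSEC_PER_SEC;
        }
}

int main(void)
{
        struct timespec_sim next = { .tv_sec = 3, .tv_nsec = 1400000000L };

        normalize(&next);
        printf("%ld.%09ld\n", next.tv_sec, next.tv_nsec);      /* prints 4.400000000 */
        return 0;
}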
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c new file mode 100644 index 000000000000..eb682d5c697c --- /dev/null +++ b/kernel/time/tick-broadcast-hrtimer.c | |||
| @@ -0,0 +1,106 @@ | |||
| 1 | /* | ||
| 2 | * linux/kernel/time/tick-broadcast-hrtimer.c | ||
| 3 | * This file emulates a local clock event device | ||
| 4 | * via a pseudo clock device. | ||
| 5 | */ | ||
| 6 | #include <linux/cpu.h> | ||
| 7 | #include <linux/err.h> | ||
| 8 | #include <linux/hrtimer.h> | ||
| 9 | #include <linux/interrupt.h> | ||
| 10 | #include <linux/percpu.h> | ||
| 11 | #include <linux/profile.h> | ||
| 12 | #include <linux/clockchips.h> | ||
| 13 | #include <linux/sched.h> | ||
| 14 | #include <linux/smp.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | |||
| 17 | #include "tick-internal.h" | ||
| 18 | |||
| 19 | static struct hrtimer bctimer; | ||
| 20 | |||
| 21 | static void bc_set_mode(enum clock_event_mode mode, | ||
| 22 | struct clock_event_device *bc) | ||
| 23 | { | ||
| 24 | switch (mode) { | ||
| 25 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
| 26 | /* | ||
| 27 | * Note, we cannot cancel the timer here as we might | ||
| 28 | * run into the following live lock scenario: | ||
| 29 | * | ||
| 30 | * cpu 0 cpu1 | ||
| 31 | * lock(broadcast_lock); | ||
| 32 | * hrtimer_interrupt() | ||
| 33 | * bc_handler() | ||
| 34 | * tick_handle_oneshot_broadcast(); | ||
| 35 | * lock(broadcast_lock); | ||
| 36 | * hrtimer_cancel() | ||
| 37 | * wait_for_callback() | ||
| 38 | */ | ||
| 39 | hrtimer_try_to_cancel(&bctimer); | ||
| 40 | break; | ||
| 41 | default: | ||
| 42 | break; | ||
| 43 | } | ||
| 44 | } | ||
| 45 | |||
| 46 | /* | ||
| 47 | * This is called from the guts of the broadcast code when the cpu | ||
| 48 | * which is about to enter idle has the earliest broadcast timer event. | ||
| 49 | */ | ||
| 50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | ||
| 51 | { | ||
| 52 | /* | ||
| 53 | * We try to cancel the timer first. If the callback is in | ||
| 54 | * flight on some other cpu then we let it handle it. If we | ||
| 55 | * were able to cancel the timer nothing can rearm it as we | ||
| 56 | * own broadcast_lock. | ||
| 57 | * | ||
| 58 | * However we can also be called from the event handler of | ||
| 59 | * ce_broadcast_hrtimer itself when it expires. We cannot | ||
| 60 | * restart the timer because we are in the callback, but we | ||
| 61 | * can set the expiry time and let the callback return | ||
| 62 | * HRTIMER_RESTART. | ||
| 63 | */ | ||
| 64 | if (hrtimer_try_to_cancel(&bctimer) >= 0) { | ||
| 65 | hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); | ||
| 66 | /* Bind the "device" to the cpu */ | ||
| 67 | bc->bound_on = smp_processor_id(); | ||
| 68 | } else if (bc->bound_on == smp_processor_id()) { | ||
| 69 | hrtimer_set_expires(&bctimer, expires); | ||
| 70 | } | ||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | |||
| 74 | static struct clock_event_device ce_broadcast_hrtimer = { | ||
| 75 | .set_mode = bc_set_mode, | ||
| 76 | .set_next_ktime = bc_set_next, | ||
| 77 | .features = CLOCK_EVT_FEAT_ONESHOT | | ||
| 78 | CLOCK_EVT_FEAT_KTIME | | ||
| 79 | CLOCK_EVT_FEAT_HRTIMER, | ||
| 80 | .rating = 0, | ||
| 81 | .bound_on = -1, | ||
| 82 | .min_delta_ns = 1, | ||
| 83 | .max_delta_ns = KTIME_MAX, | ||
| 84 | .min_delta_ticks = 1, | ||
| 85 | .max_delta_ticks = ULONG_MAX, | ||
| 86 | .mult = 1, | ||
| 87 | .shift = 0, | ||
| 88 | .cpumask = cpu_all_mask, | ||
| 89 | }; | ||
| 90 | |||
| 91 | static enum hrtimer_restart bc_handler(struct hrtimer *t) | ||
| 92 | { | ||
| 93 | ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer); | ||
| 94 | |||
| 95 | if (ce_broadcast_hrtimer.next_event.tv64 == KTIME_MAX) | ||
| 96 | return HRTIMER_NORESTART; | ||
| 97 | |||
| 98 | return HRTIMER_RESTART; | ||
| 99 | } | ||
| 100 | |||
| 101 | void tick_setup_hrtimer_broadcast(void) | ||
| 102 | { | ||
| 103 | hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
| 104 | bctimer.function = bc_handler; | ||
| 105 | clockevents_register_device(&ce_broadcast_hrtimer); | ||
| 106 | } | ||
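bc_set_next() above has two useful outcomes: if the hrtimer could be cancelled it is restarted and the pseudo device is bound to the current CPU; if we are inside the callback on the bound CPU, only the expiry is moved and bc_handler() returns HRTIMER_RESTART. The sketch below simulates just that decision with plain variables; it is illustrative only and involves no real hrtimers.

#include <stdio.h>

/* 1 = timer was idle or successfully cancelled, 0 = callback currently running */
static int try_to_cancel_ok;
static int bound_on = -1;
static long long expires_ns;

/* Mirror of the two branches in bc_set_next(), using stand-in state. */
static void set_next(long long expires, int this_cpu)
{
        if (try_to_cancel_ok) {
                expires_ns = expires;   /* (re)start the timer ... */
                bound_on = this_cpu;    /* ... and bind the pseudo device here */
        } else if (bound_on == this_cpu) {
                expires_ns = expires;   /* inside our own callback: just move the expiry */
        }
        /* else: the callback is in flight on another CPU; it reprograms itself */
}

int main(void)
{
        try_to_cancel_ok = 1;
        set_next(1000, 0);
        printf("bound_on=%d expires=%lld\n", bound_on, expires_ns);

        try_to_cancel_ok = 0;           /* now pretend we run from the handler on CPU 0 */
        set_next(2000, 0);
        printf("bound_on=%d expires=%lld\n", bound_on, expires_ns);
        return 0;
}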
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 98977a57ac72..64c5990fd500 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -120,6 +120,19 @@ int tick_is_broadcast_device(struct clock_event_device *dev) | |||
| 120 | return (dev && tick_broadcast_device.evtdev == dev); | 120 | return (dev && tick_broadcast_device.evtdev == dev); |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq) | ||
| 124 | { | ||
| 125 | int ret = -ENODEV; | ||
| 126 | |||
| 127 | if (tick_is_broadcast_device(dev)) { | ||
| 128 | raw_spin_lock(&tick_broadcast_lock); | ||
| 129 | ret = __clockevents_update_freq(dev, freq); | ||
| 130 | raw_spin_unlock(&tick_broadcast_lock); | ||
| 131 | } | ||
| 132 | return ret; | ||
| 133 | } | ||
| 134 | |||
| 135 | |||
| 123 | static void err_broadcast(const struct cpumask *mask) | 136 | static void err_broadcast(const struct cpumask *mask) |
| 124 | { | 137 | { |
| 125 | pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n"); | 138 | pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n"); |
| @@ -272,12 +285,8 @@ static void tick_do_broadcast(struct cpumask *mask) | |||
| 272 | */ | 285 | */ |
| 273 | static void tick_do_periodic_broadcast(void) | 286 | static void tick_do_periodic_broadcast(void) |
| 274 | { | 287 | { |
| 275 | raw_spin_lock(&tick_broadcast_lock); | ||
| 276 | |||
| 277 | cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask); | 288 | cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask); |
| 278 | tick_do_broadcast(tmpmask); | 289 | tick_do_broadcast(tmpmask); |
| 279 | |||
| 280 | raw_spin_unlock(&tick_broadcast_lock); | ||
| 281 | } | 290 | } |
| 282 | 291 | ||
| 283 | /* | 292 | /* |
| @@ -287,13 +296,15 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
| 287 | { | 296 | { |
| 288 | ktime_t next; | 297 | ktime_t next; |
| 289 | 298 | ||
| 299 | raw_spin_lock(&tick_broadcast_lock); | ||
| 300 | |||
| 290 | tick_do_periodic_broadcast(); | 301 | tick_do_periodic_broadcast(); |
| 291 | 302 | ||
| 292 | /* | 303 | /* |
| 293 | * The device is in periodic mode. No reprogramming necessary: | 304 | * The device is in periodic mode. No reprogramming necessary: |
| 294 | */ | 305 | */ |
| 295 | if (dev->mode == CLOCK_EVT_MODE_PERIODIC) | 306 | if (dev->mode == CLOCK_EVT_MODE_PERIODIC) |
| 296 | return; | 307 | goto unlock; |
| 297 | 308 | ||
| 298 | /* | 309 | /* |
| 299 | * Setup the next period for devices, which do not have | 310 | * Setup the next period for devices, which do not have |
| @@ -306,9 +317,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
| 306 | next = ktime_add(next, tick_period); | 317 | next = ktime_add(next, tick_period); |
| 307 | 318 | ||
| 308 | if (!clockevents_program_event(dev, next, false)) | 319 | if (!clockevents_program_event(dev, next, false)) |
| 309 | return; | 320 | goto unlock; |
| 310 | tick_do_periodic_broadcast(); | 321 | tick_do_periodic_broadcast(); |
| 311 | } | 322 | } |
| 323 | unlock: | ||
| 324 | raw_spin_unlock(&tick_broadcast_lock); | ||
| 312 | } | 325 | } |
| 313 | 326 | ||
| 314 | /* | 327 | /* |
| @@ -630,24 +643,61 @@ again: | |||
| 630 | raw_spin_unlock(&tick_broadcast_lock); | 643 | raw_spin_unlock(&tick_broadcast_lock); |
| 631 | } | 644 | } |
| 632 | 645 | ||
| 646 | static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu) | ||
| 647 | { | ||
| 648 | if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER)) | ||
| 649 | return 0; | ||
| 650 | if (bc->next_event.tv64 == KTIME_MAX) | ||
| 651 | return 0; | ||
| 652 | return bc->bound_on == cpu ? -EBUSY : 0; | ||
| 653 | } | ||
| 654 | |||
| 655 | static void broadcast_shutdown_local(struct clock_event_device *bc, | ||
| 656 | struct clock_event_device *dev) | ||
| 657 | { | ||
| 658 | /* | ||
| 659 | * For hrtimer based broadcasting we cannot shutdown the cpu | ||
| 660 | * local device if our own event is the first one to expire or | ||
| 661 | * if we own the broadcast timer. | ||
| 662 | */ | ||
| 663 | if (bc->features & CLOCK_EVT_FEAT_HRTIMER) { | ||
| 664 | if (broadcast_needs_cpu(bc, smp_processor_id())) | ||
| 665 | return; | ||
| 666 | if (dev->next_event.tv64 < bc->next_event.tv64) | ||
| 667 | return; | ||
| 668 | } | ||
| 669 | clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); | ||
| 670 | } | ||
| 671 | |||
| 672 | static void broadcast_move_bc(int deadcpu) | ||
| 673 | { | ||
| 674 | struct clock_event_device *bc = tick_broadcast_device.evtdev; | ||
| 675 | |||
| 676 | if (!bc || !broadcast_needs_cpu(bc, deadcpu)) | ||
| 677 | return; | ||
| 678 | /* This moves the broadcast assignment to this cpu */ | ||
| 679 | clockevents_program_event(bc, bc->next_event, 1); | ||
| 680 | } | ||
| 681 | |||
| 633 | /* | 682 | /* |
| 634 | * Powerstate information: The system enters/leaves a state, where | 683 | * Powerstate information: The system enters/leaves a state, where |
| 635 | * affected devices might stop | 684 | * affected devices might stop |
| 685 | * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups. | ||
| 636 | */ | 686 | */ |
| 637 | void tick_broadcast_oneshot_control(unsigned long reason) | 687 | int tick_broadcast_oneshot_control(unsigned long reason) |
| 638 | { | 688 | { |
| 639 | struct clock_event_device *bc, *dev; | 689 | struct clock_event_device *bc, *dev; |
| 640 | struct tick_device *td; | 690 | struct tick_device *td; |
| 641 | unsigned long flags; | 691 | unsigned long flags; |
| 642 | ktime_t now; | 692 | ktime_t now; |
| 643 | int cpu; | 693 | int cpu, ret = 0; |
| 644 | 694 | ||
| 645 | /* | 695 | /* |
| 646 | * Periodic mode does not care about the enter/exit of power | 696 | * Periodic mode does not care about the enter/exit of power |
| 647 | * states | 697 | * states |
| 648 | */ | 698 | */ |
| 649 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) | 699 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) |
| 650 | return; | 700 | return 0; |
| 651 | 701 | ||
| 652 | /* | 702 | /* |
| 653 | * We are called with preemption disabled from the depth of the | 703 | * We are called with preemption disabled from the depth of the |
| @@ -658,7 +708,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 658 | dev = td->evtdev; | 708 | dev = td->evtdev; |
| 659 | 709 | ||
| 660 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) | 710 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) |
| 661 | return; | 711 | return 0; |
| 662 | 712 | ||
| 663 | bc = tick_broadcast_device.evtdev; | 713 | bc = tick_broadcast_device.evtdev; |
| 664 | 714 | ||
| @@ -666,7 +716,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 666 | if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { | 716 | if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { |
| 667 | if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { | 717 | if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { |
| 668 | WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); | 718 | WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); |
| 669 | clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); | 719 | broadcast_shutdown_local(bc, dev); |
| 670 | /* | 720 | /* |
| 671 | * We only reprogram the broadcast timer if we | 721 | * We only reprogram the broadcast timer if we |
| 672 | * did not mark ourself in the force mask and | 722 | * did not mark ourself in the force mask and |
| @@ -679,6 +729,16 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 679 | dev->next_event.tv64 < bc->next_event.tv64) | 729 | dev->next_event.tv64 < bc->next_event.tv64) |
| 680 | tick_broadcast_set_event(bc, cpu, dev->next_event, 1); | 730 | tick_broadcast_set_event(bc, cpu, dev->next_event, 1); |
| 681 | } | 731 | } |
| 732 | /* | ||
| 733 | * If the current CPU owns the hrtimer broadcast | ||
| 734 | * mechanism, it cannot go deep idle and we remove the | ||
| 735 | * CPU from the broadcast mask. We don't have to go | ||
| 736 | * through the EXIT path as the local timer is not | ||
| 737 | * shutdown. | ||
| 738 | */ | ||
| 739 | ret = broadcast_needs_cpu(bc, cpu); | ||
| 740 | if (ret) | ||
| 741 | cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); | ||
| 682 | } else { | 742 | } else { |
| 683 | if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { | 743 | if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { |
| 684 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); | 744 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); |
| @@ -746,6 +806,7 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 746 | } | 806 | } |
| 747 | out: | 807 | out: |
| 748 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 808 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 809 | return ret; | ||
| 749 | } | 810 | } |
| 750 | 811 | ||
| 751 | /* | 812 | /* |
| @@ -852,6 +913,8 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
| 852 | cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); | 913 | cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); |
| 853 | cpumask_clear_cpu(cpu, tick_broadcast_force_mask); | 914 | cpumask_clear_cpu(cpu, tick_broadcast_force_mask); |
| 854 | 915 | ||
| 916 | broadcast_move_bc(cpu); | ||
| 917 | |||
| 855 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 918 | raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 856 | } | 919 | } |
| 857 | 920 | ||
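Because the hrtimer-based broadcast device is carried by a real CPU, that CPU must not enter a state where its local timer stops; tick_broadcast_oneshot_control() now signals this with -EBUSY and clears the CPU from the oneshot mask. The hypothetical caller below shows one way such a return value could be acted on; it is not the kernel's cpuidle code.

#include <errno.h>
#include <stdio.h>

/* Stand-in notification: -EBUSY means this CPU carries the broadcast hrtimer. */
static int broadcast_enter(int cpu, int broadcast_owner)
{
        return cpu == broadcast_owner ? -EBUSY : 0;
}

/* A caller picks a shallower idle state when the deep one is refused. */
static const char *pick_idle_state(int cpu, int broadcast_owner)
{
        if (broadcast_enter(cpu, broadcast_owner) == -EBUSY)
                return "shallow state (local timer keeps ticking)";
        return "deep state (local timer stops)";
}

int main(void)
{
        printf("cpu0: %s\n", pick_idle_state(0, 0));
        printf("cpu1: %s\n", pick_idle_state(1, 0));
        return 0;
}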
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 20b2fe37d105..015661279b68 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -98,18 +98,19 @@ static void tick_periodic(int cpu) | |||
| 98 | void tick_handle_periodic(struct clock_event_device *dev) | 98 | void tick_handle_periodic(struct clock_event_device *dev) |
| 99 | { | 99 | { |
| 100 | int cpu = smp_processor_id(); | 100 | int cpu = smp_processor_id(); |
| 101 | ktime_t next; | 101 | ktime_t next = dev->next_event; |
| 102 | 102 | ||
| 103 | tick_periodic(cpu); | 103 | tick_periodic(cpu); |
| 104 | 104 | ||
| 105 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) | 105 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) |
| 106 | return; | 106 | return; |
| 107 | /* | ||
| 108 | * Setup the next period for devices, which do not have | ||
| 109 | * periodic mode: | ||
| 110 | */ | ||
| 111 | next = ktime_add(dev->next_event, tick_period); | ||
| 112 | for (;;) { | 107 | for (;;) { |
| 108 | /* | ||
| 109 | * Setup the next period for devices, which do not have | ||
| 110 | * periodic mode: | ||
| 111 | */ | ||
| 112 | next = ktime_add(next, tick_period); | ||
| 113 | |||
| 113 | if (!clockevents_program_event(dev, next, false)) | 114 | if (!clockevents_program_event(dev, next, false)) |
| 114 | return; | 115 | return; |
| 115 | /* | 116 | /* |
| @@ -118,12 +119,11 @@ void tick_handle_periodic(struct clock_event_device *dev) | |||
| 118 | * to be sure we're using a real hardware clocksource. | 119 | * to be sure we're using a real hardware clocksource. |
| 119 | * Otherwise we could get trapped in an infinite | 120 | * Otherwise we could get trapped in an infinite |
| 120 | * loop, as the tick_periodic() increments jiffies, | 121 | * loop, as the tick_periodic() increments jiffies, |
| 121 | * when then will increment time, posibly causing | 122 | * which then will increment time, possibly causing |
| 122 | * the loop to trigger again and again. | 123 | * the loop to trigger again and again. |
| 123 | */ | 124 | */ |
| 124 | if (timekeeping_valid_for_hres()) | 125 | if (timekeeping_valid_for_hres()) |
| 125 | tick_periodic(cpu); | 126 | tick_periodic(cpu); |
| 126 | next = ktime_add(next, tick_period); | ||
| 127 | } | 127 | } |
| 128 | } | 128 | } |
| 129 | 129 | ||
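The reworked tick_handle_periodic() samples dev->next_event before tick_periodic() runs and then advances by one tick_period per loop iteration until clockevents_program_event() accepts a future expiry. A standalone simulation of that catch-up loop follows; the clock values, period and programming helper are stand-ins.

#include <stdio.h>

static long long now_ns = 10500;        /* pretend current time */
static const long long tick_period = 1000;

/* Succeeds only if the requested expiry is still in the future. */
static int program_event(long long expires)
{
        return expires > now_ns ? 0 : -1;       /* 0 = programmed, -1 = already past */
}

int main(void)
{
        long long next = 8000;  /* next_event sampled before the handler ran late */

        for (;;) {
                next += tick_period;            /* set up the next period */
                if (!program_event(next))
                        break;                  /* programmed a future event, done */
                /* in the kernel: tick_periodic() runs here to account the missed tick */
        }
        printf("programmed next event at %lld\n", next);        /* prints 11000 */
        return 0;
}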
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 8329669b51ec..7ab92b19965a 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
| @@ -46,7 +46,7 @@ extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); | |||
| 46 | extern void tick_resume_oneshot(void); | 46 | extern void tick_resume_oneshot(void); |
| 47 | # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | 47 | # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
| 48 | extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc); | 48 | extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc); |
| 49 | extern void tick_broadcast_oneshot_control(unsigned long reason); | 49 | extern int tick_broadcast_oneshot_control(unsigned long reason); |
| 50 | extern void tick_broadcast_switch_to_oneshot(void); | 50 | extern void tick_broadcast_switch_to_oneshot(void); |
| 51 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); | 51 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); |
| 52 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); | 52 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); |
| @@ -58,7 +58,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
| 58 | { | 58 | { |
| 59 | BUG(); | 59 | BUG(); |
| 60 | } | 60 | } |
| 61 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } | 61 | static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; } |
| 62 | static inline void tick_broadcast_switch_to_oneshot(void) { } | 62 | static inline void tick_broadcast_switch_to_oneshot(void) { } |
| 63 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 63 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
| 64 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | 64 | static inline int tick_broadcast_oneshot_active(void) { return 0; } |
| @@ -87,7 +87,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
| 87 | { | 87 | { |
| 88 | BUG(); | 88 | BUG(); |
| 89 | } | 89 | } |
| 90 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } | 90 | static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; } |
| 91 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 91 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
| 92 | static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | 92 | static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) |
| 93 | { | 93 | { |
| @@ -111,6 +111,7 @@ extern int tick_resume_broadcast(void); | |||
| 111 | extern void tick_broadcast_init(void); | 111 | extern void tick_broadcast_init(void); |
| 112 | extern void | 112 | extern void |
| 113 | tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); | 113 | tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); |
| 114 | int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq); | ||
| 114 | 115 | ||
| 115 | #else /* !BROADCAST */ | 116 | #else /* !BROADCAST */ |
| 116 | 117 | ||
| @@ -133,6 +134,8 @@ static inline void tick_shutdown_broadcast(unsigned int *cpup) { } | |||
| 133 | static inline void tick_suspend_broadcast(void) { } | 134 | static inline void tick_suspend_broadcast(void) { } |
| 134 | static inline int tick_resume_broadcast(void) { return 0; } | 135 | static inline int tick_resume_broadcast(void) { return 0; } |
| 135 | static inline void tick_broadcast_init(void) { } | 136 | static inline void tick_broadcast_init(void) { } |
| 137 | static inline int tick_broadcast_update_freq(struct clock_event_device *dev, | ||
| 138 | u32 freq) { return -ENODEV; } | ||
| 136 | 139 | ||
| 137 | /* | 140 | /* |
| 138 | * Set the periodic handler in non broadcast mode | 141 | * Set the periodic handler in non broadcast mode |
| @@ -152,6 +155,8 @@ static inline int tick_device_is_functional(struct clock_event_device *dev) | |||
| 152 | return !(dev->features & CLOCK_EVT_FEAT_DUMMY); | 155 | return !(dev->features & CLOCK_EVT_FEAT_DUMMY); |
| 153 | } | 156 | } |
| 154 | 157 | ||
| 158 | int __clockevents_update_freq(struct clock_event_device *dev, u32 freq); | ||
| 159 | |||
| 155 | #endif | 160 | #endif |
| 156 | 161 | ||
| 157 | extern void do_timer(unsigned long ticks); | 162 | extern void do_timer(unsigned long ticks); |
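tick-internal.h keeps the usual convention: when broadcast support is compiled out, the new hooks become static inline stubs returning a neutral value (0 or -ENODEV) so callers stay free of #ifdefs. A generic illustration of that pattern, not the kernel header:

#include <errno.h>
#include <stdio.h>

#define CONFIG_BROADCAST 0      /* flip to 1 to "compile in" the real implementation */

#if CONFIG_BROADCAST
static int broadcast_update_freq(unsigned int freq)
{
        printf("reprogramming broadcast device to %u Hz\n", freq);
        return 0;
}
#else
/* Stub: tells the caller to fall back to the per-cpu path. */
static inline int broadcast_update_freq(unsigned int freq) { return -ENODEV; }
#endif

int main(void)
{
        int ret = broadcast_update_freq(1000);

        if (ret == -ENODEV)
                printf("no broadcast device, using the local path\n");
        return 0;
}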
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 5b40279ecd71..f7df8ea21707 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/tick.h> | 22 | #include <linux/tick.h> |
| 23 | #include <linux/stop_machine.h> | 23 | #include <linux/stop_machine.h> |
| 24 | #include <linux/pvclock_gtod.h> | 24 | #include <linux/pvclock_gtod.h> |
| 25 | #include <linux/compiler.h> | ||
| 25 | 26 | ||
| 26 | #include "tick-internal.h" | 27 | #include "tick-internal.h" |
| 27 | #include "ntp_internal.h" | 28 | #include "ntp_internal.h" |
| @@ -760,7 +761,7 @@ u64 timekeeping_max_deferment(void) | |||
| 760 | * | 761 | * |
| 761 | * XXX - Do be sure to remove it once all arches implement it. | 762 | * XXX - Do be sure to remove it once all arches implement it. |
| 762 | */ | 763 | */ |
| 763 | void __attribute__((weak)) read_persistent_clock(struct timespec *ts) | 764 | void __weak read_persistent_clock(struct timespec *ts) |
| 764 | { | 765 | { |
| 765 | ts->tv_sec = 0; | 766 | ts->tv_sec = 0; |
| 766 | ts->tv_nsec = 0; | 767 | ts->tv_nsec = 0; |
| @@ -775,7 +776,7 @@ void __attribute__((weak)) read_persistent_clock(struct timespec *ts) | |||
| 775 | * | 776 | * |
| 776 | * XXX - Do be sure to remove it once all arches implement it. | 777 | * XXX - Do be sure to remove it once all arches implement it. |
| 777 | */ | 778 | */ |
| 778 | void __attribute__((weak)) read_boot_clock(struct timespec *ts) | 779 | void __weak read_boot_clock(struct timespec *ts) |
| 779 | { | 780 | { |
| 780 | ts->tv_sec = 0; | 781 | ts->tv_sec = 0; |
| 781 | ts->tv_nsec = 0; | 782 | ts->tv_nsec = 0; |
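__weak expands to __attribute__((weak)), so the fallback read_persistent_clock()/read_boot_clock() bodies are replaced at link time by any architecture that supplies a strong definition. A minimal single-file demonstration of the mechanism (GCC/Clang on ELF targets; the names below are made up):

#include <stdio.h>

struct timespec_sim { long tv_sec; long tv_nsec; };

/* Weak default: used only if no other object file provides a strong definition. */
__attribute__((weak)) void read_persistent_clock_sim(struct timespec_sim *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

int main(void)
{
        struct timespec_sim ts;

        read_persistent_clock_sim(&ts);
        printf("%ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
        /*
         * Linking in another object file with a non-weak
         * read_persistent_clock_sim() would silently replace the body
         * above - that is how an architecture overrides the fallback.
         */
        return 0;
}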
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c index 802433a4f5eb..4d54f97558df 100644 --- a/kernel/time/timekeeping_debug.c +++ b/kernel/time/timekeeping_debug.c | |||
| @@ -21,6 +21,8 @@ | |||
| 21 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
| 22 | #include <linux/time.h> | 22 | #include <linux/time.h> |
| 23 | 23 | ||
| 24 | #include "timekeeping_internal.h" | ||
| 25 | |||
| 24 | static unsigned int sleep_time_bin[32] = {0}; | 26 | static unsigned int sleep_time_bin[32] = {0}; |
| 25 | 27 | ||
| 26 | static int tk_debug_show_sleep_time(struct seq_file *s, void *data) | 28 | static int tk_debug_show_sleep_time(struct seq_file *s, void *data) |
diff --git a/kernel/timer.c b/kernel/timer.c index d78de047599b..87bd529879c2 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -81,6 +81,7 @@ struct tvec_base { | |||
| 81 | unsigned long timer_jiffies; | 81 | unsigned long timer_jiffies; |
| 82 | unsigned long next_timer; | 82 | unsigned long next_timer; |
| 83 | unsigned long active_timers; | 83 | unsigned long active_timers; |
| 84 | unsigned long all_timers; | ||
| 84 | struct tvec_root tv1; | 85 | struct tvec_root tv1; |
| 85 | struct tvec tv2; | 86 | struct tvec tv2; |
| 86 | struct tvec tv3; | 87 | struct tvec tv3; |
| @@ -337,6 +338,20 @@ void set_timer_slack(struct timer_list *timer, int slack_hz) | |||
| 337 | } | 338 | } |
| 338 | EXPORT_SYMBOL_GPL(set_timer_slack); | 339 | EXPORT_SYMBOL_GPL(set_timer_slack); |
| 339 | 340 | ||
| 341 | /* | ||
| 342 | * If the list is empty, catch up ->timer_jiffies to the current time. | ||
| 343 | * The caller must hold the tvec_base lock. Returns true if the list | ||
| 344 | * was empty and therefore ->timer_jiffies was updated. | ||
| 345 | */ | ||
| 346 | static bool catchup_timer_jiffies(struct tvec_base *base) | ||
| 347 | { | ||
| 348 | if (!base->all_timers) { | ||
| 349 | base->timer_jiffies = jiffies; | ||
| 350 | return true; | ||
| 351 | } | ||
| 352 | return false; | ||
| 353 | } | ||
| 354 | |||
| 340 | static void | 355 | static void |
| 341 | __internal_add_timer(struct tvec_base *base, struct timer_list *timer) | 356 | __internal_add_timer(struct tvec_base *base, struct timer_list *timer) |
| 342 | { | 357 | { |
| @@ -383,15 +398,17 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer) | |||
| 383 | 398 | ||
| 384 | static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) | 399 | static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) |
| 385 | { | 400 | { |
| 401 | (void)catchup_timer_jiffies(base); | ||
| 386 | __internal_add_timer(base, timer); | 402 | __internal_add_timer(base, timer); |
| 387 | /* | 403 | /* |
| 388 | * Update base->active_timers and base->next_timer | 404 | * Update base->active_timers and base->next_timer |
| 389 | */ | 405 | */ |
| 390 | if (!tbase_get_deferrable(timer->base)) { | 406 | if (!tbase_get_deferrable(timer->base)) { |
| 391 | if (time_before(timer->expires, base->next_timer)) | 407 | if (!base->active_timers++ || |
| 408 | time_before(timer->expires, base->next_timer)) | ||
| 392 | base->next_timer = timer->expires; | 409 | base->next_timer = timer->expires; |
| 393 | base->active_timers++; | ||
| 394 | } | 410 | } |
| 411 | base->all_timers++; | ||
| 395 | } | 412 | } |
| 396 | 413 | ||
| 397 | #ifdef CONFIG_TIMER_STATS | 414 | #ifdef CONFIG_TIMER_STATS |
| @@ -671,6 +688,8 @@ detach_expired_timer(struct timer_list *timer, struct tvec_base *base) | |||
| 671 | detach_timer(timer, true); | 688 | detach_timer(timer, true); |
| 672 | if (!tbase_get_deferrable(timer->base)) | 689 | if (!tbase_get_deferrable(timer->base)) |
| 673 | base->active_timers--; | 690 | base->active_timers--; |
| 691 | base->all_timers--; | ||
| 692 | (void)catchup_timer_jiffies(base); | ||
| 674 | } | 693 | } |
| 675 | 694 | ||
| 676 | static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, | 695 | static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, |
| @@ -685,6 +704,8 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, | |||
| 685 | if (timer->expires == base->next_timer) | 704 | if (timer->expires == base->next_timer) |
| 686 | base->next_timer = base->timer_jiffies; | 705 | base->next_timer = base->timer_jiffies; |
| 687 | } | 706 | } |
| 707 | base->all_timers--; | ||
| 708 | (void)catchup_timer_jiffies(base); | ||
| 688 | return 1; | 709 | return 1; |
| 689 | } | 710 | } |
| 690 | 711 | ||
| @@ -739,12 +760,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, | |||
| 739 | 760 | ||
| 740 | debug_activate(timer, expires); | 761 | debug_activate(timer, expires); |
| 741 | 762 | ||
| 742 | cpu = smp_processor_id(); | 763 | cpu = get_nohz_timer_target(pinned); |
| 743 | |||
| 744 | #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) | ||
| 745 | if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) | ||
| 746 | cpu = get_nohz_timer_target(); | ||
| 747 | #endif | ||
| 748 | new_base = per_cpu(tvec_bases, cpu); | 764 | new_base = per_cpu(tvec_bases, cpu); |
| 749 | 765 | ||
| 750 | if (base != new_base) { | 766 | if (base != new_base) { |
| @@ -939,8 +955,15 @@ void add_timer_on(struct timer_list *timer, int cpu) | |||
| 939 | * with the timer by holding the timer base lock. This also | 955 | * with the timer by holding the timer base lock. This also |
| 940 | * makes sure that a CPU on the way to stop its tick can not | 956 | * makes sure that a CPU on the way to stop its tick can not |
| 941 | * evaluate the timer wheel. | 957 | * evaluate the timer wheel. |
| 958 | * | ||
| 959 | * Spare the IPI for deferrable timers on idle targets though. | ||
| 960 | * The next busy ticks will take care of it. Except full dynticks | ||
| 961 | * require special care against races with idle_cpu(), let's deal | ||
| 962 | * with that later. | ||
| 942 | */ | 963 | */ |
| 943 | wake_up_nohz_cpu(cpu); | 964 | if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu)) |
| 965 | wake_up_nohz_cpu(cpu); | ||
| 966 | |||
| 944 | spin_unlock_irqrestore(&base->lock, flags); | 967 | spin_unlock_irqrestore(&base->lock, flags); |
| 945 | } | 968 | } |
| 946 | EXPORT_SYMBOL_GPL(add_timer_on); | 969 | EXPORT_SYMBOL_GPL(add_timer_on); |
| @@ -1146,6 +1169,10 @@ static inline void __run_timers(struct tvec_base *base) | |||
| 1146 | struct timer_list *timer; | 1169 | struct timer_list *timer; |
| 1147 | 1170 | ||
| 1148 | spin_lock_irq(&base->lock); | 1171 | spin_lock_irq(&base->lock); |
| 1172 | if (catchup_timer_jiffies(base)) { | ||
| 1173 | spin_unlock_irq(&base->lock); | ||
| 1174 | return; | ||
| 1175 | } | ||
| 1149 | while (time_after_eq(jiffies, base->timer_jiffies)) { | 1176 | while (time_after_eq(jiffies, base->timer_jiffies)) { |
| 1150 | struct list_head work_list; | 1177 | struct list_head work_list; |
| 1151 | struct list_head *head = &work_list; | 1178 | struct list_head *head = &work_list; |
| @@ -1160,7 +1187,7 @@ static inline void __run_timers(struct tvec_base *base) | |||
| 1160 | !cascade(base, &base->tv4, INDEX(2))) | 1187 | !cascade(base, &base->tv4, INDEX(2))) |
| 1161 | cascade(base, &base->tv5, INDEX(3)); | 1188 | cascade(base, &base->tv5, INDEX(3)); |
| 1162 | ++base->timer_jiffies; | 1189 | ++base->timer_jiffies; |
| 1163 | list_replace_init(base->tv1.vec + index, &work_list); | 1190 | list_replace_init(base->tv1.vec + index, head); |
| 1164 | while (!list_empty(head)) { | 1191 | while (!list_empty(head)) { |
| 1165 | void (*fn)(unsigned long); | 1192 | void (*fn)(unsigned long); |
| 1166 | unsigned long data; | 1193 | unsigned long data; |
| @@ -1523,9 +1550,8 @@ static int init_timers_cpu(int cpu) | |||
| 1523 | if (!base) | 1550 | if (!base) |
| 1524 | return -ENOMEM; | 1551 | return -ENOMEM; |
| 1525 | 1552 | ||
| 1526 | /* Make sure that tvec_base is 2 byte aligned */ | 1553 | /* Make sure tvec_base has TIMER_FLAG_MASK bits free */ |
| 1527 | if (tbase_get_deferrable(base)) { | 1554 | if (WARN_ON(base != tbase_get_base(base))) { |
| 1528 | WARN_ON(1); | ||
| 1529 | kfree(base); | 1555 | kfree(base); |
| 1530 | return -ENOMEM; | 1556 | return -ENOMEM; |
| 1531 | } | 1557 | } |
| @@ -1559,6 +1585,7 @@ static int init_timers_cpu(int cpu) | |||
| 1559 | base->timer_jiffies = jiffies; | 1585 | base->timer_jiffies = jiffies; |
| 1560 | base->next_timer = base->timer_jiffies; | 1586 | base->next_timer = base->timer_jiffies; |
| 1561 | base->active_timers = 0; | 1587 | base->active_timers = 0; |
| 1588 | base->all_timers = 0; | ||
| 1562 | return 0; | 1589 | return 0; |
| 1563 | } | 1590 | } |
| 1564 | 1591 | ||
| @@ -1648,9 +1675,9 @@ void __init init_timers(void) | |||
| 1648 | 1675 | ||
| 1649 | err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, | 1676 | err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
| 1650 | (void *)(long)smp_processor_id()); | 1677 | (void *)(long)smp_processor_id()); |
| 1651 | init_timer_stats(); | ||
| 1652 | |||
| 1653 | BUG_ON(err != NOTIFY_OK); | 1678 | BUG_ON(err != NOTIFY_OK); |
| 1679 | |||
| 1680 | init_timer_stats(); | ||
| 1654 | register_cpu_notifier(&timers_nb); | 1681 | register_cpu_notifier(&timers_nb); |
| 1655 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); | 1682 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); |
| 1656 | } | 1683 | } |
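The new all_timers counter is what makes catchup_timer_jiffies() cheap: when a base holds no timers at all, timer_jiffies can jump straight to the current jiffies instead of cascading through every missed tick. A compact simulation of that bookkeeping with counters only (no real timer wheel):

#include <stdbool.h>
#include <stdio.h>

static unsigned long jiffies = 100000;

struct tvec_base_sim {
        unsigned long timer_jiffies;
        unsigned long all_timers;
};

/* If the base is empty, jump timer_jiffies forward instead of cascading. */
static bool catchup_timer_jiffies(struct tvec_base_sim *base)
{
        if (!base->all_timers) {
                base->timer_jiffies = jiffies;
                return true;
        }
        return false;
}

static void add_timer_sim(struct tvec_base_sim *base)
{
        catchup_timer_jiffies(base);    /* don't enqueue relative to a stale point */
        base->all_timers++;
}

static void remove_timer_sim(struct tvec_base_sim *base)
{
        base->all_timers--;
        catchup_timer_jiffies(base);    /* last timer gone: resync immediately */
}

int main(void)
{
        struct tvec_base_sim base = { .timer_jiffies = 50000, .all_timers = 0 };

        add_timer_sim(&base);           /* resyncs to 100000 before counting */
        remove_timer_sim(&base);
        printf("timer_jiffies=%lu all_timers=%lu\n",
               base.timer_jiffies, base.all_timers);
        return 0;
}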
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 015f85aaca08..8639819f6cef 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
| @@ -424,6 +424,7 @@ config UPROBE_EVENT | |||
| 424 | bool "Enable uprobes-based dynamic events" | 424 | bool "Enable uprobes-based dynamic events" |
| 425 | depends on ARCH_SUPPORTS_UPROBES | 425 | depends on ARCH_SUPPORTS_UPROBES |
| 426 | depends on MMU | 426 | depends on MMU |
| 427 | depends on PERF_EVENTS | ||
| 427 | select UPROBES | 428 | select UPROBES |
| 428 | select PROBE_EVENTS | 429 | select PROBE_EVENTS |
| 429 | select TRACING | 430 | select TRACING |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index b418cb0d7242..c1bd4ada2a04 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
| @@ -702,6 +702,7 @@ void blk_trace_shutdown(struct request_queue *q) | |||
| 702 | * blk_add_trace_rq - Add a trace for a request oriented action | 702 | * blk_add_trace_rq - Add a trace for a request oriented action |
| 703 | * @q: queue the io is for | 703 | * @q: queue the io is for |
| 704 | * @rq: the source request | 704 | * @rq: the source request |
| 705 | * @nr_bytes: number of completed bytes | ||
| 705 | * @what: the action | 706 | * @what: the action |
| 706 | * | 707 | * |
| 707 | * Description: | 708 | * Description: |
| @@ -709,7 +710,7 @@ void blk_trace_shutdown(struct request_queue *q) | |||
| 709 | * | 710 | * |
| 710 | **/ | 711 | **/ |
| 711 | static void blk_add_trace_rq(struct request_queue *q, struct request *rq, | 712 | static void blk_add_trace_rq(struct request_queue *q, struct request *rq, |
| 712 | u32 what) | 713 | unsigned int nr_bytes, u32 what) |
| 713 | { | 714 | { |
| 714 | struct blk_trace *bt = q->blk_trace; | 715 | struct blk_trace *bt = q->blk_trace; |
| 715 | 716 | ||
| @@ -718,11 +719,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, | |||
| 718 | 719 | ||
| 719 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { | 720 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
| 720 | what |= BLK_TC_ACT(BLK_TC_PC); | 721 | what |= BLK_TC_ACT(BLK_TC_PC); |
| 721 | __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags, | 722 | __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags, |
| 722 | what, rq->errors, rq->cmd_len, rq->cmd); | 723 | what, rq->errors, rq->cmd_len, rq->cmd); |
| 723 | } else { | 724 | } else { |
| 724 | what |= BLK_TC_ACT(BLK_TC_FS); | 725 | what |= BLK_TC_ACT(BLK_TC_FS); |
| 725 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), | 726 | __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, |
| 726 | rq->cmd_flags, what, rq->errors, 0, NULL); | 727 | rq->cmd_flags, what, rq->errors, 0, NULL); |
| 727 | } | 728 | } |
| 728 | } | 729 | } |
| @@ -730,33 +731,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, | |||
| 730 | static void blk_add_trace_rq_abort(void *ignore, | 731 | static void blk_add_trace_rq_abort(void *ignore, |
| 731 | struct request_queue *q, struct request *rq) | 732 | struct request_queue *q, struct request *rq) |
| 732 | { | 733 | { |
| 733 | blk_add_trace_rq(q, rq, BLK_TA_ABORT); | 734 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT); |
| 734 | } | 735 | } |
| 735 | 736 | ||
| 736 | static void blk_add_trace_rq_insert(void *ignore, | 737 | static void blk_add_trace_rq_insert(void *ignore, |
| 737 | struct request_queue *q, struct request *rq) | 738 | struct request_queue *q, struct request *rq) |
| 738 | { | 739 | { |
| 739 | blk_add_trace_rq(q, rq, BLK_TA_INSERT); | 740 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT); |
| 740 | } | 741 | } |
| 741 | 742 | ||
| 742 | static void blk_add_trace_rq_issue(void *ignore, | 743 | static void blk_add_trace_rq_issue(void *ignore, |
| 743 | struct request_queue *q, struct request *rq) | 744 | struct request_queue *q, struct request *rq) |
| 744 | { | 745 | { |
| 745 | blk_add_trace_rq(q, rq, BLK_TA_ISSUE); | 746 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE); |
| 746 | } | 747 | } |
| 747 | 748 | ||
| 748 | static void blk_add_trace_rq_requeue(void *ignore, | 749 | static void blk_add_trace_rq_requeue(void *ignore, |
| 749 | struct request_queue *q, | 750 | struct request_queue *q, |
| 750 | struct request *rq) | 751 | struct request *rq) |
| 751 | { | 752 | { |
| 752 | blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); | 753 | blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE); |
| 753 | } | 754 | } |
| 754 | 755 | ||
| 755 | static void blk_add_trace_rq_complete(void *ignore, | 756 | static void blk_add_trace_rq_complete(void *ignore, |
| 756 | struct request_queue *q, | 757 | struct request_queue *q, |
| 757 | struct request *rq) | 758 | struct request *rq, |
| 759 | unsigned int nr_bytes) | ||
| 758 | { | 760 | { |
| 759 | blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); | 761 | blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE); |
| 760 | } | 762 | } |
| 761 | 763 | ||
| 762 | /** | 764 | /** |
| @@ -1427,7 +1429,8 @@ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) | |||
| 1427 | return print_one_line(iter, true); | 1429 | return print_one_line(iter, true); |
| 1428 | } | 1430 | } |
| 1429 | 1431 | ||
| 1430 | static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set) | 1432 | static int |
| 1433 | blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 1431 | { | 1434 | { |
| 1432 | /* don't output context-info for blk_classic output */ | 1435 | /* don't output context-info for blk_classic output */ |
| 1433 | if (bit == TRACE_BLK_OPT_CLASSIC) { | 1436 | if (bit == TRACE_BLK_OPT_CLASSIC) { |
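Threading nr_bytes through blk_add_trace_rq() lets the completion tracepoint report how many bytes actually finished, while the other hooks keep logging the full request size. A reduced sketch of that parameter flow (invented types; only the call pattern is the point):

#include <stdio.h>

struct request_sim { unsigned int total_bytes; };

static void trace_rq(const char *what, unsigned int nr_bytes)
{
        printf("%s: %u bytes\n", what, nr_bytes);
}

/* Issue/insert/requeue still log the whole request... */
static void trace_rq_issue(struct request_sim *rq)
{
        trace_rq("issue", rq->total_bytes);
}

/* ...but completion logs only what the driver reported as done. */
static void trace_rq_complete(struct request_sim *rq, unsigned int nr_bytes)
{
        (void)rq;
        trace_rq("complete", nr_bytes);
}

int main(void)
{
        struct request_sim rq = { .total_bytes = 8192 };

        trace_rq_issue(&rq);
        trace_rq_complete(&rq, 4096);   /* partial completion */
        return 0;
}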
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index cd7f76d1eb86..1fd4b9479210 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -237,14 +237,13 @@ static int control_ops_alloc(struct ftrace_ops *ops) | |||
| 237 | return 0; | 237 | return 0; |
| 238 | } | 238 | } |
| 239 | 239 | ||
| 240 | static void control_ops_free(struct ftrace_ops *ops) | ||
| 241 | { | ||
| 242 | free_percpu(ops->disabled); | ||
| 243 | } | ||
| 244 | |||
| 245 | static void update_global_ops(void) | 240 | static void update_global_ops(void) |
| 246 | { | 241 | { |
| 247 | ftrace_func_t func; | 242 | ftrace_func_t func = ftrace_global_list_func; |
| 243 | void *private = NULL; | ||
| 244 | |||
| 245 | /* The list has its own recursion protection. */ | ||
| 246 | global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; | ||
| 248 | 247 | ||
| 249 | /* | 248 | /* |
| 250 | * If there's only one function registered, then call that | 249 | * If there's only one function registered, then call that |
| @@ -254,23 +253,17 @@ static void update_global_ops(void) | |||
| 254 | if (ftrace_global_list == &ftrace_list_end || | 253 | if (ftrace_global_list == &ftrace_list_end || |
| 255 | ftrace_global_list->next == &ftrace_list_end) { | 254 | ftrace_global_list->next == &ftrace_list_end) { |
| 256 | func = ftrace_global_list->func; | 255 | func = ftrace_global_list->func; |
| 256 | private = ftrace_global_list->private; | ||
| 257 | /* | 257 | /* |
| 258 | * As we are calling the function directly. | 258 | * As we are calling the function directly. |
| 259 | * If it does not have recursion protection, | 259 | * If it does not have recursion protection, |
| 260 | * the function_trace_op needs to be updated | 260 | * the function_trace_op needs to be updated |
| 261 | * accordingly. | 261 | * accordingly. |
| 262 | */ | 262 | */ |
| 263 | if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) | 263 | if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)) |
| 264 | global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; | ||
| 265 | else | ||
| 266 | global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE; | 264 | global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE; |
| 267 | } else { | ||
| 268 | func = ftrace_global_list_func; | ||
| 269 | /* The list has its own recursion protection. */ | ||
| 270 | global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; | ||
| 271 | } | 265 | } |
| 272 | 266 | ||
| 273 | |||
| 274 | /* If we filter on pids, update to use the pid function */ | 267 | /* If we filter on pids, update to use the pid function */ |
| 275 | if (!list_empty(&ftrace_pids)) { | 268 | if (!list_empty(&ftrace_pids)) { |
| 276 | set_ftrace_pid_function(func); | 269 | set_ftrace_pid_function(func); |
| @@ -278,6 +271,7 @@ static void update_global_ops(void) | |||
| 278 | } | 271 | } |
| 279 | 272 | ||
| 280 | global_ops.func = func; | 273 | global_ops.func = func; |
| 274 | global_ops.private = private; | ||
| 281 | } | 275 | } |
| 282 | 276 | ||
| 283 | static void ftrace_sync(struct work_struct *work) | 277 | static void ftrace_sync(struct work_struct *work) |
| @@ -437,6 +431,9 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list, | |||
| 437 | 431 | ||
| 438 | static int __register_ftrace_function(struct ftrace_ops *ops) | 432 | static int __register_ftrace_function(struct ftrace_ops *ops) |
| 439 | { | 433 | { |
| 434 | if (ops->flags & FTRACE_OPS_FL_DELETED) | ||
| 435 | return -EINVAL; | ||
| 436 | |||
| 440 | if (FTRACE_WARN_ON(ops == &global_ops)) | 437 | if (FTRACE_WARN_ON(ops == &global_ops)) |
| 441 | return -EINVAL; | 438 | return -EINVAL; |
| 442 | 439 | ||
| @@ -1172,8 +1169,6 @@ struct ftrace_page { | |||
| 1172 | int size; | 1169 | int size; |
| 1173 | }; | 1170 | }; |
| 1174 | 1171 | ||
| 1175 | static struct ftrace_page *ftrace_new_pgs; | ||
| 1176 | |||
| 1177 | #define ENTRY_SIZE sizeof(struct dyn_ftrace) | 1172 | #define ENTRY_SIZE sizeof(struct dyn_ftrace) |
| 1178 | #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) | 1173 | #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) |
| 1179 | 1174 | ||
| @@ -1560,7 +1555,7 @@ unsigned long ftrace_location(unsigned long ip) | |||
| 1560 | * the function tracer. It checks the ftrace internal tables to | 1555 | * the function tracer. It checks the ftrace internal tables to |
| 1561 | * determine if the address belongs or not. | 1556 | * determine if the address belongs or not. |
| 1562 | */ | 1557 | */ |
| 1563 | int ftrace_text_reserved(void *start, void *end) | 1558 | int ftrace_text_reserved(const void *start, const void *end) |
| 1564 | { | 1559 | { |
| 1565 | unsigned long ret; | 1560 | unsigned long ret; |
| 1566 | 1561 | ||
| @@ -1994,6 +1989,7 @@ int __weak ftrace_arch_code_modify_post_process(void) | |||
| 1994 | void ftrace_modify_all_code(int command) | 1989 | void ftrace_modify_all_code(int command) |
| 1995 | { | 1990 | { |
| 1996 | int update = command & FTRACE_UPDATE_TRACE_FUNC; | 1991 | int update = command & FTRACE_UPDATE_TRACE_FUNC; |
| 1992 | int err = 0; | ||
| 1997 | 1993 | ||
| 1998 | /* | 1994 | /* |
| 1999 | * If the ftrace_caller calls a ftrace_ops func directly, | 1995 | * If the ftrace_caller calls a ftrace_ops func directly, |
| @@ -2005,8 +2001,11 @@ void ftrace_modify_all_code(int command) | |||
| 2005 | * to make sure the ops are having the right functions | 2001 | * to make sure the ops are having the right functions |
| 2006 | * traced. | 2002 | * traced. |
| 2007 | */ | 2003 | */ |
| 2008 | if (update) | 2004 | if (update) { |
| 2009 | ftrace_update_ftrace_func(ftrace_ops_list_func); | 2005 | err = ftrace_update_ftrace_func(ftrace_ops_list_func); |
| 2006 | if (FTRACE_WARN_ON(err)) | ||
| 2007 | return; | ||
| 2008 | } | ||
| 2010 | 2009 | ||
| 2011 | if (command & FTRACE_UPDATE_CALLS) | 2010 | if (command & FTRACE_UPDATE_CALLS) |
| 2012 | ftrace_replace_code(1); | 2011 | ftrace_replace_code(1); |
| @@ -2019,13 +2018,16 @@ void ftrace_modify_all_code(int command) | |||
| 2019 | /* If irqs are disabled, we are in stop machine */ | 2018 | /* If irqs are disabled, we are in stop machine */ |
| 2020 | if (!irqs_disabled()) | 2019 | if (!irqs_disabled()) |
| 2021 | smp_call_function(ftrace_sync_ipi, NULL, 1); | 2020 | smp_call_function(ftrace_sync_ipi, NULL, 1); |
| 2022 | ftrace_update_ftrace_func(ftrace_trace_function); | 2021 | err = ftrace_update_ftrace_func(ftrace_trace_function); |
| 2022 | if (FTRACE_WARN_ON(err)) | ||
| 2023 | return; | ||
| 2023 | } | 2024 | } |
| 2024 | 2025 | ||
| 2025 | if (command & FTRACE_START_FUNC_RET) | 2026 | if (command & FTRACE_START_FUNC_RET) |
| 2026 | ftrace_enable_ftrace_graph_caller(); | 2027 | err = ftrace_enable_ftrace_graph_caller(); |
| 2027 | else if (command & FTRACE_STOP_FUNC_RET) | 2028 | else if (command & FTRACE_STOP_FUNC_RET) |
| 2028 | ftrace_disable_ftrace_graph_caller(); | 2029 | err = ftrace_disable_ftrace_graph_caller(); |
| 2030 | FTRACE_WARN_ON(err); | ||
| 2029 | } | 2031 | } |
| 2030 | 2032 | ||
| 2031 | static int __ftrace_modify_code(void *data) | 2033 | static int __ftrace_modify_code(void *data) |
| @@ -2093,6 +2095,11 @@ static ftrace_func_t saved_ftrace_func; | |||
| 2093 | static int ftrace_start_up; | 2095 | static int ftrace_start_up; |
| 2094 | static int global_start_up; | 2096 | static int global_start_up; |
| 2095 | 2097 | ||
| 2098 | static void control_ops_free(struct ftrace_ops *ops) | ||
| 2099 | { | ||
| 2100 | free_percpu(ops->disabled); | ||
| 2101 | } | ||
| 2102 | |||
| 2096 | static void ftrace_startup_enable(int command) | 2103 | static void ftrace_startup_enable(int command) |
| 2097 | { | 2104 | { |
| 2098 | if (saved_ftrace_func != ftrace_trace_function) { | 2105 | if (saved_ftrace_func != ftrace_trace_function) { |
| @@ -2244,7 +2251,6 @@ static void ftrace_shutdown_sysctl(void) | |||
| 2244 | } | 2251 | } |
| 2245 | 2252 | ||
| 2246 | static cycle_t ftrace_update_time; | 2253 | static cycle_t ftrace_update_time; |
| 2247 | static unsigned long ftrace_update_cnt; | ||
| 2248 | unsigned long ftrace_update_tot_cnt; | 2254 | unsigned long ftrace_update_tot_cnt; |
| 2249 | 2255 | ||
| 2250 | static inline int ops_traces_mod(struct ftrace_ops *ops) | 2256 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
| @@ -2300,11 +2306,12 @@ static int referenced_filters(struct dyn_ftrace *rec) | |||
| 2300 | return cnt; | 2306 | return cnt; |
| 2301 | } | 2307 | } |
| 2302 | 2308 | ||
| 2303 | static int ftrace_update_code(struct module *mod) | 2309 | static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) |
| 2304 | { | 2310 | { |
| 2305 | struct ftrace_page *pg; | 2311 | struct ftrace_page *pg; |
| 2306 | struct dyn_ftrace *p; | 2312 | struct dyn_ftrace *p; |
| 2307 | cycle_t start, stop; | 2313 | cycle_t start, stop; |
| 2314 | unsigned long update_cnt = 0; | ||
| 2308 | unsigned long ref = 0; | 2315 | unsigned long ref = 0; |
| 2309 | bool test = false; | 2316 | bool test = false; |
| 2310 | int i; | 2317 | int i; |
| @@ -2330,9 +2337,8 @@ static int ftrace_update_code(struct module *mod) | |||
| 2330 | } | 2337 | } |
| 2331 | 2338 | ||
| 2332 | start = ftrace_now(raw_smp_processor_id()); | 2339 | start = ftrace_now(raw_smp_processor_id()); |
| 2333 | ftrace_update_cnt = 0; | ||
| 2334 | 2340 | ||
| 2335 | for (pg = ftrace_new_pgs; pg; pg = pg->next) { | 2341 | for (pg = new_pgs; pg; pg = pg->next) { |
| 2336 | 2342 | ||
| 2337 | for (i = 0; i < pg->index; i++) { | 2343 | for (i = 0; i < pg->index; i++) { |
| 2338 | int cnt = ref; | 2344 | int cnt = ref; |
| @@ -2353,7 +2359,7 @@ static int ftrace_update_code(struct module *mod) | |||
| 2353 | if (!ftrace_code_disable(mod, p)) | 2359 | if (!ftrace_code_disable(mod, p)) |
| 2354 | break; | 2360 | break; |
| 2355 | 2361 | ||
| 2356 | ftrace_update_cnt++; | 2362 | update_cnt++; |
| 2357 | 2363 | ||
| 2358 | /* | 2364 | /* |
| 2359 | * If the tracing is enabled, go ahead and enable the record. | 2365 | * If the tracing is enabled, go ahead and enable the record. |
| @@ -2372,11 +2378,9 @@ static int ftrace_update_code(struct module *mod) | |||
| 2372 | } | 2378 | } |
| 2373 | } | 2379 | } |
| 2374 | 2380 | ||
| 2375 | ftrace_new_pgs = NULL; | ||
| 2376 | |||
| 2377 | stop = ftrace_now(raw_smp_processor_id()); | 2381 | stop = ftrace_now(raw_smp_processor_id()); |
| 2378 | ftrace_update_time = stop - start; | 2382 | ftrace_update_time = stop - start; |
| 2379 | ftrace_update_tot_cnt += ftrace_update_cnt; | 2383 | ftrace_update_tot_cnt += update_cnt; |
| 2380 | 2384 | ||
| 2381 | return 0; | 2385 | return 0; |
| 2382 | } | 2386 | } |
| @@ -2468,22 +2472,6 @@ ftrace_allocate_pages(unsigned long num_to_init) | |||
| 2468 | return NULL; | 2472 | return NULL; |
| 2469 | } | 2473 | } |
| 2470 | 2474 | ||
| 2471 | static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) | ||
| 2472 | { | ||
| 2473 | int cnt; | ||
| 2474 | |||
| 2475 | if (!num_to_init) { | ||
| 2476 | pr_info("ftrace: No functions to be traced?\n"); | ||
| 2477 | return -1; | ||
| 2478 | } | ||
| 2479 | |||
| 2480 | cnt = num_to_init / ENTRIES_PER_PAGE; | ||
| 2481 | pr_info("ftrace: allocating %ld entries in %d pages\n", | ||
| 2482 | num_to_init, cnt + 1); | ||
| 2483 | |||
| 2484 | return 0; | ||
| 2485 | } | ||
| 2486 | |||
| 2487 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 2475 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
| 2488 | 2476 | ||
| 2489 | struct ftrace_iterator { | 2477 | struct ftrace_iterator { |
| @@ -2871,7 +2859,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
| 2871 | static int | 2859 | static int |
| 2872 | ftrace_filter_open(struct inode *inode, struct file *file) | 2860 | ftrace_filter_open(struct inode *inode, struct file *file) |
| 2873 | { | 2861 | { |
| 2874 | return ftrace_regex_open(&global_ops, | 2862 | struct ftrace_ops *ops = inode->i_private; |
| 2863 | |||
| 2864 | return ftrace_regex_open(ops, | ||
| 2875 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, | 2865 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, |
| 2876 | inode, file); | 2866 | inode, file); |
| 2877 | } | 2867 | } |
| @@ -2879,7 +2869,9 @@ ftrace_filter_open(struct inode *inode, struct file *file) | |||
| 2879 | static int | 2869 | static int |
| 2880 | ftrace_notrace_open(struct inode *inode, struct file *file) | 2870 | ftrace_notrace_open(struct inode *inode, struct file *file) |
| 2881 | { | 2871 | { |
| 2882 | return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE, | 2872 | struct ftrace_ops *ops = inode->i_private; |
| 2873 | |||
| 2874 | return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, | ||
| 2883 | inode, file); | 2875 | inode, file); |
| 2884 | } | 2876 | } |
| 2885 | 2877 | ||
| @@ -4109,6 +4101,36 @@ static const struct file_operations ftrace_graph_notrace_fops = { | |||
| 4109 | }; | 4101 | }; |
| 4110 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 4102 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 4111 | 4103 | ||
| 4104 | void ftrace_create_filter_files(struct ftrace_ops *ops, | ||
| 4105 | struct dentry *parent) | ||
| 4106 | { | ||
| 4107 | |||
| 4108 | trace_create_file("set_ftrace_filter", 0644, parent, | ||
| 4109 | ops, &ftrace_filter_fops); | ||
| 4110 | |||
| 4111 | trace_create_file("set_ftrace_notrace", 0644, parent, | ||
| 4112 | ops, &ftrace_notrace_fops); | ||
| 4113 | } | ||
| 4114 | |||
| 4115 | /* | ||
| 4116 | * The name "destroy_filter_files" is really a misnomer. Although | ||
| 4117 | * in the future, it may actually delete the files, this is | ||
| 4118 | * really intended to make sure the ops passed in are disabled | ||
| 4119 | * and that when this function returns, the caller is free to | ||
| 4120 | * free the ops. | ||
| 4121 | * | ||
| 4122 | * The "destroy" name is only to match the "create" name that this | ||
| 4123 | * should be paired with. | ||
| 4124 | */ | ||
| 4125 | void ftrace_destroy_filter_files(struct ftrace_ops *ops) | ||
| 4126 | { | ||
| 4127 | mutex_lock(&ftrace_lock); | ||
| 4128 | if (ops->flags & FTRACE_OPS_FL_ENABLED) | ||
| 4129 | ftrace_shutdown(ops, 0); | ||
| 4130 | ops->flags |= FTRACE_OPS_FL_DELETED; | ||
| 4131 | mutex_unlock(&ftrace_lock); | ||
| 4132 | } | ||
| 4133 | |||
| 4112 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | 4134 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) |
| 4113 | { | 4135 | { |
| 4114 | 4136 | ||
| @@ -4118,11 +4140,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | |||
| 4118 | trace_create_file("enabled_functions", 0444, | 4140 | trace_create_file("enabled_functions", 0444, |
| 4119 | d_tracer, NULL, &ftrace_enabled_fops); | 4141 | d_tracer, NULL, &ftrace_enabled_fops); |
| 4120 | 4142 | ||
| 4121 | trace_create_file("set_ftrace_filter", 0644, d_tracer, | 4143 | ftrace_create_filter_files(&global_ops, d_tracer); |
| 4122 | NULL, &ftrace_filter_fops); | ||
| 4123 | |||
| 4124 | trace_create_file("set_ftrace_notrace", 0644, d_tracer, | ||
| 4125 | NULL, &ftrace_notrace_fops); | ||
| 4126 | 4144 | ||
| 4127 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 4145 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 4128 | trace_create_file("set_graph_function", 0444, d_tracer, | 4146 | trace_create_file("set_graph_function", 0444, d_tracer, |
| @@ -4238,9 +4256,6 @@ static int ftrace_process_locs(struct module *mod, | |||
| 4238 | /* Assign the last page to ftrace_pages */ | 4256 | /* Assign the last page to ftrace_pages */ |
| 4239 | ftrace_pages = pg; | 4257 | ftrace_pages = pg; |
| 4240 | 4258 | ||
| 4241 | /* These new locations need to be initialized */ | ||
| 4242 | ftrace_new_pgs = start_pg; | ||
| 4243 | |||
| 4244 | /* | 4259 | /* |
| 4245 | * We only need to disable interrupts on start up | 4260 | * We only need to disable interrupts on start up |
| 4246 | * because we are modifying code that an interrupt | 4261 | * because we are modifying code that an interrupt |
| @@ -4251,7 +4266,7 @@ static int ftrace_process_locs(struct module *mod, | |||
| 4251 | */ | 4266 | */ |
| 4252 | if (!mod) | 4267 | if (!mod) |
| 4253 | local_irq_save(flags); | 4268 | local_irq_save(flags); |
| 4254 | ftrace_update_code(mod); | 4269 | ftrace_update_code(mod, start_pg); |
| 4255 | if (!mod) | 4270 | if (!mod) |
| 4256 | local_irq_restore(flags); | 4271 | local_irq_restore(flags); |
| 4257 | ret = 0; | 4272 | ret = 0; |
| @@ -4360,30 +4375,27 @@ struct notifier_block ftrace_module_exit_nb = { | |||
| 4360 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ | 4375 | .priority = INT_MIN, /* Run after anything that can remove kprobes */ |
| 4361 | }; | 4376 | }; |
| 4362 | 4377 | ||
| 4363 | extern unsigned long __start_mcount_loc[]; | ||
| 4364 | extern unsigned long __stop_mcount_loc[]; | ||
| 4365 | |||
| 4366 | void __init ftrace_init(void) | 4378 | void __init ftrace_init(void) |
| 4367 | { | 4379 | { |
| 4368 | unsigned long count, addr, flags; | 4380 | extern unsigned long __start_mcount_loc[]; |
| 4381 | extern unsigned long __stop_mcount_loc[]; | ||
| 4382 | unsigned long count, flags; | ||
| 4369 | int ret; | 4383 | int ret; |
| 4370 | 4384 | ||
| 4371 | /* Keep the ftrace pointer to the stub */ | ||
| 4372 | addr = (unsigned long)ftrace_stub; | ||
| 4373 | |||
| 4374 | local_irq_save(flags); | 4385 | local_irq_save(flags); |
| 4375 | ftrace_dyn_arch_init(&addr); | 4386 | ret = ftrace_dyn_arch_init(); |
| 4376 | local_irq_restore(flags); | 4387 | local_irq_restore(flags); |
| 4377 | 4388 | if (ret) | |
| 4378 | /* ftrace_dyn_arch_init places the return code in addr */ | ||
| 4379 | if (addr) | ||
| 4380 | goto failed; | 4389 | goto failed; |
| 4381 | 4390 | ||
| 4382 | count = __stop_mcount_loc - __start_mcount_loc; | 4391 | count = __stop_mcount_loc - __start_mcount_loc; |
| 4383 | 4392 | if (!count) { | |
| 4384 | ret = ftrace_dyn_table_alloc(count); | 4393 | pr_info("ftrace: No functions to be traced?\n"); |
| 4385 | if (ret) | ||
| 4386 | goto failed; | 4394 | goto failed; |
| 4395 | } | ||
| 4396 | |||
| 4397 | pr_info("ftrace: allocating %ld entries in %ld pages\n", | ||
| 4398 | count, count / ENTRIES_PER_PAGE + 1); | ||
| 4387 | 4399 | ||
| 4388 | last_ftrace_enabled = ftrace_enabled = 1; | 4400 | last_ftrace_enabled = ftrace_enabled = 1; |
| 4389 | 4401 | ||
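
With this change, an architecture's ftrace_dyn_arch_init() reports success or failure through its return value instead of writing a code into the old addr argument. A sketch of what an implementation now reduces to (illustrative only; a real architecture would do its mcount/fentry patching preparation here):

    /* arch/<arch>/kernel/ftrace.c -- illustrative sketch, not a real port */
    int __init ftrace_dyn_arch_init(void)
    {
            /* arch-specific setup for runtime code patching, if any */
            return 0;       /* any non-zero value makes ftrace_init() bail out */
    }
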
| @@ -4431,7 +4443,13 @@ static inline void ftrace_startup_enable(int command) { } | |||
| 4431 | (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ | 4443 | (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ |
| 4432 | ___ret; \ | 4444 | ___ret; \ |
| 4433 | }) | 4445 | }) |
| 4434 | # define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops) | 4446 | # define ftrace_shutdown(ops, command) \ |
| 4447 | ({ \ | ||
| 4448 | int ___ret = __unregister_ftrace_function(ops); \ | ||
| 4449 | if (!___ret) \ | ||
| 4450 | (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ | ||
| 4451 | ___ret; \ | ||
| 4452 | }) | ||
| 4435 | 4453 | ||
| 4436 | # define ftrace_startup_sysctl() do { } while (0) | 4454 | # define ftrace_startup_sysctl() do { } while (0) |
| 4437 | # define ftrace_shutdown_sysctl() do { } while (0) | 4455 | # define ftrace_shutdown_sysctl() do { } while (0) |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index fc4da2d97f9b..c634868c2921 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -1301,7 +1301,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, | |||
| 1301 | * In that off case, we need to allocate for all possible cpus. | 1301 | * In that off case, we need to allocate for all possible cpus. |
| 1302 | */ | 1302 | */ |
| 1303 | #ifdef CONFIG_HOTPLUG_CPU | 1303 | #ifdef CONFIG_HOTPLUG_CPU |
| 1304 | get_online_cpus(); | 1304 | cpu_notifier_register_begin(); |
| 1305 | cpumask_copy(buffer->cpumask, cpu_online_mask); | 1305 | cpumask_copy(buffer->cpumask, cpu_online_mask); |
| 1306 | #else | 1306 | #else |
| 1307 | cpumask_copy(buffer->cpumask, cpu_possible_mask); | 1307 | cpumask_copy(buffer->cpumask, cpu_possible_mask); |
| @@ -1324,10 +1324,10 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, | |||
| 1324 | #ifdef CONFIG_HOTPLUG_CPU | 1324 | #ifdef CONFIG_HOTPLUG_CPU |
| 1325 | buffer->cpu_notify.notifier_call = rb_cpu_notify; | 1325 | buffer->cpu_notify.notifier_call = rb_cpu_notify; |
| 1326 | buffer->cpu_notify.priority = 0; | 1326 | buffer->cpu_notify.priority = 0; |
| 1327 | register_cpu_notifier(&buffer->cpu_notify); | 1327 | __register_cpu_notifier(&buffer->cpu_notify); |
| 1328 | cpu_notifier_register_done(); | ||
| 1328 | #endif | 1329 | #endif |
| 1329 | 1330 | ||
| 1330 | put_online_cpus(); | ||
| 1331 | mutex_init(&buffer->mutex); | 1331 | mutex_init(&buffer->mutex); |
| 1332 | 1332 | ||
| 1333 | return buffer; | 1333 | return buffer; |
| @@ -1341,7 +1341,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, | |||
| 1341 | 1341 | ||
| 1342 | fail_free_cpumask: | 1342 | fail_free_cpumask: |
| 1343 | free_cpumask_var(buffer->cpumask); | 1343 | free_cpumask_var(buffer->cpumask); |
| 1344 | put_online_cpus(); | 1344 | #ifdef CONFIG_HOTPLUG_CPU |
| 1345 | cpu_notifier_register_done(); | ||
| 1346 | #endif | ||
| 1345 | 1347 | ||
| 1346 | fail_free_buffer: | 1348 | fail_free_buffer: |
| 1347 | kfree(buffer); | 1349 | kfree(buffer); |
| @@ -1358,16 +1360,17 @@ ring_buffer_free(struct ring_buffer *buffer) | |||
| 1358 | { | 1360 | { |
| 1359 | int cpu; | 1361 | int cpu; |
| 1360 | 1362 | ||
| 1361 | get_online_cpus(); | ||
| 1362 | |||
| 1363 | #ifdef CONFIG_HOTPLUG_CPU | 1363 | #ifdef CONFIG_HOTPLUG_CPU |
| 1364 | unregister_cpu_notifier(&buffer->cpu_notify); | 1364 | cpu_notifier_register_begin(); |
| 1365 | __unregister_cpu_notifier(&buffer->cpu_notify); | ||
| 1365 | #endif | 1366 | #endif |
| 1366 | 1367 | ||
| 1367 | for_each_buffer_cpu(buffer, cpu) | 1368 | for_each_buffer_cpu(buffer, cpu) |
| 1368 | rb_free_cpu_buffer(buffer->buffers[cpu]); | 1369 | rb_free_cpu_buffer(buffer->buffers[cpu]); |
| 1369 | 1370 | ||
| 1370 | put_online_cpus(); | 1371 | #ifdef CONFIG_HOTPLUG_CPU |
| 1372 | cpu_notifier_register_done(); | ||
| 1373 | #endif | ||
| 1371 | 1374 | ||
| 1372 | kfree(buffer->buffers); | 1375 | kfree(buffer->buffers); |
| 1373 | free_cpumask_var(buffer->cpumask); | 1376 | free_cpumask_var(buffer->cpumask); |
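
The ring buffer conversion above follows the CPU notifier registration protocol that replaces get_online_cpus() for this purpose: cpu_notifier_register_begin()/cpu_notifier_register_done() bracket both the sampling of the online mask and the __register_cpu_notifier() call, so no hotplug event can slip in between the two. The same pattern in isolation, assuming a hypothetical my_cpu_notify callback:

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static int my_cpu_notify(struct notifier_block *self,
                             unsigned long action, void *hcpu)
    {
            /* react to CPU_UP_PREPARE, CPU_DEAD, ... as needed */
            return NOTIFY_OK;
    }

    static struct notifier_block my_nb = {
            .notifier_call = my_cpu_notify,
    };

    static void my_register(void)
    {
            cpu_notifier_register_begin();
            /* sample CPU state and register atomically w.r.t. hotplug */
            __register_cpu_notifier(&my_nb);
            cpu_notifier_register_done();
    }
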
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 24c1f2382557..9be67c5e5b0f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -73,7 +73,8 @@ static struct tracer_flags dummy_tracer_flags = { | |||
| 73 | .opts = dummy_tracer_opt | 73 | .opts = dummy_tracer_opt |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | static int dummy_set_flag(u32 old_flags, u32 bit, int set) | 76 | static int |
| 77 | dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 77 | { | 78 | { |
| 78 | return 0; | 79 | return 0; |
| 79 | } | 80 | } |
| @@ -118,7 +119,7 @@ enum ftrace_dump_mode ftrace_dump_on_oops; | |||
| 118 | /* When set, tracing will stop when a WARN*() is hit */ | 119 | /* When set, tracing will stop when a WARN*() is hit */ |
| 119 | int __disable_trace_on_warning; | 120 | int __disable_trace_on_warning; |
| 120 | 121 | ||
| 121 | static int tracing_set_tracer(const char *buf); | 122 | static int tracing_set_tracer(struct trace_array *tr, const char *buf); |
| 122 | 123 | ||
| 123 | #define MAX_TRACER_SIZE 100 | 124 | #define MAX_TRACER_SIZE 100 |
| 124 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; | 125 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; |
| @@ -180,6 +181,17 @@ static int __init set_trace_boot_options(char *str) | |||
| 180 | } | 181 | } |
| 181 | __setup("trace_options=", set_trace_boot_options); | 182 | __setup("trace_options=", set_trace_boot_options); |
| 182 | 183 | ||
| 184 | static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; | ||
| 185 | static char *trace_boot_clock __initdata; | ||
| 186 | |||
| 187 | static int __init set_trace_boot_clock(char *str) | ||
| 188 | { | ||
| 189 | strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); | ||
| 190 | trace_boot_clock = trace_boot_clock_buf; | ||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | __setup("trace_clock=", set_trace_boot_clock); | ||
| 194 | |||
| 183 | 195 | ||
| 184 | unsigned long long ns2usecs(cycle_t nsec) | 196 | unsigned long long ns2usecs(cycle_t nsec) |
| 185 | { | 197 | { |
| @@ -1230,7 +1242,7 @@ int register_tracer(struct tracer *type) | |||
| 1230 | 1242 | ||
| 1231 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); | 1243 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); |
| 1232 | /* Do we want this tracer to start on bootup? */ | 1244 | /* Do we want this tracer to start on bootup? */ |
| 1233 | tracing_set_tracer(type->name); | 1245 | tracing_set_tracer(&global_trace, type->name); |
| 1234 | default_bootup_tracer = NULL; | 1246 | default_bootup_tracer = NULL; |
| 1235 | /* disable other selftests, since this will break it. */ | 1247 | /* disable other selftests, since this will break it. */ |
| 1236 | tracing_selftest_disabled = true; | 1248 | tracing_selftest_disabled = true; |
| @@ -3137,27 +3149,52 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
| 3137 | return ret; | 3149 | return ret; |
| 3138 | } | 3150 | } |
| 3139 | 3151 | ||
| 3152 | /* | ||
| 3153 | * Some tracers are not suitable for instance buffers. | ||
| 3154 | * A tracer is always available for the global (top level) array, | ||
| 3155 | * and for an instance only if it explicitly allows it. | ||
| 3156 | */ | ||
| 3157 | static bool | ||
| 3158 | trace_ok_for_array(struct tracer *t, struct trace_array *tr) | ||
| 3159 | { | ||
| 3160 | return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; | ||
| 3161 | } | ||
| 3162 | |||
| 3163 | /* Find the next tracer that this trace array may use */ | ||
| 3164 | static struct tracer * | ||
| 3165 | get_tracer_for_array(struct trace_array *tr, struct tracer *t) | ||
| 3166 | { | ||
| 3167 | while (t && !trace_ok_for_array(t, tr)) | ||
| 3168 | t = t->next; | ||
| 3169 | |||
| 3170 | return t; | ||
| 3171 | } | ||
| 3172 | |||
| 3140 | static void * | 3173 | static void * |
| 3141 | t_next(struct seq_file *m, void *v, loff_t *pos) | 3174 | t_next(struct seq_file *m, void *v, loff_t *pos) |
| 3142 | { | 3175 | { |
| 3176 | struct trace_array *tr = m->private; | ||
| 3143 | struct tracer *t = v; | 3177 | struct tracer *t = v; |
| 3144 | 3178 | ||
| 3145 | (*pos)++; | 3179 | (*pos)++; |
| 3146 | 3180 | ||
| 3147 | if (t) | 3181 | if (t) |
| 3148 | t = t->next; | 3182 | t = get_tracer_for_array(tr, t->next); |
| 3149 | 3183 | ||
| 3150 | return t; | 3184 | return t; |
| 3151 | } | 3185 | } |
| 3152 | 3186 | ||
| 3153 | static void *t_start(struct seq_file *m, loff_t *pos) | 3187 | static void *t_start(struct seq_file *m, loff_t *pos) |
| 3154 | { | 3188 | { |
| 3189 | struct trace_array *tr = m->private; | ||
| 3155 | struct tracer *t; | 3190 | struct tracer *t; |
| 3156 | loff_t l = 0; | 3191 | loff_t l = 0; |
| 3157 | 3192 | ||
| 3158 | mutex_lock(&trace_types_lock); | 3193 | mutex_lock(&trace_types_lock); |
| 3159 | for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) | 3194 | |
| 3160 | ; | 3195 | t = get_tracer_for_array(tr, trace_types); |
| 3196 | for (; t && l < *pos; t = t_next(m, t, &l)) | ||
| 3197 | ; | ||
| 3161 | 3198 | ||
| 3162 | return t; | 3199 | return t; |
| 3163 | } | 3200 | } |
| @@ -3192,10 +3229,21 @@ static const struct seq_operations show_traces_seq_ops = { | |||
| 3192 | 3229 | ||
| 3193 | static int show_traces_open(struct inode *inode, struct file *file) | 3230 | static int show_traces_open(struct inode *inode, struct file *file) |
| 3194 | { | 3231 | { |
| 3232 | struct trace_array *tr = inode->i_private; | ||
| 3233 | struct seq_file *m; | ||
| 3234 | int ret; | ||
| 3235 | |||
| 3195 | if (tracing_disabled) | 3236 | if (tracing_disabled) |
| 3196 | return -ENODEV; | 3237 | return -ENODEV; |
| 3197 | 3238 | ||
| 3198 | return seq_open(file, &show_traces_seq_ops); | 3239 | ret = seq_open(file, &show_traces_seq_ops); |
| 3240 | if (ret) | ||
| 3241 | return ret; | ||
| 3242 | |||
| 3243 | m = file->private_data; | ||
| 3244 | m->private = tr; | ||
| 3245 | |||
| 3246 | return 0; | ||
| 3199 | } | 3247 | } |
| 3200 | 3248 | ||
| 3201 | static ssize_t | 3249 | static ssize_t |
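
show_traces_open() now stashes the trace_array in the seq_file's private pointer so that t_start()/t_next() know which instance they are listing. That is the standard seq_file idiom for carrying per-open context; stripped down to a sketch (my_open and my_seq_ops are placeholder names):

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    static const struct seq_operations my_seq_ops;  /* .start/.next/.stop/.show omitted from this sketch */

    static int my_open(struct inode *inode, struct file *file)
    {
            struct seq_file *m;
            int ret;

            ret = seq_open(file, &my_seq_ops);
            if (ret)
                    return ret;

            /* seq_open() put the seq_file in ->private_data; tag it with our object */
            m = file->private_data;
            m->private = inode->i_private;

            return 0;
    }
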
| @@ -3355,13 +3403,14 @@ static int tracing_trace_options_show(struct seq_file *m, void *v) | |||
| 3355 | return 0; | 3403 | return 0; |
| 3356 | } | 3404 | } |
| 3357 | 3405 | ||
| 3358 | static int __set_tracer_option(struct tracer *trace, | 3406 | static int __set_tracer_option(struct trace_array *tr, |
| 3359 | struct tracer_flags *tracer_flags, | 3407 | struct tracer_flags *tracer_flags, |
| 3360 | struct tracer_opt *opts, int neg) | 3408 | struct tracer_opt *opts, int neg) |
| 3361 | { | 3409 | { |
| 3410 | struct tracer *trace = tr->current_trace; | ||
| 3362 | int ret; | 3411 | int ret; |
| 3363 | 3412 | ||
| 3364 | ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); | 3413 | ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); |
| 3365 | if (ret) | 3414 | if (ret) |
| 3366 | return ret; | 3415 | return ret; |
| 3367 | 3416 | ||
| @@ -3373,8 +3422,9 @@ static int __set_tracer_option(struct tracer *trace, | |||
| 3373 | } | 3422 | } |
| 3374 | 3423 | ||
| 3375 | /* Try to assign a tracer specific option */ | 3424 | /* Try to assign a tracer specific option */ |
| 3376 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | 3425 | static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) |
| 3377 | { | 3426 | { |
| 3427 | struct tracer *trace = tr->current_trace; | ||
| 3378 | struct tracer_flags *tracer_flags = trace->flags; | 3428 | struct tracer_flags *tracer_flags = trace->flags; |
| 3379 | struct tracer_opt *opts = NULL; | 3429 | struct tracer_opt *opts = NULL; |
| 3380 | int i; | 3430 | int i; |
| @@ -3383,8 +3433,7 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
| 3383 | opts = &tracer_flags->opts[i]; | 3433 | opts = &tracer_flags->opts[i]; |
| 3384 | 3434 | ||
| 3385 | if (strcmp(cmp, opts->name) == 0) | 3435 | if (strcmp(cmp, opts->name) == 0) |
| 3386 | return __set_tracer_option(trace, trace->flags, | 3436 | return __set_tracer_option(tr, trace->flags, opts, neg); |
| 3387 | opts, neg); | ||
| 3388 | } | 3437 | } |
| 3389 | 3438 | ||
| 3390 | return -EINVAL; | 3439 | return -EINVAL; |
| @@ -3407,7 +3456,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) | |||
| 3407 | 3456 | ||
| 3408 | /* Give the tracer a chance to approve the change */ | 3457 | /* Give the tracer a chance to approve the change */ |
| 3409 | if (tr->current_trace->flag_changed) | 3458 | if (tr->current_trace->flag_changed) |
| 3410 | if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled)) | 3459 | if (tr->current_trace->flag_changed(tr, mask, !!enabled)) |
| 3411 | return -EINVAL; | 3460 | return -EINVAL; |
| 3412 | 3461 | ||
| 3413 | if (enabled) | 3462 | if (enabled) |
| @@ -3456,7 +3505,7 @@ static int trace_set_options(struct trace_array *tr, char *option) | |||
| 3456 | 3505 | ||
| 3457 | /* If no option could be set, test the specific tracer options */ | 3506 | /* If no option could be set, test the specific tracer options */ |
| 3458 | if (!trace_options[i]) | 3507 | if (!trace_options[i]) |
| 3459 | ret = set_tracer_option(tr->current_trace, cmp, neg); | 3508 | ret = set_tracer_option(tr, cmp, neg); |
| 3460 | 3509 | ||
| 3461 | mutex_unlock(&trace_types_lock); | 3510 | mutex_unlock(&trace_types_lock); |
| 3462 | 3511 | ||
| @@ -3885,10 +3934,26 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer); | |||
| 3885 | static void | 3934 | static void |
| 3886 | destroy_trace_option_files(struct trace_option_dentry *topts); | 3935 | destroy_trace_option_files(struct trace_option_dentry *topts); |
| 3887 | 3936 | ||
| 3888 | static int tracing_set_tracer(const char *buf) | 3937 | /* |
| 3938 | * Used to clear out the tracer before deletion of an instance. | ||
| 3939 | * Must have trace_types_lock held. | ||
| 3940 | */ | ||
| 3941 | static void tracing_set_nop(struct trace_array *tr) | ||
| 3942 | { | ||
| 3943 | if (tr->current_trace == &nop_trace) | ||
| 3944 | return; | ||
| 3945 | |||
| 3946 | tr->current_trace->enabled--; | ||
| 3947 | |||
| 3948 | if (tr->current_trace->reset) | ||
| 3949 | tr->current_trace->reset(tr); | ||
| 3950 | |||
| 3951 | tr->current_trace = &nop_trace; | ||
| 3952 | } | ||
| 3953 | |||
| 3954 | static int tracing_set_tracer(struct trace_array *tr, const char *buf) | ||
| 3889 | { | 3955 | { |
| 3890 | static struct trace_option_dentry *topts; | 3956 | static struct trace_option_dentry *topts; |
| 3891 | struct trace_array *tr = &global_trace; | ||
| 3892 | struct tracer *t; | 3957 | struct tracer *t; |
| 3893 | #ifdef CONFIG_TRACER_MAX_TRACE | 3958 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 3894 | bool had_max_tr; | 3959 | bool had_max_tr; |
| @@ -3916,9 +3981,15 @@ static int tracing_set_tracer(const char *buf) | |||
| 3916 | if (t == tr->current_trace) | 3981 | if (t == tr->current_trace) |
| 3917 | goto out; | 3982 | goto out; |
| 3918 | 3983 | ||
| 3984 | /* Some tracers are only allowed for the top level buffer */ | ||
| 3985 | if (!trace_ok_for_array(t, tr)) { | ||
| 3986 | ret = -EINVAL; | ||
| 3987 | goto out; | ||
| 3988 | } | ||
| 3989 | |||
| 3919 | trace_branch_disable(); | 3990 | trace_branch_disable(); |
| 3920 | 3991 | ||
| 3921 | tr->current_trace->enabled = false; | 3992 | tr->current_trace->enabled--; |
| 3922 | 3993 | ||
| 3923 | if (tr->current_trace->reset) | 3994 | if (tr->current_trace->reset) |
| 3924 | tr->current_trace->reset(tr); | 3995 | tr->current_trace->reset(tr); |
| @@ -3941,9 +4012,11 @@ static int tracing_set_tracer(const char *buf) | |||
| 3941 | free_snapshot(tr); | 4012 | free_snapshot(tr); |
| 3942 | } | 4013 | } |
| 3943 | #endif | 4014 | #endif |
| 3944 | destroy_trace_option_files(topts); | 4015 | /* Currently, only the top instance has options */ |
| 3945 | 4016 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { | |
| 3946 | topts = create_trace_option_files(tr, t); | 4017 | destroy_trace_option_files(topts); |
| 4018 | topts = create_trace_option_files(tr, t); | ||
| 4019 | } | ||
| 3947 | 4020 | ||
| 3948 | #ifdef CONFIG_TRACER_MAX_TRACE | 4021 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 3949 | if (t->use_max_tr && !had_max_tr) { | 4022 | if (t->use_max_tr && !had_max_tr) { |
| @@ -3960,7 +4033,7 @@ static int tracing_set_tracer(const char *buf) | |||
| 3960 | } | 4033 | } |
| 3961 | 4034 | ||
| 3962 | tr->current_trace = t; | 4035 | tr->current_trace = t; |
| 3963 | tr->current_trace->enabled = true; | 4036 | tr->current_trace->enabled++; |
| 3964 | trace_branch_enable(tr); | 4037 | trace_branch_enable(tr); |
| 3965 | out: | 4038 | out: |
| 3966 | mutex_unlock(&trace_types_lock); | 4039 | mutex_unlock(&trace_types_lock); |
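
Since several trace_array instances may now select the same struct tracer, its enabled field is bumped and dropped as a count rather than flipped as a bool; tracing_set_nop(), run when an instance is deleted, is one of the paths that decrements it. Conceptually (a sketch of the bookkeeping only, not the literal kernel code):

    /* per-tracer use count, protected by trace_types_lock */
    static void tracer_used_by_instance(struct tracer *t)
    {
            t->enabled++;           /* an instance switched to t */
    }

    static void tracer_dropped_by_instance(struct tracer *t)
    {
            t->enabled--;           /* an instance switched away from t */
            WARN_ON_ONCE(t->enabled < 0);
    }
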
| @@ -3972,6 +4045,7 @@ static ssize_t | |||
| 3972 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 4045 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, |
| 3973 | size_t cnt, loff_t *ppos) | 4046 | size_t cnt, loff_t *ppos) |
| 3974 | { | 4047 | { |
| 4048 | struct trace_array *tr = filp->private_data; | ||
| 3975 | char buf[MAX_TRACER_SIZE+1]; | 4049 | char buf[MAX_TRACER_SIZE+1]; |
| 3976 | int i; | 4050 | int i; |
| 3977 | size_t ret; | 4051 | size_t ret; |
| @@ -3991,7 +4065,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
| 3991 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | 4065 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) |
| 3992 | buf[i] = 0; | 4066 | buf[i] = 0; |
| 3993 | 4067 | ||
| 3994 | err = tracing_set_tracer(buf); | 4068 | err = tracing_set_tracer(tr, buf); |
| 3995 | if (err) | 4069 | if (err) |
| 3996 | return err; | 4070 | return err; |
| 3997 | 4071 | ||
| @@ -4699,25 +4773,10 @@ static int tracing_clock_show(struct seq_file *m, void *v) | |||
| 4699 | return 0; | 4773 | return 0; |
| 4700 | } | 4774 | } |
| 4701 | 4775 | ||
| 4702 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | 4776 | static int tracing_set_clock(struct trace_array *tr, const char *clockstr) |
| 4703 | size_t cnt, loff_t *fpos) | ||
| 4704 | { | 4777 | { |
| 4705 | struct seq_file *m = filp->private_data; | ||
| 4706 | struct trace_array *tr = m->private; | ||
| 4707 | char buf[64]; | ||
| 4708 | const char *clockstr; | ||
| 4709 | int i; | 4778 | int i; |
| 4710 | 4779 | ||
| 4711 | if (cnt >= sizeof(buf)) | ||
| 4712 | return -EINVAL; | ||
| 4713 | |||
| 4714 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 4715 | return -EFAULT; | ||
| 4716 | |||
| 4717 | buf[cnt] = 0; | ||
| 4718 | |||
| 4719 | clockstr = strstrip(buf); | ||
| 4720 | |||
| 4721 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { | 4780 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { |
| 4722 | if (strcmp(trace_clocks[i].name, clockstr) == 0) | 4781 | if (strcmp(trace_clocks[i].name, clockstr) == 0) |
| 4723 | break; | 4782 | break; |
| @@ -4745,6 +4804,32 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
| 4745 | 4804 | ||
| 4746 | mutex_unlock(&trace_types_lock); | 4805 | mutex_unlock(&trace_types_lock); |
| 4747 | 4806 | ||
| 4807 | return 0; | ||
| 4808 | } | ||
| 4809 | |||
| 4810 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | ||
| 4811 | size_t cnt, loff_t *fpos) | ||
| 4812 | { | ||
| 4813 | struct seq_file *m = filp->private_data; | ||
| 4814 | struct trace_array *tr = m->private; | ||
| 4815 | char buf[64]; | ||
| 4816 | const char *clockstr; | ||
| 4817 | int ret; | ||
| 4818 | |||
| 4819 | if (cnt >= sizeof(buf)) | ||
| 4820 | return -EINVAL; | ||
| 4821 | |||
| 4822 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 4823 | return -EFAULT; | ||
| 4824 | |||
| 4825 | buf[cnt] = 0; | ||
| 4826 | |||
| 4827 | clockstr = strstrip(buf); | ||
| 4828 | |||
| 4829 | ret = tracing_set_clock(tr, clockstr); | ||
| 4830 | if (ret) | ||
| 4831 | return ret; | ||
| 4832 | |||
| 4748 | *fpos += cnt; | 4833 | *fpos += cnt; |
| 4749 | 4834 | ||
| 4750 | return cnt; | 4835 | return cnt; |
| @@ -5705,7 +5790,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 5705 | 5790 | ||
| 5706 | if (!!(topt->flags->val & topt->opt->bit) != val) { | 5791 | if (!!(topt->flags->val & topt->opt->bit) != val) { |
| 5707 | mutex_lock(&trace_types_lock); | 5792 | mutex_lock(&trace_types_lock); |
| 5708 | ret = __set_tracer_option(topt->tr->current_trace, topt->flags, | 5793 | ret = __set_tracer_option(topt->tr, topt->flags, |
| 5709 | topt->opt, !val); | 5794 | topt->opt, !val); |
| 5710 | mutex_unlock(&trace_types_lock); | 5795 | mutex_unlock(&trace_types_lock); |
| 5711 | if (ret) | 5796 | if (ret) |
| @@ -6112,7 +6197,9 @@ static int instance_delete(const char *name) | |||
| 6112 | 6197 | ||
| 6113 | list_del(&tr->list); | 6198 | list_del(&tr->list); |
| 6114 | 6199 | ||
| 6200 | tracing_set_nop(tr); | ||
| 6115 | event_trace_del_tracer(tr); | 6201 | event_trace_del_tracer(tr); |
| 6202 | ftrace_destroy_function_files(tr); | ||
| 6116 | debugfs_remove_recursive(tr->dir); | 6203 | debugfs_remove_recursive(tr->dir); |
| 6117 | free_percpu(tr->trace_buffer.data); | 6204 | free_percpu(tr->trace_buffer.data); |
| 6118 | ring_buffer_free(tr->trace_buffer.buffer); | 6205 | ring_buffer_free(tr->trace_buffer.buffer); |
| @@ -6207,6 +6294,12 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
| 6207 | { | 6294 | { |
| 6208 | int cpu; | 6295 | int cpu; |
| 6209 | 6296 | ||
| 6297 | trace_create_file("available_tracers", 0444, d_tracer, | ||
| 6298 | tr, &show_traces_fops); | ||
| 6299 | |||
| 6300 | trace_create_file("current_tracer", 0644, d_tracer, | ||
| 6301 | tr, &set_tracer_fops); | ||
| 6302 | |||
| 6210 | trace_create_file("tracing_cpumask", 0644, d_tracer, | 6303 | trace_create_file("tracing_cpumask", 0644, d_tracer, |
| 6211 | tr, &tracing_cpumask_fops); | 6304 | tr, &tracing_cpumask_fops); |
| 6212 | 6305 | ||
| @@ -6237,6 +6330,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
| 6237 | trace_create_file("tracing_on", 0644, d_tracer, | 6330 | trace_create_file("tracing_on", 0644, d_tracer, |
| 6238 | tr, &rb_simple_fops); | 6331 | tr, &rb_simple_fops); |
| 6239 | 6332 | ||
| 6333 | if (ftrace_create_function_files(tr, d_tracer)) | ||
| 6334 | WARN(1, "Could not allocate function filter files"); | ||
| 6335 | |||
| 6240 | #ifdef CONFIG_TRACER_SNAPSHOT | 6336 | #ifdef CONFIG_TRACER_SNAPSHOT |
| 6241 | trace_create_file("snapshot", 0644, d_tracer, | 6337 | trace_create_file("snapshot", 0644, d_tracer, |
| 6242 | tr, &snapshot_fops); | 6338 | tr, &snapshot_fops); |
| @@ -6259,12 +6355,6 @@ static __init int tracer_init_debugfs(void) | |||
| 6259 | 6355 | ||
| 6260 | init_tracer_debugfs(&global_trace, d_tracer); | 6356 | init_tracer_debugfs(&global_trace, d_tracer); |
| 6261 | 6357 | ||
| 6262 | trace_create_file("available_tracers", 0444, d_tracer, | ||
| 6263 | &global_trace, &show_traces_fops); | ||
| 6264 | |||
| 6265 | trace_create_file("current_tracer", 0644, d_tracer, | ||
| 6266 | &global_trace, &set_tracer_fops); | ||
| 6267 | |||
| 6268 | #ifdef CONFIG_TRACER_MAX_TRACE | 6358 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 6269 | trace_create_file("tracing_max_latency", 0644, d_tracer, | 6359 | trace_create_file("tracing_max_latency", 0644, d_tracer, |
| 6270 | &tracing_max_latency, &tracing_max_lat_fops); | 6360 | &tracing_max_latency, &tracing_max_lat_fops); |
| @@ -6527,6 +6617,13 @@ __init static int tracer_alloc_buffers(void) | |||
| 6527 | 6617 | ||
| 6528 | trace_init_cmdlines(); | 6618 | trace_init_cmdlines(); |
| 6529 | 6619 | ||
| 6620 | if (trace_boot_clock) { | ||
| 6621 | ret = tracing_set_clock(&global_trace, trace_boot_clock); | ||
| 6622 | if (ret < 0) | ||
| 6623 | pr_warning("Trace clock %s not defined, going back to default\n", | ||
| 6624 | trace_boot_clock); | ||
| 6625 | } | ||
| 6626 | |||
| 6530 | /* | 6627 | /* |
| 6531 | * register_tracer() might reference current_trace, so it | 6628 | * register_tracer() might reference current_trace, so it |
| 6532 | * needs to be set before we register anything. This is | 6629 | * needs to be set before we register anything. This is |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 02b592f2d4b7..2e29d7ba5a52 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/hw_breakpoint.h> | 13 | #include <linux/hw_breakpoint.h> |
| 14 | #include <linux/trace_seq.h> | 14 | #include <linux/trace_seq.h> |
| 15 | #include <linux/ftrace_event.h> | 15 | #include <linux/ftrace_event.h> |
| 16 | #include <linux/compiler.h> | ||
| 16 | 17 | ||
| 17 | #ifdef CONFIG_FTRACE_SYSCALLS | 18 | #ifdef CONFIG_FTRACE_SYSCALLS |
| 18 | #include <asm/unistd.h> /* For NR_SYSCALLS */ | 19 | #include <asm/unistd.h> /* For NR_SYSCALLS */ |
| @@ -210,6 +211,11 @@ struct trace_array { | |||
| 210 | struct list_head events; | 211 | struct list_head events; |
| 211 | cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ | 212 | cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ |
| 212 | int ref; | 213 | int ref; |
| 214 | #ifdef CONFIG_FUNCTION_TRACER | ||
| 215 | struct ftrace_ops *ops; | ||
| 216 | /* function tracing enabled */ | ||
| 217 | int function_enabled; | ||
| 218 | #endif | ||
| 213 | }; | 219 | }; |
| 214 | 220 | ||
| 215 | enum { | 221 | enum { |
| @@ -355,14 +361,16 @@ struct tracer { | |||
| 355 | void (*print_header)(struct seq_file *m); | 361 | void (*print_header)(struct seq_file *m); |
| 356 | enum print_line_t (*print_line)(struct trace_iterator *iter); | 362 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
| 357 | /* If you handled the flag setting, return 0 */ | 363 | /* If you handled the flag setting, return 0 */ |
| 358 | int (*set_flag)(u32 old_flags, u32 bit, int set); | 364 | int (*set_flag)(struct trace_array *tr, |
| 365 | u32 old_flags, u32 bit, int set); | ||
| 359 | /* Return 0 if OK with change, else return non-zero */ | 366 | /* Return 0 if OK with change, else return non-zero */ |
| 360 | int (*flag_changed)(struct tracer *tracer, | 367 | int (*flag_changed)(struct trace_array *tr, |
| 361 | u32 mask, int set); | 368 | u32 mask, int set); |
| 362 | struct tracer *next; | 369 | struct tracer *next; |
| 363 | struct tracer_flags *flags; | 370 | struct tracer_flags *flags; |
| 371 | int enabled; | ||
| 364 | bool print_max; | 372 | bool print_max; |
| 365 | bool enabled; | 373 | bool allow_instances; |
| 366 | #ifdef CONFIG_TRACER_MAX_TRACE | 374 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 367 | bool use_max_tr; | 375 | bool use_max_tr; |
| 368 | #endif | 376 | #endif |
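
A tracer that wants to be usable inside instances/<name>/ now opts in with .allow_instances, and its flag callback receives the owning trace_array. A hedged sketch of such a registration (my_tracer and its callbacks are invented names; only the fields relevant here are shown):

    static int my_trace_init(struct trace_array *tr)
    {
            return 0;
    }

    static void my_trace_reset(struct trace_array *tr)
    {
    }

    static int my_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
    {
            return 0;       /* 0 == flag change accepted */
    }

    static struct tracer my_tracer __read_mostly = {
            .name            = "my_tracer",
            .init            = my_trace_init,
            .reset           = my_trace_reset,
            .set_flag        = my_set_flag,
            .allow_instances = true,        /* selectable from an instance directory */
    };
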
| @@ -812,13 +820,36 @@ static inline int ftrace_trace_task(struct task_struct *task) | |||
| 812 | return test_tsk_trace_trace(task); | 820 | return test_tsk_trace_trace(task); |
| 813 | } | 821 | } |
| 814 | extern int ftrace_is_dead(void); | 822 | extern int ftrace_is_dead(void); |
| 823 | int ftrace_create_function_files(struct trace_array *tr, | ||
| 824 | struct dentry *parent); | ||
| 825 | void ftrace_destroy_function_files(struct trace_array *tr); | ||
| 815 | #else | 826 | #else |
| 816 | static inline int ftrace_trace_task(struct task_struct *task) | 827 | static inline int ftrace_trace_task(struct task_struct *task) |
| 817 | { | 828 | { |
| 818 | return 1; | 829 | return 1; |
| 819 | } | 830 | } |
| 820 | static inline int ftrace_is_dead(void) { return 0; } | 831 | static inline int ftrace_is_dead(void) { return 0; } |
| 821 | #endif | 832 | static inline int |
| 833 | ftrace_create_function_files(struct trace_array *tr, | ||
| 834 | struct dentry *parent) | ||
| 835 | { | ||
| 836 | return 0; | ||
| 837 | } | ||
| 838 | static inline void ftrace_destroy_function_files(struct trace_array *tr) { } | ||
| 839 | #endif /* CONFIG_FUNCTION_TRACER */ | ||
| 840 | |||
| 841 | #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) | ||
| 842 | void ftrace_create_filter_files(struct ftrace_ops *ops, | ||
| 843 | struct dentry *parent); | ||
| 844 | void ftrace_destroy_filter_files(struct ftrace_ops *ops); | ||
| 845 | #else | ||
| 846 | /* | ||
| 847 | * The ops parameter passed in is usually undefined. | ||
| 848 | * This must be a macro. | ||
| 849 | */ | ||
| 850 | #define ftrace_create_filter_files(ops, parent) do { } while (0) | ||
| 851 | #define ftrace_destroy_filter_files(ops) do { } while (0) | ||
| 852 | #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ | ||
| 822 | 853 | ||
| 823 | int ftrace_event_is_function(struct ftrace_event_call *call); | 854 | int ftrace_event_is_function(struct ftrace_event_call *call); |
| 824 | 855 | ||
| @@ -1249,7 +1280,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); | |||
| 1249 | #undef FTRACE_ENTRY | 1280 | #undef FTRACE_ENTRY |
| 1250 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ | 1281 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ |
| 1251 | extern struct ftrace_event_call \ | 1282 | extern struct ftrace_event_call \ |
| 1252 | __attribute__((__aligned__(4))) event_##call; | 1283 | __aligned(4) event_##call; |
| 1253 | #undef FTRACE_ENTRY_DUP | 1284 | #undef FTRACE_ENTRY_DUP |
| 1254 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ | 1285 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ |
| 1255 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ | 1286 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 7b16d40bd64d..83a4378dc5e0 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -188,6 +188,36 @@ int trace_event_raw_init(struct ftrace_event_call *call) | |||
| 188 | } | 188 | } |
| 189 | EXPORT_SYMBOL_GPL(trace_event_raw_init); | 189 | EXPORT_SYMBOL_GPL(trace_event_raw_init); |
| 190 | 190 | ||
| 191 | void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, | ||
| 192 | struct ftrace_event_file *ftrace_file, | ||
| 193 | unsigned long len) | ||
| 194 | { | ||
| 195 | struct ftrace_event_call *event_call = ftrace_file->event_call; | ||
| 196 | |||
| 197 | local_save_flags(fbuffer->flags); | ||
| 198 | fbuffer->pc = preempt_count(); | ||
| 199 | fbuffer->ftrace_file = ftrace_file; | ||
| 200 | |||
| 201 | fbuffer->event = | ||
| 202 | trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file, | ||
| 203 | event_call->event.type, len, | ||
| 204 | fbuffer->flags, fbuffer->pc); | ||
| 205 | if (!fbuffer->event) | ||
| 206 | return NULL; | ||
| 207 | |||
| 208 | fbuffer->entry = ring_buffer_event_data(fbuffer->event); | ||
| 209 | return fbuffer->entry; | ||
| 210 | } | ||
| 211 | EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); | ||
| 212 | |||
| 213 | void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) | ||
| 214 | { | ||
| 215 | event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, | ||
| 216 | fbuffer->event, fbuffer->entry, | ||
| 217 | fbuffer->flags, fbuffer->pc); | ||
| 218 | } | ||
| 219 | EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); | ||
| 220 | |||
| 191 | int ftrace_event_reg(struct ftrace_event_call *call, | 221 | int ftrace_event_reg(struct ftrace_event_call *call, |
| 192 | enum trace_reg type, void *data) | 222 | enum trace_reg type, void *data) |
| 193 | { | 223 | { |
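
The two helpers added here wrap the common reserve/fill/commit sequence for an event record, with the commit routed through the trigger-aware unlock helper. An illustrative sketch of the call sequence they are meant for (struct my_entry and my_probe are hypothetical; the real users are the generated TRACE_EVENT probe functions):

    static void my_probe(struct ftrace_event_file *ftrace_file, u64 value)
    {
            struct ftrace_event_buffer fbuffer;
            struct my_entry {
                    struct trace_entry      ent;
                    u64                     value;
            } *entry;

            entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,
                                                sizeof(*entry));
            if (!entry)
                    return;

            entry->value = value;

            /* writes the record out and runs any attached triggers */
            ftrace_event_buffer_commit(&fbuffer);
    }
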
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 38fe1483c508..5b781d2be383 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
| @@ -13,32 +13,106 @@ | |||
| 13 | #include <linux/debugfs.h> | 13 | #include <linux/debugfs.h> |
| 14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
| 15 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
| 16 | #include <linux/slab.h> | ||
| 16 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
| 17 | 18 | ||
| 18 | #include "trace.h" | 19 | #include "trace.h" |
| 19 | 20 | ||
| 20 | /* function tracing enabled */ | 21 | static void tracing_start_function_trace(struct trace_array *tr); |
| 21 | static int ftrace_function_enabled; | 22 | static void tracing_stop_function_trace(struct trace_array *tr); |
| 23 | static void | ||
| 24 | function_trace_call(unsigned long ip, unsigned long parent_ip, | ||
| 25 | struct ftrace_ops *op, struct pt_regs *pt_regs); | ||
| 26 | static void | ||
| 27 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | ||
| 28 | struct ftrace_ops *op, struct pt_regs *pt_regs); | ||
| 29 | static struct ftrace_ops trace_ops; | ||
| 30 | static struct ftrace_ops trace_stack_ops; | ||
| 31 | static struct tracer_flags func_flags; | ||
| 32 | |||
| 33 | /* Our option */ | ||
| 34 | enum { | ||
| 35 | TRACE_FUNC_OPT_STACK = 0x1, | ||
| 36 | }; | ||
| 37 | |||
| 38 | static int allocate_ftrace_ops(struct trace_array *tr) | ||
| 39 | { | ||
| 40 | struct ftrace_ops *ops; | ||
| 41 | |||
| 42 | ops = kzalloc(sizeof(*ops), GFP_KERNEL); | ||
| 43 | if (!ops) | ||
| 44 | return -ENOMEM; | ||
| 22 | 45 | ||
| 23 | static struct trace_array *func_trace; | 46 | /* Currently only the non-stack version is supported */ |
| 47 | ops->func = function_trace_call; | ||
| 48 | ops->flags = FTRACE_OPS_FL_RECURSION_SAFE; | ||
| 49 | |||
| 50 | tr->ops = ops; | ||
| 51 | ops->private = tr; | ||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | |||
| 56 | int ftrace_create_function_files(struct trace_array *tr, | ||
| 57 | struct dentry *parent) | ||
| 58 | { | ||
| 59 | int ret; | ||
| 60 | |||
| 61 | /* The top level array uses the "global_ops". */ | ||
| 62 | if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) { | ||
| 63 | ret = allocate_ftrace_ops(tr); | ||
| 64 | if (ret) | ||
| 65 | return ret; | ||
| 66 | } | ||
| 67 | |||
| 68 | ftrace_create_filter_files(tr->ops, parent); | ||
| 69 | |||
| 70 | return 0; | ||
| 71 | } | ||
| 24 | 72 | ||
| 25 | static void tracing_start_function_trace(void); | 73 | void ftrace_destroy_function_files(struct trace_array *tr) |
| 26 | static void tracing_stop_function_trace(void); | 74 | { |
| 75 | ftrace_destroy_filter_files(tr->ops); | ||
| 76 | kfree(tr->ops); | ||
| 77 | tr->ops = NULL; | ||
| 78 | } | ||
| 27 | 79 | ||
| 28 | static int function_trace_init(struct trace_array *tr) | 80 | static int function_trace_init(struct trace_array *tr) |
| 29 | { | 81 | { |
| 30 | func_trace = tr; | 82 | struct ftrace_ops *ops; |
| 83 | |||
| 84 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { | ||
| 85 | /* There's only one global tr */ | ||
| 86 | if (!trace_ops.private) { | ||
| 87 | trace_ops.private = tr; | ||
| 88 | trace_stack_ops.private = tr; | ||
| 89 | } | ||
| 90 | |||
| 91 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | ||
| 92 | ops = &trace_stack_ops; | ||
| 93 | else | ||
| 94 | ops = &trace_ops; | ||
| 95 | tr->ops = ops; | ||
| 96 | } else if (!tr->ops) { | ||
| 97 | /* | ||
| 98 | * Instance trace_arrays get their ops allocated | ||
| 99 | * at instance creation, unless that | ||
| 100 | * allocation failed. | ||
| 101 | */ | ||
| 102 | return -ENOMEM; | ||
| 103 | } | ||
| 104 | |||
| 31 | tr->trace_buffer.cpu = get_cpu(); | 105 | tr->trace_buffer.cpu = get_cpu(); |
| 32 | put_cpu(); | 106 | put_cpu(); |
| 33 | 107 | ||
| 34 | tracing_start_cmdline_record(); | 108 | tracing_start_cmdline_record(); |
| 35 | tracing_start_function_trace(); | 109 | tracing_start_function_trace(tr); |
| 36 | return 0; | 110 | return 0; |
| 37 | } | 111 | } |
| 38 | 112 | ||
| 39 | static void function_trace_reset(struct trace_array *tr) | 113 | static void function_trace_reset(struct trace_array *tr) |
| 40 | { | 114 | { |
| 41 | tracing_stop_function_trace(); | 115 | tracing_stop_function_trace(tr); |
| 42 | tracing_stop_cmdline_record(); | 116 | tracing_stop_cmdline_record(); |
| 43 | } | 117 | } |
| 44 | 118 | ||
| @@ -47,25 +121,18 @@ static void function_trace_start(struct trace_array *tr) | |||
| 47 | tracing_reset_online_cpus(&tr->trace_buffer); | 121 | tracing_reset_online_cpus(&tr->trace_buffer); |
| 48 | } | 122 | } |
| 49 | 123 | ||
| 50 | /* Our option */ | ||
| 51 | enum { | ||
| 52 | TRACE_FUNC_OPT_STACK = 0x1, | ||
| 53 | }; | ||
| 54 | |||
| 55 | static struct tracer_flags func_flags; | ||
| 56 | |||
| 57 | static void | 124 | static void |
| 58 | function_trace_call(unsigned long ip, unsigned long parent_ip, | 125 | function_trace_call(unsigned long ip, unsigned long parent_ip, |
| 59 | struct ftrace_ops *op, struct pt_regs *pt_regs) | 126 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
| 60 | { | 127 | { |
| 61 | struct trace_array *tr = func_trace; | 128 | struct trace_array *tr = op->private; |
| 62 | struct trace_array_cpu *data; | 129 | struct trace_array_cpu *data; |
| 63 | unsigned long flags; | 130 | unsigned long flags; |
| 64 | int bit; | 131 | int bit; |
| 65 | int cpu; | 132 | int cpu; |
| 66 | int pc; | 133 | int pc; |
| 67 | 134 | ||
| 68 | if (unlikely(!ftrace_function_enabled)) | 135 | if (unlikely(!tr->function_enabled)) |
| 69 | return; | 136 | return; |
| 70 | 137 | ||
| 71 | pc = preempt_count(); | 138 | pc = preempt_count(); |
| @@ -91,14 +158,14 @@ static void | |||
| 91 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | 158 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, |
| 92 | struct ftrace_ops *op, struct pt_regs *pt_regs) | 159 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
| 93 | { | 160 | { |
| 94 | struct trace_array *tr = func_trace; | 161 | struct trace_array *tr = op->private; |
| 95 | struct trace_array_cpu *data; | 162 | struct trace_array_cpu *data; |
| 96 | unsigned long flags; | 163 | unsigned long flags; |
| 97 | long disabled; | 164 | long disabled; |
| 98 | int cpu; | 165 | int cpu; |
| 99 | int pc; | 166 | int pc; |
| 100 | 167 | ||
| 101 | if (unlikely(!ftrace_function_enabled)) | 168 | if (unlikely(!tr->function_enabled)) |
| 102 | return; | 169 | return; |
| 103 | 170 | ||
| 104 | /* | 171 | /* |
| @@ -128,7 +195,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | |||
| 128 | local_irq_restore(flags); | 195 | local_irq_restore(flags); |
| 129 | } | 196 | } |
| 130 | 197 | ||
| 131 | |||
| 132 | static struct ftrace_ops trace_ops __read_mostly = | 198 | static struct ftrace_ops trace_ops __read_mostly = |
| 133 | { | 199 | { |
| 134 | .func = function_trace_call, | 200 | .func = function_trace_call, |
| @@ -153,29 +219,21 @@ static struct tracer_flags func_flags = { | |||
| 153 | .opts = func_opts | 219 | .opts = func_opts |
| 154 | }; | 220 | }; |
| 155 | 221 | ||
| 156 | static void tracing_start_function_trace(void) | 222 | static void tracing_start_function_trace(struct trace_array *tr) |
| 157 | { | 223 | { |
| 158 | ftrace_function_enabled = 0; | 224 | tr->function_enabled = 0; |
| 159 | 225 | register_ftrace_function(tr->ops); | |
| 160 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | 226 | tr->function_enabled = 1; |
| 161 | register_ftrace_function(&trace_stack_ops); | ||
| 162 | else | ||
| 163 | register_ftrace_function(&trace_ops); | ||
| 164 | |||
| 165 | ftrace_function_enabled = 1; | ||
| 166 | } | 227 | } |
| 167 | 228 | ||
| 168 | static void tracing_stop_function_trace(void) | 229 | static void tracing_stop_function_trace(struct trace_array *tr) |
| 169 | { | 230 | { |
| 170 | ftrace_function_enabled = 0; | 231 | tr->function_enabled = 0; |
| 171 | 232 | unregister_ftrace_function(tr->ops); | |
| 172 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | ||
| 173 | unregister_ftrace_function(&trace_stack_ops); | ||
| 174 | else | ||
| 175 | unregister_ftrace_function(&trace_ops); | ||
| 176 | } | 233 | } |
| 177 | 234 | ||
| 178 | static int func_set_flag(u32 old_flags, u32 bit, int set) | 235 | static int |
| 236 | func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 179 | { | 237 | { |
| 180 | switch (bit) { | 238 | switch (bit) { |
| 181 | case TRACE_FUNC_OPT_STACK: | 239 | case TRACE_FUNC_OPT_STACK: |
| @@ -183,12 +241,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set) | |||
| 183 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) | 241 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) |
| 184 | break; | 242 | break; |
| 185 | 243 | ||
| 244 | unregister_ftrace_function(tr->ops); | ||
| 245 | |||
| 186 | if (set) { | 246 | if (set) { |
| 187 | unregister_ftrace_function(&trace_ops); | 247 | tr->ops = &trace_stack_ops; |
| 188 | register_ftrace_function(&trace_stack_ops); | 248 | register_ftrace_function(tr->ops); |
| 189 | } else { | 249 | } else { |
| 190 | unregister_ftrace_function(&trace_stack_ops); | 250 | tr->ops = &trace_ops; |
| 191 | register_ftrace_function(&trace_ops); | 251 | register_ftrace_function(tr->ops); |
| 192 | } | 252 | } |
| 193 | 253 | ||
| 194 | break; | 254 | break; |
| @@ -208,6 +268,7 @@ static struct tracer function_trace __tracer_data = | |||
| 208 | .wait_pipe = poll_wait_pipe, | 268 | .wait_pipe = poll_wait_pipe, |
| 209 | .flags = &func_flags, | 269 | .flags = &func_flags, |
| 210 | .set_flag = func_set_flag, | 270 | .set_flag = func_set_flag, |
| 271 | .allow_instances = true, | ||
| 211 | #ifdef CONFIG_FTRACE_SELFTEST | 272 | #ifdef CONFIG_FTRACE_SELFTEST |
| 212 | .selftest = trace_selftest_startup_function, | 273 | .selftest = trace_selftest_startup_function, |
| 213 | #endif | 274 | #endif |
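
With the function tracer, the callback now recovers its trace_array from op->private rather than from a file-scope pointer, which is what allows each instance to run its own registered copy of the ops. The pattern reduced to a sketch (the my_* names are illustrative, not kernel symbols):

    #include <linux/atomic.h>
    #include <linux/ftrace.h>
    #include <linux/slab.h>

    struct my_ctx {
            atomic_t hits;
    };

    static void my_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
    {
            struct my_ctx *ctx = op->private;       /* per-ops context, no globals */

            atomic_inc(&ctx->hits);
    }

    static struct ftrace_ops *my_alloc_ops(struct my_ctx *ctx)
    {
            struct ftrace_ops *ops = kzalloc(sizeof(*ops), GFP_KERNEL);

            if (!ops)
                    return NULL;

            ops->func = my_func;
            ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
            ops->private = ctx;     /* handed back to my_func() on every hit */
            return ops;             /* register_ftrace_function(ops) to start tracing */
    }
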
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 0b99120d395c..deff11200261 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -1476,7 +1476,8 @@ void graph_trace_close(struct trace_iterator *iter) | |||
| 1476 | } | 1476 | } |
| 1477 | } | 1477 | } |
| 1478 | 1478 | ||
| 1479 | static int func_graph_set_flag(u32 old_flags, u32 bit, int set) | 1479 | static int |
| 1480 | func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 1480 | { | 1481 | { |
| 1481 | if (bit == TRACE_GRAPH_PRINT_IRQS) | 1482 | if (bit == TRACE_GRAPH_PRINT_IRQS) |
| 1482 | ftrace_graph_skip_irqs = !set; | 1483 | ftrace_graph_skip_irqs = !set; |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 887ef88b0bc7..8ff02cbb892f 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -160,7 +160,8 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
| 160 | #endif /* CONFIG_FUNCTION_TRACER */ | 160 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 161 | 161 | ||
| 162 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 162 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 163 | static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) | 163 | static int |
| 164 | irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 164 | { | 165 | { |
| 165 | int cpu; | 166 | int cpu; |
| 166 | 167 | ||
| @@ -266,7 +267,8 @@ __trace_function(struct trace_array *tr, | |||
| 266 | #else | 267 | #else |
| 267 | #define __trace_function trace_function | 268 | #define __trace_function trace_function |
| 268 | 269 | ||
| 269 | static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) | 270 | static int |
| 271 | irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 270 | { | 272 | { |
| 271 | return -EINVAL; | 273 | return -EINVAL; |
| 272 | } | 274 | } |
| @@ -570,8 +572,10 @@ static void irqsoff_function_set(int set) | |||
| 570 | unregister_irqsoff_function(is_graph()); | 572 | unregister_irqsoff_function(is_graph()); |
| 571 | } | 573 | } |
| 572 | 574 | ||
| 573 | static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set) | 575 | static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) |
| 574 | { | 576 | { |
| 577 | struct tracer *tracer = tr->current_trace; | ||
| 578 | |||
| 575 | if (mask & TRACE_ITER_FUNCTION) | 579 | if (mask & TRACE_ITER_FUNCTION) |
| 576 | irqsoff_function_set(set); | 580 | irqsoff_function_set(set); |
| 577 | 581 | ||
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index bdbae450c13e..d021d21dd150 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -35,11 +35,6 @@ struct trace_kprobe { | |||
| 35 | struct trace_probe tp; | 35 | struct trace_probe tp; |
| 36 | }; | 36 | }; |
| 37 | 37 | ||
| 38 | struct event_file_link { | ||
| 39 | struct ftrace_event_file *file; | ||
| 40 | struct list_head list; | ||
| 41 | }; | ||
| 42 | |||
| 43 | #define SIZEOF_TRACE_KPROBE(n) \ | 38 | #define SIZEOF_TRACE_KPROBE(n) \ |
| 44 | (offsetof(struct trace_kprobe, tp.args) + \ | 39 | (offsetof(struct trace_kprobe, tp.args) + \ |
| 45 | (sizeof(struct probe_arg) * (n))) | 40 | (sizeof(struct probe_arg) * (n))) |
| @@ -387,18 +382,6 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) | |||
| 387 | return ret; | 382 | return ret; |
| 388 | } | 383 | } |
| 389 | 384 | ||
| 390 | static struct event_file_link * | ||
| 391 | find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | ||
| 392 | { | ||
| 393 | struct event_file_link *link; | ||
| 394 | |||
| 395 | list_for_each_entry(link, &tp->files, list) | ||
| 396 | if (link->file == file) | ||
| 397 | return link; | ||
| 398 | |||
| 399 | return NULL; | ||
| 400 | } | ||
| 401 | |||
| 402 | /* | 385 | /* |
| 403 | * Disable trace_probe | 386 | * Disable trace_probe |
| 404 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. | 387 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. |
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index 394f94417e2f..69a5cc94c01a 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c | |||
| @@ -62,7 +62,7 @@ static void nop_trace_reset(struct trace_array *tr) | |||
| 62 | * If you don't implement it, then the flag setting will be | 62 | * If you don't implement it, then the flag setting will be |
| 63 | * automatically accepted. | 63 | * automatically accepted. |
| 64 | */ | 64 | */ |
| 65 | static int nop_set_flag(u32 old_flags, u32 bit, int set) | 65 | static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) |
| 66 | { | 66 | { |
| 67 | /* | 67 | /* |
| 68 | * Note that you don't need to update nop_flags.val yourself. | 68 | * Note that you don't need to update nop_flags.val yourself. |
| @@ -96,6 +96,7 @@ struct tracer nop_trace __read_mostly = | |||
| 96 | .selftest = trace_selftest_startup_nop, | 96 | .selftest = trace_selftest_startup_nop, |
| 97 | #endif | 97 | #endif |
| 98 | .flags = &nop_flags, | 98 | .flags = &nop_flags, |
| 99 | .set_flag = nop_set_flag | 99 | .set_flag = nop_set_flag, |
| 100 | .allow_instances = true, | ||
| 100 | }; | 101 | }; |
| 101 | 102 | ||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index ed32284fbe32..ca0e79e2abaa 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
| @@ -439,6 +439,37 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
| 439 | } | 439 | } |
| 440 | EXPORT_SYMBOL(ftrace_raw_output_prep); | 440 | EXPORT_SYMBOL(ftrace_raw_output_prep); |
| 441 | 441 | ||
| 442 | static int ftrace_output_raw(struct trace_iterator *iter, char *name, | ||
| 443 | char *fmt, va_list ap) | ||
| 444 | { | ||
| 445 | struct trace_seq *s = &iter->seq; | ||
| 446 | int ret; | ||
| 447 | |||
| 448 | ret = trace_seq_printf(s, "%s: ", name); | ||
| 449 | if (!ret) | ||
| 450 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 451 | |||
| 452 | ret = trace_seq_vprintf(s, fmt, ap); | ||
| 453 | |||
| 454 | if (!ret) | ||
| 455 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 456 | |||
| 457 | return TRACE_TYPE_HANDLED; | ||
| 458 | } | ||
| 459 | |||
| 460 | int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) | ||
| 461 | { | ||
| 462 | va_list ap; | ||
| 463 | int ret; | ||
| 464 | |||
| 465 | va_start(ap, fmt); | ||
| 466 | ret = ftrace_output_raw(iter, name, fmt, ap); | ||
| 467 | va_end(ap); | ||
| 468 | |||
| 469 | return ret; | ||
| 470 | } | ||
| 471 | EXPORT_SYMBOL_GPL(ftrace_output_call); | ||
| 472 | |||
| 442 | #ifdef CONFIG_KRETPROBES | 473 | #ifdef CONFIG_KRETPROBES |
| 443 | static inline const char *kretprobed(const char *name) | 474 | static inline const char *kretprobed(const char *name) |
| 444 | { | 475 | { |
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index b73574a5f429..fb1ab5dfbd42 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h | |||
| @@ -288,6 +288,11 @@ struct trace_probe { | |||
| 288 | struct probe_arg args[]; | 288 | struct probe_arg args[]; |
| 289 | }; | 289 | }; |
| 290 | 290 | ||
| 291 | struct event_file_link { | ||
| 292 | struct ftrace_event_file *file; | ||
| 293 | struct list_head list; | ||
| 294 | }; | ||
| 295 | |||
| 291 | static inline bool trace_probe_is_enabled(struct trace_probe *tp) | 296 | static inline bool trace_probe_is_enabled(struct trace_probe *tp) |
| 292 | { | 297 | { |
| 293 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); | 298 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); |
| @@ -316,6 +321,18 @@ static inline int is_good_name(const char *name) | |||
| 316 | return 1; | 321 | return 1; |
| 317 | } | 322 | } |
| 318 | 323 | ||
| 324 | static inline struct event_file_link * | ||
| 325 | find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | ||
| 326 | { | ||
| 327 | struct event_file_link *link; | ||
| 328 | |||
| 329 | list_for_each_entry(link, &tp->files, list) | ||
| 330 | if (link->file == file) | ||
| 331 | return link; | ||
| 332 | |||
| 333 | return NULL; | ||
| 334 | } | ||
| 335 | |||
| 319 | extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, | 336 | extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, |
| 320 | struct probe_arg *parg, bool is_return, bool is_kprobe); | 337 | struct probe_arg *parg, bool is_return, bool is_kprobe); |
| 321 | 338 | ||
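
With event_file_link and find_event_file_link() shared through trace_probe.h, both kprobe and uprobe events keep a per-probe list of the event files they are attached to; hit handlers walk it under RCU while the enable/disable paths add and remove entries with the _rcu list helpers. The reader side, condensed to a sketch (my_dispatch and my_deliver are hypothetical):

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>

    static void my_deliver(struct ftrace_event_file *file)
    {
            /* write one record into this file's buffer */
    }

    static void my_dispatch(struct trace_probe *tp)
    {
            struct event_file_link *link;

            rcu_read_lock();
            list_for_each_entry_rcu(link, &tp->files, list)
                    my_deliver(link->file);
            rcu_read_unlock();
    }
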
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 6e32635e5e57..e14da5e97a69 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -179,8 +179,10 @@ static void wakeup_function_set(int set) | |||
| 179 | unregister_wakeup_function(is_graph()); | 179 | unregister_wakeup_function(is_graph()); |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set) | 182 | static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set) |
| 183 | { | 183 | { |
| 184 | struct tracer *tracer = tr->current_trace; | ||
| 185 | |||
| 184 | if (mask & TRACE_ITER_FUNCTION) | 186 | if (mask & TRACE_ITER_FUNCTION) |
| 185 | wakeup_function_set(set); | 187 | wakeup_function_set(set); |
| 186 | 188 | ||
| @@ -209,7 +211,8 @@ static void stop_func_tracer(int graph) | |||
| 209 | } | 211 | } |
| 210 | 212 | ||
| 211 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 213 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 212 | static int wakeup_set_flag(u32 old_flags, u32 bit, int set) | 214 | static int |
| 215 | wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 213 | { | 216 | { |
| 214 | 217 | ||
| 215 | if (!(bit & TRACE_DISPLAY_GRAPH)) | 218 | if (!(bit & TRACE_DISPLAY_GRAPH)) |
| @@ -311,7 +314,8 @@ __trace_function(struct trace_array *tr, | |||
| 311 | #else | 314 | #else |
| 312 | #define __trace_function trace_function | 315 | #define __trace_function trace_function |
| 313 | 316 | ||
| 314 | static int wakeup_set_flag(u32 old_flags, u32 bit, int set) | 317 | static int |
| 318 | wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 315 | { | 319 | { |
| 316 | return -EINVAL; | 320 | return -EINVAL; |
| 317 | } | 321 | } |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index e6be585cf06a..21b320e5d163 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/sysctl.h> | 13 | #include <linux/sysctl.h> |
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
| 16 | #include <linux/magic.h> | ||
| 16 | 17 | ||
| 17 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
| 18 | 19 | ||
| @@ -144,6 +145,8 @@ check_stack(unsigned long ip, unsigned long *stack) | |||
| 144 | i++; | 145 | i++; |
| 145 | } | 146 | } |
| 146 | 147 | ||
| 148 | BUG_ON(current != &init_task && | ||
| 149 | *(end_of_stack(current)) != STACK_END_MAGIC); | ||
| 147 | out: | 150 | out: |
| 148 | arch_spin_unlock(&max_stack_lock); | 151 | arch_spin_unlock(&max_stack_lock); |
| 149 | local_irq_restore(flags); | 152 | local_irq_restore(flags); |
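
The new BUG_ON in check_stack() relies on the canary the kernel writes at the lowest word of every task stack: if the word at end_of_stack() no longer reads STACK_END_MAGIC, the stack has already been overflowed and it is safer to stop than to keep tracing over corrupted memory (init_task is excluded, matching the check above). The test in isolation, as a sketch:

    #include <linux/magic.h>
    #include <linux/sched.h>

    static inline bool my_stack_overflowed(struct task_struct *tsk)
    {
            /* the stack-end canary is clobbered when the stack overflows */
            return tsk != &init_task &&
                   *(end_of_stack(tsk)) != STACK_END_MAGIC;
    }
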
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 79e52d93860b..e4473367e7a4 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
| @@ -260,6 +260,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) | |||
| 260 | goto error; | 260 | goto error; |
| 261 | 261 | ||
| 262 | INIT_LIST_HEAD(&tu->list); | 262 | INIT_LIST_HEAD(&tu->list); |
| 263 | INIT_LIST_HEAD(&tu->tp.files); | ||
| 263 | tu->consumer.handler = uprobe_dispatcher; | 264 | tu->consumer.handler = uprobe_dispatcher; |
| 264 | if (is_ret) | 265 | if (is_ret) |
| 265 | tu->consumer.ret_handler = uretprobe_dispatcher; | 266 | tu->consumer.ret_handler = uretprobe_dispatcher; |
| @@ -758,31 +759,32 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb) | |||
| 758 | mutex_unlock(&ucb->mutex); | 759 | mutex_unlock(&ucb->mutex); |
| 759 | } | 760 | } |
| 760 | 761 | ||
| 761 | static void uprobe_trace_print(struct trace_uprobe *tu, | 762 | static void __uprobe_trace_func(struct trace_uprobe *tu, |
| 762 | unsigned long func, struct pt_regs *regs) | 763 | unsigned long func, struct pt_regs *regs, |
| 764 | struct uprobe_cpu_buffer *ucb, int dsize, | ||
| 765 | struct ftrace_event_file *ftrace_file) | ||
| 763 | { | 766 | { |
| 764 | struct uprobe_trace_entry_head *entry; | 767 | struct uprobe_trace_entry_head *entry; |
| 765 | struct ring_buffer_event *event; | 768 | struct ring_buffer_event *event; |
| 766 | struct ring_buffer *buffer; | 769 | struct ring_buffer *buffer; |
| 767 | struct uprobe_cpu_buffer *ucb; | ||
| 768 | void *data; | 770 | void *data; |
| 769 | int size, dsize, esize; | 771 | int size, esize; |
| 770 | struct ftrace_event_call *call = &tu->tp.call; | 772 | struct ftrace_event_call *call = &tu->tp.call; |
| 771 | 773 | ||
| 772 | dsize = __get_data_size(&tu->tp, regs); | 774 | WARN_ON(call != ftrace_file->event_call); |
| 773 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 774 | 775 | ||
| 775 | if (WARN_ON_ONCE(!uprobe_cpu_buffer || tu->tp.size + dsize > PAGE_SIZE)) | 776 | if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE)) |
| 776 | return; | 777 | return; |
| 777 | 778 | ||
| 778 | ucb = uprobe_buffer_get(); | 779 | if (ftrace_trigger_soft_disabled(ftrace_file)) |
| 779 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | 780 | return; |
| 780 | 781 | ||
| 782 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 781 | size = esize + tu->tp.size + dsize; | 783 | size = esize + tu->tp.size + dsize; |
| 782 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, | 784 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, |
| 783 | size, 0, 0); | 785 | call->event.type, size, 0, 0); |
| 784 | if (!event) | 786 | if (!event) |
| 785 | goto out; | 787 | return; |
| 786 | 788 | ||
| 787 | entry = ring_buffer_event_data(event); | 789 | entry = ring_buffer_event_data(event); |
| 788 | if (is_ret_probe(tu)) { | 790 | if (is_ret_probe(tu)) { |
| @@ -796,25 +798,36 @@ static void uprobe_trace_print(struct trace_uprobe *tu, | |||
| 796 | 798 | ||
| 797 | memcpy(data, ucb->buf, tu->tp.size + dsize); | 799 | memcpy(data, ucb->buf, tu->tp.size + dsize); |
| 798 | 800 | ||
| 799 | if (!call_filter_check_discard(call, entry, buffer, event)) | 801 | event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0); |
| 800 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
| 801 | |||
| 802 | out: | ||
| 803 | uprobe_buffer_put(ucb); | ||
| 804 | } | 802 | } |
| 805 | 803 | ||
| 806 | /* uprobe handler */ | 804 | /* uprobe handler */ |
| 807 | static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) | 805 | static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs, |
| 806 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 808 | { | 807 | { |
| 809 | if (!is_ret_probe(tu)) | 808 | struct event_file_link *link; |
| 810 | uprobe_trace_print(tu, 0, regs); | 809 | |
| 810 | if (is_ret_probe(tu)) | ||
| 811 | return 0; | ||
| 812 | |||
| 813 | rcu_read_lock(); | ||
| 814 | list_for_each_entry_rcu(link, &tu->tp.files, list) | ||
| 815 | __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file); | ||
| 816 | rcu_read_unlock(); | ||
| 817 | |||
| 811 | return 0; | 818 | return 0; |
| 812 | } | 819 | } |
| 813 | 820 | ||
| 814 | static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, | 821 | static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, |
| 815 | struct pt_regs *regs) | 822 | struct pt_regs *regs, |
| 823 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 816 | { | 824 | { |
| 817 | uprobe_trace_print(tu, func, regs); | 825 | struct event_file_link *link; |
| 826 | |||
| 827 | rcu_read_lock(); | ||
| 828 | list_for_each_entry_rcu(link, &tu->tp.files, list) | ||
| 829 | __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file); | ||
| 830 | rcu_read_unlock(); | ||
| 818 | } | 831 | } |
| 819 | 832 | ||
| 820 | /* Event entry printers */ | 833 | /* Event entry printers */ |
| @@ -861,12 +874,24 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self, | |||
| 861 | struct mm_struct *mm); | 874 | struct mm_struct *mm); |
| 862 | 875 | ||
| 863 | static int | 876 | static int |
| 864 | probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) | 877 | probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file, |
| 878 | filter_func_t filter) | ||
| 865 | { | 879 | { |
| 866 | int ret = 0; | 880 | bool enabled = trace_probe_is_enabled(&tu->tp); |
| 881 | struct event_file_link *link = NULL; | ||
| 882 | int ret; | ||
| 883 | |||
| 884 | if (file) { | ||
| 885 | link = kmalloc(sizeof(*link), GFP_KERNEL); | ||
| 886 | if (!link) | ||
| 887 | return -ENOMEM; | ||
| 867 | 888 | ||
| 868 | if (trace_probe_is_enabled(&tu->tp)) | 889 | link->file = file; |
| 869 | return -EINTR; | 890 | list_add_tail_rcu(&link->list, &tu->tp.files); |
| 891 | |||
| 892 | tu->tp.flags |= TP_FLAG_TRACE; | ||
| 893 | } else | ||
| 894 | tu->tp.flags |= TP_FLAG_PROFILE; | ||
| 870 | 895 | ||
| 871 | ret = uprobe_buffer_enable(); | 896 | ret = uprobe_buffer_enable(); |
| 872 | if (ret < 0) | 897 | if (ret < 0) |
| @@ -874,24 +899,49 @@ probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) | |||
| 874 | 899 | ||
| 875 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); | 900 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); |
| 876 | 901 | ||
| 877 | tu->tp.flags |= flag; | 902 | if (enabled) |
| 903 | return 0; | ||
| 904 | |||
| 878 | tu->consumer.filter = filter; | 905 | tu->consumer.filter = filter; |
| 879 | ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); | 906 | ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); |
| 880 | if (ret) | 907 | if (ret) { |
| 881 | tu->tp.flags &= ~flag; | 908 | if (file) { |
| 909 | list_del(&link->list); | ||
| 910 | kfree(link); | ||
| 911 | tu->tp.flags &= ~TP_FLAG_TRACE; | ||
| 912 | } else | ||
| 913 | tu->tp.flags &= ~TP_FLAG_PROFILE; | ||
| 914 | } | ||
| 882 | 915 | ||
| 883 | return ret; | 916 | return ret; |
| 884 | } | 917 | } |
| 885 | 918 | ||
| 886 | static void probe_event_disable(struct trace_uprobe *tu, int flag) | 919 | static void |
| 920 | probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file) | ||
| 887 | { | 921 | { |
| 888 | if (!trace_probe_is_enabled(&tu->tp)) | 922 | if (!trace_probe_is_enabled(&tu->tp)) |
| 889 | return; | 923 | return; |
| 890 | 924 | ||
| 925 | if (file) { | ||
| 926 | struct event_file_link *link; | ||
| 927 | |||
| 928 | link = find_event_file_link(&tu->tp, file); | ||
| 929 | if (!link) | ||
| 930 | return; | ||
| 931 | |||
| 932 | list_del_rcu(&link->list); | ||
| 933 | /* synchronize with u{,ret}probe_trace_func */ | ||
| 934 | synchronize_sched(); | ||
| 935 | kfree(link); | ||
| 936 | |||
| 937 | if (!list_empty(&tu->tp.files)) | ||
| 938 | return; | ||
| 939 | } | ||
| 940 | |||
| 891 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); | 941 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); |
| 892 | 942 | ||
| 893 | uprobe_unregister(tu->inode, tu->offset, &tu->consumer); | 943 | uprobe_unregister(tu->inode, tu->offset, &tu->consumer); |
| 894 | tu->tp.flags &= ~flag; | 944 | tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE; |
| 895 | 945 | ||
| 896 | uprobe_buffer_disable(); | 946 | uprobe_buffer_disable(); |
| 897 | } | 947 | } |
| @@ -1014,31 +1064,24 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc, | |||
| 1014 | return ret; | 1064 | return ret; |
| 1015 | } | 1065 | } |
| 1016 | 1066 | ||
| 1017 | static void uprobe_perf_print(struct trace_uprobe *tu, | 1067 | static void __uprobe_perf_func(struct trace_uprobe *tu, |
| 1018 | unsigned long func, struct pt_regs *regs) | 1068 | unsigned long func, struct pt_regs *regs, |
| 1069 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 1019 | { | 1070 | { |
| 1020 | struct ftrace_event_call *call = &tu->tp.call; | 1071 | struct ftrace_event_call *call = &tu->tp.call; |
| 1021 | struct uprobe_trace_entry_head *entry; | 1072 | struct uprobe_trace_entry_head *entry; |
| 1022 | struct hlist_head *head; | 1073 | struct hlist_head *head; |
| 1023 | struct uprobe_cpu_buffer *ucb; | ||
| 1024 | void *data; | 1074 | void *data; |
| 1025 | int size, dsize, esize; | 1075 | int size, esize; |
| 1026 | int rctx; | 1076 | int rctx; |
| 1027 | 1077 | ||
| 1028 | dsize = __get_data_size(&tu->tp, regs); | ||
| 1029 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | 1078 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
| 1030 | 1079 | ||
| 1031 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) | ||
| 1032 | return; | ||
| 1033 | |||
| 1034 | size = esize + tu->tp.size + dsize; | 1080 | size = esize + tu->tp.size + dsize; |
| 1035 | size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); | 1081 | size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); |
| 1036 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) | 1082 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) |
| 1037 | return; | 1083 | return; |
| 1038 | 1084 | ||
| 1039 | ucb = uprobe_buffer_get(); | ||
| 1040 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 1041 | |||
| 1042 | preempt_disable(); | 1085 | preempt_disable(); |
| 1043 | head = this_cpu_ptr(call->perf_events); | 1086 | head = this_cpu_ptr(call->perf_events); |
| 1044 | if (hlist_empty(head)) | 1087 | if (hlist_empty(head)) |
| @@ -1068,46 +1111,49 @@ static void uprobe_perf_print(struct trace_uprobe *tu, | |||
| 1068 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); | 1111 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); |
| 1069 | out: | 1112 | out: |
| 1070 | preempt_enable(); | 1113 | preempt_enable(); |
| 1071 | uprobe_buffer_put(ucb); | ||
| 1072 | } | 1114 | } |
| 1073 | 1115 | ||
| 1074 | /* uprobe profile handler */ | 1116 | /* uprobe profile handler */ |
| 1075 | static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) | 1117 | static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs, |
| 1118 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 1076 | { | 1119 | { |
| 1077 | if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) | 1120 | if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) |
| 1078 | return UPROBE_HANDLER_REMOVE; | 1121 | return UPROBE_HANDLER_REMOVE; |
| 1079 | 1122 | ||
| 1080 | if (!is_ret_probe(tu)) | 1123 | if (!is_ret_probe(tu)) |
| 1081 | uprobe_perf_print(tu, 0, regs); | 1124 | __uprobe_perf_func(tu, 0, regs, ucb, dsize); |
| 1082 | return 0; | 1125 | return 0; |
| 1083 | } | 1126 | } |
| 1084 | 1127 | ||
| 1085 | static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, | 1128 | static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, |
| 1086 | struct pt_regs *regs) | 1129 | struct pt_regs *regs, |
| 1130 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 1087 | { | 1131 | { |
| 1088 | uprobe_perf_print(tu, func, regs); | 1132 | __uprobe_perf_func(tu, func, regs, ucb, dsize); |
| 1089 | } | 1133 | } |
| 1090 | #endif /* CONFIG_PERF_EVENTS */ | 1134 | #endif /* CONFIG_PERF_EVENTS */ |
| 1091 | 1135 | ||
| 1092 | static | 1136 | static int |
| 1093 | int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) | 1137 | trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, |
| 1138 | void *data) | ||
| 1094 | { | 1139 | { |
| 1095 | struct trace_uprobe *tu = event->data; | 1140 | struct trace_uprobe *tu = event->data; |
| 1141 | struct ftrace_event_file *file = data; | ||
| 1096 | 1142 | ||
| 1097 | switch (type) { | 1143 | switch (type) { |
| 1098 | case TRACE_REG_REGISTER: | 1144 | case TRACE_REG_REGISTER: |
| 1099 | return probe_event_enable(tu, TP_FLAG_TRACE, NULL); | 1145 | return probe_event_enable(tu, file, NULL); |
| 1100 | 1146 | ||
| 1101 | case TRACE_REG_UNREGISTER: | 1147 | case TRACE_REG_UNREGISTER: |
| 1102 | probe_event_disable(tu, TP_FLAG_TRACE); | 1148 | probe_event_disable(tu, file); |
| 1103 | return 0; | 1149 | return 0; |
| 1104 | 1150 | ||
| 1105 | #ifdef CONFIG_PERF_EVENTS | 1151 | #ifdef CONFIG_PERF_EVENTS |
| 1106 | case TRACE_REG_PERF_REGISTER: | 1152 | case TRACE_REG_PERF_REGISTER: |
| 1107 | return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter); | 1153 | return probe_event_enable(tu, NULL, uprobe_perf_filter); |
| 1108 | 1154 | ||
| 1109 | case TRACE_REG_PERF_UNREGISTER: | 1155 | case TRACE_REG_PERF_UNREGISTER: |
| 1110 | probe_event_disable(tu, TP_FLAG_PROFILE); | 1156 | probe_event_disable(tu, NULL); |
| 1111 | return 0; | 1157 | return 0; |
| 1112 | 1158 | ||
| 1113 | case TRACE_REG_PERF_OPEN: | 1159 | case TRACE_REG_PERF_OPEN: |
| @@ -1127,8 +1173,11 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) | |||
| 1127 | { | 1173 | { |
| 1128 | struct trace_uprobe *tu; | 1174 | struct trace_uprobe *tu; |
| 1129 | struct uprobe_dispatch_data udd; | 1175 | struct uprobe_dispatch_data udd; |
| 1176 | struct uprobe_cpu_buffer *ucb; | ||
| 1177 | int dsize, esize; | ||
| 1130 | int ret = 0; | 1178 | int ret = 0; |
| 1131 | 1179 | ||
| 1180 | |||
| 1132 | tu = container_of(con, struct trace_uprobe, consumer); | 1181 | tu = container_of(con, struct trace_uprobe, consumer); |
| 1133 | tu->nhit++; | 1182 | tu->nhit++; |
| 1134 | 1183 | ||
| @@ -1137,13 +1186,29 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) | |||
| 1137 | 1186 | ||
| 1138 | current->utask->vaddr = (unsigned long) &udd; | 1187 | current->utask->vaddr = (unsigned long) &udd; |
| 1139 | 1188 | ||
| 1189 | #ifdef CONFIG_PERF_EVENTS | ||
| 1190 | if ((tu->tp.flags & TP_FLAG_TRACE) == 0 && | ||
| 1191 | !uprobe_perf_filter(&tu->consumer, 0, current->mm)) | ||
| 1192 | return UPROBE_HANDLER_REMOVE; | ||
| 1193 | #endif | ||
| 1194 | |||
| 1195 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) | ||
| 1196 | return 0; | ||
| 1197 | |||
| 1198 | dsize = __get_data_size(&tu->tp, regs); | ||
| 1199 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 1200 | |||
| 1201 | ucb = uprobe_buffer_get(); | ||
| 1202 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 1203 | |||
| 1140 | if (tu->tp.flags & TP_FLAG_TRACE) | 1204 | if (tu->tp.flags & TP_FLAG_TRACE) |
| 1141 | ret |= uprobe_trace_func(tu, regs); | 1205 | ret |= uprobe_trace_func(tu, regs, ucb, dsize); |
| 1142 | 1206 | ||
| 1143 | #ifdef CONFIG_PERF_EVENTS | 1207 | #ifdef CONFIG_PERF_EVENTS |
| 1144 | if (tu->tp.flags & TP_FLAG_PROFILE) | 1208 | if (tu->tp.flags & TP_FLAG_PROFILE) |
| 1145 | ret |= uprobe_perf_func(tu, regs); | 1209 | ret |= uprobe_perf_func(tu, regs, ucb, dsize); |
| 1146 | #endif | 1210 | #endif |
| 1211 | uprobe_buffer_put(ucb); | ||
| 1147 | return ret; | 1212 | return ret; |
| 1148 | } | 1213 | } |
| 1149 | 1214 | ||
| @@ -1152,6 +1217,8 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, | |||
| 1152 | { | 1217 | { |
| 1153 | struct trace_uprobe *tu; | 1218 | struct trace_uprobe *tu; |
| 1154 | struct uprobe_dispatch_data udd; | 1219 | struct uprobe_dispatch_data udd; |
| 1220 | struct uprobe_cpu_buffer *ucb; | ||
| 1221 | int dsize, esize; | ||
| 1155 | 1222 | ||
| 1156 | tu = container_of(con, struct trace_uprobe, consumer); | 1223 | tu = container_of(con, struct trace_uprobe, consumer); |
| 1157 | 1224 | ||
| @@ -1160,13 +1227,23 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, | |||
| 1160 | 1227 | ||
| 1161 | current->utask->vaddr = (unsigned long) &udd; | 1228 | current->utask->vaddr = (unsigned long) &udd; |
| 1162 | 1229 | ||
| 1230 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) | ||
| 1231 | return 0; | ||
| 1232 | |||
| 1233 | dsize = __get_data_size(&tu->tp, regs); | ||
| 1234 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 1235 | |||
| 1236 | ucb = uprobe_buffer_get(); | ||
| 1237 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 1238 | |||
| 1163 | if (tu->tp.flags & TP_FLAG_TRACE) | 1239 | if (tu->tp.flags & TP_FLAG_TRACE) |
| 1164 | uretprobe_trace_func(tu, func, regs); | 1240 | uretprobe_trace_func(tu, func, regs, ucb, dsize); |
| 1165 | 1241 | ||
| 1166 | #ifdef CONFIG_PERF_EVENTS | 1242 | #ifdef CONFIG_PERF_EVENTS |
| 1167 | if (tu->tp.flags & TP_FLAG_PROFILE) | 1243 | if (tu->tp.flags & TP_FLAG_PROFILE) |
| 1168 | uretprobe_perf_func(tu, func, regs); | 1244 | uretprobe_perf_func(tu, func, regs, ucb, dsize); |
| 1169 | #endif | 1245 | #endif |
| 1246 | uprobe_buffer_put(ucb); | ||
| 1170 | return 0; | 1247 | return 0; |
| 1171 | } | 1248 | } |
| 1172 | 1249 | ||
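Two idioms recur in the trace_uprobe changes above: the list of attached ftrace_event_files is walked under RCU and torn down with list_del_rcu() + synchronize_sched() + kfree(), and the per-CPU argument buffer is now fetched once per hit in the dispatchers and handed to both the trace and perf paths. A condensed sketch of the RCU-protected link-list lifecycle, using hypothetical stand-in names (needs <linux/rculist.h> and <linux/slab.h>; not part of the patch):

	struct file_link {				/* stands in for event_file_link */
		struct list_head	list;
		void			*file;
	};

	static LIST_HEAD(file_list);			/* stands in for tu->tp.files */

	static int link_file(void *file)
	{
		struct file_link *link = kmalloc(sizeof(*link), GFP_KERNEL);

		if (!link)
			return -ENOMEM;
		link->file = file;
		list_add_tail_rcu(&link->list, &file_list);
		return 0;
	}

	static void for_each_linked_file(void (*fn)(void *file))
	{
		struct file_link *link;

		rcu_read_lock();			/* readers never block writers */
		list_for_each_entry_rcu(link, &file_list, list)
			fn(link->file);
		rcu_read_unlock();
	}

	static void unlink_file(struct file_link *link)
	{
		list_del_rcu(&link->list);
		synchronize_sched();			/* wait for in-flight readers */
		kfree(link);
	}

This mirrors probe_event_enable()/probe_event_disable() and the list_for_each_entry_rcu() walks in the trace functions, minus the TP_FLAG_TRACE bookkeeping.
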
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 031cc5655a51..fb0a38a26555 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
| @@ -62,14 +62,12 @@ struct tracepoint_entry { | |||
| 62 | struct hlist_node hlist; | 62 | struct hlist_node hlist; |
| 63 | struct tracepoint_func *funcs; | 63 | struct tracepoint_func *funcs; |
| 64 | int refcount; /* Number of times armed. 0 if disarmed. */ | 64 | int refcount; /* Number of times armed. 0 if disarmed. */ |
| 65 | int enabled; /* Tracepoint enabled */ | ||
| 65 | char name[0]; | 66 | char name[0]; |
| 66 | }; | 67 | }; |
| 67 | 68 | ||
| 68 | struct tp_probes { | 69 | struct tp_probes { |
| 69 | union { | 70 | struct rcu_head rcu; |
| 70 | struct rcu_head rcu; | ||
| 71 | struct list_head list; | ||
| 72 | } u; | ||
| 73 | struct tracepoint_func probes[0]; | 71 | struct tracepoint_func probes[0]; |
| 74 | }; | 72 | }; |
| 75 | 73 | ||
| @@ -82,7 +80,7 @@ static inline void *allocate_probes(int count) | |||
| 82 | 80 | ||
| 83 | static void rcu_free_old_probes(struct rcu_head *head) | 81 | static void rcu_free_old_probes(struct rcu_head *head) |
| 84 | { | 82 | { |
| 85 | kfree(container_of(head, struct tp_probes, u.rcu)); | 83 | kfree(container_of(head, struct tp_probes, rcu)); |
| 86 | } | 84 | } |
| 87 | 85 | ||
| 88 | static inline void release_probes(struct tracepoint_func *old) | 86 | static inline void release_probes(struct tracepoint_func *old) |
| @@ -90,7 +88,7 @@ static inline void release_probes(struct tracepoint_func *old) | |||
| 90 | if (old) { | 88 | if (old) { |
| 91 | struct tp_probes *tp_probes = container_of(old, | 89 | struct tp_probes *tp_probes = container_of(old, |
| 92 | struct tp_probes, probes[0]); | 90 | struct tp_probes, probes[0]); |
| 93 | call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes); | 91 | call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes); |
| 94 | } | 92 | } |
| 95 | } | 93 | } |
| 96 | 94 | ||
| @@ -237,6 +235,7 @@ static struct tracepoint_entry *add_tracepoint(const char *name) | |||
| 237 | memcpy(&e->name[0], name, name_len); | 235 | memcpy(&e->name[0], name, name_len); |
| 238 | e->funcs = NULL; | 236 | e->funcs = NULL; |
| 239 | e->refcount = 0; | 237 | e->refcount = 0; |
| 238 | e->enabled = 0; | ||
| 240 | hlist_add_head(&e->hlist, head); | 239 | hlist_add_head(&e->hlist, head); |
| 241 | return e; | 240 | return e; |
| 242 | } | 241 | } |
| @@ -316,6 +315,7 @@ static void tracepoint_update_probe_range(struct tracepoint * const *begin, | |||
| 316 | if (mark_entry) { | 315 | if (mark_entry) { |
| 317 | set_tracepoint(&mark_entry, *iter, | 316 | set_tracepoint(&mark_entry, *iter, |
| 318 | !!mark_entry->refcount); | 317 | !!mark_entry->refcount); |
| 318 | mark_entry->enabled = !!mark_entry->refcount; | ||
| 319 | } else { | 319 | } else { |
| 320 | disable_tracepoint(*iter); | 320 | disable_tracepoint(*iter); |
| 321 | } | 321 | } |
| @@ -373,13 +373,26 @@ tracepoint_add_probe(const char *name, void *probe, void *data) | |||
| 373 | * tracepoint_probe_register - Connect a probe to a tracepoint | 373 | * tracepoint_probe_register - Connect a probe to a tracepoint |
| 374 | * @name: tracepoint name | 374 | * @name: tracepoint name |
| 375 | * @probe: probe handler | 375 | * @probe: probe handler |
| 376 | * @data: probe private data | ||
| 377 | * | ||
| 378 | * Returns: | ||
| 379 | * - 0 if the probe was successfully registered, and tracepoint | ||
| 380 | * callsites are currently loaded for that probe, | ||
| 381 | * - -ENODEV if the probe was successfully registered, but no tracepoint | ||
| 382 | * callsite is currently loaded for that probe, | ||
| 383 | * - other negative error value on error. | ||
| 384 | * | ||
| 385 | * When tracepoint_probe_register() returns either 0 or -ENODEV, | ||
| 386 | * parameters @name, @probe, and @data may be used by the tracepoint | ||
| 387 | * infrastructure until the probe is unregistered. | ||
| 376 | * | 388 | * |
| 377 | * Returns 0 if ok, error value on error. | ||
| 378 | * The probe address must at least be aligned on the architecture pointer size. | 389 | * The probe address must at least be aligned on the architecture pointer size. |
| 379 | */ | 390 | */ |
| 380 | int tracepoint_probe_register(const char *name, void *probe, void *data) | 391 | int tracepoint_probe_register(const char *name, void *probe, void *data) |
| 381 | { | 392 | { |
| 382 | struct tracepoint_func *old; | 393 | struct tracepoint_func *old; |
| 394 | struct tracepoint_entry *entry; | ||
| 395 | int ret = 0; | ||
| 383 | 396 | ||
| 384 | mutex_lock(&tracepoints_mutex); | 397 | mutex_lock(&tracepoints_mutex); |
| 385 | old = tracepoint_add_probe(name, probe, data); | 398 | old = tracepoint_add_probe(name, probe, data); |
| @@ -388,9 +401,13 @@ int tracepoint_probe_register(const char *name, void *probe, void *data) | |||
| 388 | return PTR_ERR(old); | 401 | return PTR_ERR(old); |
| 389 | } | 402 | } |
| 390 | tracepoint_update_probes(); /* may update entry */ | 403 | tracepoint_update_probes(); /* may update entry */ |
| 404 | entry = get_tracepoint(name); | ||
| 405 | /* Make sure the entry was enabled */ | ||
| 406 | if (!entry || !entry->enabled) | ||
| 407 | ret = -ENODEV; | ||
| 391 | mutex_unlock(&tracepoints_mutex); | 408 | mutex_unlock(&tracepoints_mutex); |
| 392 | release_probes(old); | 409 | release_probes(old); |
| 393 | return 0; | 410 | return ret; |
| 394 | } | 411 | } |
| 395 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); | 412 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); |
| 396 | 413 | ||
| @@ -415,6 +432,7 @@ tracepoint_remove_probe(const char *name, void *probe, void *data) | |||
| 415 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint | 432 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint |
| 416 | * @name: tracepoint name | 433 | * @name: tracepoint name |
| 417 | * @probe: probe function pointer | 434 | * @probe: probe function pointer |
| 435 | * @data: probe private data | ||
| 418 | * | 436 | * |
| 419 | * We do not need to call a synchronize_sched to make sure the probes have | 437 | * We do not need to call a synchronize_sched to make sure the probes have |
| 420 | * finished running before doing a module unload, because the module unload | 438 | * finished running before doing a module unload, because the module unload |
| @@ -438,213 +456,26 @@ int tracepoint_probe_unregister(const char *name, void *probe, void *data) | |||
| 438 | } | 456 | } |
| 439 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); | 457 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); |
| 440 | 458 | ||
| 441 | static LIST_HEAD(old_probes); | ||
| 442 | static int need_update; | ||
| 443 | |||
| 444 | static void tracepoint_add_old_probes(void *old) | ||
| 445 | { | ||
| 446 | need_update = 1; | ||
| 447 | if (old) { | ||
| 448 | struct tp_probes *tp_probes = container_of(old, | ||
| 449 | struct tp_probes, probes[0]); | ||
| 450 | list_add(&tp_probes->u.list, &old_probes); | ||
| 451 | } | ||
| 452 | } | ||
| 453 | |||
| 454 | /** | ||
| 455 | * tracepoint_probe_register_noupdate - register a probe but not connect | ||
| 456 | * @name: tracepoint name | ||
| 457 | * @probe: probe handler | ||
| 458 | * | ||
| 459 | * caller must call tracepoint_probe_update_all() | ||
| 460 | */ | ||
| 461 | int tracepoint_probe_register_noupdate(const char *name, void *probe, | ||
| 462 | void *data) | ||
| 463 | { | ||
| 464 | struct tracepoint_func *old; | ||
| 465 | |||
| 466 | mutex_lock(&tracepoints_mutex); | ||
| 467 | old = tracepoint_add_probe(name, probe, data); | ||
| 468 | if (IS_ERR(old)) { | ||
| 469 | mutex_unlock(&tracepoints_mutex); | ||
| 470 | return PTR_ERR(old); | ||
| 471 | } | ||
| 472 | tracepoint_add_old_probes(old); | ||
| 473 | mutex_unlock(&tracepoints_mutex); | ||
| 474 | return 0; | ||
| 475 | } | ||
| 476 | EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate); | ||
| 477 | |||
| 478 | /** | ||
| 479 | * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect | ||
| 480 | * @name: tracepoint name | ||
| 481 | * @probe: probe function pointer | ||
| 482 | * | ||
| 483 | * caller must call tracepoint_probe_update_all() | ||
| 484 | */ | ||
| 485 | int tracepoint_probe_unregister_noupdate(const char *name, void *probe, | ||
| 486 | void *data) | ||
| 487 | { | ||
| 488 | struct tracepoint_func *old; | ||
| 489 | |||
| 490 | mutex_lock(&tracepoints_mutex); | ||
| 491 | old = tracepoint_remove_probe(name, probe, data); | ||
| 492 | if (IS_ERR(old)) { | ||
| 493 | mutex_unlock(&tracepoints_mutex); | ||
| 494 | return PTR_ERR(old); | ||
| 495 | } | ||
| 496 | tracepoint_add_old_probes(old); | ||
| 497 | mutex_unlock(&tracepoints_mutex); | ||
| 498 | return 0; | ||
| 499 | } | ||
| 500 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate); | ||
| 501 | |||
| 502 | /** | ||
| 503 | * tracepoint_probe_update_all - update tracepoints | ||
| 504 | */ | ||
| 505 | void tracepoint_probe_update_all(void) | ||
| 506 | { | ||
| 507 | LIST_HEAD(release_probes); | ||
| 508 | struct tp_probes *pos, *next; | ||
| 509 | |||
| 510 | mutex_lock(&tracepoints_mutex); | ||
| 511 | if (!need_update) { | ||
| 512 | mutex_unlock(&tracepoints_mutex); | ||
| 513 | return; | ||
| 514 | } | ||
| 515 | if (!list_empty(&old_probes)) | ||
| 516 | list_replace_init(&old_probes, &release_probes); | ||
| 517 | need_update = 0; | ||
| 518 | tracepoint_update_probes(); | ||
| 519 | mutex_unlock(&tracepoints_mutex); | ||
| 520 | list_for_each_entry_safe(pos, next, &release_probes, u.list) { | ||
| 521 | list_del(&pos->u.list); | ||
| 522 | call_rcu_sched(&pos->u.rcu, rcu_free_old_probes); | ||
| 523 | } | ||
| 524 | } | ||
| 525 | EXPORT_SYMBOL_GPL(tracepoint_probe_update_all); | ||
| 526 | |||
| 527 | /** | ||
| 528 | * tracepoint_get_iter_range - Get a next tracepoint iterator given a range. | ||
| 529 | * @tracepoint: current tracepoints (in), next tracepoint (out) | ||
| 530 | * @begin: beginning of the range | ||
| 531 | * @end: end of the range | ||
| 532 | * | ||
| 533 | * Returns whether a next tracepoint has been found (1) or not (0). | ||
| 534 | * Will return the first tracepoint in the range if the input tracepoint is | ||
| 535 | * NULL. | ||
| 536 | */ | ||
| 537 | static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint, | ||
| 538 | struct tracepoint * const *begin, struct tracepoint * const *end) | ||
| 539 | { | ||
| 540 | if (!*tracepoint && begin != end) { | ||
| 541 | *tracepoint = begin; | ||
| 542 | return 1; | ||
| 543 | } | ||
| 544 | if (*tracepoint >= begin && *tracepoint < end) | ||
| 545 | return 1; | ||
| 546 | return 0; | ||
| 547 | } | ||
| 548 | |||
| 549 | #ifdef CONFIG_MODULES | ||
| 550 | static void tracepoint_get_iter(struct tracepoint_iter *iter) | ||
| 551 | { | ||
| 552 | int found = 0; | ||
| 553 | struct tp_module *iter_mod; | ||
| 554 | |||
| 555 | /* Core kernel tracepoints */ | ||
| 556 | if (!iter->module) { | ||
| 557 | found = tracepoint_get_iter_range(&iter->tracepoint, | ||
| 558 | __start___tracepoints_ptrs, | ||
| 559 | __stop___tracepoints_ptrs); | ||
| 560 | if (found) | ||
| 561 | goto end; | ||
| 562 | } | ||
| 563 | /* Tracepoints in modules */ | ||
| 564 | mutex_lock(&tracepoints_mutex); | ||
| 565 | list_for_each_entry(iter_mod, &tracepoint_module_list, list) { | ||
| 566 | /* | ||
| 567 | * Sorted module list | ||
| 568 | */ | ||
| 569 | if (iter_mod < iter->module) | ||
| 570 | continue; | ||
| 571 | else if (iter_mod > iter->module) | ||
| 572 | iter->tracepoint = NULL; | ||
| 573 | found = tracepoint_get_iter_range(&iter->tracepoint, | ||
| 574 | iter_mod->tracepoints_ptrs, | ||
| 575 | iter_mod->tracepoints_ptrs | ||
| 576 | + iter_mod->num_tracepoints); | ||
| 577 | if (found) { | ||
| 578 | iter->module = iter_mod; | ||
| 579 | break; | ||
| 580 | } | ||
| 581 | } | ||
| 582 | mutex_unlock(&tracepoints_mutex); | ||
| 583 | end: | ||
| 584 | if (!found) | ||
| 585 | tracepoint_iter_reset(iter); | ||
| 586 | } | ||
| 587 | #else /* CONFIG_MODULES */ | ||
| 588 | static void tracepoint_get_iter(struct tracepoint_iter *iter) | ||
| 589 | { | ||
| 590 | int found = 0; | ||
| 591 | |||
| 592 | /* Core kernel tracepoints */ | ||
| 593 | found = tracepoint_get_iter_range(&iter->tracepoint, | ||
| 594 | __start___tracepoints_ptrs, | ||
| 595 | __stop___tracepoints_ptrs); | ||
| 596 | if (!found) | ||
| 597 | tracepoint_iter_reset(iter); | ||
| 598 | } | ||
| 599 | #endif /* CONFIG_MODULES */ | ||
| 600 | |||
| 601 | void tracepoint_iter_start(struct tracepoint_iter *iter) | ||
| 602 | { | ||
| 603 | tracepoint_get_iter(iter); | ||
| 604 | } | ||
| 605 | EXPORT_SYMBOL_GPL(tracepoint_iter_start); | ||
| 606 | |||
| 607 | void tracepoint_iter_next(struct tracepoint_iter *iter) | ||
| 608 | { | ||
| 609 | iter->tracepoint++; | ||
| 610 | /* | ||
| 611 | * iter->tracepoint may be invalid because we blindly incremented it. | ||
| 612 | * Make sure it is valid by marshalling on the tracepoints, getting the | ||
| 613 | * tracepoints from following modules if necessary. | ||
| 614 | */ | ||
| 615 | tracepoint_get_iter(iter); | ||
| 616 | } | ||
| 617 | EXPORT_SYMBOL_GPL(tracepoint_iter_next); | ||
| 618 | |||
| 619 | void tracepoint_iter_stop(struct tracepoint_iter *iter) | ||
| 620 | { | ||
| 621 | } | ||
| 622 | EXPORT_SYMBOL_GPL(tracepoint_iter_stop); | ||
| 623 | |||
| 624 | void tracepoint_iter_reset(struct tracepoint_iter *iter) | ||
| 625 | { | ||
| 626 | #ifdef CONFIG_MODULES | ||
| 627 | iter->module = NULL; | ||
| 628 | #endif /* CONFIG_MODULES */ | ||
| 629 | iter->tracepoint = NULL; | ||
| 630 | } | ||
| 631 | EXPORT_SYMBOL_GPL(tracepoint_iter_reset); | ||
| 632 | 459 | ||
| 633 | #ifdef CONFIG_MODULES | 460 | #ifdef CONFIG_MODULES |
| 634 | bool trace_module_has_bad_taint(struct module *mod) | 461 | bool trace_module_has_bad_taint(struct module *mod) |
| 635 | { | 462 | { |
| 636 | return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)); | 463 | return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) | |
| 464 | (1 << TAINT_UNSIGNED_MODULE)); | ||
| 637 | } | 465 | } |
| 638 | 466 | ||
| 639 | static int tracepoint_module_coming(struct module *mod) | 467 | static int tracepoint_module_coming(struct module *mod) |
| 640 | { | 468 | { |
| 641 | struct tp_module *tp_mod, *iter; | 469 | struct tp_module *tp_mod; |
| 642 | int ret = 0; | 470 | int ret = 0; |
| 643 | 471 | ||
| 472 | if (!mod->num_tracepoints) | ||
| 473 | return 0; | ||
| 474 | |||
| 644 | /* | 475 | /* |
| 645 | * We skip modules that taint the kernel, especially those with different | 476 | * We skip modules that taint the kernel, especially those with different |
| 646 | * module headers (for forced load), to make sure we don't cause a crash. | 477 | * module headers (for forced load), to make sure we don't cause a crash. |
| 647 | * Staging and out-of-tree GPL modules are fine. | 478 | * Staging, out-of-tree, and unsigned GPL modules are fine. |
| 648 | */ | 479 | */ |
| 649 | if (trace_module_has_bad_taint(mod)) | 480 | if (trace_module_has_bad_taint(mod)) |
| 650 | return 0; | 481 | return 0; |
| @@ -656,23 +487,7 @@ static int tracepoint_module_coming(struct module *mod) | |||
| 656 | } | 487 | } |
| 657 | tp_mod->num_tracepoints = mod->num_tracepoints; | 488 | tp_mod->num_tracepoints = mod->num_tracepoints; |
| 658 | tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs; | 489 | tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs; |
| 659 | 490 | list_add_tail(&tp_mod->list, &tracepoint_module_list); | |
| 660 | /* | ||
| 661 | * tracepoint_module_list is kept sorted by struct module pointer | ||
| 662 | * address for iteration on tracepoints from a seq_file that can release | ||
| 663 | * the mutex between calls. | ||
| 664 | */ | ||
| 665 | list_for_each_entry_reverse(iter, &tracepoint_module_list, list) { | ||
| 666 | BUG_ON(iter == tp_mod); /* Should never be in the list twice */ | ||
| 667 | if (iter < tp_mod) { | ||
| 668 | /* We belong to the location right after iter. */ | ||
| 669 | list_add(&tp_mod->list, &iter->list); | ||
| 670 | goto module_added; | ||
| 671 | } | ||
| 672 | } | ||
| 673 | /* We belong to the beginning of the list */ | ||
| 674 | list_add(&tp_mod->list, &tracepoint_module_list); | ||
| 675 | module_added: | ||
| 676 | tracepoint_update_probe_range(mod->tracepoints_ptrs, | 491 | tracepoint_update_probe_range(mod->tracepoints_ptrs, |
| 677 | mod->tracepoints_ptrs + mod->num_tracepoints); | 492 | mod->tracepoints_ptrs + mod->num_tracepoints); |
| 678 | end: | 493 | end: |
| @@ -684,6 +499,9 @@ static int tracepoint_module_going(struct module *mod) | |||
| 684 | { | 499 | { |
| 685 | struct tp_module *pos; | 500 | struct tp_module *pos; |
| 686 | 501 | ||
| 502 | if (!mod->num_tracepoints) | ||
| 503 | return 0; | ||
| 504 | |||
| 687 | mutex_lock(&tracepoints_mutex); | 505 | mutex_lock(&tracepoints_mutex); |
| 688 | tracepoint_update_probe_range(mod->tracepoints_ptrs, | 506 | tracepoint_update_probe_range(mod->tracepoints_ptrs, |
| 689 | mod->tracepoints_ptrs + mod->num_tracepoints); | 507 | mod->tracepoints_ptrs + mod->num_tracepoints); |
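With the updated kernel-doc above, tracepoint_probe_register() can return -ENODEV for a probe that was registered successfully but whose tracepoint callsite is not currently loaded (for example, it lives in a module that has not been inserted yet). A hedged caller sketch with a hypothetical probe and tracepoint name (not part of the patch):

	static void my_probe(void *data, unsigned long value)	/* hypothetical signature */
	{
		/* runs each time the tracepoint fires */
	}

	static int attach_my_probe(void)
	{
		int ret = tracepoint_probe_register("my_tracepoint",
						    (void *)my_probe, NULL);

		if (ret == -ENODEV)
			return 0;	/* registered; callsite not loaded yet */
		return ret;		/* 0 on success, other negatives are real errors */
	}
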
diff --git a/kernel/up.c b/kernel/up.c index 509403e3fbc6..1760bf3d1463 100644 --- a/kernel/up.c +++ b/kernel/up.c | |||
| @@ -22,16 +22,16 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
| 22 | } | 22 | } |
| 23 | EXPORT_SYMBOL(smp_call_function_single); | 23 | EXPORT_SYMBOL(smp_call_function_single); |
| 24 | 24 | ||
| 25 | void __smp_call_function_single(int cpu, struct call_single_data *csd, | 25 | int smp_call_function_single_async(int cpu, struct call_single_data *csd) |
| 26 | int wait) | ||
| 27 | { | 26 | { |
| 28 | unsigned long flags; | 27 | unsigned long flags; |
| 29 | 28 | ||
| 30 | local_irq_save(flags); | 29 | local_irq_save(flags); |
| 31 | csd->func(csd->info); | 30 | csd->func(csd->info); |
| 32 | local_irq_restore(flags); | 31 | local_irq_restore(flags); |
| 32 | return 0; | ||
| 33 | } | 33 | } |
| 34 | EXPORT_SYMBOL(__smp_call_function_single); | 34 | EXPORT_SYMBOL(smp_call_function_single_async); |
| 35 | 35 | ||
| 36 | int on_each_cpu(smp_call_func_t func, void *info, int wait) | 36 | int on_each_cpu(smp_call_func_t func, void *info, int wait) |
| 37 | { | 37 | { |
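The UP stub above tracks the SMP-side rename of __smp_call_function_single() to smp_call_function_single_async(), which drops the wait argument and returns int; on UP it simply runs the callback inline with interrupts disabled. A usage sketch with a hypothetical callback (not part of the patch):

	static void my_ipi_func(void *info)		/* hypothetical */
	{
		/* runs on the target CPU; inline on UP builds */
	}

	static struct call_single_data my_csd = {
		.func	= my_ipi_func,
	};

	static void kick_cpu(int cpu)
	{
		/* the call never waits, so the caller must keep my_csd
		 * alive (and not reuse it) until my_ipi_func has run */
		smp_call_function_single_async(cpu, &my_csd);
	}
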
diff --git a/kernel/user.c b/kernel/user.c index c006131beb77..294fc6a94168 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
| @@ -222,5 +222,4 @@ static int __init uid_cache_init(void) | |||
| 222 | 222 | ||
| 223 | return 0; | 223 | return 0; |
| 224 | } | 224 | } |
| 225 | 225 | subsys_initcall(uid_cache_init); | |
| 226 | module_init(uid_cache_init); | ||
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index dd06439b9c84..0d8f6023fd8d 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
| @@ -902,4 +902,4 @@ static __init int user_namespaces_init(void) | |||
| 902 | user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); | 902 | user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); |
| 903 | return 0; | 903 | return 0; |
| 904 | } | 904 | } |
| 905 | module_init(user_namespaces_init); | 905 | subsys_initcall(user_namespaces_init); |
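Both uid_cache_init() and user_namespaces_init() move from module_init() to subsys_initcall(). For built-in code this only changes the initcall level, so both caches are created earlier in boot; roughly, simplified from include/linux/init.h of this era (sketch, not part of the patch):

	/* subsys_initcall(fn)  ->  __define_initcall(fn, 4)   (earlier)              */
	/* device_initcall(fn)  ->  __define_initcall(fn, 6)                          */
	/* module_init(fn)      ==  device_initcall(fn) when the code is built in     */
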
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 4431610f049a..e90089fd78e0 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -158,14 +158,14 @@ void touch_all_softlockup_watchdogs(void) | |||
| 158 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 158 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
| 159 | void touch_nmi_watchdog(void) | 159 | void touch_nmi_watchdog(void) |
| 160 | { | 160 | { |
| 161 | if (watchdog_user_enabled) { | 161 | /* |
| 162 | unsigned cpu; | 162 | * Using __raw here because some code paths have |
| 163 | 163 | * preemption enabled. If preemption is enabled | |
| 164 | for_each_present_cpu(cpu) { | 164 | * then interrupts should be enabled too, in which |
| 165 | if (per_cpu(watchdog_nmi_touch, cpu) != true) | 165 | * case we shouldn't have to worry about the watchdog |
| 166 | per_cpu(watchdog_nmi_touch, cpu) = true; | 166 | * going off. |
| 167 | } | 167 | */ |
| 168 | } | 168 | __raw_get_cpu_var(watchdog_nmi_touch) = true; |
| 169 | touch_softlockup_watchdog(); | 169 | touch_softlockup_watchdog(); |
| 170 | } | 170 | } |
| 171 | EXPORT_SYMBOL(touch_nmi_watchdog); | 171 | EXPORT_SYMBOL(touch_nmi_watchdog); |
| @@ -505,7 +505,6 @@ static void restart_watchdog_hrtimer(void *info) | |||
| 505 | 505 | ||
| 506 | static void update_timers(int cpu) | 506 | static void update_timers(int cpu) |
| 507 | { | 507 | { |
| 508 | struct call_single_data data = {.func = restart_watchdog_hrtimer}; | ||
| 509 | /* | 508 | /* |
| 510 | * Make sure that perf event counter will adopt to a new | 509 | * Make sure that perf event counter will adopt to a new |
| 511 | * sampling period. Updating the sampling period directly would | 510 | * sampling period. Updating the sampling period directly would |
| @@ -515,7 +514,7 @@ static void update_timers(int cpu) | |||
| 515 | * might be late already so we have to restart the timer as well. | 514 | * might be late already so we have to restart the timer as well. |
| 516 | */ | 515 | */ |
| 517 | watchdog_nmi_disable(cpu); | 516 | watchdog_nmi_disable(cpu); |
| 518 | __smp_call_function_single(cpu, &data, 1); | 517 | smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1); |
| 519 | watchdog_nmi_enable(cpu); | 518 | watchdog_nmi_enable(cpu); |
| 520 | } | 519 | } |
| 521 | 520 | ||
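The new comment in touch_nmi_watchdog() explains the __raw_get_cpu_var() access: some callers run with preemption enabled, and the argument is that if preemption is enabled then interrupts are too, so a hard lockup is not a concern even if the write lands on another CPU after a migration. Spelled out with the preemption handling that get_cpu_var() would imply (sketch only, not part of the patch):

	static void touch_nmi_watchdog_sketch(void)
	{
		/* __raw_get_cpu_var(watchdog_nmi_touch) = true is the same
		 * store without this preempt_disable()/preempt_enable()
		 * pair; the patch's comment argues the pair is unnecessary
		 * in this path. */
		preempt_disable();
		per_cpu(watchdog_nmi_touch, smp_processor_id()) = true;
		preempt_enable();

		touch_softlockup_watchdog();
	}
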
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 3fa5b8f3aae3..0ee63af30bd1 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -516,6 +516,13 @@ void destroy_work_on_stack(struct work_struct *work) | |||
| 516 | } | 516 | } |
| 517 | EXPORT_SYMBOL_GPL(destroy_work_on_stack); | 517 | EXPORT_SYMBOL_GPL(destroy_work_on_stack); |
| 518 | 518 | ||
| 519 | void destroy_delayed_work_on_stack(struct delayed_work *work) | ||
| 520 | { | ||
| 521 | destroy_timer_on_stack(&work->timer); | ||
| 522 | debug_object_free(&work->work, &work_debug_descr); | ||
| 523 | } | ||
| 524 | EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack); | ||
| 525 | |||
| 519 | #else | 526 | #else |
| 520 | static inline void debug_work_activate(struct work_struct *work) { } | 527 | static inline void debug_work_activate(struct work_struct *work) { } |
| 521 | static inline void debug_work_deactivate(struct work_struct *work) { } | 528 | static inline void debug_work_deactivate(struct work_struct *work) { } |
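The destroy_delayed_work_on_stack() added above is the delayed_work counterpart of destroy_work_on_stack(): under CONFIG_DEBUG_OBJECTS_WORK it releases both the timer and the work debug objects before an on-stack delayed_work goes out of scope. A pairing sketch with a hypothetical handler (not part of the patch):

	static void my_handler(struct work_struct *work)	/* hypothetical */
	{
		/* the deferred work */
	}

	static void run_once_on_stack(void)
	{
		struct delayed_work dwork;

		INIT_DELAYED_WORK_ONSTACK(&dwork, my_handler);
		schedule_delayed_work(&dwork, HZ / 10);
		flush_delayed_work(&dwork);
		/* pairs with INIT_DELAYED_WORK_ONSTACK(): frees the timer
		 * and work debug objects before the stack frame is reused */
		destroy_delayed_work_on_stack(&dwork);
	}
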
