Diffstat (limited to 'kernel')
34 files changed, 468 insertions, 202 deletions
diff --git a/kernel/Makefile b/kernel/Makefile index 170a9213c1b6..e4791b3ba55d 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
| @@ -51,6 +51,7 @@ obj-$(CONFIG_UID16) += uid16.o | |||
| 51 | obj-$(CONFIG_MODULES) += module.o | 51 | obj-$(CONFIG_MODULES) += module.o |
| 52 | obj-$(CONFIG_KALLSYMS) += kallsyms.o | 52 | obj-$(CONFIG_KALLSYMS) += kallsyms.o |
| 53 | obj-$(CONFIG_PM) += power/ | 53 | obj-$(CONFIG_PM) += power/ |
| 54 | obj-$(CONFIG_FREEZER) += power/ | ||
| 54 | obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o | 55 | obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o |
| 55 | obj-$(CONFIG_KEXEC) += kexec.o | 56 | obj-$(CONFIG_KEXEC) += kexec.o |
| 56 | obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o | 57 | obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o |
diff --git a/kernel/async.c b/kernel/async.c index 67a2be71f517..f565891f2c9b 100644 --- a/kernel/async.c +++ b/kernel/async.c | |||
| @@ -54,6 +54,7 @@ asynchronous and synchronous parts of the kernel. | |||
| 54 | #include <linux/sched.h> | 54 | #include <linux/sched.h> |
| 55 | #include <linux/init.h> | 55 | #include <linux/init.h> |
| 56 | #include <linux/kthread.h> | 56 | #include <linux/kthread.h> |
| 57 | #include <linux/delay.h> | ||
| 57 | #include <asm/atomic.h> | 58 | #include <asm/atomic.h> |
| 58 | 59 | ||
| 59 | static async_cookie_t next_cookie = 1; | 60 | static async_cookie_t next_cookie = 1; |
| @@ -132,8 +133,7 @@ static void run_one_entry(void) | |||
| 132 | entry = list_first_entry(&async_pending, struct async_entry, list); | 133 | entry = list_first_entry(&async_pending, struct async_entry, list); |
| 133 | 134 | ||
| 134 | /* 2) move it to the running queue */ | 135 | /* 2) move it to the running queue */ |
| 135 | list_del(&entry->list); | 136 | list_move_tail(&entry->list, entry->running); |
| 136 | list_add_tail(&entry->list, &async_running); | ||
| 137 | spin_unlock_irqrestore(&async_lock, flags); | 137 | spin_unlock_irqrestore(&async_lock, flags); |
| 138 | 138 | ||
| 139 | /* 3) run it (and print duration)*/ | 139 | /* 3) run it (and print duration)*/ |
| @@ -208,18 +208,44 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l | |||
| 208 | return newcookie; | 208 | return newcookie; |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | /** | ||
| 212 | * async_schedule - schedule a function for asynchronous execution | ||
| 213 | * @ptr: function to execute asynchronously | ||
| 214 | * @data: data pointer to pass to the function | ||
| 215 | * | ||
| 216 | * Returns an async_cookie_t that may be used for checkpointing later. | ||
| 217 | * Note: This function may be called from atomic or non-atomic contexts. | ||
| 218 | */ | ||
| 211 | async_cookie_t async_schedule(async_func_ptr *ptr, void *data) | 219 | async_cookie_t async_schedule(async_func_ptr *ptr, void *data) |
| 212 | { | 220 | { |
| 213 | return __async_schedule(ptr, data, &async_pending); | 221 | return __async_schedule(ptr, data, &async_running); |
| 214 | } | 222 | } |
| 215 | EXPORT_SYMBOL_GPL(async_schedule); | 223 | EXPORT_SYMBOL_GPL(async_schedule); |
| 216 | 224 | ||
| 217 | async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running) | 225 | /** |
| 226 | * async_schedule_domain - schedule a function for asynchronous execution within a certain domain | ||
| 227 | * @ptr: function to execute asynchronously | ||
| 228 | * @data: data pointer to pass to the function | ||
| 229 | * @running: running list for the domain | ||
| 230 | * | ||
| 231 | * Returns an async_cookie_t that may be used for checkpointing later. | ||
| 232 | * @running may be used in the async_synchronize_*_domain() functions | ||
| 233 | * to wait within a certain synchronization domain rather than globally. | ||
| 234 | * A synchronization domain is specified via the running queue @running to use. | ||
| 235 | * Note: This function may be called from atomic or non-atomic contexts. | ||
| 236 | */ | ||
| 237 | async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data, | ||
| 238 | struct list_head *running) | ||
| 218 | { | 239 | { |
| 219 | return __async_schedule(ptr, data, running); | 240 | return __async_schedule(ptr, data, running); |
| 220 | } | 241 | } |
| 221 | EXPORT_SYMBOL_GPL(async_schedule_special); | 242 | EXPORT_SYMBOL_GPL(async_schedule_domain); |
| 222 | 243 | ||
| 244 | /** | ||
| 245 | * async_synchronize_full - synchronize all asynchronous function calls | ||
| 246 | * | ||
| 247 | * This function waits until all asynchronous function calls have been done. | ||
| 248 | */ | ||
| 223 | void async_synchronize_full(void) | 249 | void async_synchronize_full(void) |
| 224 | { | 250 | { |
| 225 | do { | 251 | do { |
| @@ -228,13 +254,30 @@ void async_synchronize_full(void) | |||
| 228 | } | 254 | } |
| 229 | EXPORT_SYMBOL_GPL(async_synchronize_full); | 255 | EXPORT_SYMBOL_GPL(async_synchronize_full); |
| 230 | 256 | ||
| 231 | void async_synchronize_full_special(struct list_head *list) | 257 | /** |
| 258 | * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain | ||
| 259 | * @list: running list to synchronize on | ||
| 260 | * | ||
| 261 | * This function waits until all asynchronous function calls for the | ||
| 262 | * synchronization domain specified by the running list @list have been done. | ||
| 263 | */ | ||
| 264 | void async_synchronize_full_domain(struct list_head *list) | ||
| 232 | { | 265 | { |
| 233 | async_synchronize_cookie_special(next_cookie, list); | 266 | async_synchronize_cookie_domain(next_cookie, list); |
| 234 | } | 267 | } |
| 235 | EXPORT_SYMBOL_GPL(async_synchronize_full_special); | 268 | EXPORT_SYMBOL_GPL(async_synchronize_full_domain); |
| 236 | 269 | ||
| 237 | void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running) | 270 | /** |
| 271 | * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing | ||
| 272 | * @cookie: async_cookie_t to use as checkpoint | ||
| 273 | * @running: running list to synchronize on | ||
| 274 | * | ||
| 275 | * This function waits until all asynchronous function calls for the | ||
| 276 | * synchronization domain specified by the running list @list submitted | ||
| 277 | * prior to @cookie have been done. | ||
| 278 | */ | ||
| 279 | void async_synchronize_cookie_domain(async_cookie_t cookie, | ||
| 280 | struct list_head *running) | ||
| 238 | { | 281 | { |
| 239 | ktime_t starttime, delta, endtime; | 282 | ktime_t starttime, delta, endtime; |
| 240 | 283 | ||
| @@ -254,11 +297,18 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r | |||
| 254 | (long long)ktime_to_ns(delta) >> 10); | 297 | (long long)ktime_to_ns(delta) >> 10); |
| 255 | } | 298 | } |
| 256 | } | 299 | } |
| 257 | EXPORT_SYMBOL_GPL(async_synchronize_cookie_special); | 300 | EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain); |
| 258 | 301 | ||
| 302 | /** | ||
| 303 | * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing | ||
| 304 | * @cookie: async_cookie_t to use as checkpoint | ||
| 305 | * | ||
| 306 | * This function waits until all asynchronous function calls prior to @cookie | ||
| 307 | * have been done. | ||
| 308 | */ | ||
| 259 | void async_synchronize_cookie(async_cookie_t cookie) | 309 | void async_synchronize_cookie(async_cookie_t cookie) |
| 260 | { | 310 | { |
| 261 | async_synchronize_cookie_special(cookie, &async_running); | 311 | async_synchronize_cookie_domain(cookie, &async_running); |
| 262 | } | 312 | } |
| 263 | EXPORT_SYMBOL_GPL(async_synchronize_cookie); | 313 | EXPORT_SYMBOL_GPL(async_synchronize_cookie); |
| 264 | 314 | ||
| @@ -319,7 +369,11 @@ static int async_manager_thread(void *unused) | |||
| 319 | ec = atomic_read(&entry_count); | 369 | ec = atomic_read(&entry_count); |
| 320 | 370 | ||
| 321 | while (tc < ec && tc < MAX_THREADS) { | 371 | while (tc < ec && tc < MAX_THREADS) { |
| 322 | kthread_run(async_thread, NULL, "async/%i", tc); | 372 | if (IS_ERR(kthread_run(async_thread, NULL, "async/%i", |
| 373 | tc))) { | ||
| 374 | msleep(100); | ||
| 375 | continue; | ||
| 376 | } | ||
| 323 | atomic_inc(&thread_count); | 377 | atomic_inc(&thread_count); |
| 324 | tc++; | 378 | tc++; |
| 325 | } | 379 | } |
| @@ -334,7 +388,9 @@ static int async_manager_thread(void *unused) | |||
| 334 | static int __init async_init(void) | 388 | static int __init async_init(void) |
| 335 | { | 389 | { |
| 336 | if (async_enabled) | 390 | if (async_enabled) |
| 337 | kthread_run(async_manager_thread, NULL, "async/mgr"); | 391 | if (IS_ERR(kthread_run(async_manager_thread, NULL, |
| 392 | "async/mgr"))) | ||
| 393 | async_enabled = 0; | ||
| 338 | return 0; | 394 | return 0; |
| 339 | } | 395 | } |
| 340 | 396 | ||
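The kernel/async.c hunks above rename the "special" entry points to async_schedule_domain()/async_synchronize_*_domain() and add kerneldoc for them. A minimal usage sketch of the renamed API, assuming a hypothetical driver (my_dev, my_probe_one and my_async_domain are invented names; only the async_* calls come from the code above):

#include <linux/async.h>
#include <linux/list.h>
#include <linux/kernel.h>

struct my_dev { int id; };			/* hypothetical device */

static LIST_HEAD(my_async_domain);		/* the running list names the domain */

static void my_probe_one(void *data, async_cookie_t cookie)
{
	struct my_dev *dev = data;

	/* slow, independent initialization runs here, possibly in parallel */
	pr_info("initialized hypothetical device %d\n", dev->id);
}

static void my_probe_all(struct my_dev *devs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		async_schedule_domain(my_probe_one, &devs[i], &my_async_domain);

	/* waits only for calls scheduled on my_async_domain, not the global queue */
	async_synchronize_full_domain(&my_async_domain);
}

The point of a private domain is that async_synchronize_full_domain() waits only for work scheduled on that running list, so a slow asynchronous call elsewhere in the kernel does not stall this driver.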
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 5a54ff42874e..9edb5c4b79b4 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
| @@ -1122,8 +1122,8 @@ static void cgroup_kill_sb(struct super_block *sb) { | |||
| 1122 | 1122 | ||
| 1123 | mutex_unlock(&cgroup_mutex); | 1123 | mutex_unlock(&cgroup_mutex); |
| 1124 | 1124 | ||
| 1125 | kfree(root); | ||
| 1126 | kill_litter_super(sb); | 1125 | kill_litter_super(sb); |
| 1126 | kfree(root); | ||
| 1127 | } | 1127 | } |
| 1128 | 1128 | ||
| 1129 | static struct file_system_type cgroup_fs_type = { | 1129 | static struct file_system_type cgroup_fs_type = { |
| @@ -2351,7 +2351,7 @@ static void cgroup_lock_hierarchy(struct cgroupfs_root *root) | |||
| 2351 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | 2351 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { |
| 2352 | struct cgroup_subsys *ss = subsys[i]; | 2352 | struct cgroup_subsys *ss = subsys[i]; |
| 2353 | if (ss->root == root) | 2353 | if (ss->root == root) |
| 2354 | mutex_lock_nested(&ss->hierarchy_mutex, i); | 2354 | mutex_lock(&ss->hierarchy_mutex); |
| 2355 | } | 2355 | } |
| 2356 | } | 2356 | } |
| 2357 | 2357 | ||
| @@ -2637,6 +2637,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) | |||
| 2637 | BUG_ON(!list_empty(&init_task.tasks)); | 2637 | BUG_ON(!list_empty(&init_task.tasks)); |
| 2638 | 2638 | ||
| 2639 | mutex_init(&ss->hierarchy_mutex); | 2639 | mutex_init(&ss->hierarchy_mutex); |
| 2640 | lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key); | ||
| 2640 | ss->active = 1; | 2641 | ss->active = 1; |
| 2641 | } | 2642 | } |
| 2642 | 2643 | ||
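The kernel/cgroup.c change replaces mutex_lock_nested(..., i) with a plain mutex_lock() and instead gives every subsystem's hierarchy_mutex its own lockdep class via lockdep_set_class(). A hedged sketch of that pattern with an invented structure (my_subsys is hypothetical; lockdep_set_class() and struct lock_class_key are the interfaces used above):

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct my_subsys {
	struct mutex		lock;
	struct lock_class_key	key;	/* one lockdep class per instance */
};

/* static storage: lockdep wants class keys in static data, which also
 * holds for the statically defined cgroup subsystems */
static struct my_subsys my_subsys_a, my_subsys_b;

static void my_subsys_init(struct my_subsys *ss)
{
	mutex_init(&ss->lock);
	/* each instance becomes its own class, so taking my_subsys_a.lock
	 * and then my_subsys_b.lock is not reported as recursive locking */
	lockdep_set_class(&ss->lock, &ss->key);
}

Because each instance is its own class, lockdep no longer flags cgroup_lock_hierarchy() taking several hierarchy mutexes back to back as recursion on a single class.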
diff --git a/kernel/fork.c b/kernel/fork.c index bb5fe56a7a9c..4854c2c4a82e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -1095,7 +1095,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1095 | #ifdef CONFIG_DEBUG_MUTEXES | 1095 | #ifdef CONFIG_DEBUG_MUTEXES |
| 1096 | p->blocked_on = NULL; /* not blocked yet */ | 1096 | p->blocked_on = NULL; /* not blocked yet */ |
| 1097 | #endif | 1097 | #endif |
| 1098 | if (unlikely(ptrace_reparented(current))) | 1098 | if (unlikely(current->ptrace)) |
| 1099 | ptrace_fork(p, clone_flags); | 1099 | ptrace_fork(p, clone_flags); |
| 1100 | 1100 | ||
| 1101 | /* Perform scheduler related setup. Assign this task to a CPU. */ | 1101 | /* Perform scheduler related setup. Assign this task to a CPU. */ |
| @@ -1179,10 +1179,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1179 | #endif | 1179 | #endif |
| 1180 | clear_all_latency_tracing(p); | 1180 | clear_all_latency_tracing(p); |
| 1181 | 1181 | ||
| 1182 | /* Our parent execution domain becomes current domain | ||
| 1183 | These must match for thread signalling to apply */ | ||
| 1184 | p->parent_exec_id = p->self_exec_id; | ||
| 1185 | |||
| 1186 | /* ok, now we should be set up.. */ | 1182 | /* ok, now we should be set up.. */ |
| 1187 | p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL); | 1183 | p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL); |
| 1188 | p->pdeath_signal = 0; | 1184 | p->pdeath_signal = 0; |
| @@ -1220,10 +1216,13 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1220 | set_task_cpu(p, smp_processor_id()); | 1216 | set_task_cpu(p, smp_processor_id()); |
| 1221 | 1217 | ||
| 1222 | /* CLONE_PARENT re-uses the old parent */ | 1218 | /* CLONE_PARENT re-uses the old parent */ |
| 1223 | if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) | 1219 | if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { |
| 1224 | p->real_parent = current->real_parent; | 1220 | p->real_parent = current->real_parent; |
| 1225 | else | 1221 | p->parent_exec_id = current->parent_exec_id; |
| 1222 | } else { | ||
| 1226 | p->real_parent = current; | 1223 | p->real_parent = current; |
| 1224 | p->parent_exec_id = current->self_exec_id; | ||
| 1225 | } | ||
| 1227 | 1226 | ||
| 1228 | spin_lock(¤t->sighand->siglock); | 1227 | spin_lock(¤t->sighand->siglock); |
| 1229 | 1228 | ||
diff --git a/kernel/futex.c b/kernel/futex.c index f89d373a9c6d..438701adce23 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
| @@ -1165,6 +1165,7 @@ static int futex_wait(u32 __user *uaddr, int fshared, | |||
| 1165 | u32 val, ktime_t *abs_time, u32 bitset, int clockrt) | 1165 | u32 val, ktime_t *abs_time, u32 bitset, int clockrt) |
| 1166 | { | 1166 | { |
| 1167 | struct task_struct *curr = current; | 1167 | struct task_struct *curr = current; |
| 1168 | struct restart_block *restart; | ||
| 1168 | DECLARE_WAITQUEUE(wait, curr); | 1169 | DECLARE_WAITQUEUE(wait, curr); |
| 1169 | struct futex_hash_bucket *hb; | 1170 | struct futex_hash_bucket *hb; |
| 1170 | struct futex_q q; | 1171 | struct futex_q q; |
| @@ -1216,11 +1217,13 @@ retry: | |||
| 1216 | 1217 | ||
| 1217 | if (!ret) | 1218 | if (!ret) |
| 1218 | goto retry; | 1219 | goto retry; |
| 1219 | return ret; | 1220 | goto out; |
| 1220 | } | 1221 | } |
| 1221 | ret = -EWOULDBLOCK; | 1222 | ret = -EWOULDBLOCK; |
| 1222 | if (uval != val) | 1223 | if (unlikely(uval != val)) { |
| 1223 | goto out_unlock_put_key; | 1224 | queue_unlock(&q, hb); |
| 1225 | goto out_put_key; | ||
| 1226 | } | ||
| 1224 | 1227 | ||
| 1225 | /* Only actually queue if *uaddr contained val. */ | 1228 | /* Only actually queue if *uaddr contained val. */ |
| 1226 | queue_me(&q, hb); | 1229 | queue_me(&q, hb); |
| @@ -1284,38 +1287,38 @@ retry: | |||
| 1284 | */ | 1287 | */ |
| 1285 | 1288 | ||
| 1286 | /* If we were woken (and unqueued), we succeeded, whatever. */ | 1289 | /* If we were woken (and unqueued), we succeeded, whatever. */ |
| 1290 | ret = 0; | ||
| 1287 | if (!unqueue_me(&q)) | 1291 | if (!unqueue_me(&q)) |
| 1288 | return 0; | 1292 | goto out_put_key; |
| 1293 | ret = -ETIMEDOUT; | ||
| 1289 | if (rem) | 1294 | if (rem) |
| 1290 | return -ETIMEDOUT; | 1295 | goto out_put_key; |
| 1291 | 1296 | ||
| 1292 | /* | 1297 | /* |
| 1293 | * We expect signal_pending(current), but another thread may | 1298 | * We expect signal_pending(current), but another thread may |
| 1294 | * have handled it for us already. | 1299 | * have handled it for us already. |
| 1295 | */ | 1300 | */ |
| 1301 | ret = -ERESTARTSYS; | ||
| 1296 | if (!abs_time) | 1302 | if (!abs_time) |
| 1297 | return -ERESTARTSYS; | 1303 | goto out_put_key; |
| 1298 | else { | ||
| 1299 | struct restart_block *restart; | ||
| 1300 | restart = ¤t_thread_info()->restart_block; | ||
| 1301 | restart->fn = futex_wait_restart; | ||
| 1302 | restart->futex.uaddr = (u32 *)uaddr; | ||
| 1303 | restart->futex.val = val; | ||
| 1304 | restart->futex.time = abs_time->tv64; | ||
| 1305 | restart->futex.bitset = bitset; | ||
| 1306 | restart->futex.flags = 0; | ||
| 1307 | |||
| 1308 | if (fshared) | ||
| 1309 | restart->futex.flags |= FLAGS_SHARED; | ||
| 1310 | if (clockrt) | ||
| 1311 | restart->futex.flags |= FLAGS_CLOCKRT; | ||
| 1312 | return -ERESTART_RESTARTBLOCK; | ||
| 1313 | } | ||
| 1314 | 1304 | ||
| 1315 | out_unlock_put_key: | 1305 | restart = ¤t_thread_info()->restart_block; |
| 1316 | queue_unlock(&q, hb); | 1306 | restart->fn = futex_wait_restart; |
| 1317 | put_futex_key(fshared, &q.key); | 1307 | restart->futex.uaddr = (u32 *)uaddr; |
| 1308 | restart->futex.val = val; | ||
| 1309 | restart->futex.time = abs_time->tv64; | ||
| 1310 | restart->futex.bitset = bitset; | ||
| 1311 | restart->futex.flags = 0; | ||
| 1312 | |||
| 1313 | if (fshared) | ||
| 1314 | restart->futex.flags |= FLAGS_SHARED; | ||
| 1315 | if (clockrt) | ||
| 1316 | restart->futex.flags |= FLAGS_CLOCKRT; | ||
| 1318 | 1317 | ||
| 1318 | ret = -ERESTART_RESTARTBLOCK; | ||
| 1319 | |||
| 1320 | out_put_key: | ||
| 1321 | put_futex_key(fshared, &q.key); | ||
| 1319 | out: | 1322 | out: |
| 1320 | return ret; | 1323 | return ret; |
| 1321 | } | 1324 | } |
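The futex_wait() rework above funnels every exit through out_put_key/out and fills in the per-thread restart_block before returning -ERESTART_RESTARTBLOCK, so a signal restarts the wait with the original absolute timeout. A simplified, hypothetical sketch of that restart pattern (my_wait_* and the jiffies-based deadline are invented; struct restart_block, current_thread_info() and -ERESTART_RESTARTBLOCK are the real pieces):

#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/thread_info.h>
#include <linux/errno.h>

static long my_wait_common(unsigned long deadline);

/* invoked when the interrupted syscall is restarted via restart_syscall */
static long my_wait_restart(struct restart_block *restart)
{
	return my_wait_common(restart->arg0);
}

/* sleep until an absolute jiffies deadline, restartably */
static long my_wait_common(unsigned long deadline)
{
	struct restart_block *restart;

	while (time_before(jiffies, deadline)) {
		schedule_timeout_interruptible(deadline - jiffies);
		if (!signal_pending(current))
			continue;
		/*
		 * A signal interrupted the sleep: stash the absolute
		 * deadline in the per-thread restart block, as the
		 * futex_wait() code above does for abs_time.
		 */
		restart = &current_thread_info()->restart_block;
		restart->fn = my_wait_restart;
		restart->arg0 = deadline;
		return -ERESTART_RESTARTBLOCK;
	}
	return 0;
}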
diff --git a/kernel/kexec.c b/kernel/kexec.c index 8a6d7b08864e..483899578259 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
| @@ -1465,6 +1465,11 @@ int kernel_kexec(void) | |||
| 1465 | error = device_power_down(PMSG_FREEZE); | 1465 | error = device_power_down(PMSG_FREEZE); |
| 1466 | if (error) | 1466 | if (error) |
| 1467 | goto Enable_irqs; | 1467 | goto Enable_irqs; |
| 1468 | |||
| 1469 | /* Suspend system devices */ | ||
| 1470 | error = sysdev_suspend(PMSG_FREEZE); | ||
| 1471 | if (error) | ||
| 1472 | goto Power_up_devices; | ||
| 1468 | } else | 1473 | } else |
| 1469 | #endif | 1474 | #endif |
| 1470 | { | 1475 | { |
| @@ -1477,6 +1482,8 @@ int kernel_kexec(void) | |||
| 1477 | 1482 | ||
| 1478 | #ifdef CONFIG_KEXEC_JUMP | 1483 | #ifdef CONFIG_KEXEC_JUMP |
| 1479 | if (kexec_image->preserve_context) { | 1484 | if (kexec_image->preserve_context) { |
| 1485 | sysdev_resume(); | ||
| 1486 | Power_up_devices: | ||
| 1480 | device_power_up(PMSG_RESTORE); | 1487 | device_power_up(PMSG_RESTORE); |
| 1481 | Enable_irqs: | 1488 | Enable_irqs: |
| 1482 | local_irq_enable(); | 1489 | local_irq_enable(); |
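The kexec hunk depends on sysdev_suspend()/sysdev_resume() having been split out of device_power_down()/device_power_up(): callers running with interrupts off now bracket the low-level transition themselves. A hedged sketch of that ordering (enter_low_power_state() is a hypothetical stand-in for the platform step; the other calls mirror the code above):

#include <linux/pm.h>
#include <linux/sysdev.h>
#include <linux/irqflags.h>

static int enter_low_power_state(void)	/* hypothetical platform step */
{
	return 0;
}

static int my_enter_with_irqs_off(void)
{
	int error;

	local_irq_disable();

	error = device_power_down(PMSG_FREEZE);
	if (error)
		goto enable_irqs;

	/* system devices are suspended last, after regular devices */
	error = sysdev_suspend(PMSG_FREEZE);
	if (error)
		goto power_up_devices;

	error = enter_low_power_state();

	/* and resumed first, before regular devices */
	sysdev_resume();
 power_up_devices:
	device_power_up(PMSG_RESTORE);
 enable_irqs:
	local_irq_enable();
	return error;
}

The same bracketing shows up below in kernel/power/disk.c and kernel/power/main.c.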
diff --git a/kernel/module.c b/kernel/module.c index ba22484a987e..1196f5d11700 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -2015,14 +2015,6 @@ static noinline struct module *load_module(void __user *umod, | |||
| 2015 | if (err < 0) | 2015 | if (err < 0) |
| 2016 | goto free_mod; | 2016 | goto free_mod; |
| 2017 | 2017 | ||
| 2018 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) | ||
| 2019 | mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), | ||
| 2020 | mod->name); | ||
| 2021 | if (!mod->refptr) { | ||
| 2022 | err = -ENOMEM; | ||
| 2023 | goto free_mod; | ||
| 2024 | } | ||
| 2025 | #endif | ||
| 2026 | if (pcpuindex) { | 2018 | if (pcpuindex) { |
| 2027 | /* We have a special allocation for this section. */ | 2019 | /* We have a special allocation for this section. */ |
| 2028 | percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size, | 2020 | percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size, |
| @@ -2030,7 +2022,7 @@ static noinline struct module *load_module(void __user *umod, | |||
| 2030 | mod->name); | 2022 | mod->name); |
| 2031 | if (!percpu) { | 2023 | if (!percpu) { |
| 2032 | err = -ENOMEM; | 2024 | err = -ENOMEM; |
| 2033 | goto free_percpu; | 2025 | goto free_mod; |
| 2034 | } | 2026 | } |
| 2035 | sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC; | 2027 | sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC; |
| 2036 | mod->percpu = percpu; | 2028 | mod->percpu = percpu; |
| @@ -2082,6 +2074,14 @@ static noinline struct module *load_module(void __user *umod, | |||
| 2082 | /* Module has been moved. */ | 2074 | /* Module has been moved. */ |
| 2083 | mod = (void *)sechdrs[modindex].sh_addr; | 2075 | mod = (void *)sechdrs[modindex].sh_addr; |
| 2084 | 2076 | ||
| 2077 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) | ||
| 2078 | mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), | ||
| 2079 | mod->name); | ||
| 2080 | if (!mod->refptr) { | ||
| 2081 | err = -ENOMEM; | ||
| 2082 | goto free_init; | ||
| 2083 | } | ||
| 2084 | #endif | ||
| 2085 | /* Now we've moved module, initialize linked lists, etc. */ | 2085 | /* Now we've moved module, initialize linked lists, etc. */ |
| 2086 | module_unload_init(mod); | 2086 | module_unload_init(mod); |
| 2087 | 2087 | ||
| @@ -2288,15 +2288,17 @@ static noinline struct module *load_module(void __user *umod, | |||
| 2288 | ftrace_release(mod->module_core, mod->core_size); | 2288 | ftrace_release(mod->module_core, mod->core_size); |
| 2289 | free_unload: | 2289 | free_unload: |
| 2290 | module_unload_free(mod); | 2290 | module_unload_free(mod); |
| 2291 | free_init: | ||
| 2292 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) | ||
| 2293 | percpu_modfree(mod->refptr); | ||
| 2294 | #endif | ||
| 2291 | module_free(mod, mod->module_init); | 2295 | module_free(mod, mod->module_init); |
| 2292 | free_core: | 2296 | free_core: |
| 2293 | module_free(mod, mod->module_core); | 2297 | module_free(mod, mod->module_core); |
| 2298 | /* mod will be freed with core. Don't access it beyond this line! */ | ||
| 2294 | free_percpu: | 2299 | free_percpu: |
| 2295 | if (percpu) | 2300 | if (percpu) |
| 2296 | percpu_modfree(percpu); | 2301 | percpu_modfree(percpu); |
| 2297 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) | ||
| 2298 | percpu_modfree(mod->refptr); | ||
| 2299 | #endif | ||
| 2300 | free_mod: | 2302 | free_mod: |
| 2301 | kfree(args); | 2303 | kfree(args); |
| 2302 | free_hdr: | 2304 | free_hdr: |
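The load_module() change moves the refptr allocation to after the module image has been relocated, which forces the error unwinding to grow a free_init label and to free refptr before module_free() releases the memory that mod itself lives in. The shape of that goto-based unwind, reduced to a hypothetical object (my_obj and my_obj_register() are invented):

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/errno.h>

struct my_obj {
	void *ref_counters;	/* allocated after the object itself */
};

static int my_obj_register(struct my_obj *obj)	/* hypothetical */
{
	return 0;
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj;
	int err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->ref_counters = kzalloc(64, GFP_KERNEL);
	if (!obj->ref_counters) {
		err = -ENOMEM;
		goto free_obj;
	}

	err = my_obj_register(obj);
	if (err)
		goto free_counters;

	return obj;

 free_counters:
	/* everything reachable through obj is released before obj itself */
	kfree(obj->ref_counters);
 free_obj:
	kfree(obj);
	/* obj is gone -- don't access it beyond this line */
	return ERR_PTR(err);
}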
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index db107c9bbc05..e976e505648d 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
| @@ -261,6 +261,40 @@ out: | |||
| 261 | rcu_read_unlock(); | 261 | rcu_read_unlock(); |
| 262 | } | 262 | } |
| 263 | 263 | ||
| 264 | static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b) | ||
| 265 | { | ||
| 266 | if (cputime_gt(b->utime, a->utime)) | ||
| 267 | a->utime = b->utime; | ||
| 268 | |||
| 269 | if (cputime_gt(b->stime, a->stime)) | ||
| 270 | a->stime = b->stime; | ||
| 271 | |||
| 272 | if (b->sum_exec_runtime > a->sum_exec_runtime) | ||
| 273 | a->sum_exec_runtime = b->sum_exec_runtime; | ||
| 274 | } | ||
| 275 | |||
| 276 | void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) | ||
| 277 | { | ||
| 278 | struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; | ||
| 279 | struct task_cputime sum; | ||
| 280 | unsigned long flags; | ||
| 281 | |||
| 282 | spin_lock_irqsave(&cputimer->lock, flags); | ||
| 283 | if (!cputimer->running) { | ||
| 284 | cputimer->running = 1; | ||
| 285 | /* | ||
| 286 | * The POSIX timer interface allows for absolute time expiry | ||
| 287 | * values through the TIMER_ABSTIME flag, therefore we have | ||
| 288 | * to synchronize the timer to the clock every time we start | ||
| 289 | * it. | ||
| 290 | */ | ||
| 291 | thread_group_cputime(tsk, &sum); | ||
| 292 | update_gt_cputime(&cputimer->cputime, &sum); | ||
| 293 | } | ||
| 294 | *times = cputimer->cputime; | ||
| 295 | spin_unlock_irqrestore(&cputimer->lock, flags); | ||
| 296 | } | ||
| 297 | |||
| 264 | /* | 298 | /* |
| 265 | * Sample a process (thread group) clock for the given group_leader task. | 299 | * Sample a process (thread group) clock for the given group_leader task. |
| 266 | * Must be called with tasklist_lock held for reading. | 300 | * Must be called with tasklist_lock held for reading. |
| @@ -488,7 +522,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk) | |||
| 488 | { | 522 | { |
| 489 | struct task_cputime cputime; | 523 | struct task_cputime cputime; |
| 490 | 524 | ||
| 491 | thread_group_cputime(tsk, &cputime); | 525 | thread_group_cputimer(tsk, &cputime); |
| 492 | cleanup_timers(tsk->signal->cpu_timers, | 526 | cleanup_timers(tsk->signal->cpu_timers, |
| 493 | cputime.utime, cputime.stime, cputime.sum_exec_runtime); | 527 | cputime.utime, cputime.stime, cputime.sum_exec_runtime); |
| 494 | } | 528 | } |
| @@ -507,29 +541,6 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) | |||
| 507 | } | 541 | } |
| 508 | 542 | ||
| 509 | /* | 543 | /* |
| 510 | * Enable the process wide cpu timer accounting. | ||
| 511 | * | ||
| 512 | * serialized using ->sighand->siglock | ||
| 513 | */ | ||
| 514 | static void start_process_timers(struct task_struct *tsk) | ||
| 515 | { | ||
| 516 | tsk->signal->cputimer.running = 1; | ||
| 517 | barrier(); | ||
| 518 | } | ||
| 519 | |||
| 520 | /* | ||
| 521 | * Release the process wide timer accounting -- timer stops ticking when | ||
| 522 | * nobody cares about it. | ||
| 523 | * | ||
| 524 | * serialized using ->sighand->siglock | ||
| 525 | */ | ||
| 526 | static void stop_process_timers(struct task_struct *tsk) | ||
| 527 | { | ||
| 528 | tsk->signal->cputimer.running = 0; | ||
| 529 | barrier(); | ||
| 530 | } | ||
| 531 | |||
| 532 | /* | ||
| 533 | * Insert the timer on the appropriate list before any timers that | 544 | * Insert the timer on the appropriate list before any timers that |
| 534 | * expire later. This must be called with the tasklist_lock held | 545 | * expire later. This must be called with the tasklist_lock held |
| 535 | * for reading, and interrupts disabled. | 546 | * for reading, and interrupts disabled. |
| @@ -549,9 +560,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) | |||
| 549 | BUG_ON(!irqs_disabled()); | 560 | BUG_ON(!irqs_disabled()); |
| 550 | spin_lock(&p->sighand->siglock); | 561 | spin_lock(&p->sighand->siglock); |
| 551 | 562 | ||
| 552 | if (!CPUCLOCK_PERTHREAD(timer->it_clock)) | ||
| 553 | start_process_timers(p); | ||
| 554 | |||
| 555 | listpos = head; | 563 | listpos = head; |
| 556 | if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) { | 564 | if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) { |
| 557 | list_for_each_entry(next, head, entry) { | 565 | list_for_each_entry(next, head, entry) { |
| @@ -673,6 +681,33 @@ static void cpu_timer_fire(struct k_itimer *timer) | |||
| 673 | } | 681 | } |
| 674 | 682 | ||
| 675 | /* | 683 | /* |
| 684 | * Sample a process (thread group) timer for the given group_leader task. | ||
| 685 | * Must be called with tasklist_lock held for reading. | ||
| 686 | */ | ||
| 687 | static int cpu_timer_sample_group(const clockid_t which_clock, | ||
| 688 | struct task_struct *p, | ||
| 689 | union cpu_time_count *cpu) | ||
| 690 | { | ||
| 691 | struct task_cputime cputime; | ||
| 692 | |||
| 693 | thread_group_cputimer(p, &cputime); | ||
| 694 | switch (CPUCLOCK_WHICH(which_clock)) { | ||
| 695 | default: | ||
| 696 | return -EINVAL; | ||
| 697 | case CPUCLOCK_PROF: | ||
| 698 | cpu->cpu = cputime_add(cputime.utime, cputime.stime); | ||
| 699 | break; | ||
| 700 | case CPUCLOCK_VIRT: | ||
| 701 | cpu->cpu = cputime.utime; | ||
| 702 | break; | ||
| 703 | case CPUCLOCK_SCHED: | ||
| 704 | cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p); | ||
| 705 | break; | ||
| 706 | } | ||
| 707 | return 0; | ||
| 708 | } | ||
| 709 | |||
| 710 | /* | ||
| 676 | * Guts of sys_timer_settime for CPU timers. | 711 | * Guts of sys_timer_settime for CPU timers. |
| 677 | * This is called with the timer locked and interrupts disabled. | 712 | * This is called with the timer locked and interrupts disabled. |
| 678 | * If we return TIMER_RETRY, it's necessary to release the timer's lock | 713 | * If we return TIMER_RETRY, it's necessary to release the timer's lock |
| @@ -733,7 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags, | |||
| 733 | if (CPUCLOCK_PERTHREAD(timer->it_clock)) { | 768 | if (CPUCLOCK_PERTHREAD(timer->it_clock)) { |
| 734 | cpu_clock_sample(timer->it_clock, p, &val); | 769 | cpu_clock_sample(timer->it_clock, p, &val); |
| 735 | } else { | 770 | } else { |
| 736 | cpu_clock_sample_group(timer->it_clock, p, &val); | 771 | cpu_timer_sample_group(timer->it_clock, p, &val); |
| 737 | } | 772 | } |
| 738 | 773 | ||
| 739 | if (old) { | 774 | if (old) { |
| @@ -881,7 +916,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) | |||
| 881 | read_unlock(&tasklist_lock); | 916 | read_unlock(&tasklist_lock); |
| 882 | goto dead; | 917 | goto dead; |
| 883 | } else { | 918 | } else { |
| 884 | cpu_clock_sample_group(timer->it_clock, p, &now); | 919 | cpu_timer_sample_group(timer->it_clock, p, &now); |
| 885 | clear_dead = (unlikely(p->exit_state) && | 920 | clear_dead = (unlikely(p->exit_state) && |
| 886 | thread_group_empty(p)); | 921 | thread_group_empty(p)); |
| 887 | } | 922 | } |
| @@ -1021,6 +1056,19 @@ static void check_thread_timers(struct task_struct *tsk, | |||
| 1021 | } | 1056 | } |
| 1022 | } | 1057 | } |
| 1023 | 1058 | ||
| 1059 | static void stop_process_timers(struct task_struct *tsk) | ||
| 1060 | { | ||
| 1061 | struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; | ||
| 1062 | unsigned long flags; | ||
| 1063 | |||
| 1064 | if (!cputimer->running) | ||
| 1065 | return; | ||
| 1066 | |||
| 1067 | spin_lock_irqsave(&cputimer->lock, flags); | ||
| 1068 | cputimer->running = 0; | ||
| 1069 | spin_unlock_irqrestore(&cputimer->lock, flags); | ||
| 1070 | } | ||
| 1071 | |||
| 1024 | /* | 1072 | /* |
| 1025 | * Check for any per-thread CPU timers that have fired and move them | 1073 | * Check for any per-thread CPU timers that have fired and move them |
| 1026 | * off the tsk->*_timers list onto the firing list. Per-thread timers | 1074 | * off the tsk->*_timers list onto the firing list. Per-thread timers |
| @@ -1223,7 +1271,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) | |||
| 1223 | clear_dead_task(timer, now); | 1271 | clear_dead_task(timer, now); |
| 1224 | goto out_unlock; | 1272 | goto out_unlock; |
| 1225 | } | 1273 | } |
| 1226 | cpu_clock_sample_group(timer->it_clock, p, &now); | 1274 | cpu_timer_sample_group(timer->it_clock, p, &now); |
| 1227 | bump_cpu_timer(timer, now); | 1275 | bump_cpu_timer(timer, now); |
| 1228 | /* Leave the tasklist_lock locked for the call below. */ | 1276 | /* Leave the tasklist_lock locked for the call below. */ |
| 1229 | } | 1277 | } |
| @@ -1388,33 +1436,6 @@ void run_posix_cpu_timers(struct task_struct *tsk) | |||
| 1388 | } | 1436 | } |
| 1389 | 1437 | ||
| 1390 | /* | 1438 | /* |
| 1391 | * Sample a process (thread group) timer for the given group_leader task. | ||
| 1392 | * Must be called with tasklist_lock held for reading. | ||
| 1393 | */ | ||
| 1394 | static int cpu_timer_sample_group(const clockid_t which_clock, | ||
| 1395 | struct task_struct *p, | ||
| 1396 | union cpu_time_count *cpu) | ||
| 1397 | { | ||
| 1398 | struct task_cputime cputime; | ||
| 1399 | |||
| 1400 | thread_group_cputimer(p, &cputime); | ||
| 1401 | switch (CPUCLOCK_WHICH(which_clock)) { | ||
| 1402 | default: | ||
| 1403 | return -EINVAL; | ||
| 1404 | case CPUCLOCK_PROF: | ||
| 1405 | cpu->cpu = cputime_add(cputime.utime, cputime.stime); | ||
| 1406 | break; | ||
| 1407 | case CPUCLOCK_VIRT: | ||
| 1408 | cpu->cpu = cputime.utime; | ||
| 1409 | break; | ||
| 1410 | case CPUCLOCK_SCHED: | ||
| 1411 | cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p); | ||
| 1412 | break; | ||
| 1413 | } | ||
| 1414 | return 0; | ||
| 1415 | } | ||
| 1416 | |||
| 1417 | /* | ||
| 1418 | * Set one of the process-wide special case CPU timers. | 1439 | * Set one of the process-wide special case CPU timers. |
| 1419 | * The tsk->sighand->siglock must be held by the caller. | 1440 | * The tsk->sighand->siglock must be held by the caller. |
| 1420 | * The *newval argument is relative and we update it to be absolute, *oldval | 1441 | * The *newval argument is relative and we update it to be absolute, *oldval |
| @@ -1427,7 +1448,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, | |||
| 1427 | struct list_head *head; | 1448 | struct list_head *head; |
| 1428 | 1449 | ||
| 1429 | BUG_ON(clock_idx == CPUCLOCK_SCHED); | 1450 | BUG_ON(clock_idx == CPUCLOCK_SCHED); |
| 1430 | start_process_timers(tsk); | ||
| 1431 | cpu_timer_sample_group(clock_idx, tsk, &now); | 1451 | cpu_timer_sample_group(clock_idx, tsk, &now); |
| 1432 | 1452 | ||
| 1433 | if (oldval) { | 1453 | if (oldval) { |
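The new thread_group_cputimer() starts process-wide accounting lazily, under cputimer->lock, and hands back a snapshot; update_gt_cputime() keeps the cached totals from going backwards when the timer is restarted. The locking shape, reduced to a hypothetical cached counter (my_cached_stats and my_stats_recompute() are invented):

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_cached_stats {
	spinlock_t	lock;	/* assumed initialized with spin_lock_init() */
	int		running;
	u64		total;
};

static u64 my_stats_recompute(void)	/* hypothetical, expensive */
{
	return 0;
}

/* take a consistent snapshot, starting the accounting on first use */
static u64 my_stats_sample(struct my_cached_stats *s)
{
	unsigned long flags;
	u64 snapshot;

	spin_lock_irqsave(&s->lock, flags);
	if (!s->running) {
		s->running = 1;
		/* the full recomputation happens only once, under the lock;
		 * afterwards the value is maintained incrementally elsewhere */
		s->total = my_stats_recompute();
	}
	snapshot = s->total;
	spin_unlock_irqrestore(&s->lock, flags);

	return snapshot;
}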
diff --git a/kernel/power/Makefile b/kernel/power/Makefile index d7a10167a25b..720ea4f781bd 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile | |||
| @@ -3,7 +3,7 @@ ifeq ($(CONFIG_PM_DEBUG),y) | |||
| 3 | EXTRA_CFLAGS += -DDEBUG | 3 | EXTRA_CFLAGS += -DDEBUG |
| 4 | endif | 4 | endif |
| 5 | 5 | ||
| 6 | obj-y := main.o | 6 | obj-$(CONFIG_PM) += main.o |
| 7 | obj-$(CONFIG_PM_SLEEP) += console.o | 7 | obj-$(CONFIG_PM_SLEEP) += console.o |
| 8 | obj-$(CONFIG_FREEZER) += process.o | 8 | obj-$(CONFIG_FREEZER) += process.o |
| 9 | obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o | 9 | obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o |
diff --git a/kernel/power/console.c b/kernel/power/console.c index b8628be2a465..a3961b205de7 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c | |||
| @@ -78,6 +78,12 @@ void pm_restore_console(void) | |||
| 78 | } | 78 | } |
| 79 | set_console(orig_fgconsole); | 79 | set_console(orig_fgconsole); |
| 80 | release_console_sem(); | 80 | release_console_sem(); |
| 81 | |||
| 82 | if (vt_waitactive(orig_fgconsole)) { | ||
| 83 | pr_debug("Resume: Can't switch VCs."); | ||
| 84 | return; | ||
| 85 | } | ||
| 86 | |||
| 81 | kmsg_redirect = orig_kmsg; | 87 | kmsg_redirect = orig_kmsg; |
| 82 | } | 88 | } |
| 83 | #endif | 89 | #endif |
diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 432ee575c9ee..4a4a206b1979 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c | |||
| @@ -227,6 +227,12 @@ static int create_image(int platform_mode) | |||
| 227 | "aborting hibernation\n"); | 227 | "aborting hibernation\n"); |
| 228 | goto Enable_irqs; | 228 | goto Enable_irqs; |
| 229 | } | 229 | } |
| 230 | sysdev_suspend(PMSG_FREEZE); | ||
| 231 | if (error) { | ||
| 232 | printk(KERN_ERR "PM: Some devices failed to power down, " | ||
| 233 | "aborting hibernation\n"); | ||
| 234 | goto Power_up_devices; | ||
| 235 | } | ||
| 230 | 236 | ||
| 231 | if (hibernation_test(TEST_CORE)) | 237 | if (hibernation_test(TEST_CORE)) |
| 232 | goto Power_up; | 238 | goto Power_up; |
| @@ -242,9 +248,11 @@ static int create_image(int platform_mode) | |||
| 242 | if (!in_suspend) | 248 | if (!in_suspend) |
| 243 | platform_leave(platform_mode); | 249 | platform_leave(platform_mode); |
| 244 | Power_up: | 250 | Power_up: |
| 251 | sysdev_resume(); | ||
| 245 | /* NOTE: device_power_up() is just a resume() for devices | 252 | /* NOTE: device_power_up() is just a resume() for devices |
| 246 | * that suspended with irqs off ... no overall powerup. | 253 | * that suspended with irqs off ... no overall powerup. |
| 247 | */ | 254 | */ |
| 255 | Power_up_devices: | ||
| 248 | device_power_up(in_suspend ? | 256 | device_power_up(in_suspend ? |
| 249 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | 257 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
| 250 | Enable_irqs: | 258 | Enable_irqs: |
| @@ -335,6 +343,7 @@ static int resume_target_kernel(void) | |||
| 335 | "aborting resume\n"); | 343 | "aborting resume\n"); |
| 336 | goto Enable_irqs; | 344 | goto Enable_irqs; |
| 337 | } | 345 | } |
| 346 | sysdev_suspend(PMSG_QUIESCE); | ||
| 338 | /* We'll ignore saved state, but this gets preempt count (etc) right */ | 347 | /* We'll ignore saved state, but this gets preempt count (etc) right */ |
| 339 | save_processor_state(); | 348 | save_processor_state(); |
| 340 | error = restore_highmem(); | 349 | error = restore_highmem(); |
| @@ -357,6 +366,7 @@ static int resume_target_kernel(void) | |||
| 357 | swsusp_free(); | 366 | swsusp_free(); |
| 358 | restore_processor_state(); | 367 | restore_processor_state(); |
| 359 | touch_softlockup_watchdog(); | 368 | touch_softlockup_watchdog(); |
| 369 | sysdev_resume(); | ||
| 360 | device_power_up(PMSG_RECOVER); | 370 | device_power_up(PMSG_RECOVER); |
| 361 | Enable_irqs: | 371 | Enable_irqs: |
| 362 | local_irq_enable(); | 372 | local_irq_enable(); |
| @@ -440,6 +450,7 @@ int hibernation_platform_enter(void) | |||
| 440 | local_irq_disable(); | 450 | local_irq_disable(); |
| 441 | error = device_power_down(PMSG_HIBERNATE); | 451 | error = device_power_down(PMSG_HIBERNATE); |
| 442 | if (!error) { | 452 | if (!error) { |
| 453 | sysdev_suspend(PMSG_HIBERNATE); | ||
| 443 | hibernation_ops->enter(); | 454 | hibernation_ops->enter(); |
| 444 | /* We should never get here */ | 455 | /* We should never get here */ |
| 445 | while (1); | 456 | while (1); |
| @@ -595,6 +606,12 @@ static int software_resume(void) | |||
| 595 | unsigned int flags; | 606 | unsigned int flags; |
| 596 | 607 | ||
| 597 | /* | 608 | /* |
| 609 | * If the user said "noresume".. bail out early. | ||
| 610 | */ | ||
| 611 | if (noresume) | ||
| 612 | return 0; | ||
| 613 | |||
| 614 | /* | ||
| 598 | * name_to_dev_t() below takes a sysfs buffer mutex when sysfs | 615 | * name_to_dev_t() below takes a sysfs buffer mutex when sysfs |
| 599 | * is configured into the kernel. Since the regular hibernate | 616 | * is configured into the kernel. Since the regular hibernate |
| 600 | * trigger path is via sysfs which takes a buffer mutex before | 617 | * trigger path is via sysfs which takes a buffer mutex before |
| @@ -610,6 +627,11 @@ static int software_resume(void) | |||
| 610 | mutex_unlock(&pm_mutex); | 627 | mutex_unlock(&pm_mutex); |
| 611 | return -ENOENT; | 628 | return -ENOENT; |
| 612 | } | 629 | } |
| 630 | /* | ||
| 631 | * Some device discovery might still be in progress; we need | ||
| 632 | * to wait for this to finish. | ||
| 633 | */ | ||
| 634 | wait_for_device_probe(); | ||
| 613 | swsusp_resume_device = name_to_dev_t(resume_file); | 635 | swsusp_resume_device = name_to_dev_t(resume_file); |
| 614 | pr_debug("PM: Resume from partition %s\n", resume_file); | 636 | pr_debug("PM: Resume from partition %s\n", resume_file); |
| 615 | } else { | 637 | } else { |
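software_resume() now bails out early on "noresume" and calls wait_for_device_probe() before name_to_dev_t(), so asynchronously probed block devices can register before the resume partition name is resolved. A hedged sketch of that lookup order (my_resolve_resume_device() and resume_path are invented; wait_for_device_probe() and name_to_dev_t() are the interfaces used above):

#include <linux/device.h>
#include <linux/mount.h>
#include <linux/kernel.h>

static dev_t my_resolve_resume_device(char *resume_path)
{
	dev_t dev;

	/* asynchronous device discovery may still be running; let it settle */
	wait_for_device_probe();

	dev = name_to_dev_t(resume_path);
	if (!dev)
		pr_debug("PM: %s does not name a block device\n", resume_path);

	return dev;
}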
diff --git a/kernel/power/main.c b/kernel/power/main.c index 239988873971..c9632f841f64 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
| @@ -57,16 +57,6 @@ int pm_notifier_call_chain(unsigned long val) | |||
| 57 | #ifdef CONFIG_PM_DEBUG | 57 | #ifdef CONFIG_PM_DEBUG |
| 58 | int pm_test_level = TEST_NONE; | 58 | int pm_test_level = TEST_NONE; |
| 59 | 59 | ||
| 60 | static int suspend_test(int level) | ||
| 61 | { | ||
| 62 | if (pm_test_level == level) { | ||
| 63 | printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n"); | ||
| 64 | mdelay(5000); | ||
| 65 | return 1; | ||
| 66 | } | ||
| 67 | return 0; | ||
| 68 | } | ||
| 69 | |||
| 70 | static const char * const pm_tests[__TEST_AFTER_LAST] = { | 60 | static const char * const pm_tests[__TEST_AFTER_LAST] = { |
| 71 | [TEST_NONE] = "none", | 61 | [TEST_NONE] = "none", |
| 72 | [TEST_CORE] = "core", | 62 | [TEST_CORE] = "core", |
| @@ -125,14 +115,24 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
| 125 | } | 115 | } |
| 126 | 116 | ||
| 127 | power_attr(pm_test); | 117 | power_attr(pm_test); |
| 128 | #else /* !CONFIG_PM_DEBUG */ | 118 | #endif /* CONFIG_PM_DEBUG */ |
| 129 | static inline int suspend_test(int level) { return 0; } | ||
| 130 | #endif /* !CONFIG_PM_DEBUG */ | ||
| 131 | 119 | ||
| 132 | #endif /* CONFIG_PM_SLEEP */ | 120 | #endif /* CONFIG_PM_SLEEP */ |
| 133 | 121 | ||
| 134 | #ifdef CONFIG_SUSPEND | 122 | #ifdef CONFIG_SUSPEND |
| 135 | 123 | ||
| 124 | static int suspend_test(int level) | ||
| 125 | { | ||
| 126 | #ifdef CONFIG_PM_DEBUG | ||
| 127 | if (pm_test_level == level) { | ||
| 128 | printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n"); | ||
| 129 | mdelay(5000); | ||
| 130 | return 1; | ||
| 131 | } | ||
| 132 | #endif /* !CONFIG_PM_DEBUG */ | ||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | #ifdef CONFIG_PM_TEST_SUSPEND | 136 | #ifdef CONFIG_PM_TEST_SUSPEND |
| 137 | 137 | ||
| 138 | /* | 138 | /* |
| @@ -298,8 +298,12 @@ static int suspend_enter(suspend_state_t state) | |||
| 298 | goto Done; | 298 | goto Done; |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | if (!suspend_test(TEST_CORE)) | 301 | error = sysdev_suspend(PMSG_SUSPEND); |
| 302 | error = suspend_ops->enter(state); | 302 | if (!error) { |
| 303 | if (!suspend_test(TEST_CORE)) | ||
| 304 | error = suspend_ops->enter(state); | ||
| 305 | sysdev_resume(); | ||
| 306 | } | ||
| 303 | 307 | ||
| 304 | device_power_up(PMSG_RESUME); | 308 | device_power_up(PMSG_RESUME); |
| 305 | Done: | 309 | Done: |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 6da14358537c..505f319e489c 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
| @@ -60,6 +60,7 @@ static struct block_device *resume_bdev; | |||
| 60 | static int submit(int rw, pgoff_t page_off, struct page *page, | 60 | static int submit(int rw, pgoff_t page_off, struct page *page, |
| 61 | struct bio **bio_chain) | 61 | struct bio **bio_chain) |
| 62 | { | 62 | { |
| 63 | const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); | ||
| 63 | struct bio *bio; | 64 | struct bio *bio; |
| 64 | 65 | ||
| 65 | bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); | 66 | bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); |
| @@ -80,7 +81,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page, | |||
| 80 | bio_get(bio); | 81 | bio_get(bio); |
| 81 | 82 | ||
| 82 | if (bio_chain == NULL) { | 83 | if (bio_chain == NULL) { |
| 83 | submit_bio(rw | (1 << BIO_RW_SYNC), bio); | 84 | submit_bio(bio_rw, bio); |
| 84 | wait_on_page_locked(page); | 85 | wait_on_page_locked(page); |
| 85 | if (rw == READ) | 86 | if (rw == READ) |
| 86 | bio_set_pages_dirty(bio); | 87 | bio_set_pages_dirty(bio); |
| @@ -90,7 +91,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page, | |||
| 90 | get_page(page); /* These pages are freed later */ | 91 | get_page(page); /* These pages are freed later */ |
| 91 | bio->bi_private = *bio_chain; | 92 | bio->bi_private = *bio_chain; |
| 92 | *bio_chain = bio; | 93 | *bio_chain = bio; |
| 93 | submit_bio(rw | (1 << BIO_RW_SYNC), bio); | 94 | submit_bio(bio_rw, bio); |
| 94 | } | 95 | } |
| 95 | return 0; | 96 | return 0; |
| 96 | } | 97 | } |
diff --git a/kernel/power/user.c b/kernel/power/user.c index 005b93d839ba..6c85359364f2 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
| @@ -95,15 +95,15 @@ static int snapshot_open(struct inode *inode, struct file *filp) | |||
| 95 | data->swap = swsusp_resume_device ? | 95 | data->swap = swsusp_resume_device ? |
| 96 | swap_type_of(swsusp_resume_device, 0, NULL) : -1; | 96 | swap_type_of(swsusp_resume_device, 0, NULL) : -1; |
| 97 | data->mode = O_RDONLY; | 97 | data->mode = O_RDONLY; |
| 98 | error = pm_notifier_call_chain(PM_RESTORE_PREPARE); | 98 | error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE); |
| 99 | if (error) | 99 | if (error) |
| 100 | pm_notifier_call_chain(PM_POST_RESTORE); | 100 | pm_notifier_call_chain(PM_POST_HIBERNATION); |
| 101 | } else { | 101 | } else { |
| 102 | data->swap = -1; | 102 | data->swap = -1; |
| 103 | data->mode = O_WRONLY; | 103 | data->mode = O_WRONLY; |
| 104 | error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE); | 104 | error = pm_notifier_call_chain(PM_RESTORE_PREPARE); |
| 105 | if (error) | 105 | if (error) |
| 106 | pm_notifier_call_chain(PM_POST_HIBERNATION); | 106 | pm_notifier_call_chain(PM_POST_RESTORE); |
| 107 | } | 107 | } |
| 108 | if (error) | 108 | if (error) |
| 109 | atomic_inc(&snapshot_device_available); | 109 | atomic_inc(&snapshot_device_available); |
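The snapshot_open() fix sends PM_HIBERNATION_PREPARE for the read-only open (an image is about to be created) and PM_RESTORE_PREPARE for the write-only open (an image is about to be loaded), instead of the other way around. A sketch of a hypothetical driver notifier that observes these events (the my_pm_* names are invented; the events and register_pm_notifier() are the real interface):

#include <linux/suspend.h>
#include <linux/notifier.h>
#include <linux/init.h>

static int my_pm_callback(struct notifier_block *nb, unsigned long event,
			  void *unused)
{
	switch (event) {
	case PM_HIBERNATION_PREPARE:	/* snapshot about to be created */
	case PM_RESTORE_PREPARE:	/* image about to be loaded */
		/* quiesce hypothetical driver state here */
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/* back to normal operation */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_pm_nb = {
	.notifier_call = my_pm_callback,
};

static int __init my_pm_init(void)
{
	return register_pm_notifier(&my_pm_nb);
}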
diff --git a/kernel/printk.c b/kernel/printk.c index 69188f226a93..e3602d0755b0 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
| @@ -73,7 +73,6 @@ EXPORT_SYMBOL(oops_in_progress); | |||
| 73 | * driver system. | 73 | * driver system. |
| 74 | */ | 74 | */ |
| 75 | static DECLARE_MUTEX(console_sem); | 75 | static DECLARE_MUTEX(console_sem); |
| 76 | static DECLARE_MUTEX(secondary_console_sem); | ||
| 77 | struct console *console_drivers; | 76 | struct console *console_drivers; |
| 78 | EXPORT_SYMBOL_GPL(console_drivers); | 77 | EXPORT_SYMBOL_GPL(console_drivers); |
| 79 | 78 | ||
| @@ -891,12 +890,14 @@ void suspend_console(void) | |||
| 891 | printk("Suspending console(s) (use no_console_suspend to debug)\n"); | 890 | printk("Suspending console(s) (use no_console_suspend to debug)\n"); |
| 892 | acquire_console_sem(); | 891 | acquire_console_sem(); |
| 893 | console_suspended = 1; | 892 | console_suspended = 1; |
| 893 | up(&console_sem); | ||
| 894 | } | 894 | } |
| 895 | 895 | ||
| 896 | void resume_console(void) | 896 | void resume_console(void) |
| 897 | { | 897 | { |
| 898 | if (!console_suspend_enabled) | 898 | if (!console_suspend_enabled) |
| 899 | return; | 899 | return; |
| 900 | down(&console_sem); | ||
| 900 | console_suspended = 0; | 901 | console_suspended = 0; |
| 901 | release_console_sem(); | 902 | release_console_sem(); |
| 902 | } | 903 | } |
| @@ -912,11 +913,9 @@ void resume_console(void) | |||
| 912 | void acquire_console_sem(void) | 913 | void acquire_console_sem(void) |
| 913 | { | 914 | { |
| 914 | BUG_ON(in_interrupt()); | 915 | BUG_ON(in_interrupt()); |
| 915 | if (console_suspended) { | ||
| 916 | down(&secondary_console_sem); | ||
| 917 | return; | ||
| 918 | } | ||
| 919 | down(&console_sem); | 916 | down(&console_sem); |
| 917 | if (console_suspended) | ||
| 918 | return; | ||
| 920 | console_locked = 1; | 919 | console_locked = 1; |
| 921 | console_may_schedule = 1; | 920 | console_may_schedule = 1; |
| 922 | } | 921 | } |
| @@ -926,6 +925,10 @@ int try_acquire_console_sem(void) | |||
| 926 | { | 925 | { |
| 927 | if (down_trylock(&console_sem)) | 926 | if (down_trylock(&console_sem)) |
| 928 | return -1; | 927 | return -1; |
| 928 | if (console_suspended) { | ||
| 929 | up(&console_sem); | ||
| 930 | return -1; | ||
| 931 | } | ||
| 929 | console_locked = 1; | 932 | console_locked = 1; |
| 930 | console_may_schedule = 0; | 933 | console_may_schedule = 0; |
| 931 | return 0; | 934 | return 0; |
| @@ -979,7 +982,7 @@ void release_console_sem(void) | |||
| 979 | unsigned wake_klogd = 0; | 982 | unsigned wake_klogd = 0; |
| 980 | 983 | ||
| 981 | if (console_suspended) { | 984 | if (console_suspended) { |
| 982 | up(&secondary_console_sem); | 985 | up(&console_sem); |
| 983 | return; | 986 | return; |
| 984 | } | 987 | } |
| 985 | 988 | ||
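The printk.c change drops secondary_console_sem: suspend_console() now leaves console_suspended set while releasing console_sem, and acquire/release_console_sem() check the flag and hold the one semaphore across the suspended window. A stripped-down, hypothetical sketch of that single-semaphore scheme (the my_* names are invented):

#include <linux/semaphore.h>

static DECLARE_MUTEX(my_sem);	/* a semaphore initialized to 1, like console_sem */
static int my_suspended;

static void my_suspend(void)	/* cf. suspend_console() */
{
	down(&my_sem);
	my_suspended = 1;
	up(&my_sem);		/* later my_lock() callers get in ... */
}

static void my_resume(void)	/* cf. resume_console() */
{
	down(&my_sem);
	my_suspended = 0;
	up(&my_sem);
}

static void my_lock(void)	/* cf. acquire_console_sem() */
{
	down(&my_sem);
	if (my_suspended)
		return;		/* ... but see the flag, do no work, keep the sem */
	/* normal locked-section setup would go here */
}

static void my_unlock(void)	/* cf. release_console_sem() */
{
	if (my_suspended) {
		up(&my_sem);
		return;
	}
	/* normal teardown would go here, then: */
	up(&my_sem);
}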
diff --git a/kernel/profile.c b/kernel/profile.c index 784933acf5b8..7724e0409bae 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
| @@ -114,12 +114,15 @@ int __ref profile_init(void) | |||
| 114 | if (!slab_is_available()) { | 114 | if (!slab_is_available()) { |
| 115 | prof_buffer = alloc_bootmem(buffer_bytes); | 115 | prof_buffer = alloc_bootmem(buffer_bytes); |
| 116 | alloc_bootmem_cpumask_var(&prof_cpu_mask); | 116 | alloc_bootmem_cpumask_var(&prof_cpu_mask); |
| 117 | cpumask_copy(prof_cpu_mask, cpu_possible_mask); | ||
| 117 | return 0; | 118 | return 0; |
| 118 | } | 119 | } |
| 119 | 120 | ||
| 120 | if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL)) | 121 | if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL)) |
| 121 | return -ENOMEM; | 122 | return -ENOMEM; |
| 122 | 123 | ||
| 124 | cpumask_copy(prof_cpu_mask, cpu_possible_mask); | ||
| 125 | |||
| 123 | prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); | 126 | prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); |
| 124 | if (prof_buffer) | 127 | if (prof_buffer) |
| 125 | return 0; | 128 | return 0; |
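profile_init() now copies cpu_possible_mask into prof_cpu_mask right after allocating it, because alloc_cpumask_var()/alloc_bootmem_cpumask_var() hand back an uninitialized mask. A short sketch of that allocate-then-initialize pattern (my_mask is invented):

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>

static cpumask_var_t my_mask;	/* hypothetical */

static int __init my_mask_init(void)
{
	if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
		return -ENOMEM;

	/* the freshly allocated mask holds unspecified bits; give it a
	 * defined starting value before any bit is ever tested */
	cpumask_copy(my_mask, cpu_possible_mask);
	return 0;
}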
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c index bd5a9003497c..654c640a6b9c 100644 --- a/kernel/rcuclassic.c +++ b/kernel/rcuclassic.c | |||
| @@ -679,8 +679,8 @@ int rcu_needs_cpu(int cpu) | |||
| 679 | void rcu_check_callbacks(int cpu, int user) | 679 | void rcu_check_callbacks(int cpu, int user) |
| 680 | { | 680 | { |
| 681 | if (user || | 681 | if (user || |
| 682 | (idle_cpu(cpu) && !in_softirq() && | 682 | (idle_cpu(cpu) && rcu_scheduler_active && |
| 683 | hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | 683 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { |
| 684 | 684 | ||
| 685 | /* | 685 | /* |
| 686 | * Get here if this CPU took its interrupt from user | 686 | * Get here if this CPU took its interrupt from user |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index d92a76a881aa..cae8a059cf47 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
| @@ -44,6 +44,7 @@ | |||
| 44 | #include <linux/cpu.h> | 44 | #include <linux/cpu.h> |
| 45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
| 46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
| 47 | #include <linux/kernel_stat.h> | ||
| 47 | 48 | ||
| 48 | enum rcu_barrier { | 49 | enum rcu_barrier { |
| 49 | RCU_BARRIER_STD, | 50 | RCU_BARRIER_STD, |
| @@ -55,6 +56,7 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | |||
| 55 | static atomic_t rcu_barrier_cpu_count; | 56 | static atomic_t rcu_barrier_cpu_count; |
| 56 | static DEFINE_MUTEX(rcu_barrier_mutex); | 57 | static DEFINE_MUTEX(rcu_barrier_mutex); |
| 57 | static struct completion rcu_barrier_completion; | 58 | static struct completion rcu_barrier_completion; |
| 59 | int rcu_scheduler_active __read_mostly; | ||
| 58 | 60 | ||
| 59 | /* | 61 | /* |
| 60 | * Awaken the corresponding synchronize_rcu() instance now that a | 62 | * Awaken the corresponding synchronize_rcu() instance now that a |
| @@ -80,6 +82,10 @@ void wakeme_after_rcu(struct rcu_head *head) | |||
| 80 | void synchronize_rcu(void) | 82 | void synchronize_rcu(void) |
| 81 | { | 83 | { |
| 82 | struct rcu_synchronize rcu; | 84 | struct rcu_synchronize rcu; |
| 85 | |||
| 86 | if (rcu_blocking_is_gp()) | ||
| 87 | return; | ||
| 88 | |||
| 83 | init_completion(&rcu.completion); | 89 | init_completion(&rcu.completion); |
| 84 | /* Will wake me after RCU finished. */ | 90 | /* Will wake me after RCU finished. */ |
| 85 | call_rcu(&rcu.head, wakeme_after_rcu); | 91 | call_rcu(&rcu.head, wakeme_after_rcu); |
| @@ -175,3 +181,9 @@ void __init rcu_init(void) | |||
| 175 | __rcu_init(); | 181 | __rcu_init(); |
| 176 | } | 182 | } |
| 177 | 183 | ||
| 184 | void rcu_scheduler_starting(void) | ||
| 185 | { | ||
| 186 | WARN_ON(num_online_cpus() != 1); | ||
| 187 | WARN_ON(nr_context_switches() > 0); | ||
| 188 | rcu_scheduler_active = 1; | ||
| 189 | } | ||
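synchronize_rcu() now returns immediately when blocking is itself a grace period, and rcu_scheduler_starting() flips rcu_scheduler_active once the scheduler is up (the WARN_ONs check that it runs with one CPU online and before any context switch). The body of rcu_blocking_is_gp() is not part of these hunks; judging from the rcupreempt.c change below, it presumably amounts to something like:

#include <linux/cpumask.h>

static inline int rcu_blocking_is_gp(void)
{
	/* on a uniprocessor, the fact that we can block at all means no
	 * RCU read-side critical section can be running concurrently */
	return num_online_cpus() == 1;
}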
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c index 33cfc50781f9..5d59e850fb71 100644 --- a/kernel/rcupreempt.c +++ b/kernel/rcupreempt.c | |||
| @@ -1181,6 +1181,9 @@ void __synchronize_sched(void) | |||
| 1181 | { | 1181 | { |
| 1182 | struct rcu_synchronize rcu; | 1182 | struct rcu_synchronize rcu; |
| 1183 | 1183 | ||
| 1184 | if (num_online_cpus() == 1) | ||
| 1185 | return; /* blocking is gp if only one CPU! */ | ||
| 1186 | |||
| 1184 | init_completion(&rcu.completion); | 1187 | init_completion(&rcu.completion); |
| 1185 | /* Will wake me after RCU finished. */ | 1188 | /* Will wake me after RCU finished. */ |
| 1186 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | 1189 | call_rcu_sched(&rcu.head, wakeme_after_rcu); |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index b2fd602a6f6f..97ce31579ec0 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
| @@ -948,8 +948,8 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
| 948 | void rcu_check_callbacks(int cpu, int user) | 948 | void rcu_check_callbacks(int cpu, int user) |
| 949 | { | 949 | { |
| 950 | if (user || | 950 | if (user || |
| 951 | (idle_cpu(cpu) && !in_softirq() && | 951 | (idle_cpu(cpu) && rcu_scheduler_active && |
| 952 | hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | 952 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { |
| 953 | 953 | ||
| 954 | /* | 954 | /* |
| 955 | * Get here if this CPU took its interrupt from user | 955 | * Get here if this CPU took its interrupt from user |
diff --git a/kernel/sched.c b/kernel/sched.c index e72485033c48..8e2558c2ba67 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | |||
| 223 | { | 223 | { |
| 224 | ktime_t now; | 224 | ktime_t now; |
| 225 | 225 | ||
| 226 | if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF) | 226 | if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) |
| 227 | return; | 227 | return; |
| 228 | 228 | ||
| 229 | if (hrtimer_active(&rt_b->rt_period_timer)) | 229 | if (hrtimer_active(&rt_b->rt_period_timer)) |
| @@ -2266,16 +2266,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
| 2266 | if (!sched_feat(SYNC_WAKEUPS)) | 2266 | if (!sched_feat(SYNC_WAKEUPS)) |
| 2267 | sync = 0; | 2267 | sync = 0; |
| 2268 | 2268 | ||
| 2269 | if (!sync) { | ||
| 2270 | if (current->se.avg_overlap < sysctl_sched_migration_cost && | ||
| 2271 | p->se.avg_overlap < sysctl_sched_migration_cost) | ||
| 2272 | sync = 1; | ||
| 2273 | } else { | ||
| 2274 | if (current->se.avg_overlap >= sysctl_sched_migration_cost || | ||
| 2275 | p->se.avg_overlap >= sysctl_sched_migration_cost) | ||
| 2276 | sync = 0; | ||
| 2277 | } | ||
| 2278 | |||
| 2279 | #ifdef CONFIG_SMP | 2269 | #ifdef CONFIG_SMP |
| 2280 | if (sched_feat(LB_WAKEUP_UPDATE)) { | 2270 | if (sched_feat(LB_WAKEUP_UPDATE)) { |
| 2281 | struct sched_domain *sd; | 2271 | struct sched_domain *sd; |
| @@ -6954,20 +6944,26 @@ static void free_rootdomain(struct root_domain *rd) | |||
| 6954 | 6944 | ||
| 6955 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) | 6945 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
| 6956 | { | 6946 | { |
| 6947 | struct root_domain *old_rd = NULL; | ||
| 6957 | unsigned long flags; | 6948 | unsigned long flags; |
| 6958 | 6949 | ||
| 6959 | spin_lock_irqsave(&rq->lock, flags); | 6950 | spin_lock_irqsave(&rq->lock, flags); |
| 6960 | 6951 | ||
| 6961 | if (rq->rd) { | 6952 | if (rq->rd) { |
| 6962 | struct root_domain *old_rd = rq->rd; | 6953 | old_rd = rq->rd; |
| 6963 | 6954 | ||
| 6964 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) | 6955 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
| 6965 | set_rq_offline(rq); | 6956 | set_rq_offline(rq); |
| 6966 | 6957 | ||
| 6967 | cpumask_clear_cpu(rq->cpu, old_rd->span); | 6958 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
| 6968 | 6959 | ||
| 6969 | if (atomic_dec_and_test(&old_rd->refcount)) | 6960 | /* |
| 6970 | free_rootdomain(old_rd); | 6961 | * If we dont want to free the old_rt yet then |
| 6962 | * set old_rd to NULL to skip the freeing later | ||
| 6963 | * in this function: | ||
| 6964 | */ | ||
| 6965 | if (!atomic_dec_and_test(&old_rd->refcount)) | ||
| 6966 | old_rd = NULL; | ||
| 6971 | } | 6967 | } |
| 6972 | 6968 | ||
| 6973 | atomic_inc(&rd->refcount); | 6969 | atomic_inc(&rd->refcount); |
| @@ -6978,6 +6974,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
| 6978 | set_rq_online(rq); | 6974 | set_rq_online(rq); |
| 6979 | 6975 | ||
| 6980 | spin_unlock_irqrestore(&rq->lock, flags); | 6976 | spin_unlock_irqrestore(&rq->lock, flags); |
| 6977 | |||
| 6978 | if (old_rd) | ||
| 6979 | free_rootdomain(old_rd); | ||
| 6981 | } | 6980 | } |
| 6982 | 6981 | ||
| 6983 | static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem) | 6982 | static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem) |
| @@ -9225,6 +9224,16 @@ static int sched_rt_global_constraints(void) | |||
| 9225 | 9224 | ||
| 9226 | return ret; | 9225 | return ret; |
| 9227 | } | 9226 | } |
| 9227 | |||
| 9228 | int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) | ||
| 9229 | { | ||
| 9230 | /* Don't accept realtime tasks when there is no way for them to run */ | ||
| 9231 | if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) | ||
| 9232 | return 0; | ||
| 9233 | |||
| 9234 | return 1; | ||
| 9235 | } | ||
| 9236 | |||
| 9228 | #else /* !CONFIG_RT_GROUP_SCHED */ | 9237 | #else /* !CONFIG_RT_GROUP_SCHED */ |
| 9229 | static int sched_rt_global_constraints(void) | 9238 | static int sched_rt_global_constraints(void) |
| 9230 | { | 9239 | { |
| @@ -9318,8 +9327,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | |||
| 9318 | struct task_struct *tsk) | 9327 | struct task_struct *tsk) |
| 9319 | { | 9328 | { |
| 9320 | #ifdef CONFIG_RT_GROUP_SCHED | 9329 | #ifdef CONFIG_RT_GROUP_SCHED |
| 9321 | /* Don't accept realtime tasks when there is no way for them to run */ | 9330 | if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) |
| 9322 | if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0) | ||
| 9323 | return -EINVAL; | 9331 | return -EINVAL; |
| 9324 | #else | 9332 | #else |
| 9325 | /* We don't support RT-tasks being in separate groups */ | 9333 | /* We don't support RT-tasks being in separate groups */ |
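rq_attach_root() now only records the old root domain while rq->lock is held and calls free_rootdomain() after the lock is dropped; sched_rt_can_attach() is factored out for reuse. The deferred-free shape, reduced to a hypothetical container (my_holder and my_thing are invented):

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct my_thing {
	atomic_t refcount;
	/* ... payload ... */
};

struct my_holder {
	spinlock_t	 lock;
	struct my_thing	*current_thing;
};

static void my_attach(struct my_holder *h, struct my_thing *new_thing)
{
	struct my_thing *old = NULL;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);

	if (h->current_thing) {
		old = h->current_thing;
		/* keep 'old' for freeing only if we dropped the last reference */
		if (!atomic_dec_and_test(&old->refcount))
			old = NULL;
	}

	atomic_inc(&new_thing->refcount);
	h->current_thing = new_thing;

	spin_unlock_irqrestore(&h->lock, flags);

	/* freeing may take other locks, so it happens outside h->lock */
	if (old)
		kfree(old);
}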
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index a7e50ba185ac..0566f2a03c42 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
| @@ -1191,15 +1191,20 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq, | |||
| 1191 | int idx, unsigned long load, unsigned long this_load, | 1191 | int idx, unsigned long load, unsigned long this_load, |
| 1192 | unsigned int imbalance) | 1192 | unsigned int imbalance) |
| 1193 | { | 1193 | { |
| 1194 | struct task_struct *curr = this_rq->curr; | ||
| 1195 | struct task_group *tg; | ||
| 1194 | unsigned long tl = this_load; | 1196 | unsigned long tl = this_load; |
| 1195 | unsigned long tl_per_task; | 1197 | unsigned long tl_per_task; |
| 1196 | struct task_group *tg; | ||
| 1197 | unsigned long weight; | 1198 | unsigned long weight; |
| 1198 | int balanced; | 1199 | int balanced; |
| 1199 | 1200 | ||
| 1200 | if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS)) | 1201 | if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS)) |
| 1201 | return 0; | 1202 | return 0; |
| 1202 | 1203 | ||
| 1204 | if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost || | ||
| 1205 | p->se.avg_overlap > sysctl_sched_migration_cost)) | ||
| 1206 | sync = 0; | ||
| 1207 | |||
| 1203 | /* | 1208 | /* |
| 1204 | * If sync wakeup then subtract the (maximum possible) | 1209 | * If sync wakeup then subtract the (maximum possible) |
| 1205 | * effect of the currently running task from the load | 1210 | * effect of the currently running task from the load |
| @@ -1426,7 +1431,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) | |||
| 1426 | if (!sched_feat(WAKEUP_PREEMPT)) | 1431 | if (!sched_feat(WAKEUP_PREEMPT)) |
| 1427 | return; | 1432 | return; |
| 1428 | 1433 | ||
| 1429 | if (sched_feat(WAKEUP_OVERLAP) && sync) { | 1434 | if (sched_feat(WAKEUP_OVERLAP) && (sync || |
| 1435 | (se->avg_overlap < sysctl_sched_migration_cost && | ||
| 1436 | pse->avg_overlap < sysctl_sched_migration_cost))) { | ||
| 1430 | resched_task(curr); | 1437 | resched_task(curr); |
| 1431 | return; | 1438 | return; |
| 1432 | } | 1439 | } |
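The sched_fair.c hunks above compare se->avg_overlap against sysctl_sched_migration_cost: wake_affine() drops the sync hint when either task's average overlap is too large, and check_preempt_wakeup() now also preempts when both overlaps are small. A rough standalone sketch of the first decision follows; the threshold constant and field names are made up, not the scheduler's real structures.

#include <stdbool.h>
#include <stdio.h>

#define MIGRATION_COST_NS 500000ULL     /* stand-in for sysctl_sched_migration_cost */

struct task_stats {
        unsigned long long avg_overlap_ns;
};

static bool keep_sync_hint(bool sync, const struct task_stats *curr,
                           const struct task_stats *wakee)
{
        /* Mirrors: if (sync && (curr overlap too big || wakee overlap too big))
         *                  sync = 0; */
        if (sync &&
            (curr->avg_overlap_ns > MIGRATION_COST_NS ||
             wakee->avg_overlap_ns > MIGRATION_COST_NS))
                return false;
        return sync;
}

int main(void)
{
        struct task_stats producer = { .avg_overlap_ns = 100000 };
        struct task_stats consumer = { .avg_overlap_ns = 900000 };

        printf("sync honoured: %d\n",
               keep_sync_hint(true, &producer, &consumer));    /* 0 */
        consumer.avg_overlap_ns = 50000;
        printf("sync honoured: %d\n",
               keep_sync_hint(true, &producer, &consumer));    /* 1 */
        return 0;
}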
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index ad64fcb731f2..57d4b13b631d 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | #include <linux/seccomp.h> | 9 | #include <linux/seccomp.h> |
| 10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
| 11 | #include <linux/compat.h> | ||
| 11 | 12 | ||
| 12 | /* #define SECCOMP_DEBUG 1 */ | 13 | /* #define SECCOMP_DEBUG 1 */ |
| 13 | #define NR_SECCOMP_MODES 1 | 14 | #define NR_SECCOMP_MODES 1 |
| @@ -22,7 +23,7 @@ static int mode1_syscalls[] = { | |||
| 22 | 0, /* null terminated */ | 23 | 0, /* null terminated */ |
| 23 | }; | 24 | }; |
| 24 | 25 | ||
| 25 | #ifdef TIF_32BIT | 26 | #ifdef CONFIG_COMPAT |
| 26 | static int mode1_syscalls_32[] = { | 27 | static int mode1_syscalls_32[] = { |
| 27 | __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, | 28 | __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, |
| 28 | 0, /* null terminated */ | 29 | 0, /* null terminated */ |
| @@ -37,8 +38,8 @@ void __secure_computing(int this_syscall) | |||
| 37 | switch (mode) { | 38 | switch (mode) { |
| 38 | case 1: | 39 | case 1: |
| 39 | syscall = mode1_syscalls; | 40 | syscall = mode1_syscalls; |
| 40 | #ifdef TIF_32BIT | 41 | #ifdef CONFIG_COMPAT |
| 41 | if (test_thread_flag(TIF_32BIT)) | 42 | if (is_compat_task()) |
| 42 | syscall = mode1_syscalls_32; | 43 | syscall = mode1_syscalls_32; |
| 43 | #endif | 44 | #endif |
| 44 | do { | 45 | do { |
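The seccomp change above only swaps how the 32-bit whitelist is selected (CONFIG_COMPAT plus is_compat_task() instead of TIF_32BIT); the mode-1 check itself is still a walk over a terminated syscall table. Here is a simplified userspace sketch of that walk, with made-up syscall numbers and a -1 terminator instead of the kernel's 0.

#include <stdbool.h>
#include <stdio.h>

static const int allowed_syscalls[] = {
        0 /* read */, 1 /* write */, 60 /* exit */, 15 /* sigreturn */,
        -1 /* terminator */
};

static bool syscall_allowed(int nr)
{
        const int *p;

        /* Scan the whitelist until the terminator; anything else is denied. */
        for (p = allowed_syscalls; *p != -1; p++)
                if (*p == nr)
                        return true;
        return false;
}

int main(void)
{
        printf("write allowed: %d\n", syscall_allowed(1));      /* 1 */
        printf("open allowed:  %d\n", syscall_allowed(2));      /* 0 */
        return 0;
}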
diff --git a/kernel/signal.c b/kernel/signal.c index 2a74fe87c0dd..1c8814481a11 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -1575,7 +1575,15 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |||
| 1575 | read_lock(&tasklist_lock); | 1575 | read_lock(&tasklist_lock); |
| 1576 | if (may_ptrace_stop()) { | 1576 | if (may_ptrace_stop()) { |
| 1577 | do_notify_parent_cldstop(current, CLD_TRAPPED); | 1577 | do_notify_parent_cldstop(current, CLD_TRAPPED); |
| 1578 | /* | ||
| 1579 | * Don't want to allow preemption here, because | ||
| 1580 | * sys_ptrace() needs this task to be inactive. | ||
| 1581 | * | ||
| 1582 | * XXX: implement read_unlock_no_resched(). | ||
| 1583 | */ | ||
| 1584 | preempt_disable(); | ||
| 1578 | read_unlock(&tasklist_lock); | 1585 | read_unlock(&tasklist_lock); |
| 1586 | preempt_enable_no_resched(); | ||
| 1579 | schedule(); | 1587 | schedule(); |
| 1580 | } else { | 1588 | } else { |
| 1581 | /* | 1589 | /* |
diff --git a/kernel/softirq.c b/kernel/softirq.c index bdbe9de9cd8d..9041ea7948fe 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -626,6 +626,7 @@ static int ksoftirqd(void * __bind_cpu) | |||
| 626 | preempt_enable_no_resched(); | 626 | preempt_enable_no_resched(); |
| 627 | cond_resched(); | 627 | cond_resched(); |
| 628 | preempt_disable(); | 628 | preempt_disable(); |
| 629 | rcu_qsctr_inc((long)__bind_cpu); | ||
| 629 | } | 630 | } |
| 630 | preempt_enable(); | 631 | preempt_enable(); |
| 631 | set_current_state(TASK_INTERRUPTIBLE); | 632 | set_current_state(TASK_INTERRUPTIBLE); |
diff --git a/kernel/sys.c b/kernel/sys.c index f145c415bc16..37f458e6882a 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -559,7 +559,7 @@ error: | |||
| 559 | abort_creds(new); | 559 | abort_creds(new); |
| 560 | return retval; | 560 | return retval; |
| 561 | } | 561 | } |
| 562 | 562 | ||
| 563 | /* | 563 | /* |
| 564 | * change the user struct in a credentials set to match the new UID | 564 | * change the user struct in a credentials set to match the new UID |
| 565 | */ | 565 | */ |
| @@ -571,6 +571,11 @@ static int set_user(struct cred *new) | |||
| 571 | if (!new_user) | 571 | if (!new_user) |
| 572 | return -EAGAIN; | 572 | return -EAGAIN; |
| 573 | 573 | ||
| 574 | if (!task_can_switch_user(new_user, current)) { | ||
| 575 | free_uid(new_user); | ||
| 576 | return -EINVAL; | ||
| 577 | } | ||
| 578 | |||
| 574 | if (atomic_read(&new_user->processes) >= | 579 | if (atomic_read(&new_user->processes) >= |
| 575 | current->signal->rlim[RLIMIT_NPROC].rlim_cur && | 580 | current->signal->rlim[RLIMIT_NPROC].rlim_cur && |
| 576 | new_user != INIT_USER) { | 581 | new_user != INIT_USER) { |
| @@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) | |||
| 631 | goto error; | 636 | goto error; |
| 632 | } | 637 | } |
| 633 | 638 | ||
| 634 | retval = -EAGAIN; | 639 | if (new->uid != old->uid) { |
| 635 | if (new->uid != old->uid && set_user(new) < 0) | 640 | retval = set_user(new); |
| 636 | goto error; | 641 | if (retval < 0) |
| 637 | 642 | goto error; | |
| 643 | } | ||
| 638 | if (ruid != (uid_t) -1 || | 644 | if (ruid != (uid_t) -1 || |
| 639 | (euid != (uid_t) -1 && euid != old->uid)) | 645 | (euid != (uid_t) -1 && euid != old->uid)) |
| 640 | new->suid = new->euid; | 646 | new->suid = new->euid; |
| @@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid) | |||
| 680 | retval = -EPERM; | 686 | retval = -EPERM; |
| 681 | if (capable(CAP_SETUID)) { | 687 | if (capable(CAP_SETUID)) { |
| 682 | new->suid = new->uid = uid; | 688 | new->suid = new->uid = uid; |
| 683 | if (uid != old->uid && set_user(new) < 0) { | 689 | if (uid != old->uid) { |
| 684 | retval = -EAGAIN; | 690 | retval = set_user(new); |
| 685 | goto error; | 691 | if (retval < 0) |
| 692 | goto error; | ||
| 686 | } | 693 | } |
| 687 | } else if (uid != old->uid && uid != new->suid) { | 694 | } else if (uid != old->uid && uid != new->suid) { |
| 688 | goto error; | 695 | goto error; |
| @@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) | |||
| 734 | goto error; | 741 | goto error; |
| 735 | } | 742 | } |
| 736 | 743 | ||
| 737 | retval = -EAGAIN; | ||
| 738 | if (ruid != (uid_t) -1) { | 744 | if (ruid != (uid_t) -1) { |
| 739 | new->uid = ruid; | 745 | new->uid = ruid; |
| 740 | if (ruid != old->uid && set_user(new) < 0) | 746 | if (ruid != old->uid) { |
| 741 | goto error; | 747 | retval = set_user(new); |
| 748 | if (retval < 0) | ||
| 749 | goto error; | ||
| 750 | } | ||
| 742 | } | 751 | } |
| 743 | if (euid != (uid_t) -1) | 752 | if (euid != (uid_t) -1) |
| 744 | new->euid = euid; | 753 | new->euid = euid; |
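The setreuid/setuid/setresuid hunks above stop hard-coding -EAGAIN and instead return whatever set_user() reports, so the new -EINVAL from task_can_switch_user() reaches userspace intact. A small sketch of that propagation pattern follows; the error triggers and helper names are invented for illustration.

#include <errno.h>
#include <stdio.h>

static int helper_set_user(int uid, int allow_switch)
{
        if (!allow_switch)
                return -EINVAL; /* e.g. RT task, target group has no runtime */
        if (uid == 12345)
                return -EAGAIN; /* e.g. process limit would be exceeded */
        return 0;
}

static int do_setuid(int new_uid, int old_uid, int allow_switch)
{
        int retval = 0;

        if (new_uid != old_uid) {
                retval = helper_set_user(new_uid, allow_switch);
                if (retval < 0)
                        return retval;  /* propagate -EINVAL or -EAGAIN as-is */
        }
        return 0;
}

int main(void)
{
        printf("%d\n", do_setuid(1000, 0, 0));  /* -EINVAL (-22) */
        printf("%d\n", do_setuid(12345, 0, 1)); /* -EAGAIN (-11) */
        printf("%d\n", do_setuid(1000, 0, 1));  /* 0 */
        return 0;
}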
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 790f9d785663..c5ef44ff850f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -101,6 +101,7 @@ static int two = 2; | |||
| 101 | 101 | ||
| 102 | static int zero; | 102 | static int zero; |
| 103 | static int one = 1; | 103 | static int one = 1; |
| 104 | static unsigned long one_ul = 1; | ||
| 104 | static int one_hundred = 100; | 105 | static int one_hundred = 100; |
| 105 | 106 | ||
| 106 | /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ | 107 | /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ |
| @@ -974,7 +975,7 @@ static struct ctl_table vm_table[] = { | |||
| 974 | .mode = 0644, | 975 | .mode = 0644, |
| 975 | .proc_handler = &dirty_background_bytes_handler, | 976 | .proc_handler = &dirty_background_bytes_handler, |
| 976 | .strategy = &sysctl_intvec, | 977 | .strategy = &sysctl_intvec, |
| 977 | .extra1 = &one, | 978 | .extra1 = &one_ul, |
| 978 | }, | 979 | }, |
| 979 | { | 980 | { |
| 980 | .ctl_name = VM_DIRTY_RATIO, | 981 | .ctl_name = VM_DIRTY_RATIO, |
| @@ -995,7 +996,7 @@ static struct ctl_table vm_table[] = { | |||
| 995 | .mode = 0644, | 996 | .mode = 0644, |
| 996 | .proc_handler = &dirty_bytes_handler, | 997 | .proc_handler = &dirty_bytes_handler, |
| 997 | .strategy = &sysctl_intvec, | 998 | .strategy = &sysctl_intvec, |
| 998 | .extra1 = &one, | 999 | .extra1 = &one_ul, |
| 999 | }, | 1000 | }, |
| 1000 | { | 1001 | { |
| 1001 | .procname = "dirty_writeback_centisecs", | 1002 | .procname = "dirty_writeback_centisecs", |
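The sysctl.c hunks above switch extra1 for the *_bytes knobs from &one (an int) to &one_ul because those entries hold unsigned long values and their bound is consumed as an unsigned long; on 64-bit an int bound is simply the wrong size. A tiny sketch of the size mismatch, with nothing here being the sysctl API itself:

#include <stdio.h>
#include <string.h>

int main(void)
{
        static const int one = 1;               /* old bound: 4 bytes on LP64 */
        static const unsigned long one_ul = 1;  /* new bound: 8 bytes on LP64 */
        unsigned long bound;

        /* A routine that treats the bound as an unsigned long needs
         * sizeof(unsigned long) valid bytes behind the pointer. */
        printf("old bound %d uses %zu bytes, new bound %lu uses %zu bytes\n",
               one, sizeof(one), one_ul, sizeof(one_ul));

        memcpy(&bound, &one_ul, sizeof(bound)); /* sizes match: well defined */
        printf("minimum enforced: %lu\n", bound);

        /* memcpy(&bound, &one, sizeof(bound)) would read past 'one'. */
        return 0;
}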
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index e2a4ff6fc3a6..34e707e5ab87 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
| @@ -52,6 +52,7 @@ config FUNCTION_TRACER | |||
| 52 | depends on HAVE_FUNCTION_TRACER | 52 | depends on HAVE_FUNCTION_TRACER |
| 53 | depends on DEBUG_KERNEL | 53 | depends on DEBUG_KERNEL |
| 54 | select FRAME_POINTER | 54 | select FRAME_POINTER |
| 55 | select KALLSYMS | ||
| 55 | select TRACING | 56 | select TRACING |
| 56 | select CONTEXT_SWITCH_TRACER | 57 | select CONTEXT_SWITCH_TRACER |
| 57 | help | 58 | help |
| @@ -238,6 +239,7 @@ config STACK_TRACER | |||
| 238 | depends on DEBUG_KERNEL | 239 | depends on DEBUG_KERNEL |
| 239 | select FUNCTION_TRACER | 240 | select FUNCTION_TRACER |
| 240 | select STACKTRACE | 241 | select STACKTRACE |
| 242 | select KALLSYMS | ||
| 241 | help | 243 | help |
| 242 | This special tracer records the maximum stack footprint of the | 244 | This special tracer records the maximum stack footprint of the |
| 243 | kernel and displays it in debugfs/tracing/stack_trace. | 245 | kernel and displays it in debugfs/tracing/stack_trace. |
| @@ -302,4 +304,27 @@ config FTRACE_STARTUP_TEST | |||
| 302 | functioning properly. It will do tests on all the configured | 304 | functioning properly. It will do tests on all the configured |
| 303 | tracers of ftrace. | 305 | tracers of ftrace. |
| 304 | 306 | ||
| 307 | config MMIOTRACE | ||
| 308 | bool "Memory mapped IO tracing" | ||
| 309 | depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI | ||
| 310 | select TRACING | ||
| 311 | help | ||
| 312 | Mmiotrace traces Memory Mapped I/O access and is meant for | ||
| 313 | debugging and reverse engineering. It is called from the ioremap | ||
| 314 | implementation and works via page faults. Tracing is disabled by | ||
| 315 | default and can be enabled at run-time. | ||
| 316 | |||
| 317 | See Documentation/tracers/mmiotrace.txt. | ||
| 318 | If you are not helping to develop drivers, say N. | ||
| 319 | |||
| 320 | config MMIOTRACE_TEST | ||
| 321 | tristate "Test module for mmiotrace" | ||
| 322 | depends on MMIOTRACE && m | ||
| 323 | help | ||
| 324 | This is a dumb module for testing mmiotrace. It is very dangerous | ||
| 325 | as it will write garbage to IO memory starting at a given address. | ||
| 326 | However, it should be safe to use on e.g. an unused portion of VRAM. | ||
| 327 | |||
| 328 | Say N, unless you absolutely know what you are doing. | ||
| 329 | |||
| 305 | endmenu | 330 | endmenu |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 9a236ffe2aa4..fdf913dfc7e8 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -2033,7 +2033,7 @@ free: | |||
| 2033 | static int start_graph_tracing(void) | 2033 | static int start_graph_tracing(void) |
| 2034 | { | 2034 | { |
| 2035 | struct ftrace_ret_stack **ret_stack_list; | 2035 | struct ftrace_ret_stack **ret_stack_list; |
| 2036 | int ret; | 2036 | int ret, cpu; |
| 2037 | 2037 | ||
| 2038 | ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE * | 2038 | ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE * |
| 2039 | sizeof(struct ftrace_ret_stack *), | 2039 | sizeof(struct ftrace_ret_stack *), |
| @@ -2042,6 +2042,10 @@ static int start_graph_tracing(void) | |||
| 2042 | if (!ret_stack_list) | 2042 | if (!ret_stack_list) |
| 2043 | return -ENOMEM; | 2043 | return -ENOMEM; |
| 2044 | 2044 | ||
| 2045 | /* The cpu_boot init_task->ret_stack will never be freed */ | ||
| 2046 | for_each_online_cpu(cpu) | ||
| 2047 | ftrace_graph_init_task(idle_task(cpu)); | ||
| 2048 | |||
| 2045 | do { | 2049 | do { |
| 2046 | ret = alloc_retstack_tasklist(ret_stack_list); | 2050 | ret = alloc_retstack_tasklist(ret_stack_list); |
| 2047 | } while (ret == -EAGAIN); | 2051 | } while (ret == -EAGAIN); |
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index fffcb069f1dc..80e503ef6136 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
| 10 | #include <linux/mmiotrace.h> | 10 | #include <linux/mmiotrace.h> |
| 11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
| 12 | #include <asm/atomic.h> | ||
| 12 | 13 | ||
| 13 | #include "trace.h" | 14 | #include "trace.h" |
| 14 | 15 | ||
| @@ -19,6 +20,7 @@ struct header_iter { | |||
| 19 | static struct trace_array *mmio_trace_array; | 20 | static struct trace_array *mmio_trace_array; |
| 20 | static bool overrun_detected; | 21 | static bool overrun_detected; |
| 21 | static unsigned long prev_overruns; | 22 | static unsigned long prev_overruns; |
| 23 | static atomic_t dropped_count; | ||
| 22 | 24 | ||
| 23 | static void mmio_reset_data(struct trace_array *tr) | 25 | static void mmio_reset_data(struct trace_array *tr) |
| 24 | { | 26 | { |
| @@ -121,11 +123,11 @@ static void mmio_close(struct trace_iterator *iter) | |||
| 121 | 123 | ||
| 122 | static unsigned long count_overruns(struct trace_iterator *iter) | 124 | static unsigned long count_overruns(struct trace_iterator *iter) |
| 123 | { | 125 | { |
| 124 | unsigned long cnt = 0; | 126 | unsigned long cnt = atomic_xchg(&dropped_count, 0); |
| 125 | unsigned long over = ring_buffer_overruns(iter->tr->buffer); | 127 | unsigned long over = ring_buffer_overruns(iter->tr->buffer); |
| 126 | 128 | ||
| 127 | if (over > prev_overruns) | 129 | if (over > prev_overruns) |
| 128 | cnt = over - prev_overruns; | 130 | cnt += over - prev_overruns; |
| 129 | prev_overruns = over; | 131 | prev_overruns = over; |
| 130 | return cnt; | 132 | return cnt; |
| 131 | } | 133 | } |
| @@ -310,8 +312,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, | |||
| 310 | 312 | ||
| 311 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 313 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
| 312 | &irq_flags); | 314 | &irq_flags); |
| 313 | if (!event) | 315 | if (!event) { |
| 316 | atomic_inc(&dropped_count); | ||
| 314 | return; | 317 | return; |
| 318 | } | ||
| 315 | entry = ring_buffer_event_data(event); | 319 | entry = ring_buffer_event_data(event); |
| 316 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); | 320 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); |
| 317 | entry->ent.type = TRACE_MMIO_RW; | 321 | entry->ent.type = TRACE_MMIO_RW; |
| @@ -338,8 +342,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr, | |||
| 338 | 342 | ||
| 339 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 343 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
| 340 | &irq_flags); | 344 | &irq_flags); |
| 341 | if (!event) | 345 | if (!event) { |
| 346 | atomic_inc(&dropped_count); | ||
| 342 | return; | 347 | return; |
| 348 | } | ||
| 343 | entry = ring_buffer_event_data(event); | 349 | entry = ring_buffer_event_data(event); |
| 344 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); | 350 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); |
| 345 | entry->ent.type = TRACE_MMIO_MAP; | 351 | entry->ent.type = TRACE_MMIO_MAP; |
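The mmiotrace hunks above add an atomic dropped_count: writers bump it when ring_buffer_lock_reserve() fails, and count_overruns() drains it with atomic_xchg() so each drop is reported exactly once. A userspace C11 sketch of the same counter discipline, with invented buffer and helper names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong dropped_count;

static bool buffer_reserve(int have_room)
{
        if (!have_room) {
                /* could not get an event slot: remember the drop */
                atomic_fetch_add(&dropped_count, 1);
                return false;
        }
        return true;
}

static unsigned long count_drops(void)
{
        /* read-and-reset in one step, like atomic_xchg(&dropped_count, 0) */
        return atomic_exchange(&dropped_count, 0);
}

int main(void)
{
        buffer_reserve(0);
        buffer_reserve(0);
        buffer_reserve(1);
        printf("dropped: %lu\n", count_drops());        /* 2 */
        printf("dropped: %lu\n", count_drops());        /* 0 */
        return 0;
}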
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 88c8eb70f54a..bc8e80a86bca 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
| @@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu) | |||
| 23 | { | 23 | { |
| 24 | struct ring_buffer_event *event; | 24 | struct ring_buffer_event *event; |
| 25 | struct trace_entry *entry; | 25 | struct trace_entry *entry; |
| 26 | unsigned int loops = 0; | ||
| 26 | 27 | ||
| 27 | while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) { | 28 | while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) { |
| 28 | entry = ring_buffer_event_data(event); | 29 | entry = ring_buffer_event_data(event); |
| 29 | 30 | ||
| 31 | /* | ||
| 32 | * The ring buffer has a size of trace_buf_size; if | ||
| 33 | * we loop more than the size, there's something wrong | ||
| 34 | * with the ring buffer. | ||
| 35 | */ | ||
| 36 | if (loops++ > trace_buf_size) { | ||
| 37 | printk(KERN_CONT ".. bad ring buffer "); | ||
| 38 | goto failed; | ||
| 39 | } | ||
| 30 | if (!trace_valid_entry(entry)) { | 40 | if (!trace_valid_entry(entry)) { |
| 31 | printk(KERN_CONT ".. invalid entry %d ", | 41 | printk(KERN_CONT ".. invalid entry %d ", |
| 32 | entry->type); | 42 | entry->type); |
| @@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
| 57 | 67 | ||
| 58 | cnt = ring_buffer_entries(tr->buffer); | 68 | cnt = ring_buffer_entries(tr->buffer); |
| 59 | 69 | ||
| 70 | /* | ||
| 71 | * The trace_test_buffer_cpu runs a while loop to consume all data. | ||
| 72 | * If the calling tracer is broken and is constantly filling | ||
| 73 | * the buffer, this will run forever and hard lock the box. | ||
| 74 | * We disable the ring buffer while we do this test to prevent | ||
| 75 | * a hard lockup. | ||
| 76 | */ | ||
| 77 | tracing_off(); | ||
| 60 | for_each_possible_cpu(cpu) { | 78 | for_each_possible_cpu(cpu) { |
| 61 | ret = trace_test_buffer_cpu(tr, cpu); | 79 | ret = trace_test_buffer_cpu(tr, cpu); |
| 62 | if (ret) | 80 | if (ret) |
| 63 | break; | 81 | break; |
| 64 | } | 82 | } |
| 83 | tracing_on(); | ||
| 65 | __raw_spin_unlock(&ftrace_max_lock); | 84 | __raw_spin_unlock(&ftrace_max_lock); |
| 66 | local_irq_restore(flags); | 85 | local_irq_restore(flags); |
| 67 | 86 | ||
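The selftest hunks above bound the consume loop by trace_buf_size and wrap the whole check in tracing_off()/tracing_on() so a misbehaving tracer cannot keep refilling the buffer and wedge the machine. A sketch of just the loop-guard part, with a toy buffer size and consumer:

#include <stdio.h>

#define BUF_SIZE 8

static int fake_consume(int *left)
{
        /* a broken producer could keep this from ever returning 0 */
        return (*left)-- > 0;
}

static int drain(int entries)
{
        unsigned int loops = 0;

        while (fake_consume(&entries)) {
                /* more iterations than the buffer can hold: give up */
                if (loops++ > BUF_SIZE) {
                        fprintf(stderr, "bad ring buffer\n");
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        printf("sane buffer:   %d\n", drain(4));        /* 0 */
        printf("broken buffer: %d\n", drain(1000));     /* -1 */
        return 0;
}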
diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 43f891b05a4b..00d59d048edf 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c | |||
| @@ -122,8 +122,10 @@ void acct_update_integrals(struct task_struct *tsk) | |||
| 122 | if (likely(tsk->mm)) { | 122 | if (likely(tsk->mm)) { |
| 123 | cputime_t time, dtime; | 123 | cputime_t time, dtime; |
| 124 | struct timeval value; | 124 | struct timeval value; |
| 125 | unsigned long flags; | ||
| 125 | u64 delta; | 126 | u64 delta; |
| 126 | 127 | ||
| 128 | local_irq_save(flags); | ||
| 127 | time = tsk->stime + tsk->utime; | 129 | time = tsk->stime + tsk->utime; |
| 128 | dtime = cputime_sub(time, tsk->acct_timexpd); | 130 | dtime = cputime_sub(time, tsk->acct_timexpd); |
| 129 | jiffies_to_timeval(cputime_to_jiffies(dtime), &value); | 131 | jiffies_to_timeval(cputime_to_jiffies(dtime), &value); |
| @@ -131,10 +133,12 @@ void acct_update_integrals(struct task_struct *tsk) | |||
| 131 | delta = delta * USEC_PER_SEC + value.tv_usec; | 133 | delta = delta * USEC_PER_SEC + value.tv_usec; |
| 132 | 134 | ||
| 133 | if (delta == 0) | 135 | if (delta == 0) |
| 134 | return; | 136 | goto out; |
| 135 | tsk->acct_timexpd = time; | 137 | tsk->acct_timexpd = time; |
| 136 | tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); | 138 | tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); |
| 137 | tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; | 139 | tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; |
| 140 | out: | ||
| 141 | local_irq_restore(flags); | ||
| 138 | } | 142 | } |
| 139 | } | 143 | } |
| 140 | 144 | ||
diff --git a/kernel/user.c b/kernel/user.c index 477b6660f447..fbb300e6191f 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
| @@ -72,6 +72,7 @@ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent) | |||
| 72 | static void uid_hash_remove(struct user_struct *up) | 72 | static void uid_hash_remove(struct user_struct *up) |
| 73 | { | 73 | { |
| 74 | hlist_del_init(&up->uidhash_node); | 74 | hlist_del_init(&up->uidhash_node); |
| 75 | put_user_ns(up->user_ns); | ||
| 75 | } | 76 | } |
| 76 | 77 | ||
| 77 | static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) | 78 | static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) |
| @@ -285,14 +286,12 @@ int __init uids_sysfs_init(void) | |||
| 285 | /* work function to remove sysfs directory for a user and free up | 286 | /* work function to remove sysfs directory for a user and free up |
| 286 | * corresponding structures. | 287 | * corresponding structures. |
| 287 | */ | 288 | */ |
| 288 | static void remove_user_sysfs_dir(struct work_struct *w) | 289 | static void cleanup_user_struct(struct work_struct *w) |
| 289 | { | 290 | { |
| 290 | struct user_struct *up = container_of(w, struct user_struct, work); | 291 | struct user_struct *up = container_of(w, struct user_struct, work); |
| 291 | unsigned long flags; | 292 | unsigned long flags; |
| 292 | int remove_user = 0; | 293 | int remove_user = 0; |
| 293 | 294 | ||
| 294 | if (up->user_ns != &init_user_ns) | ||
| 295 | return; | ||
| 296 | /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del() | 295 | /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del() |
| 297 | * atomic. | 296 | * atomic. |
| 298 | */ | 297 | */ |
| @@ -311,9 +310,11 @@ static void remove_user_sysfs_dir(struct work_struct *w) | |||
| 311 | if (!remove_user) | 310 | if (!remove_user) |
| 312 | goto done; | 311 | goto done; |
| 313 | 312 | ||
| 314 | kobject_uevent(&up->kobj, KOBJ_REMOVE); | 313 | if (up->user_ns == &init_user_ns) { |
| 315 | kobject_del(&up->kobj); | 314 | kobject_uevent(&up->kobj, KOBJ_REMOVE); |
| 316 | kobject_put(&up->kobj); | 315 | kobject_del(&up->kobj); |
| 316 | kobject_put(&up->kobj); | ||
| 317 | } | ||
| 317 | 318 | ||
| 318 | sched_destroy_user(up); | 319 | sched_destroy_user(up); |
| 319 | key_put(up->uid_keyring); | 320 | key_put(up->uid_keyring); |
| @@ -334,8 +335,7 @@ static void free_user(struct user_struct *up, unsigned long flags) | |||
| 334 | atomic_inc(&up->__count); | 335 | atomic_inc(&up->__count); |
| 335 | spin_unlock_irqrestore(&uidhash_lock, flags); | 336 | spin_unlock_irqrestore(&uidhash_lock, flags); |
| 336 | 337 | ||
| 337 | put_user_ns(up->user_ns); | 338 | INIT_WORK(&up->work, cleanup_user_struct); |
| 338 | INIT_WORK(&up->work, remove_user_sysfs_dir); | ||
| 339 | schedule_work(&up->work); | 339 | schedule_work(&up->work); |
| 340 | } | 340 | } |
| 341 | 341 | ||
| @@ -357,12 +357,29 @@ static void free_user(struct user_struct *up, unsigned long flags) | |||
| 357 | sched_destroy_user(up); | 357 | sched_destroy_user(up); |
| 358 | key_put(up->uid_keyring); | 358 | key_put(up->uid_keyring); |
| 359 | key_put(up->session_keyring); | 359 | key_put(up->session_keyring); |
| 360 | put_user_ns(up->user_ns); | ||
| 361 | kmem_cache_free(uid_cachep, up); | 360 | kmem_cache_free(uid_cachep, up); |
| 362 | } | 361 | } |
| 363 | 362 | ||
| 364 | #endif | 363 | #endif |
| 365 | 364 | ||
| 365 | #if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED) | ||
| 366 | /* | ||
| 367 | * We need to check if a setuid can take place. This function should be called | ||
| 368 | * before successfully completing the setuid. | ||
| 369 | */ | ||
| 370 | int task_can_switch_user(struct user_struct *up, struct task_struct *tsk) | ||
| 371 | { | ||
| 372 | |||
| 373 | return sched_rt_can_attach(up->tg, tsk); | ||
| 374 | |||
| 375 | } | ||
| 376 | #else | ||
| 377 | int task_can_switch_user(struct user_struct *up, struct task_struct *tsk) | ||
| 378 | { | ||
| 379 | return 1; | ||
| 380 | } | ||
| 381 | #endif | ||
| 382 | |||
| 366 | /* | 383 | /* |
| 367 | * Locate the user_struct for the passed UID. If found, take a ref on it. The | 384 | * Locate the user_struct for the passed UID. If found, take a ref on it. The |
| 368 | * caller must undo that ref with free_uid(). | 385 | * caller must undo that ref with free_uid(). |
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 79084311ee57..076c7c8215b0 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
| @@ -60,12 +60,25 @@ int create_user_ns(struct cred *new) | |||
| 60 | return 0; | 60 | return 0; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | void free_user_ns(struct kref *kref) | 63 | /* |
| 64 | * Deferred destructor for a user namespace. This is required because | ||
| 65 | * free_user_ns() may be called with uidhash_lock held, but we need to call | ||
| 66 | * back to free_uid() which will want to take the lock again. | ||
| 67 | */ | ||
| 68 | static void free_user_ns_work(struct work_struct *work) | ||
| 64 | { | 69 | { |
| 65 | struct user_namespace *ns; | 70 | struct user_namespace *ns = |
| 66 | 71 | container_of(work, struct user_namespace, destroyer); | |
| 67 | ns = container_of(kref, struct user_namespace, kref); | ||
| 68 | free_uid(ns->creator); | 72 | free_uid(ns->creator); |
| 69 | kfree(ns); | 73 | kfree(ns); |
| 70 | } | 74 | } |
| 75 | |||
| 76 | void free_user_ns(struct kref *kref) | ||
| 77 | { | ||
| 78 | struct user_namespace *ns = | ||
| 79 | container_of(kref, struct user_namespace, kref); | ||
| 80 | |||
| 81 | INIT_WORK(&ns->destroyer, free_user_ns_work); | ||
| 82 | schedule_work(&ns->destroyer); | ||
| 83 | } | ||
| 71 | EXPORT_SYMBOL(free_user_ns); | 84 | EXPORT_SYMBOL(free_user_ns); |
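The user.c and user_namespace.c changes above move the namespace refcount drop into uid_hash_remove()/cleanup_user_struct() and turn free_user_ns() into a stub that merely schedules free_user_ns_work(), because the final free_uid() wants uidhash_lock while the caller may already hold it. Below is a userspace sketch of that defer-to-another-context idea, with a detached pthread standing in for the kernel workqueue; all names are illustrative.

/* build: cc -pthread defer_ns.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

struct user_ns {
        int id;
};

static void *ns_destroy_work(void *arg)
{
        struct user_ns *ns = arg;

        /* Safe to take hash_lock here: we are no longer inside the
         * caller's critical section. */
        pthread_mutex_lock(&hash_lock);
        printf("tearing down ns %d under hash_lock\n", ns->id);
        pthread_mutex_unlock(&hash_lock);
        free(ns);
        return NULL;
}

static void release_ns(struct user_ns *ns)
{
        pthread_t worker;

        /* Called with hash_lock possibly held; only queue the work. */
        pthread_create(&worker, NULL, ns_destroy_work, ns);
        pthread_detach(worker);
}

int main(void)
{
        struct user_ns *ns = calloc(1, sizeof(*ns));

        ns->id = 7;
        pthread_mutex_lock(&hash_lock);
        release_ns(ns);                 /* would deadlock if done inline */
        pthread_mutex_unlock(&hash_lock);

        sleep(1);       /* crude wait so the detached worker can run */
        return 0;
}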
