Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/audit.c              |  67
 -rw-r--r--  kernel/audit.h              |   5
 -rw-r--r--  kernel/audit_tree.c         |   9
 -rw-r--r--  kernel/audit_watch.c        |   4
 -rw-r--r--  kernel/auditfilter.c        |  12
 -rw-r--r--  kernel/auditsc.c            |  16
 -rw-r--r--  kernel/cgroup.c             |  11
 -rw-r--r--  kernel/cpuset.c             |  13
 -rw-r--r--  kernel/debug/debug_core.c   |  16
 -rw-r--r--  kernel/debug/kdb/kdb_main.c |  48
 -rw-r--r--  kernel/resource.c           | 151
 -rw-r--r--  kernel/sched.c              |   8
 -rw-r--r--  kernel/sched_fair.c         |  25
 -rw-r--r--  kernel/sched_stats.h        |  20
14 files changed, 253 insertions, 152 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index d96045789b54..77770a034d59 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -467,23 +467,16 @@ static int audit_prepare_user_tty(pid_t pid, uid_t loginuid, u32 sessionid)
         struct task_struct *tsk;
         int err;
 
-        read_lock(&tasklist_lock);
+        rcu_read_lock();
         tsk = find_task_by_vpid(pid);
-        err = -ESRCH;
-        if (!tsk)
-                goto out;
-        err = 0;
-
-        spin_lock_irq(&tsk->sighand->siglock);
-        if (!tsk->signal->audit_tty)
-                err = -EPERM;
-        spin_unlock_irq(&tsk->sighand->siglock);
-        if (err)
-                goto out;
-
-        tty_audit_push_task(tsk, loginuid, sessionid);
-out:
-        read_unlock(&tasklist_lock);
+        if (!tsk) {
+                rcu_read_unlock();
+                return -ESRCH;
+        }
+        get_task_struct(tsk);
+        rcu_read_unlock();
+        err = tty_audit_push_task(tsk, loginuid, sessionid);
+        put_task_struct(tsk);
         return err;
 }
 
@@ -506,7 +499,7 @@ int audit_send_list(void *_dest)
 }
 
 struct sk_buff *audit_make_reply(int pid, int seq, int type, int done,
-                                 int multi, void *payload, int size)
+                                 int multi, const void *payload, int size)
 {
         struct sk_buff *skb;
         struct nlmsghdr *nlh;
@@ -555,8 +548,8 @@ static int audit_send_reply_thread(void *arg)
  * Allocates an skb, builds the netlink message, and sends it to the pid.
  * No failure notifications.
  */
-void audit_send_reply(int pid, int seq, int type, int done, int multi,
-                      void *payload, int size)
+static void audit_send_reply(int pid, int seq, int type, int done, int multi,
+                             const void *payload, int size)
 {
         struct sk_buff *skb;
         struct task_struct *tsk;
@@ -880,40 +873,40 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         case AUDIT_TTY_GET: {
                 struct audit_tty_status s;
                 struct task_struct *tsk;
+                unsigned long flags;
 
-                read_lock(&tasklist_lock);
+                rcu_read_lock();
                 tsk = find_task_by_vpid(pid);
-                if (!tsk)
-                        err = -ESRCH;
-                else {
-                        spin_lock_irq(&tsk->sighand->siglock);
+                if (tsk && lock_task_sighand(tsk, &flags)) {
                         s.enabled = tsk->signal->audit_tty != 0;
-                        spin_unlock_irq(&tsk->sighand->siglock);
-                }
-                read_unlock(&tasklist_lock);
-                audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
-                                 &s, sizeof(s));
+                        unlock_task_sighand(tsk, &flags);
+                } else
+                        err = -ESRCH;
+                rcu_read_unlock();
+
+                if (!err)
+                        audit_send_reply(NETLINK_CB(skb).pid, seq,
+                                         AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
                 break;
         }
         case AUDIT_TTY_SET: {
                 struct audit_tty_status *s;
                 struct task_struct *tsk;
+                unsigned long flags;
 
                 if (nlh->nlmsg_len < sizeof(struct audit_tty_status))
                         return -EINVAL;
                 s = data;
                 if (s->enabled != 0 && s->enabled != 1)
                         return -EINVAL;
-                read_lock(&tasklist_lock);
+                rcu_read_lock();
                 tsk = find_task_by_vpid(pid);
-                if (!tsk)
-                        err = -ESRCH;
-                else {
-                        spin_lock_irq(&tsk->sighand->siglock);
+                if (tsk && lock_task_sighand(tsk, &flags)) {
                         tsk->signal->audit_tty = s->enabled != 0;
-                        spin_unlock_irq(&tsk->sighand->siglock);
-                }
-                read_unlock(&tasklist_lock);
+                        unlock_task_sighand(tsk, &flags);
+                } else
+                        err = -ESRCH;
+                rcu_read_unlock();
                 break;
         }
         default:
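Both audit.c hunks apply the same idiom when replacing tasklist_lock: look the task up under rcu_read_lock(), and when the task must outlive the RCU section, pin it with get_task_struct() before unlocking, then drop the reference when done. A minimal sketch of that pattern; the names act_on_task() and do_blocking_work() are illustrative, not part of this patch:

/* Sketch: operate on a task found by pid, without holding tasklist_lock. */
static int act_on_task(pid_t pid)
{
        struct task_struct *tsk;
        int err;

        rcu_read_lock();
        tsk = find_task_by_vpid(pid);   /* pointer valid only under RCU */
        if (!tsk) {
                rcu_read_unlock();
                return -ESRCH;
        }
        get_task_struct(tsk);           /* pin the task past the RCU section */
        rcu_read_unlock();

        err = do_blocking_work(tsk);    /* safe to sleep here */
        put_task_struct(tsk);
        return err;
}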
diff --git a/kernel/audit.h b/kernel/audit.h
index f7206db4e13d..91e7071c4d2c 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -84,10 +84,7 @@ extern int audit_compare_dname_path(const char *dname, const char *path,
                                     int *dirlen);
 extern struct sk_buff *audit_make_reply(int pid, int seq, int type,
                                         int done, int multi,
-                                        void *payload, int size);
-extern void audit_send_reply(int pid, int seq, int type,
-                             int done, int multi,
-                             void *payload, int size);
+                                        const void *payload, int size);
 extern void audit_panic(const char *message);
 
 struct audit_netlink_list {
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 7f18d3a4527e..37b2bea170c8 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -223,7 +223,7 @@ static void untag_chunk(struct node *p)
 {
         struct audit_chunk *chunk = find_chunk(p);
         struct fsnotify_mark *entry = &chunk->mark;
-        struct audit_chunk *new;
+        struct audit_chunk *new = NULL;
         struct audit_tree *owner;
         int size = chunk->count - 1;
         int i, j;
@@ -232,9 +232,14 @@ static void untag_chunk(struct node *p)
 
         spin_unlock(&hash_lock);
 
+        if (size)
+                new = alloc_chunk(size);
+
         spin_lock(&entry->lock);
         if (chunk->dead || !entry->i.inode) {
                 spin_unlock(&entry->lock);
+                if (new)
+                        free_chunk(new);
                 goto out;
         }
 
@@ -255,9 +260,9 @@ static void untag_chunk(struct node *p)
                 goto out;
         }
 
-        new = alloc_chunk(size);
         if (!new)
                 goto Fallback;
+
         fsnotify_duplicate_mark(&new->mark, entry);
         if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
                 free_chunk(new);
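The audit_tree.c change is the usual fix for allocating under a spinlock: alloc_chunk() is hoisted out in front of spin_lock(&entry->lock), and the early-exit path frees the now-unneeded preallocation. A generic sketch of the shape, with hypothetical names:

/* Sketch: preallocate before taking the lock; free if the exit path bails. */
static void replace_object(struct container *c)
{
        struct object *new = NULL;

        if (c->count > 1)
                new = alloc_object(c->count - 1);  /* may sleep: do it now */

        spin_lock(&c->lock);
        if (c->dead) {
                spin_unlock(&c->lock);
                free_object(new);  /* unused; assumed NULL-safe like kfree() */
                return;
        }
        if (!new)
                goto fallback;     /* allocation failed: degrade gracefully */
        /* ... swap "new" in while holding the lock ... */
        spin_unlock(&c->lock);
        return;
fallback:
        /* ... handle the no-replacement case ... */
        spin_unlock(&c->lock);
}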
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index f0c9b2e7542d..d2e3c7866460 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -60,7 +60,7 @@ struct audit_parent {
 };
 
 /* fsnotify handle. */
-struct fsnotify_group *audit_watch_group;
+static struct fsnotify_group *audit_watch_group;
 
 /* fsnotify events we care about. */
 #define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\
@@ -123,7 +123,7 @@ void audit_put_watch(struct audit_watch *watch)
         }
 }
 
-void audit_remove_watch(struct audit_watch *watch)
+static void audit_remove_watch(struct audit_watch *watch)
 {
         list_del(&watch->wlist);
         audit_put_parent(watch->parent);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index eb7675499fb5..add2819af71b 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1252,6 +1252,18 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
                 case AUDIT_LOGINUID:
                         result = audit_comparator(cb->loginuid, f->op, f->val);
                         break;
+                case AUDIT_SUBJ_USER:
+                case AUDIT_SUBJ_ROLE:
+                case AUDIT_SUBJ_TYPE:
+                case AUDIT_SUBJ_SEN:
+                case AUDIT_SUBJ_CLR:
+                        if (f->lsm_rule)
+                                result = security_audit_rule_match(cb->sid,
+                                                                   f->type,
+                                                                   f->op,
+                                                                   f->lsm_rule,
+                                                                   NULL);
+                        break;
                 }
 
                 if (!result)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 1b31c130d034..f49a0318c2ed 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -241,6 +241,10 @@ struct audit_context {
                         pid_t pid;
                         struct audit_cap_data cap;
                 } capset;
+                struct {
+                        int fd;
+                        int flags;
+                } mmap;
         };
         int fds[2];
 
@@ -1305,6 +1309,10 @@ static void show_special(struct audit_context *context, int *call_panic)
                 audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted);
                 audit_log_cap(ab, "cap_pe", &context->capset.cap.effective);
                 break; }
+        case AUDIT_MMAP: {
+                audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd,
+                                 context->mmap.flags);
+                break; }
         }
         audit_log_end(ab);
 }
@@ -2476,6 +2484,14 @@ void __audit_log_capset(pid_t pid,
         context->type = AUDIT_CAPSET;
 }
 
+void __audit_mmap_fd(int fd, int flags)
+{
+        struct audit_context *context = current->audit_context;
+        context->mmap.fd = fd;
+        context->mmap.flags = flags;
+        context->type = AUDIT_MMAP;
+}
+
 /**
  * audit_core_dumps - record information about processes that end abnormally
  * @signr: signal value
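__audit_mmap_fd() is a bare collector in the usual audit style: syscall sites are expected to call it through an inline wrapper that first checks audit_dummy_context(), so the common no-audit path costs only a branch. The header-side wrapper lives outside this kernel/ diff; a sketch of the conventional form:

/* include/linux/audit.h side (sketch, not part of this diff) */
static inline void audit_mmap_fd(int fd, int flags)
{
        if (unlikely(!audit_dummy_context()))
                __audit_mmap_fd(fd, flags);
}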
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5cf366965d0c..66a416b42c18 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1460,9 +1460,9 @@ static int cgroup_get_rootdir(struct super_block *sb)
         return 0;
 }
 
-static int cgroup_get_sb(struct file_system_type *fs_type,
-                         int flags, const char *unused_dev_name,
-                         void *data, struct vfsmount *mnt)
+static struct dentry *cgroup_mount(struct file_system_type *fs_type,
+                         int flags, const char *unused_dev_name,
+                         void *data)
 {
         struct cgroup_sb_opts opts;
         struct cgroupfs_root *root;
@@ -1596,10 +1596,9 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
                 drop_parsed_module_refcounts(opts.subsys_bits);
         }
 
-        simple_set_mnt(mnt, sb);
         kfree(opts.release_agent);
         kfree(opts.name);
-        return 0;
+        return dget(sb->s_root);
 
  drop_new_super:
         deactivate_locked_super(sb);
@@ -1608,7 +1607,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
  out_err:
         kfree(opts.release_agent);
         kfree(opts.name);
-        return ret;
+        return ERR_PTR(ret);
 }
 
 static void cgroup_kill_sb(struct super_block *sb) {
@@ -1658,7 +1657,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
 
 static struct file_system_type cgroup_fs_type = {
         .name = "cgroup",
-        .get_sb = cgroup_get_sb,
+        .mount = cgroup_mount,
         .kill_sb = cgroup_kill_sb,
 };
 
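cgroup_mount() is part of the tree-wide ->get_sb() to ->mount() conversion: instead of filling in a struct vfsmount, the method now returns the root dentry (or an ERR_PTR() on failure), which is why simple_set_mnt() disappears in favor of dget(sb->s_root). A filesystem with no special superblock handling typically converts to a one-line call into a mount_*() helper; a hypothetical minimal example (examplefs and its fill_super are invented):

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
                                      int flags, const char *dev_name,
                                      void *data)
{
        /* mount_nodev() allocates the superblock and calls fill_super */
        return mount_nodev(fs_type, flags, data, examplefs_fill_super);
}

static struct file_system_type examplefs_type = {
        .name    = "examplefs",
        .mount   = examplefs_mount,
        .kill_sb = kill_anon_super,
};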
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 51b143e2a07a..4349935c2ad8 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -231,18 +231,17 @@ static DEFINE_SPINLOCK(cpuset_buffer_lock);
  * users. If someone tries to mount the "cpuset" filesystem, we
  * silently switch it to mount "cgroup" instead
  */
-static int cpuset_get_sb(struct file_system_type *fs_type,
-                         int flags, const char *unused_dev_name,
-                         void *data, struct vfsmount *mnt)
+static struct dentry *cpuset_mount(struct file_system_type *fs_type,
+                         int flags, const char *unused_dev_name, void *data)
 {
         struct file_system_type *cgroup_fs = get_fs_type("cgroup");
-        int ret = -ENODEV;
+        struct dentry *ret = ERR_PTR(-ENODEV);
         if (cgroup_fs) {
                 char mountopts[] =
                         "cpuset,noprefix,"
                         "release_agent=/sbin/cpuset_release_agent";
-                ret = cgroup_fs->get_sb(cgroup_fs, flags,
-                                        unused_dev_name, mountopts, mnt);
+                ret = cgroup_fs->mount(cgroup_fs, flags,
+                                        unused_dev_name, mountopts);
                 put_filesystem(cgroup_fs);
         }
         return ret;
@@ -250,7 +249,7 @@ static int cpuset_get_sb(struct file_system_type *fs_type,
 
 static struct file_system_type cpuset_fs_type = {
         .name = "cpuset",
-        .get_sb = cpuset_get_sb,
+        .mount = cpuset_mount,
 };
 
 /*
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index fec596da9bd0..cefd4a11f6d9 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -209,18 +209,6 @@ int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
         return 0;
 }
 
-/**
- * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
- * @regs: Current &struct pt_regs.
- *
- * This function will be called if the particular architecture must
- * disable hardware debugging while it is processing gdb packets or
- * handling exception.
- */
-void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
-{
-}
-
 /*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint:
@@ -484,7 +472,9 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
                 atomic_inc(&masters_in_kgdb);
         else
                 atomic_inc(&slaves_in_kgdb);
-        kgdb_disable_hw_debug(ks->linux_regs);
+
+        if (arch_kgdb_ops.disable_hw_break)
+                arch_kgdb_ops.disable_hw_break(regs);
 
 acquirelock:
         /*
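With the weak kgdb_disable_hw_debug() stub removed, an architecture now opts in by filling the disable_hw_break member of its arch_kgdb_ops, and the core calls the hook only when it is non-NULL. A sketch of the arch side; the function name and the elided members are illustrative:

/* arch/<arch>/kernel/kgdb.c (sketch) */
static void my_arch_disable_hw_break(struct pt_regs *regs)
{
        /* clear the CPU's hardware breakpoint/watchpoint state here */
}

struct kgdb_arch arch_kgdb_ops = {
        /* ... other members (gdb_bpt_instr, flags, ...) ... */
        .disable_hw_break = my_arch_disable_hw_break,
};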
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index d7bda21a106b..37755d621924 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1127,7 +1127,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                 /* special case below */
         } else {
                 kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
-                           kdb_current, kdb_current->pid);
+                           kdb_current, kdb_current ? kdb_current->pid : 0);
 #if defined(CONFIG_SMP)
                 kdb_printf("on processor %d ", raw_smp_processor_id());
 #endif
@@ -2603,20 +2603,17 @@ static int kdb_summary(int argc, const char **argv)
  */
 static int kdb_per_cpu(int argc, const char **argv)
 {
-        char buf[256], fmtstr[64];
-        kdb_symtab_t symtab;
-        cpumask_t suppress = CPU_MASK_NONE;
-        int cpu, diag;
-        unsigned long addr, val, bytesperword = 0, whichcpu = ~0UL;
+        char fmtstr[64];
+        int cpu, diag, nextarg = 1;
+        unsigned long addr, symaddr, val, bytesperword = 0, whichcpu = ~0UL;
 
         if (argc < 1 || argc > 3)
                 return KDB_ARGCOUNT;
 
-        snprintf(buf, sizeof(buf), "per_cpu__%s", argv[1]);
-        if (!kdbgetsymval(buf, &symtab)) {
-                kdb_printf("%s is not a per_cpu variable\n", argv[1]);
-                return KDB_BADADDR;
-        }
+        diag = kdbgetaddrarg(argc, argv, &nextarg, &symaddr, NULL, NULL);
+        if (diag)
+                return diag;
+
         if (argc >= 2) {
                 diag = kdbgetularg(argv[2], &bytesperword);
                 if (diag)
@@ -2649,46 +2646,25 @@ static int kdb_per_cpu(int argc, const char **argv)
 #define KDB_PCU(cpu) 0
 #endif
 #endif
-
         for_each_online_cpu(cpu) {
+                if (KDB_FLAG(CMD_INTERRUPT))
+                        return 0;
+
                 if (whichcpu != ~0UL && whichcpu != cpu)
                         continue;
-                addr = symtab.sym_start + KDB_PCU(cpu);
+                addr = symaddr + KDB_PCU(cpu);
                 diag = kdb_getword(&val, addr, bytesperword);
                 if (diag) {
                         kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to "
                                    "read, diag=%d\n", cpu, addr, diag);
                         continue;
                 }
-#ifdef CONFIG_SMP
-                if (!val) {
-                        cpu_set(cpu, suppress);
-                        continue;
-                }
-#endif /* CONFIG_SMP */
                 kdb_printf("%5d ", cpu);
                 kdb_md_line(fmtstr, addr,
                         bytesperword == KDB_WORD_SIZE,
                         1, bytesperword, 1, 1, 0);
         }
-        if (cpus_weight(suppress) == 0)
-                return 0;
-        kdb_printf("Zero suppressed cpu(s):");
-        for (cpu = first_cpu(suppress); cpu < num_possible_cpus();
-             cpu = next_cpu(cpu, suppress)) {
-                kdb_printf(" %d", cpu);
-                if (cpu == num_possible_cpus() - 1 ||
-                    next_cpu(cpu, suppress) != cpu + 1)
-                        continue;
-                while (cpu < num_possible_cpus() &&
-                       next_cpu(cpu, suppress) == cpu + 1)
-                        ++cpu;
-                kdb_printf("-%d", cpu);
-        }
-        kdb_printf("\n");
-
 #undef KDB_PCU
-
         return 0;
 }
 
diff --git a/kernel/resource.c b/kernel/resource.c
index 9c9841cb6902..9fad33efd0db 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -40,6 +40,23 @@ EXPORT_SYMBOL(iomem_resource);
 
 static DEFINE_RWLOCK(resource_lock);
 
+/*
+ * By default, we allocate free space bottom-up. The architecture can request
+ * top-down by clearing this flag. The user can override the architecture's
+ * choice with the "resource_alloc_from_bottom" kernel boot option, but that
+ * should only be a debugging tool.
+ */
+int resource_alloc_from_bottom = 1;
+
+static __init int setup_alloc_from_bottom(char *s)
+{
+        printk(KERN_INFO
+               "resource: allocating from bottom-up; please report a bug\n");
+        resource_alloc_from_bottom = 1;
+        return 0;
+}
+early_param("resource_alloc_from_bottom", setup_alloc_from_bottom);
+
 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
 {
         struct resource *p = v;
@@ -357,8 +374,97 @@ int __weak page_is_ram(unsigned long pfn)
         return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
 }
 
+static resource_size_t simple_align_resource(void *data,
+                                             const struct resource *avail,
+                                             resource_size_t size,
+                                             resource_size_t align)
+{
+        return avail->start;
+}
+
+static void resource_clip(struct resource *res, resource_size_t min,
+                          resource_size_t max)
+{
+        if (res->start < min)
+                res->start = min;
+        if (res->end > max)
+                res->end = max;
+}
+
+static bool resource_contains(struct resource *res1, struct resource *res2)
+{
+        return res1->start <= res2->start && res1->end >= res2->end;
+}
+
+/*
+ * Find the resource before "child" in the sibling list of "root" children.
+ */
+static struct resource *find_sibling_prev(struct resource *root, struct resource *child)
+{
+        struct resource *this;
+
+        for (this = root->child; this; this = this->sibling)
+                if (this->sibling == child)
+                        return this;
+
+        return NULL;
+}
+
 /*
  * Find empty slot in the resource tree given range and alignment.
+ * This version allocates from the end of the root resource first.
+ */
+static int find_resource_from_top(struct resource *root, struct resource *new,
+                                  resource_size_t size, resource_size_t min,
+                                  resource_size_t max, resource_size_t align,
+                                  resource_size_t (*alignf)(void *,
+                                                            const struct resource *,
+                                                            resource_size_t,
+                                                            resource_size_t),
+                                  void *alignf_data)
+{
+        struct resource *this;
+        struct resource tmp, avail, alloc;
+
+        tmp.start = root->end;
+        tmp.end = root->end;
+
+        this = find_sibling_prev(root, NULL);
+        for (;;) {
+                if (this) {
+                        if (this->end < root->end)
+                                tmp.start = this->end + 1;
+                } else
+                        tmp.start = root->start;
+
+                resource_clip(&tmp, min, max);
+
+                /* Check for overflow after ALIGN() */
+                avail = *new;
+                avail.start = ALIGN(tmp.start, align);
+                avail.end = tmp.end;
+                if (avail.start >= tmp.start) {
+                        alloc.start = alignf(alignf_data, &avail, size, align);
+                        alloc.end = alloc.start + size - 1;
+                        if (resource_contains(&avail, &alloc)) {
+                                new->start = alloc.start;
+                                new->end = alloc.end;
+                                return 0;
+                        }
+                }
+
+                if (!this || this->start == root->start)
+                        break;
+
+                tmp.end = this->start - 1;
+                this = find_sibling_prev(root, this);
+        }
+        return -EBUSY;
+}
+
+/*
+ * Find empty slot in the resource tree given range and alignment.
+ * This version allocates from the beginning of the root resource first.
  */
 static int find_resource(struct resource *root, struct resource *new,
                          resource_size_t size, resource_size_t min,
@@ -370,36 +476,43 @@ static int find_resource(struct resource *root, struct resource *new,
                          void *alignf_data)
 {
         struct resource *this = root->child;
-        struct resource tmp = *new;
+        struct resource tmp = *new, avail, alloc;
 
         tmp.start = root->start;
         /*
-         * Skip past an allocated resource that starts at 0, since the assignment
-         * of this->start - 1 to tmp->end below would cause an underflow.
+         * Skip past an allocated resource that starts at 0, since the
+         * assignment of this->start - 1 to tmp->end below would cause an
+         * underflow.
          */
         if (this && this->start == 0) {
                 tmp.start = this->end + 1;
                 this = this->sibling;
         }
-        for(;;) {
+        for (;;) {
                 if (this)
                         tmp.end = this->start - 1;
                 else
                         tmp.end = root->end;
-                if (tmp.start < min)
-                        tmp.start = min;
-                if (tmp.end > max)
-                        tmp.end = max;
-                tmp.start = ALIGN(tmp.start, align);
-                if (alignf)
-                        tmp.start = alignf(alignf_data, &tmp, size, align);
-                if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
-                        new->start = tmp.start;
-                        new->end = tmp.start + size - 1;
-                        return 0;
+
+                resource_clip(&tmp, min, max);
+
+                /* Check for overflow after ALIGN() */
+                avail = *new;
+                avail.start = ALIGN(tmp.start, align);
+                avail.end = tmp.end;
+                if (avail.start >= tmp.start) {
+                        alloc.start = alignf(alignf_data, &avail, size, align);
+                        alloc.end = alloc.start + size - 1;
+                        if (resource_contains(&avail, &alloc)) {
+                                new->start = alloc.start;
+                                new->end = alloc.end;
+                                return 0;
+                        }
                 }
+
                 if (!this)
                         break;
+
                 tmp.start = this->end + 1;
                 this = this->sibling;
         }
@@ -428,8 +541,14 @@ int allocate_resource(struct resource *root, struct resource *new,
 {
         int err;
 
+        if (!alignf)
+                alignf = simple_align_resource;
+
         write_lock(&resource_lock);
-        err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+        if (resource_alloc_from_bottom)
+                err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+        else
+                err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data);
         if (err >= 0 && __request_resource(root, new))
                 err = -EBUSY;
         write_unlock(&resource_lock);
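Two behavioural points fall out of the resource.c hunks: allocations now come from the top of the root resource unless resource_alloc_from_bottom is set, and a NULL alignf is substituted with simple_align_resource() so the search loops can call the callback unconditionally. A caller that needs no custom alignment still just passes NULL; an illustrative allocation of a 4 KiB MMIO window (the name and values are invented, not from this patch):

/* Sketch: claim a 4 KiB aligned 4 KiB region anywhere in iomem_resource. */
static int example_claim_window(struct resource *res)
{
        res->name  = "example";
        res->flags = IORESOURCE_MEM;

        return allocate_resource(&iomem_resource, res, 0x1000,
                                 0, (resource_size_t)-1, 0x1000,
                                 NULL, NULL);   /* default alignf kicks in */
}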
diff --git a/kernel/sched.c b/kernel/sched.c
index d42992bccdfa..aa14a56f9d03 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8510,12 +8510,12 @@ void sched_move_task(struct task_struct *tsk)
         if (unlikely(running))
                 tsk->sched_class->put_prev_task(rq, tsk);
 
-        set_task_rq(tsk, task_cpu(tsk));
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
-        if (tsk->sched_class->moved_group)
-                tsk->sched_class->moved_group(tsk, on_rq);
+        if (tsk->sched_class->task_move_group)
+                tsk->sched_class->task_move_group(tsk, on_rq);
+        else
 #endif
+                set_task_rq(tsk, task_cpu(tsk));
 
         if (unlikely(running))
                 tsk->sched_class->set_curr_task(rq);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 933f3d1b62ea..f4f6a8326dd0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3869,13 +3869,26 @@ static void set_curr_task_fair(struct rq *rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int on_rq)
 {
-        struct cfs_rq *cfs_rq = task_cfs_rq(p);
-
-        update_curr(cfs_rq);
+        /*
+         * If the task was not on the rq at the time of this cgroup movement
+         * it must have been asleep, sleeping tasks keep their ->vruntime
+         * absolute on their old rq until wakeup (needed for the fair sleeper
+         * bonus in place_entity()).
+         *
+         * If it was on the rq, we've just 'preempted' it, which does convert
+         * ->vruntime to a relative base.
+         *
+         * Make sure both cases convert their relative position when migrating
+         * to another cgroup's rq. This does somewhat interfere with the
+         * fair sleeper stuff for the first placement, but who cares.
+         */
+        if (!on_rq)
+                p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+        set_task_rq(p, task_cpu(p));
         if (!on_rq)
-                place_entity(cfs_rq, &p->se, 1);
+                p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
 }
 #endif
 
@@ -3927,7 +3940,7 @@ static const struct sched_class fair_sched_class = {
         .get_rr_interval = get_rr_interval_fair,
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-        .moved_group = moved_group_fair,
+        .task_move_group = task_move_group_fair,
 #endif
 };
 
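The scheduler change hinges on one invariant: a queued (or just-preempted) task carries a vruntime relative to its cfs_rq's min_vruntime, while a sleeping task still holds an absolute value from its old queue, so task_move_group_fair() re-bases a sleeper by hand around set_task_rq(). A toy arithmetic illustration of that re-basing, with invented numbers:

/* Toy illustration only; these are not kernel values. */
u64 vruntime = 10500;   /* absolute, on the old cfs_rq */
u64 old_min  = 10000;   /* old cfs_rq->min_vruntime */
u64 new_min  = 70000;   /* new cfs_rq->min_vruntime */

vruntime -= old_min;    /* 500: position relative to the old queue */
/* ... set_task_rq() switches the task to the new group's cfs_rq ... */
vruntime += new_min;    /* 70500: same relative position, new base */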
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 25c2f962f6fc..48ddf431db0e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -157,15 +157,7 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
 }
 
 /*
- * Called when a process is dequeued from the active array and given
- * the cpu. We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue. (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * Though we are interested in knowing how long it was from the *first* time a
+ * We are interested in knowing how long it was from the *first* time a
  * task was queued to the time that it finally hit a cpu, we call this routine
  * from dequeue_task() to account for possible rq->clock skew across cpus. The
  * delta taken on each cpu would annul the skew.
@@ -203,16 +195,6 @@ static void sched_info_arrive(struct task_struct *t)
 }
 
 /*
- * Called when a process is queued into either the active or expired
- * array. The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu. Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either. It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
  * This function is only called from enqueue_task(), but also only updates
  * the timestamp if it is already not set. It's assumed that
  * sched_info_dequeued() will clear that stamp when appropriate.
