Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c        |   6
-rw-r--r--  kernel/auditfilter.c  |   3
-rw-r--r--  kernel/capability.c   | 132
-rw-r--r--  kernel/cpuset.c       |  20
-rw-r--r--  kernel/futex.c        |  93
-rw-r--r--  kernel/hrtimer.c      |   8
-rw-r--r--  kernel/kgdb.c         |  19
-rw-r--r--  kernel/kprobes.c      |  15
-rw-r--r--  kernel/rcuclassic.c   |  16
-rw-r--r--  kernel/rcupreempt.c   |   2
-rw-r--r--  kernel/sched.c        |  74
-rw-r--r--  kernel/sched_rt.c     |  66
-rw-r--r--  kernel/sched_stats.h  |   6
-rw-r--r--  kernel/softlockup.c   |  16
-rw-r--r--  kernel/workqueue.c    |   2
15 files changed, 338 insertions, 140 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index e8692a5748c2..e092f1c0ce30 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -738,7 +738,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (!audit_enabled && msg_type != AUDIT_USER_AVC)
 		return 0;
 
-	err = audit_filter_user(&NETLINK_CB(skb), msg_type);
+	err = audit_filter_user(&NETLINK_CB(skb));
 	if (err == 1) {
 		err = 0;
 		if (msg_type == AUDIT_USER_TTY) {
@@ -779,7 +779,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		}
 		/* fallthrough */
 	case AUDIT_LIST:
-		err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
+		err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
 					   uid, seq, data, nlmsg_len(nlh),
 					   loginuid, sessionid, sid);
 		break;
@@ -798,7 +798,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		}
 		/* fallthrough */
 	case AUDIT_LIST_RULES:
-		err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
+		err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
 					   uid, seq, data, nlmsg_len(nlh),
 					   loginuid, sessionid, sid);
 		break;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 0e0bd27e6512..98c50cc671bb 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1544,6 +1544,7 @@ static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid,
  * @data: payload data
  * @datasz: size of payload data
  * @loginuid: loginuid of sender
+ * @sessionid: sessionid for netlink audit message
  * @sid: SE Linux Security ID of sender
  */
 int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
@@ -1720,7 +1721,7 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
 	return 1;
 }
 
-int audit_filter_user(struct netlink_skb_parms *cb, int type)
+int audit_filter_user(struct netlink_skb_parms *cb)
 {
 	enum audit_state state = AUDIT_DISABLED;
 	struct audit_entry *e;
diff --git a/kernel/capability.c b/kernel/capability.c
index 39e8193b41ea..901e0fdc3fff 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -53,11 +53,95 @@ static void warn_legacy_capability_use(void)
 }
 
 /*
+ * Version 2 capabilities worked fine, but the linux/capability.h file
+ * that accompanied their introduction encouraged their use without
+ * the necessary user-space source code changes. As such, we have
+ * created a version 3 with equivalent functionality to version 2, but
+ * with a header change to protect legacy source code from using
+ * version 2 when it wanted to use version 1. If your system has code
+ * that trips the following warning, it is using version 2 specific
+ * capabilities and may be doing so insecurely.
+ *
+ * The remedy is to either upgrade your version of libcap (to 2.10+,
+ * if the application is linked against it), or recompile your
+ * application with modern kernel headers and this warning will go
+ * away.
+ */
+
+static void warn_deprecated_v2(void)
+{
+	static int warned;
+
+	if (!warned) {
+		char name[sizeof(current->comm)];
+
+		printk(KERN_INFO "warning: `%s' uses deprecated v2"
+		       " capabilities in a way that may be insecure.\n",
+		       get_task_comm(name, current));
+		warned = 1;
+	}
+}
+
+/*
+ * Version check. Return the number of u32s in each capability flag
+ * array, or a negative value on error.
+ */
+static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
+{
+	__u32 version;
+
+	if (get_user(version, &header->version))
+		return -EFAULT;
+
+	switch (version) {
+	case _LINUX_CAPABILITY_VERSION_1:
+		warn_legacy_capability_use();
+		*tocopy = _LINUX_CAPABILITY_U32S_1;
+		break;
+	case _LINUX_CAPABILITY_VERSION_2:
+		warn_deprecated_v2();
+		/*
+		 * fall through - v3 is otherwise equivalent to v2.
+		 */
+	case _LINUX_CAPABILITY_VERSION_3:
+		*tocopy = _LINUX_CAPABILITY_U32S_3;
+		break;
+	default:
+		if (put_user((u32)_KERNEL_CAPABILITY_VERSION, &header->version))
+			return -EFAULT;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
  * For sys_getproccap() and sys_setproccap(), any of the three
  * capability set pointers may be NULL -- indicating that that set is
  * uninteresting and/or not to be changed.
  */
 
+/*
+ * Atomically modify the effective capabilities returning the original
+ * value. No permission check is performed here - it is assumed that the
+ * caller is permitted to set the desired effective capabilities.
+ */
+kernel_cap_t cap_set_effective(const kernel_cap_t pE_new)
+{
+	kernel_cap_t pE_old;
+
+	spin_lock(&task_capability_lock);
+
+	pE_old = current->cap_effective;
+	current->cap_effective = pE_new;
+
+	spin_unlock(&task_capability_lock);
+
+	return pE_old;
+}
+
+EXPORT_SYMBOL(cap_set_effective);
+
 /**
  * sys_capget - get the capabilities of a given process.
  * @header: pointer to struct that contains capability version and
@@ -71,27 +155,13 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
 {
 	int ret = 0;
 	pid_t pid;
-	__u32 version;
 	struct task_struct *target;
 	unsigned tocopy;
 	kernel_cap_t pE, pI, pP;
 
-	if (get_user(version, &header->version))
-		return -EFAULT;
-
-	switch (version) {
-	case _LINUX_CAPABILITY_VERSION_1:
-		warn_legacy_capability_use();
-		tocopy = _LINUX_CAPABILITY_U32S_1;
-		break;
-	case _LINUX_CAPABILITY_VERSION_2:
-		tocopy = _LINUX_CAPABILITY_U32S_2;
-		break;
-	default:
-		if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
-			return -EFAULT;
-		return -EINVAL;
-	}
+	ret = cap_validate_magic(header, &tocopy);
+	if (ret != 0)
+		return ret;
 
 	if (get_user(pid, &header->pid))
 		return -EFAULT;
@@ -118,7 +188,7 @@ out:
 	spin_unlock(&task_capability_lock);
 
 	if (!ret) {
-		struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+		struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
 		unsigned i;
 
 		for (i = 0; i < tocopy; i++) {
@@ -128,7 +198,7 @@ out:
 		}
 
 		/*
-		 * Note, in the case, tocopy < _LINUX_CAPABILITY_U32S,
+		 * Note, in the case, tocopy < _KERNEL_CAPABILITY_U32S,
 		 * we silently drop the upper capabilities here. This
 		 * has the effect of making older libcap
 		 * implementations implicitly drop upper capability
@@ -240,30 +310,16 @@ static inline int cap_set_all(kernel_cap_t *effective,
  */
 asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 {
-	struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+	struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
 	unsigned i, tocopy;
 	kernel_cap_t inheritable, permitted, effective;
-	__u32 version;
 	struct task_struct *target;
 	int ret;
 	pid_t pid;
 
-	if (get_user(version, &header->version))
-		return -EFAULT;
-
-	switch (version) {
-	case _LINUX_CAPABILITY_VERSION_1:
-		warn_legacy_capability_use();
-		tocopy = _LINUX_CAPABILITY_U32S_1;
-		break;
-	case _LINUX_CAPABILITY_VERSION_2:
-		tocopy = _LINUX_CAPABILITY_U32S_2;
-		break;
-	default:
-		if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
-			return -EFAULT;
-		return -EINVAL;
-	}
+	ret = cap_validate_magic(header, &tocopy);
+	if (ret != 0)
+		return ret;
 
 	if (get_user(pid, &header->pid))
 		return -EFAULT;
@@ -281,7 +337,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 		permitted.cap[i] = kdata[i].permitted;
 		inheritable.cap[i] = kdata[i].inheritable;
 	}
-	while (i < _LINUX_CAPABILITY_U32S) {
+	while (i < _KERNEL_CAPABILITY_U32S) {
 		effective.cap[i] = 0;
 		permitted.cap[i] = 0;
 		inheritable.cap[i] = 0;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 86ea9e34e326..9fceb97e989c 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -797,8 +797,10 @@ static int update_cpumask(struct cpuset *cs, char *buf)
 		retval = cpulist_parse(buf, trialcs.cpus_allowed);
 		if (retval < 0)
 			return retval;
+
+		if (!cpus_subset(trialcs.cpus_allowed, cpu_online_map))
+			return -EINVAL;
 	}
-	cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
 	retval = validate_change(cs, &trialcs);
 	if (retval < 0)
 		return retval;
@@ -932,9 +934,11 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 		retval = nodelist_parse(buf, trialcs.mems_allowed);
 		if (retval < 0)
 			goto done;
+
+		if (!nodes_subset(trialcs.mems_allowed,
+				node_states[N_HIGH_MEMORY]))
+			return -EINVAL;
 	}
-	nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
-			node_states[N_HIGH_MEMORY]);
 	oldmem = cs->mems_allowed;
 	if (nodes_equal(oldmem, trialcs.mems_allowed)) {
 		retval = 0;		/* Too easy - nothing to do */
@@ -1033,8 +1037,8 @@ int current_cpuset_is_being_rebound(void)
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
-	if ((int)val < 0)
-		val = -1;
+	if (val < -1 || val >= SD_LV_MAX)
+		return -EINVAL;
 
 	if (val != cs->relax_domain_level) {
 		cs->relax_domain_level = val;
@@ -1886,6 +1890,12 @@ static void common_cpu_mem_hotplug_unplug(void)
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 	scan_for_empty_cpusets(&top_cpuset);
 
+	/*
+	 * Scheduler destroys domains on hotplug events.
+	 * Rebuild them based on the current settings.
+	 */
+	rebuild_sched_domains();
+
 	cgroup_unlock();
 }
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 449def8074fe..7d1136e97c14 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1096,21 +1096,64 @@ static void unqueue_me_pi(struct futex_q *q)
  * private futexes.
  */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-				struct task_struct *newowner)
+				struct task_struct *newowner,
+				struct rw_semaphore *fshared)
 {
 	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
+	struct task_struct *oldowner = pi_state->owner;
 	u32 uval, curval, newval;
-	int ret;
+	int ret, attempt = 0;
 
 	/* Owner died? */
+	if (!pi_state->owner)
+		newtid |= FUTEX_OWNER_DIED;
+
+	/*
+	 * We are here either because we stole the rtmutex from the
+	 * pending owner or we are the pending owner which failed to
+	 * get the rtmutex. We have to replace the pending owner TID
+	 * in the user space variable. This must be atomic as we have
+	 * to preserve the owner died bit here.
+	 *
+	 * Note: We write the user space value _before_ changing the
+	 * pi_state because we can fault here. Imagine swapped out
+	 * pages or a fork, which was running right before we acquired
+	 * mmap_sem, that marked all the anonymous memory readonly for
+	 * cow.
+	 *
+	 * Modifying pi_state _before_ the user space value would
+	 * leave the pi_state in an inconsistent state when we fault
+	 * here, because we need to drop the hash bucket lock to
+	 * handle the fault. This might be observed in the PID check
+	 * in lookup_pi_state.
+	 */
+retry:
+	if (get_futex_value_locked(&uval, uaddr))
+		goto handle_fault;
+
+	while (1) {
+		newval = (uval & FUTEX_OWNER_DIED) | newtid;
+
+		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+
+		if (curval == -EFAULT)
+			goto handle_fault;
+		if (curval == uval)
+			break;
+		uval = curval;
+	}
+
+	/*
+	 * We fixed up user space. Now we need to fix the pi_state
+	 * itself.
+	 */
 	if (pi_state->owner != NULL) {
 		spin_lock_irq(&pi_state->owner->pi_lock);
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
 		spin_unlock_irq(&pi_state->owner->pi_lock);
-	} else
-		newtid |= FUTEX_OWNER_DIED;
+	}
 
 	pi_state->owner = newowner;
 
@@ -1118,26 +1161,35 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &newowner->pi_state_list);
 	spin_unlock_irq(&newowner->pi_lock);
+	return 0;
 
 	/*
-	 * We own it, so we have to replace the pending owner
-	 * TID. This must be atomic as we have preserve the
-	 * owner died bit here.
+	 * To handle the page fault we need to drop the hash bucket
+	 * lock here. That gives the other task (either the pending
+	 * owner itself or the task which stole the rtmutex) the
+	 * chance to try the fixup of the pi_state. So once we are
+	 * back from handling the fault we need to check the pi_state
+	 * after reacquiring the hash bucket lock and before trying to
+	 * do another fixup. When the fixup has been done already we
+	 * simply return.
 	 */
-	ret = get_futex_value_locked(&uval, uaddr);
+handle_fault:
+	spin_unlock(q->lock_ptr);
 
-	while (!ret) {
-		newval = (uval & FUTEX_OWNER_DIED) | newtid;
+	ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
 
-		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+	spin_lock(q->lock_ptr);
 
-		if (curval == -EFAULT)
-			ret = -EFAULT;
-		if (curval == uval)
-			break;
-		uval = curval;
-	}
-	return ret;
+	/*
+	 * Check if someone else fixed it for us:
+	 */
+	if (pi_state->owner != oldowner)
+		return 0;
+
+	if (ret)
+		return ret;
+
+	goto retry;
 }
 
 /*
@@ -1507,7 +1559,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		 * that case:
 		 */
 		if (q.pi_state->owner != curr)
-			ret = fixup_pi_state_owner(uaddr, &q, curr);
+			ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
 	} else {
 		/*
 		 * Catch the rare case, where the lock was released
@@ -1539,7 +1591,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 			int res;
 
 			owner = rt_mutex_owner(&q.pi_state->pi_mutex);
-			res = fixup_pi_state_owner(uaddr, &q, owner);
+			res = fixup_pi_state_owner(uaddr, &q, owner,
+						   fshared);
 
 			/* propagate -EFAULT, if the fixup failed */
 			if (res)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 421be5fe5cc7..ab80515008f4 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1003,10 +1003,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 	 */
 	raise = timer->state == HRTIMER_STATE_PENDING;
 
+	/*
+	 * We use preempt_disable to prevent this task from migrating after
+	 * setting up the softirq and raising it. Otherwise, if me migrate
+	 * we will raise the softirq on the wrong CPU.
+	 */
+	preempt_disable();
+
 	unlock_hrtimer_base(timer, &flags);
 
 	if (raise)
 		hrtimer_raise_softirq();
+	preempt_enable();
 
 	return ret;
 }
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 14787de568b3..3ec23c3ec97f 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -52,6 +52,7 @@
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
+#include <asm/unaligned.h>
 
 static int kgdb_break_asap;
 
@@ -227,8 +228,6 @@ void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
  * GDB remote protocol parser:
  */
 
-static const char hexchars[] = "0123456789abcdef";
-
 static int hex(char ch)
 {
 	if ((ch >= 'a') && (ch <= 'f'))
@@ -316,8 +315,8 @@ static void put_packet(char *buffer)
 	}
 
 	kgdb_io_ops->write_char('#');
-	kgdb_io_ops->write_char(hexchars[checksum >> 4]);
-	kgdb_io_ops->write_char(hexchars[checksum & 0xf]);
+	kgdb_io_ops->write_char(hex_asc_hi(checksum));
+	kgdb_io_ops->write_char(hex_asc_lo(checksum));
 	if (kgdb_io_ops->flush)
 		kgdb_io_ops->flush();
 
@@ -478,8 +477,8 @@ static void error_packet(char *pkt, int error)
 {
 	error = -error;
 	pkt[0] = 'E';
-	pkt[1] = hexchars[(error / 10)];
-	pkt[2] = hexchars[(error % 10)];
+	pkt[1] = hex_asc[(error / 10)];
+	pkt[2] = hex_asc[(error % 10)];
 	pkt[3] = '\0';
 }
 
@@ -510,10 +509,7 @@ static void int_to_threadref(unsigned char *id, int value)
 	scan = (unsigned char *)id;
 	while (i--)
 		*scan++ = 0;
-	*scan++ = (value >> 24) & 0xff;
-	*scan++ = (value >> 16) & 0xff;
-	*scan++ = (value >> 8) & 0xff;
-	*scan++ = (value & 0xff);
+	put_unaligned_be32(value, scan);
 }
 
 static struct task_struct *getthread(struct pt_regs *regs, int tid)
@@ -1503,7 +1499,8 @@ int kgdb_nmicallback(int cpu, void *regs)
 	return 1;
 }
 
-void kgdb_console_write(struct console *co, const char *s, unsigned count)
+static void kgdb_console_write(struct console *co, const char *s,
+			       unsigned count)
 {
 	unsigned long flags;
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1e0250cb9486..d4998f81e229 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -699,8 +699,9 @@ static int __register_kprobes(struct kprobe **kps, int num,
 		return -EINVAL;
 	for (i = 0; i < num; i++) {
 		ret = __register_kprobe(kps[i], called_from);
-		if (ret < 0 && i > 0) {
-			unregister_kprobes(kps, i);
+		if (ret < 0) {
+			if (i > 0)
+				unregister_kprobes(kps, i);
 			break;
 		}
 	}
@@ -776,8 +777,9 @@ static int __register_jprobes(struct jprobe **jps, int num,
 			jp->kp.break_handler = longjmp_break_handler;
 			ret = __register_kprobe(&jp->kp, called_from);
 		}
-		if (ret < 0 && i > 0) {
-			unregister_jprobes(jps, i);
+		if (ret < 0) {
+			if (i > 0)
+				unregister_jprobes(jps, i);
 			break;
 		}
 	}
@@ -920,8 +922,9 @@ static int __register_kretprobes(struct kretprobe **rps, int num,
 		return -EINVAL;
 	for (i = 0; i < num; i++) {
 		ret = __register_kretprobe(rps[i], called_from);
-		if (ret < 0 && i > 0) {
-			unregister_kretprobes(rps, i);
+		if (ret < 0) {
+			if (i > 0)
+				unregister_kretprobes(rps, i);
 			break;
 		}
 	}
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index f4ffbd0f306f..a38895a5b8e2 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -89,8 +89,22 @@ static void force_quiescent_state(struct rcu_data *rdp,
 	/*
 	 * Don't send IPI to itself. With irqs disabled,
 	 * rdp->cpu is the current cpu.
+	 *
+	 * cpu_online_map is updated by the _cpu_down()
+	 * using stop_machine_run(). Since we're in irqs disabled
+	 * section, stop_machine_run() is not exectuting, hence
+	 * the cpu_online_map is stable.
+	 *
+	 * However, a cpu might have been offlined _just_ before
+	 * we disabled irqs while entering here.
+	 * And rcu subsystem might not yet have handled the CPU_DEAD
+	 * notification, leading to the offlined cpu's bit
+	 * being set in the rcp->cpumask.
+	 *
+	 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
+	 * sending smp_reschedule() to an offlined CPU.
 	 */
-	cpumask = rcp->cpumask;
+	cpus_and(cpumask, rcp->cpumask, cpu_online_map);
 	cpu_clear(rdp->cpu, cpumask);
 	for_each_cpu_mask(cpu, cpumask)
 		smp_send_reschedule(cpu);
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index e1cdf196a515..5e02b7740702 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -217,8 +217,6 @@ long rcu_batches_completed(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-
 void __rcu_read_lock(void)
 {
 	int idx;
diff --git a/kernel/sched.c b/kernel/sched.c
index bfb8ad8ed171..94ead43eda62 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -312,12 +312,15 @@ static DEFINE_SPINLOCK(task_group_lock);
 #endif
 
 /*
- * A weight of 0, 1 or ULONG_MAX can cause arithmetics problems.
+ * A weight of 0 or 1 can cause arithmetics problems.
+ * A weight of a cfs_rq is the sum of weights of which entities
+ * are queued on this cfs_rq, so a weight of a entity should not be
+ * too large, so as the shares value of a task group.
  * (The default weight is 1024 - so there's no practical
  * limitation from this.)
  */
 #define MIN_SHARES	2
-#define MAX_SHARES	(ULONG_MAX - 1)
+#define MAX_SHARES	(1UL << 18)
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 #endif
@@ -1124,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -1179,6 +1183,7 @@ static void init_hrtick(void)
 {
 	hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -1337,8 +1342,13 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 {
 	u64 tmp;
 
-	if (!lw->inv_weight)
-		lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)/(lw->weight+1);
+	if (!lw->inv_weight) {
+		if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
+			lw->inv_weight = 1;
+		else
+			lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
+				/ (lw->weight+1);
+	}
 
 	tmp = (u64)delta_exec * weight;
 	/*
@@ -4159,12 +4169,10 @@ need_resched_nonpreemptible:
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				signal_pending(prev))) {
+		if (unlikely(signal_pending_state(prev->state, prev)))
 			prev->state = TASK_RUNNING;
-		} else {
+		else
 			deactivate_task(rq, prev, 1);
-		}
 		switch_count = &prev->nvcsw;
 	}
 
@@ -4390,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 			     signal_pending(current)) ||
 			    (state == TASK_KILLABLE &&
 			     fatal_signal_pending(current))) {
-				__remove_wait_queue(&x->wait, &wait);
-				return -ERESTARTSYS;
+				timeout = -ERESTARTSYS;
+				break;
 			}
 			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				return timeout;
-			}
-		} while (!x->done);
+		} while (!x->done && timeout);
 		__remove_wait_queue(&x->wait, &wait);
+		if (!x->done)
+			return timeout;
 	}
 	x->done--;
-	return timeout;
+	return timeout ?: 1;
 }
 
 static long __sched
@@ -5881,6 +5887,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 		next = pick_next_task(rq, rq->curr);
 		if (!next)
 			break;
+		next->sched_class->put_prev_task(rq, next);
 		migrate_dead(dead_cpu, next);
 
 	}
@@ -6871,7 +6878,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
@@ -7230,6 +7242,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7376,6 +7400,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7461,6 +7486,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7479,8 +7505,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
@@ -7620,7 +7654,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
@@ -8342,7 +8375,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {
@@ -8469,6 +8502,9 @@ int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
 	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
 	rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+	if (rt_period == 0)
+		return -EINVAL;
+
 	return tg_set_bandwidth(tg, rt_period, rt_runtime);
 }
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3432d573205d..0f3c19197fa4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -250,7 +250,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			spin_unlock(&rt_rq->rt_runtime_lock);
-		}
+		} else if (rt_rq->rt_nr_running)
+			idle = 0;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
@@ -449,13 +450,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-	if (group_rq && rt_rq_throttled(group_rq))
+	/*
+	 * Don't enqueue the group if its throttled, or when empty.
+	 * The latter is a consequence of the former when a child group
+	 * get throttled and the current group doesn't have any other
+	 * active members.
+	 */
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +471,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +487,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-	struct sched_rt_entity *rt_se, *back = NULL;
+	struct sched_rt_entity *back = NULL;
 
-	rt_se = &p->rt;
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
@@ -492,7 +498,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se);
+	}
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+	for_each_sched_rt_entity(rt_se)
+		__enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+
+	for_each_sched_rt_entity(rt_se) {
+		struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+		if (rt_rq && rt_rq->rt_nr_running)
+			__enqueue_rt_entity(rt_se);
 	}
 }
 
@@ -506,32 +531,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * enqueue everybody, bottom - up.
-	 */
-	for_each_sched_rt_entity(rt_se)
-		enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq;
 
 	update_curr_rt(rq);
-
-	dequeue_rt_stack(p);
-
-	/*
-	 * re-enqueue all non-empty rt_rq entities.
-	 */
-	for_each_sched_rt_entity(rt_se) {
-		rt_rq = group_rt_rq(rt_se);
-		if (rt_rq && rt_rq->rt_nr_running)
-			enqueue_rt_entity(rt_se);
-	}
+	dequeue_rt_entity(rt_se);
 }
 
 /*
@@ -542,8 +550,10 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
 	struct rt_prio_array *array = &rt_rq->active;
+	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+	if (on_rt_rq(rt_se))
+		list_move_tail(&rt_se->run_list, queue);
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a38878e0e49d..80179ef7450e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -198,6 +198,9 @@ static inline void sched_info_queued(struct task_struct *t)
 /*
  * Called when a process ceases being the active-running process, either
  * voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Also, if the process is still in the TASK_RUNNING state, call
+ * sched_info_queued() to mark that it has now again started waiting on
+ * the runqueue.
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
@@ -206,6 +209,9 @@ static inline void sched_info_depart(struct task_struct *t)
 
 	t->sched_info.cpu_time += delta;
 	rq_sched_info_depart(task_rq(t), delta);
+
+	if (t->state == TASK_RUNNING)
+		sched_info_queued(t);
 }
 
 /*
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 01b6522fd92b..a272d78185eb 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -49,12 +49,17 @@ static unsigned long get_timestamp(int this_cpu)
 	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
 }
 
-void touch_softlockup_watchdog(void)
+static void __touch_softlockup_watchdog(void)
 {
 	int this_cpu = raw_smp_processor_id();
 
 	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
 }
+
+void touch_softlockup_watchdog(void)
+{
+	__raw_get_cpu_var(touch_timestamp) = 0;
+}
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
 void touch_all_softlockup_watchdogs(void)
@@ -80,7 +85,7 @@ void softlockup_tick(void)
 	unsigned long now;
 
 	if (touch_timestamp == 0) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		return;
 	}
 
@@ -95,7 +100,7 @@ void softlockup_tick(void)
 
 	/* do not print during early bootup: */
 	if (unlikely(system_state != SYSTEM_RUNNING)) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		return;
 	}
 
@@ -115,6 +120,7 @@ void softlockup_tick(void)
 	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
 			this_cpu, now - touch_timestamp,
 			current->comm, task_pid_nr(current));
+	print_modules();
 	if (regs)
 		show_regs(regs);
 	else
@@ -214,7 +220,7 @@ static int watchdog(void *__bind_cpu)
 	sched_setscheduler(current, SCHED_FIFO, &param);
 
 	/* initialize timestamp */
-	touch_softlockup_watchdog();
+	__touch_softlockup_watchdog();
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	/*
@@ -223,7 +229,7 @@ static int watchdog(void *__bind_cpu)
 	 * debug-printout triggers in softlockup_tick().
 	 */
 	while (!kthread_should_stop()) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		schedule();
 
 		if (kthread_should_stop())
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 29fc39f1029c..ce7799540c91 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -13,7 +13,7 @@
  * Kai Petzke <wpp@marie.physik.tu-berlin.de>
  * Theodore Ts'o <tytso@mit.edu>
  *
- * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
+ * Made to use alloc_percpu by Christoph Lameter.
  */
 
 #include <linux/module.h>