Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c            |    6
-rw-r--r--  kernel/auditfilter.c      |    3
-rw-r--r--  kernel/capability.c       |  132
-rw-r--r--  kernel/cpuset.c           |   42
-rw-r--r--  kernel/futex.c            |   93
-rw-r--r--  kernel/hrtimer.c          |    8
-rw-r--r--  kernel/kgdb.c             |   19
-rw-r--r--  kernel/kprobes.c          |   17
-rw-r--r--  kernel/posix-cpu-timers.c |    3
-rw-r--r--  kernel/printk.c           |    2
-rw-r--r--  kernel/rcuclassic.c       |   16
-rw-r--r--  kernel/rcupreempt.c       |   22
-rw-r--r--  kernel/relay.c            |    2
-rw-r--r--  kernel/sched.c            |  528
-rw-r--r--  kernel/sched_clock.c      |   18
-rw-r--r--  kernel/sched_debug.c      |    5
-rw-r--r--  kernel/sched_fair.c       |  254
-rw-r--r--  kernel/sched_rt.c         |   70
-rw-r--r--  kernel/sched_stats.h      |    7
-rw-r--r--  kernel/softlockup.c       |   16
-rw-r--r--  kernel/time/tick-sched.c  |    8
-rw-r--r--  kernel/workqueue.c        |    2
22 files changed, 528 insertions, 745 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index e8692a5748c2..e092f1c0ce30 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -738,7 +738,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
738 if (!audit_enabled && msg_type != AUDIT_USER_AVC) 738 if (!audit_enabled && msg_type != AUDIT_USER_AVC)
739 return 0; 739 return 0;
740 740
741 err = audit_filter_user(&NETLINK_CB(skb), msg_type); 741 err = audit_filter_user(&NETLINK_CB(skb));
742 if (err == 1) { 742 if (err == 1) {
743 err = 0; 743 err = 0;
744 if (msg_type == AUDIT_USER_TTY) { 744 if (msg_type == AUDIT_USER_TTY) {
@@ -779,7 +779,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
779 } 779 }
780 /* fallthrough */ 780 /* fallthrough */
781 case AUDIT_LIST: 781 case AUDIT_LIST:
782 err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid, 782 err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
783 uid, seq, data, nlmsg_len(nlh), 783 uid, seq, data, nlmsg_len(nlh),
784 loginuid, sessionid, sid); 784 loginuid, sessionid, sid);
785 break; 785 break;
@@ -798,7 +798,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
798 } 798 }
799 /* fallthrough */ 799 /* fallthrough */
800 case AUDIT_LIST_RULES: 800 case AUDIT_LIST_RULES:
801 err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid, 801 err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
802 uid, seq, data, nlmsg_len(nlh), 802 uid, seq, data, nlmsg_len(nlh),
803 loginuid, sessionid, sid); 803 loginuid, sessionid, sid);
804 break; 804 break;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 0e0bd27e6512..98c50cc671bb 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1544,6 +1544,7 @@ static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid,
1544 * @data: payload data 1544 * @data: payload data
1545 * @datasz: size of payload data 1545 * @datasz: size of payload data
1546 * @loginuid: loginuid of sender 1546 * @loginuid: loginuid of sender
1547 * @sessionid: sessionid for netlink audit message
1547 * @sid: SE Linux Security ID of sender 1548 * @sid: SE Linux Security ID of sender
1548 */ 1549 */
1549int audit_receive_filter(int type, int pid, int uid, int seq, void *data, 1550int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
@@ -1720,7 +1721,7 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
1720 return 1; 1721 return 1;
1721} 1722}
1722 1723
1723int audit_filter_user(struct netlink_skb_parms *cb, int type) 1724int audit_filter_user(struct netlink_skb_parms *cb)
1724{ 1725{
1725 enum audit_state state = AUDIT_DISABLED; 1726 enum audit_state state = AUDIT_DISABLED;
1726 struct audit_entry *e; 1727 struct audit_entry *e;
diff --git a/kernel/capability.c b/kernel/capability.c
index 39e8193b41ea..901e0fdc3fff 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -53,11 +53,95 @@ static void warn_legacy_capability_use(void)
53} 53}
54 54
55/* 55/*
56 * Version 2 capabilities worked fine, but the linux/capability.h file
57 * that accompanied their introduction encouraged their use without
58 * the necessary user-space source code changes. As such, we have
59 * created a version 3 with equivalent functionality to version 2, but
60 * with a header change to protect legacy source code from using
61 * version 2 when it wanted to use version 1. If your system has code
62 * that trips the following warning, it is using version 2 specific
63 * capabilities and may be doing so insecurely.
64 *
65 * The remedy is to either upgrade your version of libcap (to 2.10+,
66 * if the application is linked against it), or recompile your
67 * application with modern kernel headers and this warning will go
68 * away.
69 */
70
71static void warn_deprecated_v2(void)
72{
73 static int warned;
74
75 if (!warned) {
76 char name[sizeof(current->comm)];
77
78 printk(KERN_INFO "warning: `%s' uses deprecated v2"
79 " capabilities in a way that may be insecure.\n",
80 get_task_comm(name, current));
81 warned = 1;
82 }
83}
84
85/*
86 * Version check. Return the number of u32s in each capability flag
87 * array, or a negative value on error.
88 */
89static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
90{
91 __u32 version;
92
93 if (get_user(version, &header->version))
94 return -EFAULT;
95
96 switch (version) {
97 case _LINUX_CAPABILITY_VERSION_1:
98 warn_legacy_capability_use();
99 *tocopy = _LINUX_CAPABILITY_U32S_1;
100 break;
101 case _LINUX_CAPABILITY_VERSION_2:
102 warn_deprecated_v2();
103 /*
104 * fall through - v3 is otherwise equivalent to v2.
105 */
106 case _LINUX_CAPABILITY_VERSION_3:
107 *tocopy = _LINUX_CAPABILITY_U32S_3;
108 break;
109 default:
110 if (put_user((u32)_KERNEL_CAPABILITY_VERSION, &header->version))
111 return -EFAULT;
112 return -EINVAL;
113 }
114
115 return 0;
116}
117
118/*
56 * For sys_getproccap() and sys_setproccap(), any of the three 119 * For sys_getproccap() and sys_setproccap(), any of the three
57 * capability set pointers may be NULL -- indicating that that set is 120 * capability set pointers may be NULL -- indicating that that set is
58 * uninteresting and/or not to be changed. 121 * uninteresting and/or not to be changed.
59 */ 122 */
60 123
124/*
125 * Atomically modify the effective capabilities returning the original
126 * value. No permission check is performed here - it is assumed that the
127 * caller is permitted to set the desired effective capabilities.
128 */
129kernel_cap_t cap_set_effective(const kernel_cap_t pE_new)
130{
131 kernel_cap_t pE_old;
132
133 spin_lock(&task_capability_lock);
134
135 pE_old = current->cap_effective;
136 current->cap_effective = pE_new;
137
138 spin_unlock(&task_capability_lock);
139
140 return pE_old;
141}
142
143EXPORT_SYMBOL(cap_set_effective);
144
61/** 145/**
62 * sys_capget - get the capabilities of a given process. 146 * sys_capget - get the capabilities of a given process.
63 * @header: pointer to struct that contains capability version and 147 * @header: pointer to struct that contains capability version and
@@ -71,27 +155,13 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
71{ 155{
72 int ret = 0; 156 int ret = 0;
73 pid_t pid; 157 pid_t pid;
74 __u32 version;
75 struct task_struct *target; 158 struct task_struct *target;
76 unsigned tocopy; 159 unsigned tocopy;
77 kernel_cap_t pE, pI, pP; 160 kernel_cap_t pE, pI, pP;
78 161
79 if (get_user(version, &header->version)) 162 ret = cap_validate_magic(header, &tocopy);
80 return -EFAULT; 163 if (ret != 0)
81 164 return ret;
82 switch (version) {
83 case _LINUX_CAPABILITY_VERSION_1:
84 warn_legacy_capability_use();
85 tocopy = _LINUX_CAPABILITY_U32S_1;
86 break;
87 case _LINUX_CAPABILITY_VERSION_2:
88 tocopy = _LINUX_CAPABILITY_U32S_2;
89 break;
90 default:
91 if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
92 return -EFAULT;
93 return -EINVAL;
94 }
95 165
96 if (get_user(pid, &header->pid)) 166 if (get_user(pid, &header->pid))
97 return -EFAULT; 167 return -EFAULT;
@@ -118,7 +188,7 @@ out:
118 spin_unlock(&task_capability_lock); 188 spin_unlock(&task_capability_lock);
119 189
120 if (!ret) { 190 if (!ret) {
121 struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S]; 191 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
122 unsigned i; 192 unsigned i;
123 193
124 for (i = 0; i < tocopy; i++) { 194 for (i = 0; i < tocopy; i++) {
@@ -128,7 +198,7 @@ out:
128 } 198 }
129 199
130 /* 200 /*
131 * Note, in the case, tocopy < _LINUX_CAPABILITY_U32S, 201 * Note, in the case, tocopy < _KERNEL_CAPABILITY_U32S,
132 * we silently drop the upper capabilities here. This 202 * we silently drop the upper capabilities here. This
133 * has the effect of making older libcap 203 * has the effect of making older libcap
134 * implementations implicitly drop upper capability 204 * implementations implicitly drop upper capability
@@ -240,30 +310,16 @@ static inline int cap_set_all(kernel_cap_t *effective,
240 */ 310 */
241asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) 311asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
242{ 312{
243 struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S]; 313 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
244 unsigned i, tocopy; 314 unsigned i, tocopy;
245 kernel_cap_t inheritable, permitted, effective; 315 kernel_cap_t inheritable, permitted, effective;
246 __u32 version;
247 struct task_struct *target; 316 struct task_struct *target;
248 int ret; 317 int ret;
249 pid_t pid; 318 pid_t pid;
250 319
251 if (get_user(version, &header->version)) 320 ret = cap_validate_magic(header, &tocopy);
252 return -EFAULT; 321 if (ret != 0)
253 322 return ret;
254 switch (version) {
255 case _LINUX_CAPABILITY_VERSION_1:
256 warn_legacy_capability_use();
257 tocopy = _LINUX_CAPABILITY_U32S_1;
258 break;
259 case _LINUX_CAPABILITY_VERSION_2:
260 tocopy = _LINUX_CAPABILITY_U32S_2;
261 break;
262 default:
263 if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
264 return -EFAULT;
265 return -EINVAL;
266 }
267 323
268 if (get_user(pid, &header->pid)) 324 if (get_user(pid, &header->pid))
269 return -EFAULT; 325 return -EFAULT;
@@ -281,7 +337,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
281 permitted.cap[i] = kdata[i].permitted; 337 permitted.cap[i] = kdata[i].permitted;
282 inheritable.cap[i] = kdata[i].inheritable; 338 inheritable.cap[i] = kdata[i].inheritable;
283 } 339 }
284 while (i < _LINUX_CAPABILITY_U32S) { 340 while (i < _KERNEL_CAPABILITY_U32S) {
285 effective.cap[i] = 0; 341 effective.cap[i] = 0;
286 permitted.cap[i] = 0; 342 permitted.cap[i] = 0;
287 inheritable.cap[i] = 0; 343 inheritable.cap[i] = 0;
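The capability.c change above folds the duplicated libcap header check into cap_validate_magic() and makes version 2 an alias for version 3, warning once when a binary still passes _LINUX_CAPABILITY_VERSION_2. For reference, a minimal user-space sketch (not part of the patch) of the version negotiation the kernel expects: probe with an unsupported version so the kernel writes its preferred _KERNEL_CAPABILITY_VERSION back into the header, then repeat the call with that version. The struct layouts and syscall numbers are the standard linux/capability.h and sys/syscall.h ones.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

int main(void)
{
	struct __user_cap_header_struct hdr;
	struct __user_cap_data_struct data[2];	/* _LINUX_CAPABILITY_U32S_3 == 2 */

	/* Probe: an unknown version makes the kernel write back its preferred one. */
	memset(&hdr, 0, sizeof(hdr));
	hdr.version = 0;
	hdr.pid = 0;				/* 0 == calling process */
	syscall(SYS_capget, &hdr, NULL);	/* expected to fail with -EINVAL */
	printf("kernel prefers capability version 0x%x\n", (unsigned)hdr.version);

	/* Real call, using the version the kernel advertised. */
	if (syscall(SYS_capget, &hdr, data) == 0)
		printf("effective[0] = 0x%x\n", (unsigned)data[0].effective);
	return 0;
}

Binaries that keep hard-coding version 1 continue to work, but they only ever see the first 32 capability bits; that is the silent upper-bit drop the comment in sys_capget() describes.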
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 86ea9e34e326..798b3ab054eb 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -797,8 +797,10 @@ static int update_cpumask(struct cpuset *cs, char *buf)
797 retval = cpulist_parse(buf, trialcs.cpus_allowed); 797 retval = cpulist_parse(buf, trialcs.cpus_allowed);
798 if (retval < 0) 798 if (retval < 0)
799 return retval; 799 return retval;
800
801 if (!cpus_subset(trialcs.cpus_allowed, cpu_online_map))
802 return -EINVAL;
800 } 803 }
801 cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
802 retval = validate_change(cs, &trialcs); 804 retval = validate_change(cs, &trialcs);
803 if (retval < 0) 805 if (retval < 0)
804 return retval; 806 return retval;
@@ -932,9 +934,11 @@ static int update_nodemask(struct cpuset *cs, char *buf)
932 retval = nodelist_parse(buf, trialcs.mems_allowed); 934 retval = nodelist_parse(buf, trialcs.mems_allowed);
933 if (retval < 0) 935 if (retval < 0)
934 goto done; 936 goto done;
937
938 if (!nodes_subset(trialcs.mems_allowed,
939 node_states[N_HIGH_MEMORY]))
940 return -EINVAL;
935 } 941 }
936 nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
937 node_states[N_HIGH_MEMORY]);
938 oldmem = cs->mems_allowed; 942 oldmem = cs->mems_allowed;
939 if (nodes_equal(oldmem, trialcs.mems_allowed)) { 943 if (nodes_equal(oldmem, trialcs.mems_allowed)) {
940 retval = 0; /* Too easy - nothing to do */ 944 retval = 0; /* Too easy - nothing to do */
@@ -1033,8 +1037,8 @@ int current_cpuset_is_being_rebound(void)
1033 1037
1034static int update_relax_domain_level(struct cpuset *cs, s64 val) 1038static int update_relax_domain_level(struct cpuset *cs, s64 val)
1035{ 1039{
1036 if ((int)val < 0) 1040 if (val < -1 || val >= SD_LV_MAX)
1037 val = -1; 1041 return -EINVAL;
1038 1042
1039 if (val != cs->relax_domain_level) { 1043 if (val != cs->relax_domain_level) {
1040 cs->relax_domain_level = val; 1044 cs->relax_domain_level = val;
@@ -1878,7 +1882,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
1878 * in order to minimize text size. 1882 * in order to minimize text size.
1879 */ 1883 */
1880 1884
1881static void common_cpu_mem_hotplug_unplug(void) 1885static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
1882{ 1886{
1883 cgroup_lock(); 1887 cgroup_lock();
1884 1888
@@ -1886,6 +1890,13 @@ static void common_cpu_mem_hotplug_unplug(void)
1886 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; 1890 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
1887 scan_for_empty_cpusets(&top_cpuset); 1891 scan_for_empty_cpusets(&top_cpuset);
1888 1892
1893 /*
1894 * Scheduler destroys domains on hotplug events.
1895 * Rebuild them based on the current settings.
1896 */
1897 if (rebuild_sd)
1898 rebuild_sched_domains();
1899
1889 cgroup_unlock(); 1900 cgroup_unlock();
1890} 1901}
1891 1902
@@ -1902,11 +1913,22 @@ static void common_cpu_mem_hotplug_unplug(void)
1902static int cpuset_handle_cpuhp(struct notifier_block *unused_nb, 1913static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
1903 unsigned long phase, void *unused_cpu) 1914 unsigned long phase, void *unused_cpu)
1904{ 1915{
1905 if (phase == CPU_DYING || phase == CPU_DYING_FROZEN) 1916 switch (phase) {
1917 case CPU_UP_CANCELED:
1918 case CPU_UP_CANCELED_FROZEN:
1919 case CPU_DOWN_FAILED:
1920 case CPU_DOWN_FAILED_FROZEN:
1921 case CPU_ONLINE:
1922 case CPU_ONLINE_FROZEN:
1923 case CPU_DEAD:
1924 case CPU_DEAD_FROZEN:
1925 common_cpu_mem_hotplug_unplug(1);
1926 break;
1927 default:
1906 return NOTIFY_DONE; 1928 return NOTIFY_DONE;
1929 }
1907 1930
1908 common_cpu_mem_hotplug_unplug(); 1931 return NOTIFY_OK;
1909 return 0;
1910} 1932}
1911 1933
1912#ifdef CONFIG_MEMORY_HOTPLUG 1934#ifdef CONFIG_MEMORY_HOTPLUG
@@ -1919,7 +1941,7 @@ static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
1919 1941
1920void cpuset_track_online_nodes(void) 1942void cpuset_track_online_nodes(void)
1921{ 1943{
1922 common_cpu_mem_hotplug_unplug(); 1944 common_cpu_mem_hotplug_unplug(0);
1923} 1945}
1924#endif 1946#endif
1925 1947
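With the update_cpumask() and update_nodemask() hunks above, a cpus or mems write that is not a subset of the online CPUs (or of node_states[N_HIGH_MEMORY]) is now rejected with -EINVAL instead of being silently masked down. A hedged illustration of what a caller sees; the /dev/cpuset mount point and the group name are placeholders, and the example assumes a box with fewer than 64 CPUs.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* "0-63" is deliberately wider than the online map on a small machine. */
	const char *cpus = "0-63";
	int fd = open("/dev/cpuset/mygroup/cpus", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cpus, strlen(cpus)) < 0)
		/* Previously the offline CPUs were silently masked off; now: EINVAL. */
		printf("write failed: %s\n", strerror(errno));
	close(fd);
	return 0;
}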
diff --git a/kernel/futex.c b/kernel/futex.c
index 449def8074fe..7d1136e97c14 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1096,21 +1096,64 @@ static void unqueue_me_pi(struct futex_q *q)
1096 * private futexes. 1096 * private futexes.
1097 */ 1097 */
1098static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, 1098static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1099 struct task_struct *newowner) 1099 struct task_struct *newowner,
1100 struct rw_semaphore *fshared)
1100{ 1101{
1101 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; 1102 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1102 struct futex_pi_state *pi_state = q->pi_state; 1103 struct futex_pi_state *pi_state = q->pi_state;
1104 struct task_struct *oldowner = pi_state->owner;
1103 u32 uval, curval, newval; 1105 u32 uval, curval, newval;
1104 int ret; 1106 int ret, attempt = 0;
1105 1107
1106 /* Owner died? */ 1108 /* Owner died? */
1109 if (!pi_state->owner)
1110 newtid |= FUTEX_OWNER_DIED;
1111
1112 /*
1113 * We are here either because we stole the rtmutex from the
1114 * pending owner or we are the pending owner which failed to
1115 * get the rtmutex. We have to replace the pending owner TID
1116 * in the user space variable. This must be atomic as we have
1117 * to preserve the owner died bit here.
1118 *
1119 * Note: We write the user space value _before_ changing the
1120 * pi_state because we can fault here. Imagine swapped out
1121 * pages or a fork, which was running right before we acquired
1122 * mmap_sem, that marked all the anonymous memory readonly for
1123 * cow.
1124 *
1125 * Modifying pi_state _before_ the user space value would
1126 * leave the pi_state in an inconsistent state when we fault
1127 * here, because we need to drop the hash bucket lock to
1128 * handle the fault. This might be observed in the PID check
1129 * in lookup_pi_state.
1130 */
1131retry:
1132 if (get_futex_value_locked(&uval, uaddr))
1133 goto handle_fault;
1134
1135 while (1) {
1136 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1137
1138 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1139
1140 if (curval == -EFAULT)
1141 goto handle_fault;
1142 if (curval == uval)
1143 break;
1144 uval = curval;
1145 }
1146
1147 /*
1148 * We fixed up user space. Now we need to fix the pi_state
1149 * itself.
1150 */
1107 if (pi_state->owner != NULL) { 1151 if (pi_state->owner != NULL) {
1108 spin_lock_irq(&pi_state->owner->pi_lock); 1152 spin_lock_irq(&pi_state->owner->pi_lock);
1109 WARN_ON(list_empty(&pi_state->list)); 1153 WARN_ON(list_empty(&pi_state->list));
1110 list_del_init(&pi_state->list); 1154 list_del_init(&pi_state->list);
1111 spin_unlock_irq(&pi_state->owner->pi_lock); 1155 spin_unlock_irq(&pi_state->owner->pi_lock);
1112 } else 1156 }
1113 newtid |= FUTEX_OWNER_DIED;
1114 1157
1115 pi_state->owner = newowner; 1158 pi_state->owner = newowner;
1116 1159
@@ -1118,26 +1161,35 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1118 WARN_ON(!list_empty(&pi_state->list)); 1161 WARN_ON(!list_empty(&pi_state->list));
1119 list_add(&pi_state->list, &newowner->pi_state_list); 1162 list_add(&pi_state->list, &newowner->pi_state_list);
1120 spin_unlock_irq(&newowner->pi_lock); 1163 spin_unlock_irq(&newowner->pi_lock);
1164 return 0;
1121 1165
1122 /* 1166 /*
1123 * We own it, so we have to replace the pending owner 1167 * To handle the page fault we need to drop the hash bucket
1124 * TID. This must be atomic as we have preserve the 1168 * lock here. That gives the other task (either the pending
1125 * owner died bit here. 1169 * owner itself or the task which stole the rtmutex) the
1170 * chance to try the fixup of the pi_state. So once we are
1171 * back from handling the fault we need to check the pi_state
1172 * after reacquiring the hash bucket lock and before trying to
1173 * do another fixup. When the fixup has been done already we
1174 * simply return.
1126 */ 1175 */
1127 ret = get_futex_value_locked(&uval, uaddr); 1176handle_fault:
1177 spin_unlock(q->lock_ptr);
1128 1178
1129 while (!ret) { 1179 ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
1130 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1131 1180
1132 curval = cmpxchg_futex_value_locked(uaddr, uval, newval); 1181 spin_lock(q->lock_ptr);
1133 1182
1134 if (curval == -EFAULT) 1183 /*
1135 ret = -EFAULT; 1184 * Check if someone else fixed it for us:
1136 if (curval == uval) 1185 */
1137 break; 1186 if (pi_state->owner != oldowner)
1138 uval = curval; 1187 return 0;
1139 } 1188
1140 return ret; 1189 if (ret)
1190 return ret;
1191
1192 goto retry;
1141} 1193}
1142 1194
1143/* 1195/*
@@ -1507,7 +1559,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1507 * that case: 1559 * that case:
1508 */ 1560 */
1509 if (q.pi_state->owner != curr) 1561 if (q.pi_state->owner != curr)
1510 ret = fixup_pi_state_owner(uaddr, &q, curr); 1562 ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
1511 } else { 1563 } else {
1512 /* 1564 /*
1513 * Catch the rare case, where the lock was released 1565 * Catch the rare case, where the lock was released
@@ -1539,7 +1591,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1539 int res; 1591 int res;
1540 1592
1541 owner = rt_mutex_owner(&q.pi_state->pi_mutex); 1593 owner = rt_mutex_owner(&q.pi_state->pi_mutex);
1542 res = fixup_pi_state_owner(uaddr, &q, owner); 1594 res = fixup_pi_state_owner(uaddr, &q, owner,
1595 fshared);
1543 1596
1544 /* propagate -EFAULT, if the fixup failed */ 1597 /* propagate -EFAULT, if the fixup failed */
1545 if (res) 1598 if (res)
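The fixup_pi_state_owner() rework above writes the user-space futex word before touching pi_state, and retries through futex_handle_fault() (dropping the hash-bucket lock) when the cmpxchg faults, so the owner-died bit is never lost. A small user-space sketch of the bit layout that loop preserves; FUTEX_WAITERS, FUTEX_OWNER_DIED and FUTEX_TID_MASK come from the standard linux/futex.h, and the helper below is illustrative, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <linux/futex.h>	/* FUTEX_WAITERS, FUTEX_OWNER_DIED, FUTEX_TID_MASK */

/* Mirror of the newval computation in fixup_pi_state_owner(). */
static uint32_t new_futex_word(uint32_t uval, uint32_t new_tid)
{
	/* Keep the owner-died bit, keep the waiters bit, install the new owner TID. */
	return (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_tid;
}

int main(void)
{
	uint32_t uval = FUTEX_OWNER_DIED | FUTEX_WAITERS | 1234;	/* old owner died */
	uint32_t nval = new_futex_word(uval, 5678);

	printf("old owner %u, new word 0x%08x, new owner %u\n",
	       (unsigned)(uval & FUTEX_TID_MASK), (unsigned)nval,
	       (unsigned)(nval & FUTEX_TID_MASK));
	return 0;
}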
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 635739d219da..ffca825d24bc 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1000,10 +1000,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
1000 */ 1000 */
1001 raise = timer->state == HRTIMER_STATE_PENDING; 1001 raise = timer->state == HRTIMER_STATE_PENDING;
1002 1002
1003 /*
1004 * We use preempt_disable to prevent this task from migrating after
 1005 * setting up the softirq and raising it. Otherwise, if we migrate
1006 * we will raise the softirq on the wrong CPU.
1007 */
1008 preempt_disable();
1009
1003 unlock_hrtimer_base(timer, &flags); 1010 unlock_hrtimer_base(timer, &flags);
1004 1011
1005 if (raise) 1012 if (raise)
1006 hrtimer_raise_softirq(); 1013 hrtimer_raise_softirq();
1014 preempt_enable();
1007 1015
1008 return ret; 1016 return ret;
1009} 1017}
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 14787de568b3..3ec23c3ec97f 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -52,6 +52,7 @@
52#include <asm/byteorder.h> 52#include <asm/byteorder.h>
53#include <asm/atomic.h> 53#include <asm/atomic.h>
54#include <asm/system.h> 54#include <asm/system.h>
55#include <asm/unaligned.h>
55 56
56static int kgdb_break_asap; 57static int kgdb_break_asap;
57 58
@@ -227,8 +228,6 @@ void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
227 * GDB remote protocol parser: 228 * GDB remote protocol parser:
228 */ 229 */
229 230
230static const char hexchars[] = "0123456789abcdef";
231
232static int hex(char ch) 231static int hex(char ch)
233{ 232{
234 if ((ch >= 'a') && (ch <= 'f')) 233 if ((ch >= 'a') && (ch <= 'f'))
@@ -316,8 +315,8 @@ static void put_packet(char *buffer)
316 } 315 }
317 316
318 kgdb_io_ops->write_char('#'); 317 kgdb_io_ops->write_char('#');
319 kgdb_io_ops->write_char(hexchars[checksum >> 4]); 318 kgdb_io_ops->write_char(hex_asc_hi(checksum));
320 kgdb_io_ops->write_char(hexchars[checksum & 0xf]); 319 kgdb_io_ops->write_char(hex_asc_lo(checksum));
321 if (kgdb_io_ops->flush) 320 if (kgdb_io_ops->flush)
322 kgdb_io_ops->flush(); 321 kgdb_io_ops->flush();
323 322
@@ -478,8 +477,8 @@ static void error_packet(char *pkt, int error)
478{ 477{
479 error = -error; 478 error = -error;
480 pkt[0] = 'E'; 479 pkt[0] = 'E';
481 pkt[1] = hexchars[(error / 10)]; 480 pkt[1] = hex_asc[(error / 10)];
482 pkt[2] = hexchars[(error % 10)]; 481 pkt[2] = hex_asc[(error % 10)];
483 pkt[3] = '\0'; 482 pkt[3] = '\0';
484} 483}
485 484
@@ -510,10 +509,7 @@ static void int_to_threadref(unsigned char *id, int value)
510 scan = (unsigned char *)id; 509 scan = (unsigned char *)id;
511 while (i--) 510 while (i--)
512 *scan++ = 0; 511 *scan++ = 0;
513 *scan++ = (value >> 24) & 0xff; 512 put_unaligned_be32(value, scan);
514 *scan++ = (value >> 16) & 0xff;
515 *scan++ = (value >> 8) & 0xff;
516 *scan++ = (value & 0xff);
517} 513}
518 514
519static struct task_struct *getthread(struct pt_regs *regs, int tid) 515static struct task_struct *getthread(struct pt_regs *regs, int tid)
@@ -1503,7 +1499,8 @@ int kgdb_nmicallback(int cpu, void *regs)
1503 return 1; 1499 return 1;
1504} 1500}
1505 1501
1506void kgdb_console_write(struct console *co, const char *s, unsigned count) 1502static void kgdb_console_write(struct console *co, const char *s,
1503 unsigned count)
1507{ 1504{
1508 unsigned long flags; 1505 unsigned long flags;
1509 1506
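The kgdb.c hunks drop the private hexchars[] table and the open-coded big-endian byte stores in favour of the generic hex_asc_hi()/hex_asc_lo() and put_unaligned_be32() helpers. A stand-alone sketch of the equivalent transformations so the before/after can be checked outside the kernel; the macros below are local re-definitions that mirror the kernel ones, and put_be32() stands in for put_unaligned_be32().

#include <stdint.h>
#include <stdio.h>

static const char hex_asc[] = "0123456789abcdef";
#define hex_asc_lo(x)	hex_asc[(x) & 0x0f]
#define hex_asc_hi(x)	hex_asc[((x) >> 4) & 0x0f]

/* Stand-in for put_unaligned_be32(): store 32 bits, most significant byte first. */
static void put_be32(uint32_t value, unsigned char *p)
{
	p[0] = (value >> 24) & 0xff;
	p[1] = (value >> 16) & 0xff;
	p[2] = (value >> 8) & 0xff;
	p[3] = value & 0xff;
}

int main(void)
{
	unsigned char id[4];
	unsigned char checksum = 0xa7;

	/* The '#' checksum trailer of a GDB remote packet: two lowercase hex digits. */
	printf("checksum 0x%02x -> \"%c%c\"\n",
	       checksum, hex_asc_hi(checksum), hex_asc_lo(checksum));

	/* Thread reference encoding: big-endian, regardless of host byte order. */
	put_be32(0x12345678, id);
	printf("threadref = %02x %02x %02x %02x\n", id[0], id[1], id[2], id[3]);
	return 0;
}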
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1e0250cb9486..1485ca8d0e00 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -79,7 +79,7 @@ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
79 * 79 *
80 * For such cases, we now have a blacklist 80 * For such cases, we now have a blacklist
81 */ 81 */
82struct kprobe_blackpoint kprobe_blacklist[] = { 82static struct kprobe_blackpoint kprobe_blacklist[] = {
83 {"preempt_schedule",}, 83 {"preempt_schedule",},
84 {NULL} /* Terminator */ 84 {NULL} /* Terminator */
85}; 85};
@@ -699,8 +699,9 @@ static int __register_kprobes(struct kprobe **kps, int num,
699 return -EINVAL; 699 return -EINVAL;
700 for (i = 0; i < num; i++) { 700 for (i = 0; i < num; i++) {
701 ret = __register_kprobe(kps[i], called_from); 701 ret = __register_kprobe(kps[i], called_from);
702 if (ret < 0 && i > 0) { 702 if (ret < 0) {
703 unregister_kprobes(kps, i); 703 if (i > 0)
704 unregister_kprobes(kps, i);
704 break; 705 break;
705 } 706 }
706 } 707 }
@@ -776,8 +777,9 @@ static int __register_jprobes(struct jprobe **jps, int num,
776 jp->kp.break_handler = longjmp_break_handler; 777 jp->kp.break_handler = longjmp_break_handler;
777 ret = __register_kprobe(&jp->kp, called_from); 778 ret = __register_kprobe(&jp->kp, called_from);
778 } 779 }
779 if (ret < 0 && i > 0) { 780 if (ret < 0) {
780 unregister_jprobes(jps, i); 781 if (i > 0)
782 unregister_jprobes(jps, i);
781 break; 783 break;
782 } 784 }
783 } 785 }
@@ -920,8 +922,9 @@ static int __register_kretprobes(struct kretprobe **rps, int num,
920 return -EINVAL; 922 return -EINVAL;
921 for (i = 0; i < num; i++) { 923 for (i = 0; i < num; i++) {
922 ret = __register_kretprobe(rps[i], called_from); 924 ret = __register_kretprobe(rps[i], called_from);
923 if (ret < 0 && i > 0) { 925 if (ret < 0) {
924 unregister_kretprobes(rps, i); 926 if (i > 0)
927 unregister_kretprobes(rps, i);
925 break; 928 break;
926 } 929 }
927 } 930 }
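All three kprobes hunks fix the same flaw: when the very first registration failed (i == 0), the old test "if (ret < 0 && i > 0)" skipped not only the rollback but also the break, so the loop kept registering the remaining probes and could overwrite the error code. A generic sketch of the corrected register-or-roll-back shape; the function names are illustrative, not kernel API.

#include <stdio.h>

static int register_one(int i)
{
	return (i == 2) ? -1 : 0;	/* simulate a failure on the third element */
}

static void unregister_many(int n)
{
	printf("rolling back %d earlier registrations\n", n);
}

static int register_many(int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = register_one(i);
		if (ret < 0) {
			if (i > 0)	/* only roll back if something succeeded */
				unregister_many(i);
			break;		/* and stop even when i == 0 */
		}
	}
	return ret;
}

int main(void)
{
	printf("register_many(5) = %d\n", register_many(5));
	return 0;
}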
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index f1525ad06cb3..c42a03aef36f 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1037,6 +1037,9 @@ static void check_thread_timers(struct task_struct *tsk,
1037 sig->rlim[RLIMIT_RTTIME].rlim_cur += 1037 sig->rlim[RLIMIT_RTTIME].rlim_cur +=
1038 USEC_PER_SEC; 1038 USEC_PER_SEC;
1039 } 1039 }
1040 printk(KERN_INFO
1041 "RT Watchdog Timeout: %s[%d]\n",
1042 tsk->comm, task_pid_nr(tsk));
1040 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); 1043 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
1041 } 1044 }
1042 } 1045 }
diff --git a/kernel/printk.c b/kernel/printk.c
index 8fb01c32aa3b..e2129e83fd75 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -666,7 +666,7 @@ static int acquire_console_semaphore_for_printk(unsigned int cpu)
666 return retval; 666 return retval;
667} 667}
668 668
669const char printk_recursion_bug_msg [] = 669static const char printk_recursion_bug_msg [] =
670 KERN_CRIT "BUG: recent printk recursion!\n"; 670 KERN_CRIT "BUG: recent printk recursion!\n";
671static int printk_recursion_bug; 671static int printk_recursion_bug;
672 672
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index f4ffbd0f306f..a38895a5b8e2 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -89,8 +89,22 @@ static void force_quiescent_state(struct rcu_data *rdp,
89 /* 89 /*
90 * Don't send IPI to itself. With irqs disabled, 90 * Don't send IPI to itself. With irqs disabled,
91 * rdp->cpu is the current cpu. 91 * rdp->cpu is the current cpu.
92 *
93 * cpu_online_map is updated by the _cpu_down()
94 * using stop_machine_run(). Since we're in irqs disabled
 95 * section, stop_machine_run() is not executing, hence
96 * the cpu_online_map is stable.
97 *
98 * However, a cpu might have been offlined _just_ before
99 * we disabled irqs while entering here.
100 * And rcu subsystem might not yet have handled the CPU_DEAD
101 * notification, leading to the offlined cpu's bit
102 * being set in the rcp->cpumask.
103 *
104 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
105 * sending smp_reschedule() to an offlined CPU.
92 */ 106 */
93 cpumask = rcp->cpumask; 107 cpus_and(cpumask, rcp->cpumask, cpu_online_map);
94 cpu_clear(rdp->cpu, cpumask); 108 cpu_clear(rdp->cpu, cpumask);
95 for_each_cpu_mask(cpu, cpumask) 109 for_each_cpu_mask(cpu, cpumask)
96 smp_send_reschedule(cpu); 110 smp_send_reschedule(cpu);
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index e1cdf196a515..41d275a81df5 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -217,8 +217,6 @@ long rcu_batches_completed(void)
217} 217}
218EXPORT_SYMBOL_GPL(rcu_batches_completed); 218EXPORT_SYMBOL_GPL(rcu_batches_completed);
219 219
220EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
221
222void __rcu_read_lock(void) 220void __rcu_read_lock(void)
223{ 221{
224 int idx; 222 int idx;
@@ -927,26 +925,22 @@ void rcu_offline_cpu(int cpu)
927 spin_unlock_irqrestore(&rdp->lock, flags); 925 spin_unlock_irqrestore(&rdp->lock, flags);
928} 926}
929 927
930void __devinit rcu_online_cpu(int cpu)
931{
932 unsigned long flags;
933
934 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
935 cpu_set(cpu, rcu_cpu_online_map);
936 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
937}
938
939#else /* #ifdef CONFIG_HOTPLUG_CPU */ 928#else /* #ifdef CONFIG_HOTPLUG_CPU */
940 929
941void rcu_offline_cpu(int cpu) 930void rcu_offline_cpu(int cpu)
942{ 931{
943} 932}
944 933
945void __devinit rcu_online_cpu(int cpu) 934#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
935
936void __cpuinit rcu_online_cpu(int cpu)
946{ 937{
947} 938 unsigned long flags;
948 939
949#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ 940 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
941 cpu_set(cpu, rcu_cpu_online_map);
942 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
943}
950 944
951static void rcu_process_callbacks(struct softirq_action *unused) 945static void rcu_process_callbacks(struct softirq_action *unused)
952{ 946{
diff --git a/kernel/relay.c b/kernel/relay.c
index bc24dcdc570f..7de644cdec43 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1191,7 +1191,7 @@ static ssize_t relay_file_splice_read(struct file *in,
1191 ret = 0; 1191 ret = 0;
1192 spliced = 0; 1192 spliced = 0;
1193 1193
1194 while (len) { 1194 while (len && !spliced) {
1195 ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret); 1195 ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret);
1196 if (ret < 0) 1196 if (ret < 0)
1197 break; 1197 break;
diff --git a/kernel/sched.c b/kernel/sched.c
index cfa222a91539..4e2f60335656 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -136,7 +136,7 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
136 136
137static inline int rt_policy(int policy) 137static inline int rt_policy(int policy)
138{ 138{
139 if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR)) 139 if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
140 return 1; 140 return 1;
141 return 0; 141 return 0;
142} 142}
@@ -312,12 +312,15 @@ static DEFINE_SPINLOCK(task_group_lock);
312#endif 312#endif
313 313
314/* 314/*
315 * A weight of 0, 1 or ULONG_MAX can cause arithmetics problems. 315 * A weight of 0 or 1 can cause arithmetics problems.
 316 * The weight of a cfs_rq is the sum of the weights of the entities
 317 * queued on it, so an entity's weight should not be too large, and
 318 * neither should a task group's shares value.
316 * (The default weight is 1024 - so there's no practical 319 * (The default weight is 1024 - so there's no practical
317 * limitation from this.) 320 * limitation from this.)
318 */ 321 */
319#define MIN_SHARES 2 322#define MIN_SHARES 2
320#define MAX_SHARES (ULONG_MAX - 1) 323#define MAX_SHARES (1UL << 18)
321 324
322static int init_task_group_load = INIT_TASK_GROUP_LOAD; 325static int init_task_group_load = INIT_TASK_GROUP_LOAD;
323#endif 326#endif
@@ -398,43 +401,6 @@ struct cfs_rq {
398 */ 401 */
399 struct list_head leaf_cfs_rq_list; 402 struct list_head leaf_cfs_rq_list;
400 struct task_group *tg; /* group that "owns" this runqueue */ 403 struct task_group *tg; /* group that "owns" this runqueue */
401
402#ifdef CONFIG_SMP
403 unsigned long task_weight;
404 unsigned long shares;
405 /*
406 * We need space to build a sched_domain wide view of the full task
407 * group tree, in order to avoid depending on dynamic memory allocation
408 * during the load balancing we place this in the per cpu task group
409 * hierarchy. This limits the load balancing to one instance per cpu,
410 * but more should not be needed anyway.
411 */
412 struct aggregate_struct {
413 /*
414 * load = weight(cpus) * f(tg)
415 *
416 * Where f(tg) is the recursive weight fraction assigned to
417 * this group.
418 */
419 unsigned long load;
420
421 /*
422 * part of the group weight distributed to this span.
423 */
424 unsigned long shares;
425
426 /*
427 * The sum of all runqueue weights within this span.
428 */
429 unsigned long rq_weight;
430
431 /*
432 * Weight contributed by tasks; this is the part we can
433 * influence by moving tasks around.
434 */
435 unsigned long task_weight;
436 } aggregate;
437#endif
438#endif 404#endif
439}; 405};
440 406
@@ -1161,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
1161 return HRTIMER_NORESTART; 1127 return HRTIMER_NORESTART;
1162} 1128}
1163 1129
1130#ifdef CONFIG_SMP
1164static void hotplug_hrtick_disable(int cpu) 1131static void hotplug_hrtick_disable(int cpu)
1165{ 1132{
1166 struct rq *rq = cpu_rq(cpu); 1133 struct rq *rq = cpu_rq(cpu);
@@ -1216,6 +1183,7 @@ static void init_hrtick(void)
1216{ 1183{
1217 hotcpu_notifier(hotplug_hrtick, 0); 1184 hotcpu_notifier(hotplug_hrtick, 0);
1218} 1185}
1186#endif /* CONFIG_SMP */
1219 1187
1220static void init_rq_hrtick(struct rq *rq) 1188static void init_rq_hrtick(struct rq *rq)
1221{ 1189{
@@ -1368,17 +1336,19 @@ static void __resched_task(struct task_struct *p, int tif_bit)
1368 */ 1336 */
1369#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) 1337#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
1370 1338
1371/*
1372 * delta *= weight / lw
1373 */
1374static unsigned long 1339static unsigned long
1375calc_delta_mine(unsigned long delta_exec, unsigned long weight, 1340calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1376 struct load_weight *lw) 1341 struct load_weight *lw)
1377{ 1342{
1378 u64 tmp; 1343 u64 tmp;
1379 1344
1380 if (!lw->inv_weight) 1345 if (!lw->inv_weight) {
1381 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)/(lw->weight+1); 1346 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1347 lw->inv_weight = 1;
1348 else
1349 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1350 / (lw->weight+1);
1351 }
1382 1352
1383 tmp = (u64)delta_exec * weight; 1353 tmp = (u64)delta_exec * weight;
1384 /* 1354 /*
@@ -1393,6 +1363,12 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1393 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); 1363 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
1394} 1364}
1395 1365
1366static inline unsigned long
1367calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
1368{
1369 return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
1370}
1371
1396static inline void update_load_add(struct load_weight *lw, unsigned long inc) 1372static inline void update_load_add(struct load_weight *lw, unsigned long inc)
1397{ 1373{
1398 lw->weight += inc; 1374 lw->weight += inc;
@@ -1505,326 +1481,6 @@ static unsigned long source_load(int cpu, int type);
1505static unsigned long target_load(int cpu, int type); 1481static unsigned long target_load(int cpu, int type);
1506static unsigned long cpu_avg_load_per_task(int cpu); 1482static unsigned long cpu_avg_load_per_task(int cpu);
1507static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); 1483static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1508
1509#ifdef CONFIG_FAIR_GROUP_SCHED
1510
1511/*
1512 * Group load balancing.
1513 *
1514 * We calculate a few balance domain wide aggregate numbers; load and weight.
1515 * Given the pictures below, and assuming each item has equal weight:
1516 *
1517 * root 1 - thread
1518 * / | \ A - group
1519 * A 1 B
1520 * /|\ / \
1521 * C 2 D 3 4
1522 * | |
1523 * 5 6
1524 *
1525 * load:
1526 * A and B get 1/3-rd of the total load. C and D get 1/3-rd of A's 1/3-rd,
1527 * which equals 1/9-th of the total load.
1528 *
1529 * shares:
1530 * The weight of this group on the selected cpus.
1531 *
1532 * rq_weight:
1533 * Direct sum of all the cpu's their rq weight, e.g. A would get 3 while
1534 * B would get 2.
1535 *
1536 * task_weight:
1537 * Part of the rq_weight contributed by tasks; all groups except B would
1538 * get 1, B gets 2.
1539 */
1540
1541static inline struct aggregate_struct *
1542aggregate(struct task_group *tg, struct sched_domain *sd)
1543{
1544 return &tg->cfs_rq[sd->first_cpu]->aggregate;
1545}
1546
1547typedef void (*aggregate_func)(struct task_group *, struct sched_domain *);
1548
1549/*
1550 * Iterate the full tree, calling @down when first entering a node and @up when
1551 * leaving it for the final time.
1552 */
1553static
1554void aggregate_walk_tree(aggregate_func down, aggregate_func up,
1555 struct sched_domain *sd)
1556{
1557 struct task_group *parent, *child;
1558
1559 rcu_read_lock();
1560 parent = &root_task_group;
1561down:
1562 (*down)(parent, sd);
1563 list_for_each_entry_rcu(child, &parent->children, siblings) {
1564 parent = child;
1565 goto down;
1566
1567up:
1568 continue;
1569 }
1570 (*up)(parent, sd);
1571
1572 child = parent;
1573 parent = parent->parent;
1574 if (parent)
1575 goto up;
1576 rcu_read_unlock();
1577}
1578
1579/*
1580 * Calculate the aggregate runqueue weight.
1581 */
1582static
1583void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
1584{
1585 unsigned long rq_weight = 0;
1586 unsigned long task_weight = 0;
1587 int i;
1588
1589 for_each_cpu_mask(i, sd->span) {
1590 rq_weight += tg->cfs_rq[i]->load.weight;
1591 task_weight += tg->cfs_rq[i]->task_weight;
1592 }
1593
1594 aggregate(tg, sd)->rq_weight = rq_weight;
1595 aggregate(tg, sd)->task_weight = task_weight;
1596}
1597
1598/*
1599 * Compute the weight of this group on the given cpus.
1600 */
1601static
1602void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
1603{
1604 unsigned long shares = 0;
1605 int i;
1606
1607 for_each_cpu_mask(i, sd->span)
1608 shares += tg->cfs_rq[i]->shares;
1609
1610 if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
1611 shares = tg->shares;
1612
1613 aggregate(tg, sd)->shares = shares;
1614}
1615
1616/*
1617 * Compute the load fraction assigned to this group, relies on the aggregate
1618 * weight and this group's parent's load, i.e. top-down.
1619 */
1620static
1621void aggregate_group_load(struct task_group *tg, struct sched_domain *sd)
1622{
1623 unsigned long load;
1624
1625 if (!tg->parent) {
1626 int i;
1627
1628 load = 0;
1629 for_each_cpu_mask(i, sd->span)
1630 load += cpu_rq(i)->load.weight;
1631
1632 } else {
1633 load = aggregate(tg->parent, sd)->load;
1634
1635 /*
1636 * shares is our weight in the parent's rq so
1637 * shares/parent->rq_weight gives our fraction of the load
1638 */
1639 load *= aggregate(tg, sd)->shares;
1640 load /= aggregate(tg->parent, sd)->rq_weight + 1;
1641 }
1642
1643 aggregate(tg, sd)->load = load;
1644}
1645
1646static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1647
1648/*
1649 * Calculate and set the cpu's group shares.
1650 */
1651static void
1652__update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd,
1653 int tcpu)
1654{
1655 int boost = 0;
1656 unsigned long shares;
1657 unsigned long rq_weight;
1658
1659 if (!tg->se[tcpu])
1660 return;
1661
1662 rq_weight = tg->cfs_rq[tcpu]->load.weight;
1663
1664 /*
1665 * If there are currently no tasks on the cpu pretend there is one of
1666 * average load so that when a new task gets to run here it will not
1667 * get delayed by group starvation.
1668 */
1669 if (!rq_weight) {
1670 boost = 1;
1671 rq_weight = NICE_0_LOAD;
1672 }
1673
1674 /*
1675 * \Sum shares * rq_weight
1676 * shares = -----------------------
1677 * \Sum rq_weight
1678 *
1679 */
1680 shares = aggregate(tg, sd)->shares * rq_weight;
1681 shares /= aggregate(tg, sd)->rq_weight + 1;
1682
1683 /*
1684 * record the actual number of shares, not the boosted amount.
1685 */
1686 tg->cfs_rq[tcpu]->shares = boost ? 0 : shares;
1687
1688 if (shares < MIN_SHARES)
1689 shares = MIN_SHARES;
1690 else if (shares > MAX_SHARES)
1691 shares = MAX_SHARES;
1692
1693 __set_se_shares(tg->se[tcpu], shares);
1694}
1695
1696/*
1697 * Re-adjust the weights on the cpu the task came from and on the cpu the
1698 * task went to.
1699 */
1700static void
1701__move_group_shares(struct task_group *tg, struct sched_domain *sd,
1702 int scpu, int dcpu)
1703{
1704 unsigned long shares;
1705
1706 shares = tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
1707
1708 __update_group_shares_cpu(tg, sd, scpu);
1709 __update_group_shares_cpu(tg, sd, dcpu);
1710
1711 /*
1712 * ensure we never loose shares due to rounding errors in the
1713 * above redistribution.
1714 */
1715 shares -= tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
1716 if (shares)
1717 tg->cfs_rq[dcpu]->shares += shares;
1718}
1719
1720/*
1721 * Because changing a group's shares changes the weight of the super-group
1722 * we need to walk up the tree and change all shares until we hit the root.
1723 */
1724static void
1725move_group_shares(struct task_group *tg, struct sched_domain *sd,
1726 int scpu, int dcpu)
1727{
1728 while (tg) {
1729 __move_group_shares(tg, sd, scpu, dcpu);
1730 tg = tg->parent;
1731 }
1732}
1733
1734static
1735void aggregate_group_set_shares(struct task_group *tg, struct sched_domain *sd)
1736{
1737 unsigned long shares = aggregate(tg, sd)->shares;
1738 int i;
1739
1740 for_each_cpu_mask(i, sd->span) {
1741 struct rq *rq = cpu_rq(i);
1742 unsigned long flags;
1743
1744 spin_lock_irqsave(&rq->lock, flags);
1745 __update_group_shares_cpu(tg, sd, i);
1746 spin_unlock_irqrestore(&rq->lock, flags);
1747 }
1748
1749 aggregate_group_shares(tg, sd);
1750
1751 /*
1752 * ensure we never loose shares due to rounding errors in the
1753 * above redistribution.
1754 */
1755 shares -= aggregate(tg, sd)->shares;
1756 if (shares) {
1757 tg->cfs_rq[sd->first_cpu]->shares += shares;
1758 aggregate(tg, sd)->shares += shares;
1759 }
1760}
1761
1762/*
1763 * Calculate the accumulative weight and recursive load of each task group
1764 * while walking down the tree.
1765 */
1766static
1767void aggregate_get_down(struct task_group *tg, struct sched_domain *sd)
1768{
1769 aggregate_group_weight(tg, sd);
1770 aggregate_group_shares(tg, sd);
1771 aggregate_group_load(tg, sd);
1772}
1773
1774/*
1775 * Rebalance the cpu shares while walking back up the tree.
1776 */
1777static
1778void aggregate_get_up(struct task_group *tg, struct sched_domain *sd)
1779{
1780 aggregate_group_set_shares(tg, sd);
1781}
1782
1783static DEFINE_PER_CPU(spinlock_t, aggregate_lock);
1784
1785static void __init init_aggregate(void)
1786{
1787 int i;
1788
1789 for_each_possible_cpu(i)
1790 spin_lock_init(&per_cpu(aggregate_lock, i));
1791}
1792
1793static int get_aggregate(struct sched_domain *sd)
1794{
1795 if (!spin_trylock(&per_cpu(aggregate_lock, sd->first_cpu)))
1796 return 0;
1797
1798 aggregate_walk_tree(aggregate_get_down, aggregate_get_up, sd);
1799 return 1;
1800}
1801
1802static void put_aggregate(struct sched_domain *sd)
1803{
1804 spin_unlock(&per_cpu(aggregate_lock, sd->first_cpu));
1805}
1806
1807static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1808{
1809 cfs_rq->shares = shares;
1810}
1811
1812#else
1813
1814static inline void init_aggregate(void)
1815{
1816}
1817
1818static inline int get_aggregate(struct sched_domain *sd)
1819{
1820 return 0;
1821}
1822
1823static inline void put_aggregate(struct sched_domain *sd)
1824{
1825}
1826#endif
1827
1828#else /* CONFIG_SMP */ 1484#else /* CONFIG_SMP */
1829 1485
1830#ifdef CONFIG_FAIR_GROUP_SCHED 1486#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1845,14 +1501,26 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1845 1501
1846#define sched_class_highest (&rt_sched_class) 1502#define sched_class_highest (&rt_sched_class)
1847 1503
1848static void inc_nr_running(struct rq *rq) 1504static inline void inc_load(struct rq *rq, const struct task_struct *p)
1505{
1506 update_load_add(&rq->load, p->se.load.weight);
1507}
1508
1509static inline void dec_load(struct rq *rq, const struct task_struct *p)
1510{
1511 update_load_sub(&rq->load, p->se.load.weight);
1512}
1513
1514static void inc_nr_running(struct task_struct *p, struct rq *rq)
1849{ 1515{
1850 rq->nr_running++; 1516 rq->nr_running++;
1517 inc_load(rq, p);
1851} 1518}
1852 1519
1853static void dec_nr_running(struct rq *rq) 1520static void dec_nr_running(struct task_struct *p, struct rq *rq)
1854{ 1521{
1855 rq->nr_running--; 1522 rq->nr_running--;
1523 dec_load(rq, p);
1856} 1524}
1857 1525
1858static void set_load_weight(struct task_struct *p) 1526static void set_load_weight(struct task_struct *p)
@@ -1944,7 +1612,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
1944 rq->nr_uninterruptible--; 1612 rq->nr_uninterruptible--;
1945 1613
1946 enqueue_task(rq, p, wakeup); 1614 enqueue_task(rq, p, wakeup);
1947 inc_nr_running(rq); 1615 inc_nr_running(p, rq);
1948} 1616}
1949 1617
1950/* 1618/*
@@ -1956,7 +1624,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
1956 rq->nr_uninterruptible++; 1624 rq->nr_uninterruptible++;
1957 1625
1958 dequeue_task(rq, p, sleep); 1626 dequeue_task(rq, p, sleep);
1959 dec_nr_running(rq); 1627 dec_nr_running(p, rq);
1960} 1628}
1961 1629
1962/** 1630/**
@@ -2609,7 +2277,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2609 * management (if any): 2277 * management (if any):
2610 */ 2278 */
2611 p->sched_class->task_new(rq, p); 2279 p->sched_class->task_new(rq, p);
2612 inc_nr_running(rq); 2280 inc_nr_running(p, rq);
2613 } 2281 }
2614 check_preempt_curr(rq, p); 2282 check_preempt_curr(rq, p);
2615#ifdef CONFIG_SMP 2283#ifdef CONFIG_SMP
@@ -3600,12 +3268,9 @@ static int load_balance(int this_cpu, struct rq *this_rq,
3600 unsigned long imbalance; 3268 unsigned long imbalance;
3601 struct rq *busiest; 3269 struct rq *busiest;
3602 unsigned long flags; 3270 unsigned long flags;
3603 int unlock_aggregate;
3604 3271
3605 cpus_setall(*cpus); 3272 cpus_setall(*cpus);
3606 3273
3607 unlock_aggregate = get_aggregate(sd);
3608
3609 /* 3274 /*
3610 * When power savings policy is enabled for the parent domain, idle 3275 * When power savings policy is enabled for the parent domain, idle
3611 * sibling can pick up load irrespective of busy siblings. In this case, 3276 * sibling can pick up load irrespective of busy siblings. In this case,
@@ -3721,9 +3386,8 @@ redo:
3721 3386
3722 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && 3387 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3723 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) 3388 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3724 ld_moved = -1; 3389 return -1;
3725 3390 return ld_moved;
3726 goto out;
3727 3391
3728out_balanced: 3392out_balanced:
3729 schedstat_inc(sd, lb_balanced[idle]); 3393 schedstat_inc(sd, lb_balanced[idle]);
@@ -3738,13 +3402,8 @@ out_one_pinned:
3738 3402
3739 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && 3403 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3740 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) 3404 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3741 ld_moved = -1; 3405 return -1;
3742 else 3406 return 0;
3743 ld_moved = 0;
3744out:
3745 if (unlock_aggregate)
3746 put_aggregate(sd);
3747 return ld_moved;
3748} 3407}
3749 3408
3750/* 3409/*
@@ -4430,7 +4089,7 @@ static inline void schedule_debug(struct task_struct *prev)
4430 * schedule() atomically, we ignore that path for now. 4089 * schedule() atomically, we ignore that path for now.
4431 * Otherwise, whine if we are scheduling when we should not be. 4090 * Otherwise, whine if we are scheduling when we should not be.
4432 */ 4091 */
4433 if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state)) 4092 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
4434 __schedule_bug(prev); 4093 __schedule_bug(prev);
4435 4094
4436 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 4095 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -4510,12 +4169,10 @@ need_resched_nonpreemptible:
4510 clear_tsk_need_resched(prev); 4169 clear_tsk_need_resched(prev);
4511 4170
4512 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 4171 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
4513 if (unlikely((prev->state & TASK_INTERRUPTIBLE) && 4172 if (unlikely(signal_pending_state(prev->state, prev)))
4514 signal_pending(prev))) {
4515 prev->state = TASK_RUNNING; 4173 prev->state = TASK_RUNNING;
4516 } else { 4174 else
4517 deactivate_task(rq, prev, 1); 4175 deactivate_task(rq, prev, 1);
4518 }
4519 switch_count = &prev->nvcsw; 4176 switch_count = &prev->nvcsw;
4520 } 4177 }
4521 4178
@@ -4741,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
4741 signal_pending(current)) || 4398 signal_pending(current)) ||
4742 (state == TASK_KILLABLE && 4399 (state == TASK_KILLABLE &&
4743 fatal_signal_pending(current))) { 4400 fatal_signal_pending(current))) {
4744 __remove_wait_queue(&x->wait, &wait); 4401 timeout = -ERESTARTSYS;
4745 return -ERESTARTSYS; 4402 break;
4746 } 4403 }
4747 __set_current_state(state); 4404 __set_current_state(state);
4748 spin_unlock_irq(&x->wait.lock); 4405 spin_unlock_irq(&x->wait.lock);
4749 timeout = schedule_timeout(timeout); 4406 timeout = schedule_timeout(timeout);
4750 spin_lock_irq(&x->wait.lock); 4407 spin_lock_irq(&x->wait.lock);
4751 if (!timeout) { 4408 } while (!x->done && timeout);
4752 __remove_wait_queue(&x->wait, &wait);
4753 return timeout;
4754 }
4755 } while (!x->done);
4756 __remove_wait_queue(&x->wait, &wait); 4409 __remove_wait_queue(&x->wait, &wait);
4410 if (!x->done)
4411 return timeout;
4757 } 4412 }
4758 x->done--; 4413 x->done--;
4759 return timeout; 4414 return timeout ?: 1;
4760} 4415}
4761 4416
4762static long __sched 4417static long __sched
@@ -4931,8 +4586,10 @@ void set_user_nice(struct task_struct *p, long nice)
4931 goto out_unlock; 4586 goto out_unlock;
4932 } 4587 }
4933 on_rq = p->se.on_rq; 4588 on_rq = p->se.on_rq;
4934 if (on_rq) 4589 if (on_rq) {
4935 dequeue_task(rq, p, 0); 4590 dequeue_task(rq, p, 0);
4591 dec_load(rq, p);
4592 }
4936 4593
4937 p->static_prio = NICE_TO_PRIO(nice); 4594 p->static_prio = NICE_TO_PRIO(nice);
4938 set_load_weight(p); 4595 set_load_weight(p);
@@ -4942,6 +4599,7 @@ void set_user_nice(struct task_struct *p, long nice)
4942 4599
4943 if (on_rq) { 4600 if (on_rq) {
4944 enqueue_task(rq, p, 0); 4601 enqueue_task(rq, p, 0);
4602 inc_load(rq, p);
4945 /* 4603 /*
4946 * If the task increased its priority or is running and 4604 * If the task increased its priority or is running and
4947 * lowered its priority, then reschedule its CPU: 4605 * lowered its priority, then reschedule its CPU:
@@ -5964,10 +5622,10 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5964 double_rq_lock(rq_src, rq_dest); 5622 double_rq_lock(rq_src, rq_dest);
5965 /* Already moved. */ 5623 /* Already moved. */
5966 if (task_cpu(p) != src_cpu) 5624 if (task_cpu(p) != src_cpu)
5967 goto out; 5625 goto done;
5968 /* Affinity changed (again). */ 5626 /* Affinity changed (again). */
5969 if (!cpu_isset(dest_cpu, p->cpus_allowed)) 5627 if (!cpu_isset(dest_cpu, p->cpus_allowed))
5970 goto out; 5628 goto fail;
5971 5629
5972 on_rq = p->se.on_rq; 5630 on_rq = p->se.on_rq;
5973 if (on_rq) 5631 if (on_rq)
@@ -5978,8 +5636,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5978 activate_task(rq_dest, p, 0); 5636 activate_task(rq_dest, p, 0);
5979 check_preempt_curr(rq_dest, p); 5637 check_preempt_curr(rq_dest, p);
5980 } 5638 }
5639done:
5981 ret = 1; 5640 ret = 1;
5982out: 5641fail:
5983 double_rq_unlock(rq_src, rq_dest); 5642 double_rq_unlock(rq_src, rq_dest);
5984 return ret; 5643 return ret;
5985} 5644}
@@ -6229,6 +5888,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
6229 next = pick_next_task(rq, rq->curr); 5888 next = pick_next_task(rq, rq->curr);
6230 if (!next) 5889 if (!next)
6231 break; 5890 break;
5891 next->sched_class->put_prev_task(rq, next);
6232 migrate_dead(dead_cpu, next); 5892 migrate_dead(dead_cpu, next);
6233 5893
6234 } 5894 }
@@ -7219,7 +6879,12 @@ static int default_relax_domain_level = -1;
7219 6879
7220static int __init setup_relax_domain_level(char *str) 6880static int __init setup_relax_domain_level(char *str)
7221{ 6881{
7222 default_relax_domain_level = simple_strtoul(str, NULL, 0); 6882 unsigned long val;
6883
6884 val = simple_strtoul(str, NULL, 0);
6885 if (val < SD_LV_MAX)
6886 default_relax_domain_level = val;
6887
7223 return 1; 6888 return 1;
7224} 6889}
7225__setup("relax_domain_level=", setup_relax_domain_level); 6890__setup("relax_domain_level=", setup_relax_domain_level);
@@ -7316,7 +6981,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7316 SD_INIT(sd, ALLNODES); 6981 SD_INIT(sd, ALLNODES);
7317 set_domain_attribute(sd, attr); 6982 set_domain_attribute(sd, attr);
7318 sd->span = *cpu_map; 6983 sd->span = *cpu_map;
7319 sd->first_cpu = first_cpu(sd->span);
7320 cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); 6984 cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
7321 p = sd; 6985 p = sd;
7322 sd_allnodes = 1; 6986 sd_allnodes = 1;
@@ -7327,7 +6991,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7327 SD_INIT(sd, NODE); 6991 SD_INIT(sd, NODE);
7328 set_domain_attribute(sd, attr); 6992 set_domain_attribute(sd, attr);
7329 sched_domain_node_span(cpu_to_node(i), &sd->span); 6993 sched_domain_node_span(cpu_to_node(i), &sd->span);
7330 sd->first_cpu = first_cpu(sd->span);
7331 sd->parent = p; 6994 sd->parent = p;
7332 if (p) 6995 if (p)
7333 p->child = sd; 6996 p->child = sd;
@@ -7339,7 +7002,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7339 SD_INIT(sd, CPU); 7002 SD_INIT(sd, CPU);
7340 set_domain_attribute(sd, attr); 7003 set_domain_attribute(sd, attr);
7341 sd->span = *nodemask; 7004 sd->span = *nodemask;
7342 sd->first_cpu = first_cpu(sd->span);
7343 sd->parent = p; 7005 sd->parent = p;
7344 if (p) 7006 if (p)
7345 p->child = sd; 7007 p->child = sd;
@@ -7351,7 +7013,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7351 SD_INIT(sd, MC); 7013 SD_INIT(sd, MC);
7352 set_domain_attribute(sd, attr); 7014 set_domain_attribute(sd, attr);
7353 sd->span = cpu_coregroup_map(i); 7015 sd->span = cpu_coregroup_map(i);
7354 sd->first_cpu = first_cpu(sd->span);
7355 cpus_and(sd->span, sd->span, *cpu_map); 7016 cpus_and(sd->span, sd->span, *cpu_map);
7356 sd->parent = p; 7017 sd->parent = p;
7357 p->child = sd; 7018 p->child = sd;
@@ -7364,7 +7025,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7364 SD_INIT(sd, SIBLING); 7025 SD_INIT(sd, SIBLING);
7365 set_domain_attribute(sd, attr); 7026 set_domain_attribute(sd, attr);
7366 sd->span = per_cpu(cpu_sibling_map, i); 7027 sd->span = per_cpu(cpu_sibling_map, i);
7367 sd->first_cpu = first_cpu(sd->span);
7368 cpus_and(sd->span, sd->span, *cpu_map); 7028 cpus_and(sd->span, sd->span, *cpu_map);
7369 sd->parent = p; 7029 sd->parent = p;
7370 p->child = sd; 7030 p->child = sd;
@@ -7568,8 +7228,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
7568 7228
7569static cpumask_t *doms_cur; /* current sched domains */ 7229static cpumask_t *doms_cur; /* current sched domains */
7570static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 7230static int ndoms_cur; /* number of sched domains in 'doms_cur' */
7571static struct sched_domain_attr *dattr_cur; /* attribues of custom domains 7231static struct sched_domain_attr *dattr_cur;
7572 in 'doms_cur' */ 7232 /* attribues of custom domains in 'doms_cur' */
7573 7233
7574/* 7234/*
7575 * Special case: If a kmalloc of a doms_cur partition (array of 7235 * Special case: If a kmalloc of a doms_cur partition (array of
@@ -7583,6 +7243,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
7583} 7243}
7584 7244
7585/* 7245/*
7246 * Free current domain masks.
7247 * Called after all cpus are attached to NULL domain.
7248 */
7249static void free_sched_domains(void)
7250{
7251 ndoms_cur = 0;
7252 if (doms_cur != &fallback_doms)
7253 kfree(doms_cur);
7254 doms_cur = &fallback_doms;
7255}
7256
7257/*
7586 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 7258 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
7587 * For now this just excludes isolated cpus, but could be used to 7259 * For now this just excludes isolated cpus, but could be used to
7588 * exclude other special cases in the future. 7260 * exclude other special cases in the future.
@@ -7729,6 +7401,7 @@ int arch_reinit_sched_domains(void)
7729 get_online_cpus(); 7401 get_online_cpus();
7730 mutex_lock(&sched_domains_mutex); 7402 mutex_lock(&sched_domains_mutex);
7731 detach_destroy_domains(&cpu_online_map); 7403 detach_destroy_domains(&cpu_online_map);
7404 free_sched_domains();
7732 err = arch_init_sched_domains(&cpu_online_map); 7405 err = arch_init_sched_domains(&cpu_online_map);
7733 mutex_unlock(&sched_domains_mutex); 7406 mutex_unlock(&sched_domains_mutex);
7734 put_online_cpus(); 7407 put_online_cpus();
@@ -7814,6 +7487,7 @@ static int update_sched_domains(struct notifier_block *nfb,
7814 case CPU_DOWN_PREPARE: 7487 case CPU_DOWN_PREPARE:
7815 case CPU_DOWN_PREPARE_FROZEN: 7488 case CPU_DOWN_PREPARE_FROZEN:
7816 detach_destroy_domains(&cpu_online_map); 7489 detach_destroy_domains(&cpu_online_map);
7490 free_sched_domains();
7817 return NOTIFY_OK; 7491 return NOTIFY_OK;
7818 7492
7819 case CPU_UP_CANCELED: 7493 case CPU_UP_CANCELED:
@@ -7832,8 +7506,16 @@ static int update_sched_domains(struct notifier_block *nfb,
7832 return NOTIFY_DONE; 7506 return NOTIFY_DONE;
7833 } 7507 }
7834 7508
7509#ifndef CONFIG_CPUSETS
7510 /*
7511 * Create default domain partitioning if cpusets are disabled.
7512 * Otherwise we let cpusets rebuild the domains based on the
7513 * current setup.
7514 */
7515
7835 /* The hotplug lock is already held by cpu_up/cpu_down */ 7516 /* The hotplug lock is already held by cpu_up/cpu_down */
7836 arch_init_sched_domains(&cpu_online_map); 7517 arch_init_sched_domains(&cpu_online_map);
7518#endif
7837 7519
7838 return NOTIFY_OK; 7520 return NOTIFY_OK;
7839} 7521}
@@ -7973,7 +7655,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
7973 else 7655 else
7974 rt_se->rt_rq = parent->my_q; 7656 rt_se->rt_rq = parent->my_q;
7975 7657
7976 rt_se->rt_rq = &rq->rt;
7977 rt_se->my_q = rt_rq; 7658 rt_se->my_q = rt_rq;
7978 rt_se->parent = parent; 7659 rt_se->parent = parent;
7979 INIT_LIST_HEAD(&rt_se->run_list); 7660 INIT_LIST_HEAD(&rt_se->run_list);
@@ -8034,7 +7715,6 @@ void __init sched_init(void)
8034 } 7715 }
8035 7716
8036#ifdef CONFIG_SMP 7717#ifdef CONFIG_SMP
8037 init_aggregate();
8038 init_defrootdomain(); 7718 init_defrootdomain();
8039#endif 7719#endif
8040 7720
@@ -8599,11 +8279,14 @@ void sched_move_task(struct task_struct *tsk)
8599#endif 8279#endif
8600 8280
8601#ifdef CONFIG_FAIR_GROUP_SCHED 8281#ifdef CONFIG_FAIR_GROUP_SCHED
8602static void __set_se_shares(struct sched_entity *se, unsigned long shares) 8282static void set_se_shares(struct sched_entity *se, unsigned long shares)
8603{ 8283{
8604 struct cfs_rq *cfs_rq = se->cfs_rq; 8284 struct cfs_rq *cfs_rq = se->cfs_rq;
8285 struct rq *rq = cfs_rq->rq;
8605 int on_rq; 8286 int on_rq;
8606 8287
8288 spin_lock_irq(&rq->lock);
8289
8607 on_rq = se->on_rq; 8290 on_rq = se->on_rq;
8608 if (on_rq) 8291 if (on_rq)
8609 dequeue_entity(cfs_rq, se, 0); 8292 dequeue_entity(cfs_rq, se, 0);
@@ -8613,17 +8296,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
8613 8296
8614 if (on_rq) 8297 if (on_rq)
8615 enqueue_entity(cfs_rq, se, 0); 8298 enqueue_entity(cfs_rq, se, 0);
8616}
8617 8299
8618static void set_se_shares(struct sched_entity *se, unsigned long shares) 8300 spin_unlock_irq(&rq->lock);
8619{
8620 struct cfs_rq *cfs_rq = se->cfs_rq;
8621 struct rq *rq = cfs_rq->rq;
8622 unsigned long flags;
8623
8624 spin_lock_irqsave(&rq->lock, flags);
8625 __set_se_shares(se, shares);
8626 spin_unlock_irqrestore(&rq->lock, flags);
8627} 8301}
8628 8302
8629static DEFINE_MUTEX(shares_mutex); 8303static DEFINE_MUTEX(shares_mutex);
@@ -8662,13 +8336,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8662 * w/o tripping rebalance_share or load_balance_fair. 8336 * w/o tripping rebalance_share or load_balance_fair.
8663 */ 8337 */
8664 tg->shares = shares; 8338 tg->shares = shares;
8665 for_each_possible_cpu(i) { 8339 for_each_possible_cpu(i)
8666 /*
8667 * force a rebalance
8668 */
8669 cfs_rq_set_shares(tg->cfs_rq[i], 0);
8670 set_se_shares(tg->se[i], shares); 8340 set_se_shares(tg->se[i], shares);
8671 }
8672 8341
8673 /* 8342 /*
8674 * Enable load balance activity on this group, by inserting it back on 8343 * Enable load balance activity on this group, by inserting it back on
@@ -8707,7 +8376,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
8707#ifdef CONFIG_CGROUP_SCHED 8376#ifdef CONFIG_CGROUP_SCHED
8708static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 8377static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8709{ 8378{
8710 struct task_group *tgi, *parent = tg->parent; 8379 struct task_group *tgi, *parent = tg ? tg->parent : NULL;
8711 unsigned long total = 0; 8380 unsigned long total = 0;
8712 8381
8713 if (!parent) { 8382 if (!parent) {
@@ -8834,6 +8503,9 @@ int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8834 rt_period = (u64)rt_period_us * NSEC_PER_USEC; 8503 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8835 rt_runtime = tg->rt_bandwidth.rt_runtime; 8504 rt_runtime = tg->rt_bandwidth.rt_runtime;
8836 8505
8506 if (rt_period == 0)
8507 return -EINVAL;
8508
8837 return tg_set_bandwidth(tg, rt_period, rt_runtime); 8509 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8838} 8510}
8839 8511
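A brief aside on the free_sched_domains() helper introduced above: it resets the partition bookkeeping and frees the current domain-mask array only when that array is not the static fallback. Below is a minimal user-space sketch of the same idea, with simplified stand-in types (cpumask_t reduced to an unsigned long, kfree() replaced by free()); names here are illustrative, not the kernel's.

#include <stdlib.h>

typedef unsigned long cpumask_t;             /* stand-in for the kernel type */

static cpumask_t fallback_doms;              /* static fallback partition    */
static cpumask_t *doms_cur = &fallback_doms; /* current partition array      */
static int ndoms_cur;                        /* entries in doms_cur          */

void free_domains(void)
{
        ndoms_cur = 0;
        if (doms_cur != &fallback_doms)      /* only free an allocated array */
                free(doms_cur);
        doms_cur = &fallback_doms;           /* rebuild starts from fallback */
}

The "free unless it is the static default, then point back at the default" shape mirrors how the callers above (arch_reinit_sched_domains() and the hotplug notifier) drop the old partition right after detaching the domains, before anything rebuilds them.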
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 9c597e37f7de..ce05271219ab 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -59,22 +59,26 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
59 return &per_cpu(sched_clock_data, cpu); 59 return &per_cpu(sched_clock_data, cpu);
60} 60}
61 61
62static __read_mostly int sched_clock_running;
63
62void sched_clock_init(void) 64void sched_clock_init(void)
63{ 65{
64 u64 ktime_now = ktime_to_ns(ktime_get()); 66 u64 ktime_now = ktime_to_ns(ktime_get());
65 u64 now = 0; 67 unsigned long now_jiffies = jiffies;
66 int cpu; 68 int cpu;
67 69
68 for_each_possible_cpu(cpu) { 70 for_each_possible_cpu(cpu) {
69 struct sched_clock_data *scd = cpu_sdc(cpu); 71 struct sched_clock_data *scd = cpu_sdc(cpu);
70 72
71 scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 73 scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
72 scd->prev_jiffies = jiffies; 74 scd->prev_jiffies = now_jiffies;
73 scd->prev_raw = now; 75 scd->prev_raw = 0;
74 scd->tick_raw = now; 76 scd->tick_raw = 0;
75 scd->tick_gtod = ktime_now; 77 scd->tick_gtod = ktime_now;
76 scd->clock = ktime_now; 78 scd->clock = ktime_now;
77 } 79 }
80
81 sched_clock_running = 1;
78} 82}
79 83
80/* 84/*
@@ -136,6 +140,9 @@ u64 sched_clock_cpu(int cpu)
136 struct sched_clock_data *scd = cpu_sdc(cpu); 140 struct sched_clock_data *scd = cpu_sdc(cpu);
137 u64 now, clock; 141 u64 now, clock;
138 142
143 if (unlikely(!sched_clock_running))
144 return 0ull;
145
139 WARN_ON_ONCE(!irqs_disabled()); 146 WARN_ON_ONCE(!irqs_disabled());
140 now = sched_clock(); 147 now = sched_clock();
141 148
@@ -174,6 +181,9 @@ void sched_clock_tick(void)
174 struct sched_clock_data *scd = this_scd(); 181 struct sched_clock_data *scd = this_scd();
175 u64 now, now_gtod; 182 u64 now, now_gtod;
176 183
184 if (unlikely(!sched_clock_running))
185 return;
186
177 WARN_ON_ONCE(!irqs_disabled()); 187 WARN_ON_ONCE(!irqs_disabled());
178 188
179 now = sched_clock(); 189 now = sched_clock();
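The sched_clock.c change above gates both sched_clock_cpu() and sched_clock_tick() on a sched_clock_running flag, so callers that run before sched_clock_init() see a harmless 0 instead of touching uninitialised per-CPU data. A rough user-space sketch of that guard pattern follows; the clock source and names are stand-ins, not the kernel's.

#include <stdint.h>
#include <time.h>

static int clock_running;               /* set once init has completed   */

static uint64_t raw_ns(void)            /* stand-in for sched_clock()    */
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

uint64_t clock_cpu(void)
{
        if (!clock_running)             /* too early: state not valid yet */
                return 0;
        return raw_ns();
}

void clock_init(void)
{
        /* per-CPU clock state would be initialised here */
        clock_running = 1;              /* queries are meaningful from now */
}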
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5f06118fbc31..8bb713040ac9 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -167,11 +167,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
167#endif 167#endif
168 SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over", 168 SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
169 cfs_rq->nr_spread_over); 169 cfs_rq->nr_spread_over);
170#ifdef CONFIG_FAIR_GROUP_SCHED
171#ifdef CONFIG_SMP
172 SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares);
173#endif
174#endif
175} 170}
176 171
177static void print_cpu(struct seq_file *m, int cpu) 172static void print_cpu(struct seq_file *m, int cpu)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e24ecd39c4b8..08ae848b71d4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -334,34 +334,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
334#endif 334#endif
335 335
336/* 336/*
337 * delta *= w / rw
338 */
339static inline unsigned long
340calc_delta_weight(unsigned long delta, struct sched_entity *se)
341{
342 for_each_sched_entity(se) {
343 delta = calc_delta_mine(delta,
344 se->load.weight, &cfs_rq_of(se)->load);
345 }
346
347 return delta;
348}
349
350/*
351 * delta *= rw / w
352 */
353static inline unsigned long
354calc_delta_fair(unsigned long delta, struct sched_entity *se)
355{
356 for_each_sched_entity(se) {
357 delta = calc_delta_mine(delta,
358 cfs_rq_of(se)->load.weight, &se->load);
359 }
360
361 return delta;
362}
363
364/*
365 * The idea is to set a period in which each task runs once. 337 * The idea is to set a period in which each task runs once.
366 * 338 *
367 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch 339 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
@@ -390,54 +362,47 @@ static u64 __sched_period(unsigned long nr_running)
390 */ 362 */
391static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) 363static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
392{ 364{
393 return calc_delta_weight(__sched_period(cfs_rq->nr_running), se); 365 u64 slice = __sched_period(cfs_rq->nr_running);
366
367 for_each_sched_entity(se) {
368 cfs_rq = cfs_rq_of(se);
369
370 slice *= se->load.weight;
371 do_div(slice, cfs_rq->load.weight);
372 }
373
374
375 return slice;
394} 376}
395 377
396/* 378/*
397 * We calculate the vruntime slice of a to be inserted task 379 * We calculate the vruntime slice of a to be inserted task
398 * 380 *
399 * vs = s*rw/w = p 381 * vs = s/w = p/rw
400 */ 382 */
401static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) 383static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
402{ 384{
403 unsigned long nr_running = cfs_rq->nr_running; 385 unsigned long nr_running = cfs_rq->nr_running;
386 unsigned long weight;
387 u64 vslice;
404 388
405 if (!se->on_rq) 389 if (!se->on_rq)
406 nr_running++; 390 nr_running++;
407 391
408 return __sched_period(nr_running); 392 vslice = __sched_period(nr_running);
409}
410
411/*
412 * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
413 * that it favours >=0 over <0.
414 *
415 * -20 |
416 * |
417 * 0 --------+-------
418 * .'
419 * 19 .'
420 *
421 */
422static unsigned long
423calc_delta_asym(unsigned long delta, struct sched_entity *se)
424{
425 struct load_weight lw = {
426 .weight = NICE_0_LOAD,
427 .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
428 };
429 393
430 for_each_sched_entity(se) { 394 for_each_sched_entity(se) {
431 struct load_weight *se_lw = &se->load; 395 cfs_rq = cfs_rq_of(se);
432 396
433 if (se->load.weight < NICE_0_LOAD) 397 weight = cfs_rq->load.weight;
434 se_lw = &lw; 398 if (!se->on_rq)
399 weight += se->load.weight;
435 400
436 delta = calc_delta_mine(delta, 401 vslice *= NICE_0_LOAD;
437 cfs_rq_of(se)->load.weight, se_lw); 402 do_div(vslice, weight);
438 } 403 }
439 404
440 return delta; 405 return vslice;
441} 406}
442 407
443/* 408/*
@@ -454,7 +419,11 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
454 419
455 curr->sum_exec_runtime += delta_exec; 420 curr->sum_exec_runtime += delta_exec;
456 schedstat_add(cfs_rq, exec_clock, delta_exec); 421 schedstat_add(cfs_rq, exec_clock, delta_exec);
457 delta_exec_weighted = calc_delta_fair(delta_exec, curr); 422 delta_exec_weighted = delta_exec;
423 if (unlikely(curr->load.weight != NICE_0_LOAD)) {
424 delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
425 &curr->load);
426 }
458 curr->vruntime += delta_exec_weighted; 427 curr->vruntime += delta_exec_weighted;
459} 428}
460 429
@@ -541,27 +510,10 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
541 * Scheduling class queueing methods: 510 * Scheduling class queueing methods:
542 */ 511 */
543 512
544#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
545static void
546add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
547{
548 cfs_rq->task_weight += weight;
549}
550#else
551static inline void
552add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
553{
554}
555#endif
556
557static void 513static void
558account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) 514account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
559{ 515{
560 update_load_add(&cfs_rq->load, se->load.weight); 516 update_load_add(&cfs_rq->load, se->load.weight);
561 if (!parent_entity(se))
562 inc_cpu_load(rq_of(cfs_rq), se->load.weight);
563 if (entity_is_task(se))
564 add_cfs_task_weight(cfs_rq, se->load.weight);
565 cfs_rq->nr_running++; 517 cfs_rq->nr_running++;
566 se->on_rq = 1; 518 se->on_rq = 1;
567 list_add(&se->group_node, &cfs_rq->tasks); 519 list_add(&se->group_node, &cfs_rq->tasks);
@@ -571,10 +523,6 @@ static void
571account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) 523account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
572{ 524{
573 update_load_sub(&cfs_rq->load, se->load.weight); 525 update_load_sub(&cfs_rq->load, se->load.weight);
574 if (!parent_entity(se))
575 dec_cpu_load(rq_of(cfs_rq), se->load.weight);
576 if (entity_is_task(se))
577 add_cfs_task_weight(cfs_rq, -se->load.weight);
578 cfs_rq->nr_running--; 526 cfs_rq->nr_running--;
579 se->on_rq = 0; 527 se->on_rq = 0;
580 list_del_init(&se->group_node); 528 list_del_init(&se->group_node);
@@ -661,17 +609,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
661 609
662 if (!initial) { 610 if (!initial) {
663 /* sleeps up to a single latency don't count. */ 611 /* sleeps up to a single latency don't count. */
664 if (sched_feat(NEW_FAIR_SLEEPERS)) { 612 if (sched_feat(NEW_FAIR_SLEEPERS))
665 unsigned long thresh = sysctl_sched_latency; 613 vruntime -= sysctl_sched_latency;
666
667 /*
668 * convert the sleeper threshold into virtual time
669 */
670 if (sched_feat(NORMALIZED_SLEEPER))
671 thresh = calc_delta_fair(thresh, se);
672
673 vruntime -= thresh;
674 }
675 614
676 /* ensure we never gain time by being placed backwards. */ 615 /* ensure we never gain time by being placed backwards. */
677 vruntime = max_vruntime(se->vruntime, vruntime); 616 vruntime = max_vruntime(se->vruntime, vruntime);
@@ -1057,16 +996,27 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
1057 struct task_struct *curr = this_rq->curr; 996 struct task_struct *curr = this_rq->curr;
1058 unsigned long tl = this_load; 997 unsigned long tl = this_load;
1059 unsigned long tl_per_task; 998 unsigned long tl_per_task;
999 int balanced;
1060 1000
1061 if (!(this_sd->flags & SD_WAKE_AFFINE)) 1001 if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
1062 return 0; 1002 return 0;
1063 1003
1064 /* 1004 /*
1005 * If sync wakeup then subtract the (maximum possible)
1006 * effect of the currently running task from the load
1007 * of the current CPU:
1008 */
1009 if (sync)
1010 tl -= current->se.load.weight;
1011
1012 balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
1013
1014 /*
1065 * If the currently running task will sleep within 1015 * If the currently running task will sleep within
1066 * a reasonable amount of time then attract this newly 1016 * a reasonable amount of time then attract this newly
1067 * woken task: 1017 * woken task:
1068 */ 1018 */
1069 if (sync && curr->sched_class == &fair_sched_class) { 1019 if (sync && balanced && curr->sched_class == &fair_sched_class) {
1070 if (curr->se.avg_overlap < sysctl_sched_migration_cost && 1020 if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
1071 p->se.avg_overlap < sysctl_sched_migration_cost) 1021 p->se.avg_overlap < sysctl_sched_migration_cost)
1072 return 1; 1022 return 1;
@@ -1075,16 +1025,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
1075 schedstat_inc(p, se.nr_wakeups_affine_attempts); 1025 schedstat_inc(p, se.nr_wakeups_affine_attempts);
1076 tl_per_task = cpu_avg_load_per_task(this_cpu); 1026 tl_per_task = cpu_avg_load_per_task(this_cpu);
1077 1027
1078 /*
1079 * If sync wakeup then subtract the (maximum possible)
1080 * effect of the currently running task from the load
1081 * of the current CPU:
1082 */
1083 if (sync)
1084 tl -= current->se.load.weight;
1085
1086 if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) || 1028 if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
1087 100*(tl + p->se.load.weight) <= imbalance*load) { 1029 balanced) {
1088 /* 1030 /*
1089 * This domain has SD_WAKE_AFFINE and 1031 * This domain has SD_WAKE_AFFINE and
1090 * p is cache cold in this domain, and 1032 * p is cache cold in this domain, and
@@ -1169,10 +1111,11 @@ static unsigned long wakeup_gran(struct sched_entity *se)
1169 unsigned long gran = sysctl_sched_wakeup_granularity; 1111 unsigned long gran = sysctl_sched_wakeup_granularity;
1170 1112
1171 /* 1113 /*
1172 * More easily preempt - nice tasks, while not making it harder for 1114 * More easily preempt - nice tasks, while not making
1173 * + nice tasks. 1115 * it harder for + nice tasks.
1174 */ 1116 */
1175 gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se); 1117 if (unlikely(se->load.weight > NICE_0_LOAD))
1118 gran = calc_delta_fair(gran, &se->load);
1176 1119
1177 return gran; 1120 return gran;
1178} 1121}
@@ -1366,90 +1309,75 @@ static struct task_struct *load_balance_next_fair(void *arg)
1366 return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator); 1309 return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
1367} 1310}
1368 1311
1369static unsigned long 1312#ifdef CONFIG_FAIR_GROUP_SCHED
1370__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, 1313static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
1371 unsigned long max_load_move, struct sched_domain *sd,
1372 enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
1373 struct cfs_rq *cfs_rq)
1374{ 1314{
1375 struct rq_iterator cfs_rq_iterator; 1315 struct sched_entity *curr;
1316 struct task_struct *p;
1376 1317
1377 cfs_rq_iterator.start = load_balance_start_fair; 1318 if (!cfs_rq->nr_running || !first_fair(cfs_rq))
1378 cfs_rq_iterator.next = load_balance_next_fair; 1319 return MAX_PRIO;
1379 cfs_rq_iterator.arg = cfs_rq; 1320
1321 curr = cfs_rq->curr;
1322 if (!curr)
1323 curr = __pick_next_entity(cfs_rq);
1324
1325 p = task_of(curr);
1380 1326
1381 return balance_tasks(this_rq, this_cpu, busiest, 1327 return p->prio;
1382 max_load_move, sd, idle, all_pinned,
1383 this_best_prio, &cfs_rq_iterator);
1384} 1328}
1329#endif
1385 1330
1386#ifdef CONFIG_FAIR_GROUP_SCHED
1387static unsigned long 1331static unsigned long
1388load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, 1332load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1389 unsigned long max_load_move, 1333 unsigned long max_load_move,
1390 struct sched_domain *sd, enum cpu_idle_type idle, 1334 struct sched_domain *sd, enum cpu_idle_type idle,
1391 int *all_pinned, int *this_best_prio) 1335 int *all_pinned, int *this_best_prio)
1392{ 1336{
1337 struct cfs_rq *busy_cfs_rq;
1393 long rem_load_move = max_load_move; 1338 long rem_load_move = max_load_move;
1394 int busiest_cpu = cpu_of(busiest); 1339 struct rq_iterator cfs_rq_iterator;
1395 struct task_group *tg;
1396
1397 rcu_read_lock();
1398 list_for_each_entry(tg, &task_groups, list) {
1399 long imbalance;
1400 unsigned long this_weight, busiest_weight;
1401 long rem_load, max_load, moved_load;
1402
1403 /*
1404 * empty group
1405 */
1406 if (!aggregate(tg, sd)->task_weight)
1407 continue;
1408
1409 rem_load = rem_load_move * aggregate(tg, sd)->rq_weight;
1410 rem_load /= aggregate(tg, sd)->load + 1;
1411
1412 this_weight = tg->cfs_rq[this_cpu]->task_weight;
1413 busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
1414 1340
1415 imbalance = (busiest_weight - this_weight) / 2; 1341 cfs_rq_iterator.start = load_balance_start_fair;
1342 cfs_rq_iterator.next = load_balance_next_fair;
1416 1343
1417 if (imbalance < 0) 1344 for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
1418 imbalance = busiest_weight; 1345#ifdef CONFIG_FAIR_GROUP_SCHED
1346 struct cfs_rq *this_cfs_rq;
1347 long imbalance;
1348 unsigned long maxload;
1419 1349
1420 max_load = max(rem_load, imbalance); 1350 this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
1421 moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
1422 max_load, sd, idle, all_pinned, this_best_prio,
1423 tg->cfs_rq[busiest_cpu]);
1424 1351
1425 if (!moved_load) 1352 imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
1353 /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
1354 if (imbalance <= 0)
1426 continue; 1355 continue;
1427 1356
1428 move_group_shares(tg, sd, busiest_cpu, this_cpu); 1357 /* Don't pull more than imbalance/2 */
1358 imbalance /= 2;
1359 maxload = min(rem_load_move, imbalance);
1429 1360
1430 moved_load *= aggregate(tg, sd)->load; 1361 *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
1431 moved_load /= aggregate(tg, sd)->rq_weight + 1; 1362#else
1363# define maxload rem_load_move
1364#endif
1365 /*
1366 * pass busy_cfs_rq argument into
1367 * load_balance_[start|next]_fair iterators
1368 */
1369 cfs_rq_iterator.arg = busy_cfs_rq;
1370 rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
1371 maxload, sd, idle, all_pinned,
1372 this_best_prio,
1373 &cfs_rq_iterator);
1432 1374
1433 rem_load_move -= moved_load; 1375 if (rem_load_move <= 0)
1434 if (rem_load_move < 0)
1435 break; 1376 break;
1436 } 1377 }
1437 rcu_read_unlock();
1438 1378
1439 return max_load_move - rem_load_move; 1379 return max_load_move - rem_load_move;
1440} 1380}
1441#else
1442static unsigned long
1443load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1444 unsigned long max_load_move,
1445 struct sched_domain *sd, enum cpu_idle_type idle,
1446 int *all_pinned, int *this_best_prio)
1447{
1448 return __load_balance_fair(this_rq, this_cpu, busiest,
1449 max_load_move, sd, idle, all_pinned,
1450 this_best_prio, &busiest->cfs);
1451}
1452#endif
1453 1381
1454static int 1382static int
1455move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, 1383move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
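The restored sched_slice()/sched_vslice_add() arithmetic above splits the scheduling period in proportion to a task's load weight and expresses the vruntime slice at NICE_0_LOAD weight (the "vs = s/w = p/rw" comment). A flat, user-space worked example of that arithmetic follows; the 20 ms period and the two weights are illustrative, and the hierarchy walk plus the "not yet queued" weight adjustment are omitted.

#include <stdint.h>
#include <stdio.h>

#define NICE_0_LOAD     1024ull          /* weight of a nice-0 task      */
#define SCHED_LATENCY   20000000ull      /* 20 ms period, illustrative   */

/* wall-clock slice: period scaled by this task's share of the weight */
static uint64_t slice(uint64_t period, uint64_t w, uint64_t rq_weight)
{
        return period * w / rq_weight;
}

/* vruntime slice: the same period expressed at nice-0 weight */
static uint64_t vslice(uint64_t period, uint64_t rq_weight)
{
        return period * NICE_0_LOAD / rq_weight;
}

int main(void)
{
        /* two runnable tasks: one at nice-0 (1024) and one heavier (2048) */
        uint64_t rq_weight = 1024 + 2048;

        printf("nice-0 slice : %llu ns\n",
               (unsigned long long)slice(SCHED_LATENCY, 1024, rq_weight));
        printf("heavy  slice : %llu ns\n",
               (unsigned long long)slice(SCHED_LATENCY, 2048, rq_weight));
        printf("vslice       : %llu ns\n",
               (unsigned long long)vslice(SCHED_LATENCY, rq_weight));
        return 0;
}

Run, this prints roughly 6.7 ms for the nice-0 task, 13.3 ms for the heavier one, and a 6.7 ms vruntime slice: heavier tasks get longer wall-clock slices, but every task advances its vruntime by the same amount per period.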
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 060e87b0cb1c..0f3c19197fa4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -250,7 +250,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
250 if (rt_rq->rt_time || rt_rq->rt_nr_running) 250 if (rt_rq->rt_time || rt_rq->rt_nr_running)
251 idle = 0; 251 idle = 0;
252 spin_unlock(&rt_rq->rt_runtime_lock); 252 spin_unlock(&rt_rq->rt_runtime_lock);
253 } 253 } else if (rt_rq->rt_nr_running)
254 idle = 0;
254 255
255 if (enqueue) 256 if (enqueue)
256 sched_rt_rq_enqueue(rt_rq); 257 sched_rt_rq_enqueue(rt_rq);
@@ -449,13 +450,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
449#endif 450#endif
450} 451}
451 452
452static void enqueue_rt_entity(struct sched_rt_entity *rt_se) 453static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
453{ 454{
454 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 455 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
455 struct rt_prio_array *array = &rt_rq->active; 456 struct rt_prio_array *array = &rt_rq->active;
456 struct rt_rq *group_rq = group_rt_rq(rt_se); 457 struct rt_rq *group_rq = group_rt_rq(rt_se);
457 458
458 if (group_rq && rt_rq_throttled(group_rq)) 459 /*
460 * Don't enqueue the group if it's throttled, or when empty.
461 * The latter is a consequence of the former when a child group
462 * gets throttled and the current group doesn't have any other
463 * active members.
464 */
465 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
459 return; 466 return;
460 467
461 list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se)); 468 list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +471,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
464 inc_rt_tasks(rt_se, rt_rq); 471 inc_rt_tasks(rt_se, rt_rq);
465} 472}
466 473
467static void dequeue_rt_entity(struct sched_rt_entity *rt_se) 474static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
468{ 475{
469 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 476 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
470 struct rt_prio_array *array = &rt_rq->active; 477 struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +487,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
480 * Because the prio of an upper entry depends on the lower 487 * Because the prio of an upper entry depends on the lower
481 * entries, we must remove entries top - down. 488 * entries, we must remove entries top - down.
482 */ 489 */
483static void dequeue_rt_stack(struct task_struct *p) 490static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
484{ 491{
485 struct sched_rt_entity *rt_se, *back = NULL; 492 struct sched_rt_entity *back = NULL;
486 493
487 rt_se = &p->rt;
488 for_each_sched_rt_entity(rt_se) { 494 for_each_sched_rt_entity(rt_se) {
489 rt_se->back = back; 495 rt_se->back = back;
490 back = rt_se; 496 back = rt_se;
@@ -492,7 +498,26 @@ static void dequeue_rt_stack(struct task_struct *p)
492 498
493 for (rt_se = back; rt_se; rt_se = rt_se->back) { 499 for (rt_se = back; rt_se; rt_se = rt_se->back) {
494 if (on_rt_rq(rt_se)) 500 if (on_rt_rq(rt_se))
495 dequeue_rt_entity(rt_se); 501 __dequeue_rt_entity(rt_se);
502 }
503}
504
505static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
506{
507 dequeue_rt_stack(rt_se);
508 for_each_sched_rt_entity(rt_se)
509 __enqueue_rt_entity(rt_se);
510}
511
512static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
513{
514 dequeue_rt_stack(rt_se);
515
516 for_each_sched_rt_entity(rt_se) {
517 struct rt_rq *rt_rq = group_rt_rq(rt_se);
518
519 if (rt_rq && rt_rq->rt_nr_running)
520 __enqueue_rt_entity(rt_se);
496 } 521 }
497} 522}
498 523
@@ -506,36 +531,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
506 if (wakeup) 531 if (wakeup)
507 rt_se->timeout = 0; 532 rt_se->timeout = 0;
508 533
509 dequeue_rt_stack(p); 534 enqueue_rt_entity(rt_se);
510
511 /*
512 * enqueue everybody, bottom - up.
513 */
514 for_each_sched_rt_entity(rt_se)
515 enqueue_rt_entity(rt_se);
516
517 inc_cpu_load(rq, p->se.load.weight);
518} 535}
519 536
520static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) 537static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
521{ 538{
522 struct sched_rt_entity *rt_se = &p->rt; 539 struct sched_rt_entity *rt_se = &p->rt;
523 struct rt_rq *rt_rq;
524 540
525 update_curr_rt(rq); 541 update_curr_rt(rq);
526 542 dequeue_rt_entity(rt_se);
527 dequeue_rt_stack(p);
528
529 /*
530 * re-enqueue all non-empty rt_rq entities.
531 */
532 for_each_sched_rt_entity(rt_se) {
533 rt_rq = group_rt_rq(rt_se);
534 if (rt_rq && rt_rq->rt_nr_running)
535 enqueue_rt_entity(rt_se);
536 }
537
538 dec_cpu_load(rq, p->se.load.weight);
539} 543}
540 544
541/* 545/*
@@ -546,8 +550,10 @@ static
546void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) 550void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
547{ 551{
548 struct rt_prio_array *array = &rt_rq->active; 552 struct rt_prio_array *array = &rt_rq->active;
553 struct list_head *queue = array->queue + rt_se_prio(rt_se);
549 554
550 list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se)); 555 if (on_rt_rq(rt_se))
556 list_move_tail(&rt_se->run_list, queue);
551} 557}
552 558
553static void requeue_task_rt(struct rq *rq, struct task_struct *p) 559static void requeue_task_rt(struct rq *rq, struct task_struct *p)
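The reshuffled rt enqueue/dequeue paths above restore a strict ordering: record the entity chain from the task up to the root, dequeue it top-down (an upper entry's prio depends on its children), then enqueue bottom-up. A simplified model of that ordering, using an illustrative struct in place of sched_rt_entity:

#include <stddef.h>

struct entity {
        struct entity *parent;  /* next level up the group hierarchy       */
        struct entity *back;    /* temporary link recorded on the way up   */
        int on_rq;
};

static void __dequeue(struct entity *e) { e->on_rq = 0; }
static void __enqueue(struct entity *e) { e->on_rq = 1; }

void enqueue_entity_stack(struct entity *e)
{
        struct entity *back = NULL, *it;

        /* walk bottom-up, remembering the reverse (top-down) order */
        for (it = e; it; it = it->parent) {
                it->back = back;
                back = it;
        }

        /* dequeue top-down: an upper level depends on the lower ones */
        for (it = back; it; it = it->back)
                if (it->on_rq)
                        __dequeue(it);

        /* enqueue bottom-up so each level sees its children first */
        for (it = e; it; it = it->parent)
                __enqueue(it);
}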
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 5bae2e0c3ff2..80179ef7450e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -67,6 +67,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
67 preempt_enable(); 67 preempt_enable();
68#endif 68#endif
69 } 69 }
70 kfree(mask_str);
70 return 0; 71 return 0;
71} 72}
72 73
@@ -197,6 +198,9 @@ static inline void sched_info_queued(struct task_struct *t)
197/* 198/*
198 * Called when a process ceases being the active-running process, either 199 * Called when a process ceases being the active-running process, either
199 * voluntarily or involuntarily. Now we can calculate how long we ran. 200 * voluntarily or involuntarily. Now we can calculate how long we ran.
201 * Also, if the process is still in the TASK_RUNNING state, call
202 * sched_info_queued() to mark that it has now again started waiting on
203 * the runqueue.
200 */ 204 */
201static inline void sched_info_depart(struct task_struct *t) 205static inline void sched_info_depart(struct task_struct *t)
202{ 206{
@@ -205,6 +209,9 @@ static inline void sched_info_depart(struct task_struct *t)
205 209
206 t->sched_info.cpu_time += delta; 210 t->sched_info.cpu_time += delta;
207 rq_sched_info_depart(task_rq(t), delta); 211 rq_sched_info_depart(task_rq(t), delta);
212
213 if (t->state == TASK_RUNNING)
214 sched_info_queued(t);
208} 215}
209 216
210/* 217/*
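The sched_info_depart() change above closes an accounting gap: a task that is preempted while still TASK_RUNNING goes straight back to waiting, so its wait clock must restart at the moment it leaves the CPU. A toy single-task model of that bookkeeping (field and function names are illustrative):

#include <stdint.h>

enum state { RUNNING, BLOCKED };

struct task {
        enum state state;
        uint64_t last_queued;   /* when the task last started waiting */
        uint64_t run_start;     /* when the task last got the CPU     */
        uint64_t cpu_time;      /* accumulated run time               */
};

static void info_queued(struct task *t, uint64_t now)
{
        t->last_queued = now;
}

void info_depart(struct task *t, uint64_t now)
{
        t->cpu_time += now - t->run_start;

        /* preempted but still runnable: it is waiting again from "now" */
        if (t->state == RUNNING)
                info_queued(t, now);
}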
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 01b6522fd92b..a272d78185eb 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -49,12 +49,17 @@ static unsigned long get_timestamp(int this_cpu)
49 return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ 49 return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
50} 50}
51 51
52void touch_softlockup_watchdog(void) 52static void __touch_softlockup_watchdog(void)
53{ 53{
54 int this_cpu = raw_smp_processor_id(); 54 int this_cpu = raw_smp_processor_id();
55 55
56 __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu); 56 __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
57} 57}
58
59void touch_softlockup_watchdog(void)
60{
61 __raw_get_cpu_var(touch_timestamp) = 0;
62}
58EXPORT_SYMBOL(touch_softlockup_watchdog); 63EXPORT_SYMBOL(touch_softlockup_watchdog);
59 64
60void touch_all_softlockup_watchdogs(void) 65void touch_all_softlockup_watchdogs(void)
@@ -80,7 +85,7 @@ void softlockup_tick(void)
80 unsigned long now; 85 unsigned long now;
81 86
82 if (touch_timestamp == 0) { 87 if (touch_timestamp == 0) {
83 touch_softlockup_watchdog(); 88 __touch_softlockup_watchdog();
84 return; 89 return;
85 } 90 }
86 91
@@ -95,7 +100,7 @@ void softlockup_tick(void)
95 100
96 /* do not print during early bootup: */ 101 /* do not print during early bootup: */
97 if (unlikely(system_state != SYSTEM_RUNNING)) { 102 if (unlikely(system_state != SYSTEM_RUNNING)) {
98 touch_softlockup_watchdog(); 103 __touch_softlockup_watchdog();
99 return; 104 return;
100 } 105 }
101 106
@@ -115,6 +120,7 @@ void softlockup_tick(void)
115 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n", 120 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
116 this_cpu, now - touch_timestamp, 121 this_cpu, now - touch_timestamp,
117 current->comm, task_pid_nr(current)); 122 current->comm, task_pid_nr(current));
123 print_modules();
118 if (regs) 124 if (regs)
119 show_regs(regs); 125 show_regs(regs);
120 else 126 else
@@ -214,7 +220,7 @@ static int watchdog(void *__bind_cpu)
214 sched_setscheduler(current, SCHED_FIFO, &param); 220 sched_setscheduler(current, SCHED_FIFO, &param);
215 221
216 /* initialize timestamp */ 222 /* initialize timestamp */
217 touch_softlockup_watchdog(); 223 __touch_softlockup_watchdog();
218 224
219 set_current_state(TASK_INTERRUPTIBLE); 225 set_current_state(TASK_INTERRUPTIBLE);
220 /* 226 /*
@@ -223,7 +229,7 @@ static int watchdog(void *__bind_cpu)
223 * debug-printout triggers in softlockup_tick(). 229 * debug-printout triggers in softlockup_tick().
224 */ 230 */
225 while (!kthread_should_stop()) { 231 while (!kthread_should_stop()) {
226 touch_softlockup_watchdog(); 232 __touch_softlockup_watchdog();
227 schedule(); 233 schedule();
228 234
229 if (kthread_should_stop()) 235 if (kthread_should_stop())
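The softlockup change above splits the touch operation: the exported touch_softlockup_watchdog() now only writes 0 as a "re-arm" marker, and the next softlockup_tick() converts that marker into a real timestamp via the internal helper. A user-space sketch of that split, with a CLOCK_MONOTONIC read standing in for get_timestamp() and the per-CPU variable reduced to a single global:

#include <stdint.h>
#include <time.h>

static uint64_t touch_timestamp;        /* per-CPU in the kernel         */

static uint64_t now_seconds(void)       /* stand-in for get_timestamp()  */
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec;
}

static void __touch_watchdog(void)      /* internal: record real time    */
{
        touch_timestamp = now_seconds();
}

void touch_watchdog(void)               /* public: just flag "re-arm"    */
{
        touch_timestamp = 0;
}

void watchdog_tick(void)
{
        if (touch_timestamp == 0) {     /* re-arm request from a toucher */
                __touch_watchdog();
                return;
        }
        /* ... compare now_seconds() - touch_timestamp to the threshold ... */
}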
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b854a895591e..86baa4f0dfe4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -48,6 +48,13 @@ static void tick_do_update_jiffies64(ktime_t now)
48 unsigned long ticks = 0; 48 unsigned long ticks = 0;
49 ktime_t delta; 49 ktime_t delta;
50 50
51 /*
52 * Do a quick check without holding xtime_lock:
53 */
54 delta = ktime_sub(now, last_jiffies_update);
55 if (delta.tv64 < tick_period.tv64)
56 return;
57
51 /* Reevalute with xtime_lock held */ 58 /* Reevalute with xtime_lock held */
52 write_seqlock(&xtime_lock); 59 write_seqlock(&xtime_lock);
53 60
@@ -228,6 +235,7 @@ void tick_nohz_stop_sched_tick(void)
228 local_softirq_pending()); 235 local_softirq_pending());
229 ratelimit++; 236 ratelimit++;
230 } 237 }
238 goto end;
231 } 239 }
232 240
233 ts->idle_calls++; 241 ts->idle_calls++;
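tick_do_update_jiffies64() above gains a lock-free fast path: if less than one tick period has elapsed since the last update, it returns without taking xtime_lock, and the test is repeated under the lock because another CPU may have advanced jiffies in the meantime. A rough pthread-based sketch of that check-then-recheck shape (a mutex stands in for the seqlock; the unlocked read is the optimisation, while correctness still comes from the locked re-check):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static int64_t last_update_ns;          /* written only under the lock  */
static int64_t period_ns = 1000000;     /* illustrative 1 ms tick       */
static uint64_t jiffies64;

void update_jiffies64(int64_t now_ns)
{
        int64_t delta;

        /* lock-free fast path: nothing to do within one tick period */
        if (now_ns - last_update_ns < period_ns)
                return;

        pthread_mutex_lock(&update_lock);
        /* re-evaluate with the lock held; another CPU may have updated */
        delta = now_ns - last_update_ns;
        if (delta >= period_ns) {
                int64_t ticks = delta / period_ns;

                jiffies64      += (uint64_t)ticks;
                last_update_ns += ticks * period_ns;
        }
        pthread_mutex_unlock(&update_lock);
}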
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 29fc39f1029c..ce7799540c91 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -13,7 +13,7 @@
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de> 13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu> 14 * Theodore Ts'o <tytso@mit.edu>
15 * 15 *
16 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>. 16 * Made to use alloc_percpu by Christoph Lameter.
17 */ 17 */
18 18
19#include <linux/module.h> 19#include <linux/module.h>