author    Ingo Molnar <mingo@elte.hu>  2008-06-25 06:27:56 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-06-25 06:27:56 -0400
commit    97e6722b8d6b5880c4d5751127d5083b1b7df27d (patch)
tree      060c52298d42530f975064f7addc4e637fe00289 /kernel
parent    f22f9a89ce6857d377bf22dba4c1a8cd256c5136 (diff)
parent    543cf4cb3fe6f6cae3651ba918b9c56200b257d0 (diff)
Merge branch 'linus' into tracing/ftrace
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c     93
-rw-r--r--  kernel/kgdb.c       3
-rw-r--r--  kernel/sched.c     14
-rw-r--r--  kernel/sched_rt.c   3
4 files changed, 83 insertions(+), 30 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 449def8074fe..7d1136e97c14 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1096,21 +1096,64 @@ static void unqueue_me_pi(struct futex_q *q)
  * private futexes.
  */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-				struct task_struct *newowner)
+				struct task_struct *newowner,
+				struct rw_semaphore *fshared)
 {
 	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
+	struct task_struct *oldowner = pi_state->owner;
 	u32 uval, curval, newval;
-	int ret;
+	int ret, attempt = 0;
 
 	/* Owner died? */
+	if (!pi_state->owner)
+		newtid |= FUTEX_OWNER_DIED;
+
+	/*
+	 * We are here either because we stole the rtmutex from the
+	 * pending owner or we are the pending owner which failed to
+	 * get the rtmutex. We have to replace the pending owner TID
+	 * in the user space variable. This must be atomic as we have
+	 * to preserve the owner died bit here.
+	 *
+	 * Note: We write the user space value _before_ changing the
+	 * pi_state because we can fault here. Imagine swapped out
+	 * pages or a fork, which was running right before we acquired
+	 * mmap_sem, that marked all the anonymous memory readonly for
+	 * cow.
+	 *
+	 * Modifying pi_state _before_ the user space value would
+	 * leave the pi_state in an inconsistent state when we fault
+	 * here, because we need to drop the hash bucket lock to
+	 * handle the fault. This might be observed in the PID check
+	 * in lookup_pi_state.
+	 */
+retry:
+	if (get_futex_value_locked(&uval, uaddr))
+		goto handle_fault;
+
+	while (1) {
+		newval = (uval & FUTEX_OWNER_DIED) | newtid;
+
+		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+
+		if (curval == -EFAULT)
+			goto handle_fault;
+		if (curval == uval)
+			break;
+		uval = curval;
+	}
+
+	/*
+	 * We fixed up user space. Now we need to fix the pi_state
+	 * itself.
+	 */
 	if (pi_state->owner != NULL) {
 		spin_lock_irq(&pi_state->owner->pi_lock);
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
 		spin_unlock_irq(&pi_state->owner->pi_lock);
-	} else
-		newtid |= FUTEX_OWNER_DIED;
+	}
 
 	pi_state->owner = newowner;
 
@@ -1118,26 +1161,35 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &newowner->pi_state_list);
 	spin_unlock_irq(&newowner->pi_lock);
+	return 0;
 
 	/*
-	 * We own it, so we have to replace the pending owner
-	 * TID. This must be atomic as we have preserve the
-	 * owner died bit here.
+	 * To handle the page fault we need to drop the hash bucket
+	 * lock here. That gives the other task (either the pending
+	 * owner itself or the task which stole the rtmutex) the
+	 * chance to try the fixup of the pi_state. So once we are
+	 * back from handling the fault we need to check the pi_state
+	 * after reacquiring the hash bucket lock and before trying to
+	 * do another fixup. When the fixup has been done already we
+	 * simply return.
 	 */
-	ret = get_futex_value_locked(&uval, uaddr);
-
-	while (!ret) {
-		newval = (uval & FUTEX_OWNER_DIED) | newtid;
-
-		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-		if (curval == -EFAULT)
-			ret = -EFAULT;
-		if (curval == uval)
-			break;
-		uval = curval;
-	}
-	return ret;
+handle_fault:
+	spin_unlock(q->lock_ptr);
+
+	ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
+
+	spin_lock(q->lock_ptr);
+
+	/*
+	 * Check if someone else fixed it for us:
+	 */
+	if (pi_state->owner != oldowner)
+		return 0;
+
+	if (ret)
+		return ret;
+
+	goto retry;
 }
 
 /*
@@ -1507,7 +1559,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		 * that case:
 		 */
 		if (q.pi_state->owner != curr)
-			ret = fixup_pi_state_owner(uaddr, &q, curr);
+			ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
 	} else {
 		/*
 		 * Catch the rare case, where the lock was released
@@ -1539,7 +1591,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 			int res;
 
 			owner = rt_mutex_owner(&q.pi_state->pi_mutex);
-			res = fixup_pi_state_owner(uaddr, &q, owner);
+			res = fixup_pi_state_owner(uaddr, &q, owner,
+						   fshared);
 
 			/* propagate -EFAULT, if the fixup failed */
 			if (res)
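
The futex.c change above hinges on an ordering rule: fix the user space futex word first (that write can fault), and only then touch the kernel-side pi_state, so a fault never leaves pi_state inconsistent while the hash bucket lock is dropped for fault handling. Below is a minimal user-space sketch of that discipline; fake_futex, kernel_owner and fixup_owner are illustrative stand-ins, not the kernel's API.

/*
 * Sketch only: replace the owner TID in the futex word with a
 * compare-and-swap loop that preserves the FUTEX_OWNER_DIED bit,
 * and only afterwards update the internal owner bookkeeping,
 * mirroring the ordering the patch enforces.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define FUTEX_WAITERS    0x80000000u
#define FUTEX_OWNER_DIED 0x40000000u
#define FUTEX_TID_MASK   0x3fffffffu

static _Atomic uint32_t fake_futex = 42u | FUTEX_WAITERS; /* owner TID 42 */
static uint32_t kernel_owner = 42;	/* stand-in for pi_state->owner */

static int fixup_owner(uint32_t tid)
{
	uint32_t newtid = tid | FUTEX_WAITERS;
	uint32_t uval = atomic_load(&fake_futex);
	uint32_t newval;

	/*
	 * Step 1: fix user space first. The loop preserves the
	 * owner-died bit, like "(uval & FUTEX_OWNER_DIED) | newtid"
	 * in the patch; a fault here would leave pi_state untouched.
	 */
	do {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;
	} while (!atomic_compare_exchange_weak(&fake_futex, &uval, newval));

	/* Step 2: only now update the kernel-side owner record. */
	kernel_owner = tid & FUTEX_TID_MASK;
	return 0;
}

int main(void)
{
	fixup_owner(4711);
	printf("futex word %#x, owner %u\n",
	       (unsigned)atomic_load(&fake_futex), kernel_owner);
	return 0;
}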
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 79e3c90113c2..3ec23c3ec97f 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -1499,7 +1499,8 @@ int kgdb_nmicallback(int cpu, void *regs)
 	return 1;
 }
 
-void kgdb_console_write(struct console *co, const char *s, unsigned count)
+static void kgdb_console_write(struct console *co, const char *s,
+			       unsigned count)
 {
 	unsigned long flags;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 27eaa47d438a..1ffa76813a01 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4449,22 +4449,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 				   signal_pending(current)) ||
 			    (state == TASK_KILLABLE &&
 			     fatal_signal_pending(current))) {
-				__remove_wait_queue(&x->wait, &wait);
-				return -ERESTARTSYS;
+				timeout = -ERESTARTSYS;
+				break;
 			}
 			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				return timeout;
-			}
-		} while (!x->done);
+		} while (!x->done && timeout);
 		__remove_wait_queue(&x->wait, &wait);
+		if (!x->done)
+			return timeout;
 	}
 	x->done--;
-	return timeout;
+	return timeout ?: 1;
 }
 
 static long __sched
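
The do_wait_for_common() rewrite above replaces two early returns, each carrying its own __remove_wait_queue(), with break/loop-condition exits that funnel into a single cleanup point; "timeout ?: 1" then keeps a wait that completes exactly as the timeout expires from returning 0, which callers would read as a timeout. Here is a toy model of the restructured loop; toy_completion, toy_schedule_timeout and the local ERESTARTSYS value are stand-ins for the scheduler pieces, not kernel API.

#include <stdbool.h>
#include <stdio.h>

#define ERESTARTSYS 512

struct toy_completion {
	int done;	/* like completion->done */
	int work_left;	/* ticks until the "complete()" fires */
};

/* One "sleep": a tick elapses, and the completion may fire meanwhile. */
static long toy_schedule_timeout(struct toy_completion *x, long timeout)
{
	if (x->work_left > 0 && --x->work_left == 0)
		x->done++;
	return timeout - 1;
}

static long toy_wait_for_common(struct toy_completion *x, long timeout,
				bool signal_pending)
{
	if (!x->done) {
		do {
			if (signal_pending) {
				timeout = -ERESTARTSYS;
				break;	/* no early return: fall through */
			}
			timeout = toy_schedule_timeout(x, timeout);
		} while (!x->done && timeout);
		/* the single __remove_wait_queue() equivalent lives here */
		if (!x->done)
			return timeout;	/* 0 = timed out, <0 = signal */
	}
	x->done--;
	return timeout ?: 1;	/* success is always nonzero (GNU ?:) */
}

int main(void)
{
	struct toy_completion x = { .done = 0, .work_left = 3 };
	struct toy_completion y = { .done = 0, .work_left = 100 };

	printf("completed, ret=%ld\n", toy_wait_for_common(&x, 10, false));
	printf("timed out, ret=%ld\n", toy_wait_for_common(&y, 5, false));
	return 0;
}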
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1dad5bbb59b6..0f3c19197fa4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -250,7 +250,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		if (rt_rq->rt_time || rt_rq->rt_nr_running)
 			idle = 0;
 		spin_unlock(&rt_rq->rt_runtime_lock);
-	}
+	} else if (rt_rq->rt_nr_running)
+		idle = 0;
 
 	if (enqueue)
 		sched_rt_rq_enqueue(rt_rq);
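
The sched_rt.c hunk fixes the period timer's idle decision: before the change, a runqueue with runnable RT tasks but no consumed rt_time this period could let the timer go idle and stop replenishing runtime. A small sketch of the corrected decision follows, using toy types rather than the scheduler's.

#include <stdio.h>

struct toy_rt_rq {
	unsigned long rt_time;		/* runtime consumed this period */
	unsigned long rt_nr_running;	/* runnable RT tasks on this rq */
};

static int period_timer_idle(const struct toy_rt_rq *rt_rq)
{
	int idle = 1;

	if (rt_rq->rt_time) {
		/* locked runtime accounting happens here in the real code */
		if (rt_rq->rt_time || rt_rq->rt_nr_running)
			idle = 0;
	} else if (rt_rq->rt_nr_running) {
		idle = 0;	/* the added case: tasks queued, keep ticking */
	}

	return idle;
}

int main(void)
{
	struct toy_rt_rq busy = { .rt_time = 0, .rt_nr_running = 2 };

	/* prints idle=0; before the fix this case yielded idle=1 */
	printf("idle=%d\n", period_timer_idle(&busy));
	return 0;
}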