Diffstat (limited to 'kernel/futex.c')
 kernel/futex.c | 138 ++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 76 insertions(+), 62 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 6b50a024bca2..ebb48d6d1a87 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1115,24 +1115,87 @@ handle_fault:
 
 static long futex_wait_restart(struct restart_block *restart);
 
+/**
+ * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
+ * @hb: the futex hash bucket, must be locked by the caller
+ * @q: the futex_q to queue up on
+ * @timeout: the prepared hrtimer_sleeper, or null for no timeout
+ * @wait: the wait_queue to add to the futex_q after queueing in the hb
+ */
+static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
+				struct hrtimer_sleeper *timeout,
+				wait_queue_t *wait)
+{
+	queue_me(q, hb);
+
+	/*
+	 * There might have been scheduling since the queue_me(), as we
+	 * cannot hold a spinlock across the get_user() in case it
+	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
+	 * queueing ourselves into the futex hash. This code thus has to
+	 * rely on the futex_wake() code removing us from hash when it
+	 * wakes us up.
+	 */
+
+	/* add_wait_queue is the barrier after __set_current_state. */
+	__set_current_state(TASK_INTERRUPTIBLE);
+
+	/*
+	 * Add current as the futex_q waiter. We don't remove ourselves from
+	 * the wait_queue because we are the only user of it.
+	 */
+	add_wait_queue(&q->waiter, wait);
+
+	/* Arm the timer */
+	if (timeout) {
+		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+		if (!hrtimer_active(&timeout->timer))
+			timeout->task = NULL;
+	}
+
+	/*
+	 * !plist_node_empty() is safe here without any lock.
+	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
+	 */
+	if (likely(!plist_node_empty(&q->list))) {
+		/*
+		 * If the timer has already expired, current will already be
+		 * flagged for rescheduling. Only call schedule if there
+		 * is no timeout, or if it has yet to expire.
+		 */
+		if (!timeout || timeout->task)
+			schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+}
+
 static int futex_wait(u32 __user *uaddr, int fshared,
 		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 {
-	struct task_struct *curr = current;
+	struct hrtimer_sleeper timeout, *to = NULL;
+	DECLARE_WAITQUEUE(wait, current);
 	struct restart_block *restart;
-	DECLARE_WAITQUEUE(wait, curr);
 	struct futex_hash_bucket *hb;
 	struct futex_q q;
 	u32 uval;
 	int ret;
-	struct hrtimer_sleeper t;
-	int rem = 0;
 
 	if (!bitset)
 		return -EINVAL;
 
 	q.pi_state = NULL;
 	q.bitset = bitset;
+
+	if (abs_time) {
+		to = &timeout;
+
+		hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
+				      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+		hrtimer_init_sleeper(to, current);
+		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
+					     current->timer_slack_ns);
+	}
+
 retry:
 	q.key = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr, fshared, &q.key);
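
For context, the (val, abs_time, bitset, clockrt) arguments flowing into this
path correspond to the FUTEX_WAIT_BITSET operation as seen from userspace,
whose timeout is absolute, matching the hrtimer_set_expires_range_ns() setup
above. A minimal userspace sketch, not part of the patch (the 100ms deadline
and variable names are illustrative):

/*
 * Sketch only: exercises the kernel path above via FUTEX_WAIT_BITSET.
 */
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static uint32_t futex_word;	/* the u32 at uaddr; stays 0 */

int main(void)
{
	struct timespec deadline;
	long ret;

	/*
	 * Absolute deadline ~100ms out on CLOCK_MONOTONIC, the default
	 * clock for this op; adding FUTEX_CLOCK_REALTIME to the op would
	 * be the clockrt=1 case in the kernel code.
	 */
	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_nsec += 100 * 1000 * 1000;
	if (deadline.tv_nsec >= 1000000000) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000;
	}

	/*
	 * val (0) must still match *uaddr or the kernel bails out with
	 * EAGAIN before queueing; a zero bitset would yield EINVAL, per
	 * the !bitset check above.
	 */
	ret = syscall(SYS_futex, &futex_word, FUTEX_WAIT_BITSET, 0,
		      &deadline, NULL, FUTEX_BITSET_MATCH_ANY);
	if (ret == -1)
		perror("futex");	/* expect ETIMEDOUT: nobody wakes us */
	return 0;
}
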
@@ -1178,75 +1241,22 @@ retry_private:
 		goto retry;
 	}
 	ret = -EWOULDBLOCK;
+
+	/* Only actually queue if *uaddr contained val. */
 	if (unlikely(uval != val)) {
 		queue_unlock(&q, hb);
 		goto out_put_key;
 	}
 
-	/* Only actually queue if *uaddr contained val. */
-	queue_me(&q, hb);
-
-	/*
-	 * There might have been scheduling since the queue_me(), as we
-	 * cannot hold a spinlock across the get_user() in case it
-	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
-	 * queueing ourselves into the futex hash. This code thus has to
-	 * rely on the futex_wake() code removing us from hash when it
-	 * wakes us up.
-	 */
-
-	/* add_wait_queue is the barrier after __set_current_state. */
-	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(&q.waiter, &wait);
-	/*
-	 * !plist_node_empty() is safe here without any lock.
-	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
-	 */
-	if (likely(!plist_node_empty(&q.list))) {
-		if (!abs_time)
-			schedule();
-		else {
-			hrtimer_init_on_stack(&t.timer,
-					      clockrt ? CLOCK_REALTIME :
-					      CLOCK_MONOTONIC,
-					      HRTIMER_MODE_ABS);
-			hrtimer_init_sleeper(&t, current);
-			hrtimer_set_expires_range_ns(&t.timer, *abs_time,
-						     current->timer_slack_ns);
-
-			hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
-			if (!hrtimer_active(&t.timer))
-				t.task = NULL;
-
-			/*
-			 * the timer could have already expired, in which
-			 * case current would be flagged for rescheduling.
-			 * Don't bother calling schedule.
-			 */
-			if (likely(t.task))
-				schedule();
-
-			hrtimer_cancel(&t.timer);
-
-			/* Flag if a timeout occured */
-			rem = (t.task == NULL);
-
-			destroy_hrtimer_on_stack(&t.timer);
-		}
-	}
-	__set_current_state(TASK_RUNNING);
-
-	/*
-	 * NOTE: we don't remove ourselves from the waitqueue because
-	 * we are the only user of it.
-	 */
+	/* queue_me and wait for wakeup, timeout, or a signal. */
+	futex_wait_queue_me(hb, &q, to, &wait);
 
 	/* If we were woken (and unqueued), we succeeded, whatever. */
 	ret = 0;
 	if (!unqueue_me(&q))
 		goto out_put_key;
 	ret = -ETIMEDOUT;
-	if (rem)
+	if (to && !to->task)
 		goto out_put_key;
 
 	/*
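
The -EWOULDBLOCK branch above is the heart of the futex contract: if the word
no longer holds the expected value, the waiter must not queue, or it could
sleep through the wakeup that the value change already signalled. A small
illustration, not part of the patch, of how that surfaces to userspace as
EAGAIN (the values are arbitrary):

/*
 * Sketch only: triggers the -EWOULDBLOCK (EAGAIN to userspace) branch
 * above by waiting with a stale expected value.
 */
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	uint32_t futex_word = 1;

	/*
	 * We claim the word should be 0, but it is 1: the kernel sees
	 * uval != val under the hash-bucket lock and unlocks without
	 * queueing, so the call returns immediately.
	 */
	if (syscall(SYS_futex, &futex_word, FUTEX_WAIT, 0, NULL, NULL, 0) == -1
	    && errno == EAGAIN)
		printf("EAGAIN: value already changed, never slept\n");
	return 0;
}
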
@@ -1275,6 +1285,10 @@ retry_private:
 out_put_key:
 	put_futex_key(fshared, &q.key);
 out:
+	if (to) {
+		hrtimer_cancel(&to->timer);
+		destroy_hrtimer_on_stack(&to->timer);
+	}
 	return ret;
 }
 
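
With the sleeper now living for the whole call, the cancel/destroy pair moves
to the common out: exit path; the earlier to && !to->task test works because
the hrtimer wakeup callback clears the sleeper's task pointer when the timer
fires, the same condition the removed rem flag used to capture. A final
sketch, not part of the patch, of the resulting timeout behaviour using the
plain relative-timeout FUTEX_WAIT form (the 50ms value is arbitrary):

/*
 * Sketch only: the timed-out path, where futex_wait() returns
 * -ETIMEDOUT because the timer fired before any wakeup.
 */
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	uint32_t futex_word = 0;	/* matches val below, so we queue */
	struct timespec rel = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };

	/*
	 * Value matches, so we queue and sleep; nobody wakes us, the
	 * timer expires, and the kernel reports the timeout.
	 */
	if (syscall(SYS_futex, &futex_word, FUTEX_WAIT, 0, &rel, NULL, 0) == -1
	    && errno == ETIMEDOUT)
		printf("ETIMEDOUT after ~50ms, as expected\n");
	return 0;
}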