Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c                 | 53
-rw-r--r--  kernel/trace/Kconfig           | 23
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 14
-rw-r--r--  kernel/user.c                  |  3
4 files changed, 62 insertions(+), 31 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index f89d373a9c6d..438701adce23 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1165,6 +1165,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
              u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 {
         struct task_struct *curr = current;
+        struct restart_block *restart;
         DECLARE_WAITQUEUE(wait, curr);
         struct futex_hash_bucket *hb;
         struct futex_q q;
@@ -1216,11 +1217,13 @@ retry:
 
                 if (!ret)
                         goto retry;
-                return ret;
+                goto out;
         }
         ret = -EWOULDBLOCK;
-        if (uval != val)
-                goto out_unlock_put_key;
+        if (unlikely(uval != val)) {
+                queue_unlock(&q, hb);
+                goto out_put_key;
+        }
 
         /* Only actually queue if *uaddr contained val. */
         queue_me(&q, hb);
@@ -1284,38 +1287,38 @@ retry:
          */
 
         /* If we were woken (and unqueued), we succeeded, whatever. */
+        ret = 0;
         if (!unqueue_me(&q))
-                return 0;
+                goto out_put_key;
+        ret = -ETIMEDOUT;
         if (rem)
-                return -ETIMEDOUT;
+                goto out_put_key;
 
         /*
          * We expect signal_pending(current), but another thread may
          * have handled it for us already.
          */
+        ret = -ERESTARTSYS;
         if (!abs_time)
-                return -ERESTARTSYS;
-        else {
-                struct restart_block *restart;
-                restart = &current_thread_info()->restart_block;
-                restart->fn = futex_wait_restart;
-                restart->futex.uaddr = (u32 *)uaddr;
-                restart->futex.val = val;
-                restart->futex.time = abs_time->tv64;
-                restart->futex.bitset = bitset;
-                restart->futex.flags = 0;
-
-                if (fshared)
-                        restart->futex.flags |= FLAGS_SHARED;
-                if (clockrt)
-                        restart->futex.flags |= FLAGS_CLOCKRT;
-                return -ERESTART_RESTARTBLOCK;
-        }
+                goto out_put_key;
 
-out_unlock_put_key:
-        queue_unlock(&q, hb);
-        put_futex_key(fshared, &q.key);
+        restart = &current_thread_info()->restart_block;
+        restart->fn = futex_wait_restart;
+        restart->futex.uaddr = (u32 *)uaddr;
+        restart->futex.val = val;
+        restart->futex.time = abs_time->tv64;
+        restart->futex.bitset = bitset;
+        restart->futex.flags = 0;
+
+        if (fshared)
+                restart->futex.flags |= FLAGS_SHARED;
+        if (clockrt)
+                restart->futex.flags |= FLAGS_CLOCKRT;
 
+        ret = -ERESTART_RESTARTBLOCK;
+
+out_put_key:
+        put_futex_key(fshared, &q.key);
 out:
         return ret;
 }
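
The futex_wait() hunks above restructure the error handling so that every exit funnels through a common pair of labels and put_futex_key() is reached exactly once on each path, where the old early return statements could skip it. A minimal stand-alone sketch of that single-exit pattern follows; the helper names (take_key_ref, value_still_matches, block_until_woken) are hypothetical stand-ins for illustration, not kernel APIs:

    #include <errno.h>

    /* Hypothetical stand-ins so the sketch compiles; not kernel functions. */
    static int take_key_ref(void)        { return 0; }  /* pretend we took a reference */
    static void put_key_ref(void)        { }            /* pretend we dropped it */
    static int value_still_matches(void) { return 1; }
    static int block_until_woken(void)   { return 0; }  /* 0, -ETIMEDOUT or -ERESTARTSYS */

    static int wait_sketch(void)
    {
            int ret;

            ret = take_key_ref();
            if (ret)
                    goto out;               /* no reference taken yet, nothing to drop */

            ret = -EWOULDBLOCK;
            if (!value_still_matches())
                    goto out_put_key;       /* clean up, then drop the reference */

            ret = block_until_woken();      /* success and failure both fall through */

    out_put_key:
            put_key_ref();                  /* dropped exactly once on every path */
    out:
            return ret;
    }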
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e2a4ff6fc3a6..58a93fbd68aa 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -302,4 +302,27 @@ config FTRACE_STARTUP_TEST
           functioning properly. It will do tests on all the configured
           tracers of ftrace.
 
+config MMIOTRACE
+        bool "Memory mapped IO tracing"
+        depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
+        select TRACING
+        help
+          Mmiotrace traces Memory Mapped I/O access and is meant for
+          debugging and reverse engineering. It is called from the ioremap
+          implementation and works via page faults. Tracing is disabled by
+          default and can be enabled at run-time.
+
+          See Documentation/tracers/mmiotrace.txt.
+          If you are not helping to develop drivers, say N.
+
+config MMIOTRACE_TEST
+        tristate "Test module for mmiotrace"
+        depends on MMIOTRACE && m
+        help
+          This is a dumb module for testing mmiotrace. It is very dangerous
+          as it will write garbage to IO memory starting at a given address.
+          However, it should be safe to use on e.g. unused portion of VRAM.
+
+          Say N, unless you absolutely know what you are doing.
+
 endmenu
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fffcb069f1dc..80e503ef6136 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>
 
 #include "trace.h"
 
@@ -19,6 +20,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -121,11 +123,11 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-        unsigned long cnt = 0;
+        unsigned long cnt = atomic_xchg(&dropped_count, 0);
         unsigned long over = ring_buffer_overruns(iter->tr->buffer);
 
         if (over > prev_overruns)
-                cnt = over - prev_overruns;
+                cnt += over - prev_overruns;
         prev_overruns = over;
         return cnt;
 }
@@ -310,8 +312,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 
         event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                          &irq_flags);
-        if (!event)
+        if (!event) {
+                atomic_inc(&dropped_count);
                 return;
+        }
         entry = ring_buffer_event_data(event);
         tracing_generic_entry_update(&entry->ent, 0, preempt_count());
         entry->ent.type = TRACE_MMIO_RW;
@@ -338,8 +342,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 
         event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                          &irq_flags);
-        if (!event)
+        if (!event) {
+                atomic_inc(&dropped_count);
                 return;
+        }
         entry = ring_buffer_event_data(event);
         tracing_generic_entry_update(&entry->ent, 0, preempt_count());
         entry->ent.type = TRACE_MMIO_MAP;
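
Read together, the trace_mmiotrace.c hunks add drop accounting: a writer that fails to reserve space in the ring buffer bumps dropped_count, and count_overruns() drains that counter with atomic_xchg() and adds it to the ring buffer's own overrun delta. A self-contained sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t and a hypothetical reserve_event() standing in for ring_buffer_lock_reserve(), could look like this:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_ulong dropped_count;
    static unsigned long prev_overruns;

    /* Hypothetical stand-ins for the ring-buffer API. */
    static bool reserve_event(void)             { return false; }  /* pretend the buffer is full */
    static unsigned long buffer_overruns(void)  { return 0; }

    static void writer_path(void)
    {
            if (!reserve_event()) {
                    /* Could not get space: remember the drop instead of losing it. */
                    atomic_fetch_add(&dropped_count, 1);
                    return;
            }
            /* ... fill in and commit the event ... */
    }

    static unsigned long count_overruns_sketch(void)
    {
            /* Drain the writers' drop counter ... */
            unsigned long cnt = atomic_exchange(&dropped_count, 0);
            /* ... and add whatever the buffer itself overwrote since last time. */
            unsigned long over = buffer_overruns();

            if (over > prev_overruns)
                    cnt += over - prev_overruns;
            prev_overruns = over;
            return cnt;
    }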
diff --git a/kernel/user.c b/kernel/user.c
index 477b6660f447..3551ac742395 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -72,6 +72,7 @@ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
 static void uid_hash_remove(struct user_struct *up)
 {
         hlist_del_init(&up->uidhash_node);
+        put_user_ns(up->user_ns);
 }
 
 static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
@@ -334,7 +335,6 @@ static void free_user(struct user_struct *up, unsigned long flags)
         atomic_inc(&up->__count);
         spin_unlock_irqrestore(&uidhash_lock, flags);
 
-        put_user_ns(up->user_ns);
         INIT_WORK(&up->work, remove_user_sysfs_dir);
         schedule_work(&up->work);
 }
@@ -357,7 +357,6 @@ static void free_user(struct user_struct *up, unsigned long flags)
         sched_destroy_user(up);
         key_put(up->uid_keyring);
         key_put(up->session_keyring);
-        put_user_ns(up->user_ns);
         kmem_cache_free(uid_cachep, up);
 }
 
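
The kernel/user.c change moves the put_user_ns() call out of the two free_user() variants and into uid_hash_remove(), so the user-namespace reference appears to be dropped exactly once, at the point where the user_struct leaves the uidhash, rather than separately in each free path. A tiny illustrative sketch of that "release the reference where the object is unhashed" idea, with simplified stand-in types rather than the real kernel structures:

    /* Simplified stand-ins; not the kernel's user_struct/user_namespace. */
    struct user_ns  { int refcount; };
    struct user_rec { struct user_ns *ns; int hashed; };

    static void put_ns(struct user_ns *ns)
    {
            ns->refcount--;             /* real code would free at zero */
    }

    /* The reference is dropped here, where the record leaves the hash, so
     * every path that later frees a user_rec inherits correct behaviour. */
    static void uid_hash_remove_sketch(struct user_rec *up)
    {
            up->hashed = 0;             /* stands in for hlist_del_init() */
            put_ns(up->ns);
    }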