Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/Makefile                | 1
-rw-r--r-- | kernel/cgroup.c                | 2
-rw-r--r-- | kernel/futex.c                 | 53
-rw-r--r-- | kernel/posix-cpu-timers.c      | 60
-rw-r--r-- | kernel/power/Makefile          | 2
-rw-r--r-- | kernel/power/swap.c            | 5
-rw-r--r-- | kernel/sched.c                 | 15
-rw-r--r-- | kernel/trace/Kconfig           | 25
-rw-r--r-- | kernel/trace/ftrace.c          | 6
-rw-r--r-- | kernel/trace/trace_mmiotrace.c | 14
-rw-r--r-- | kernel/trace/trace_selftest.c  | 19
11 files changed, 135 insertions, 67 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 170a9213c1b6..e4791b3ba55d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
 obj-$(CONFIG_PM) += power/
+obj-$(CONFIG_FREEZER) += power/
 obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
 obj-$(CONFIG_KEXEC) += kexec.o
 obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e14db9c089b9..9edb5c4b79b4 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1122,8 +1122,8 @@ static void cgroup_kill_sb(struct super_block *sb) {
 
 	mutex_unlock(&cgroup_mutex);
 
-	kfree(root);
 	kill_litter_super(sb);
+	kfree(root);
 }
 
 static struct file_system_type cgroup_fs_type = {
diff --git a/kernel/futex.c b/kernel/futex.c
index f89d373a9c6d..438701adce23 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1165,6 +1165,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 {
 	struct task_struct *curr = current;
+	struct restart_block *restart;
 	DECLARE_WAITQUEUE(wait, curr);
 	struct futex_hash_bucket *hb;
 	struct futex_q q;
@@ -1216,11 +1217,13 @@ retry:
 
 		if (!ret)
 			goto retry;
-		return ret;
+		goto out;
 	}
 	ret = -EWOULDBLOCK;
-	if (uval != val)
-		goto out_unlock_put_key;
+	if (unlikely(uval != val)) {
+		queue_unlock(&q, hb);
+		goto out_put_key;
+	}
 
 	/* Only actually queue if *uaddr contained val. */
 	queue_me(&q, hb);
@@ -1284,38 +1287,38 @@ retry:
 	 */
 
 	/* If we were woken (and unqueued), we succeeded, whatever. */
+	ret = 0;
 	if (!unqueue_me(&q))
-		return 0;
+		goto out_put_key;
+	ret = -ETIMEDOUT;
 	if (rem)
-		return -ETIMEDOUT;
+		goto out_put_key;
 
 	/*
 	 * We expect signal_pending(current), but another thread may
 	 * have handled it for us already.
 	 */
+	ret = -ERESTARTSYS;
 	if (!abs_time)
-		return -ERESTARTSYS;
-	else {
-		struct restart_block *restart;
-		restart = &current_thread_info()->restart_block;
-		restart->fn = futex_wait_restart;
-		restart->futex.uaddr = (u32 *)uaddr;
-		restart->futex.val = val;
-		restart->futex.time = abs_time->tv64;
-		restart->futex.bitset = bitset;
-		restart->futex.flags = 0;
-
-		if (fshared)
-			restart->futex.flags |= FLAGS_SHARED;
-		if (clockrt)
-			restart->futex.flags |= FLAGS_CLOCKRT;
-		return -ERESTART_RESTARTBLOCK;
-	}
+		goto out_put_key;
 
-out_unlock_put_key:
-	queue_unlock(&q, hb);
-	put_futex_key(fshared, &q.key);
+	restart = &current_thread_info()->restart_block;
+	restart->fn = futex_wait_restart;
+	restart->futex.uaddr = (u32 *)uaddr;
+	restart->futex.val = val;
+	restart->futex.time = abs_time->tv64;
+	restart->futex.bitset = bitset;
+	restart->futex.flags = 0;
+
+	if (fshared)
+		restart->futex.flags |= FLAGS_SHARED;
+	if (clockrt)
+		restart->futex.flags |= FLAGS_CLOCKRT;
 
+	ret = -ERESTART_RESTARTBLOCK;
+
+out_put_key:
+	put_futex_key(fshared, &q.key);
 out:
 	return ret;
 }
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 2313a4cc14ea..e976e505648d 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -681,6 +681,33 @@ static void cpu_timer_fire(struct k_itimer *timer)
 }
 
 /*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+				  struct task_struct *p,
+				  union cpu_time_count *cpu)
+{
+	struct task_cputime cputime;
+
+	thread_group_cputimer(p, &cputime);
+	switch (CPUCLOCK_WHICH(which_clock)) {
+	default:
+		return -EINVAL;
+	case CPUCLOCK_PROF:
+		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+		break;
+	case CPUCLOCK_VIRT:
+		cpu->cpu = cputime.utime;
+		break;
+	case CPUCLOCK_SCHED:
+		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+		break;
+	}
+	return 0;
+}
+
+/*
  * Guts of sys_timer_settime for CPU timers.
  * This is called with the timer locked and interrupts disabled.
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
@@ -741,7 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &val);
 	} else {
-		cpu_clock_sample_group(timer->it_clock, p, &val);
+		cpu_timer_sample_group(timer->it_clock, p, &val);
 	}
 
 	if (old) {
@@ -889,7 +916,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		read_unlock(&tasklist_lock);
 		goto dead;
 	} else {
-		cpu_clock_sample_group(timer->it_clock, p, &now);
+		cpu_timer_sample_group(timer->it_clock, p, &now);
 		clear_dead = (unlikely(p->exit_state) &&
 			      thread_group_empty(p));
 	}
@@ -1244,7 +1271,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			clear_dead_task(timer, now);
 			goto out_unlock;
 		}
-		cpu_clock_sample_group(timer->it_clock, p, &now);
+		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		/* Leave the tasklist_lock locked for the call below. */
 	}
@@ -1409,33 +1436,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
- * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
- */
-static int cpu_timer_sample_group(const clockid_t which_clock,
-				  struct task_struct *p,
-				  union cpu_time_count *cpu)
-{
-	struct task_cputime cputime;
-
-	thread_group_cputimer(p, &cputime);
-	switch (CPUCLOCK_WHICH(which_clock)) {
-	default:
-		return -EINVAL;
-	case CPUCLOCK_PROF:
-		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
-		break;
-	case CPUCLOCK_VIRT:
-		cpu->cpu = cputime.utime;
-		break;
-	case CPUCLOCK_SCHED:
-		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
-		break;
-	}
-	return 0;
-}
-
-/*
  * Set one of the process-wide special case CPU timers.
  * The tsk->sighand->siglock must be held by the caller.
  * The *newval argument is relative and we update it to be absolute, *oldval
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index d7a10167a25b..720ea4f781bd 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -3,7 +3,7 @@ ifeq ($(CONFIG_PM_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
 endif
 
-obj-y := main.o
+obj-$(CONFIG_PM) += main.o
 obj-$(CONFIG_PM_SLEEP) += console.o
 obj-$(CONFIG_FREEZER) += process.o
 obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 6da14358537c..505f319e489c 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -60,6 +60,7 @@ static struct block_device *resume_bdev;
 static int submit(int rw, pgoff_t page_off, struct page *page,
 		struct bio **bio_chain)
 {
+	const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
@@ -80,7 +81,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 	bio_get(bio);
 
 	if (bio_chain == NULL) {
-		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+		submit_bio(bio_rw, bio);
 		wait_on_page_locked(page);
 		if (rw == READ)
 			bio_set_pages_dirty(bio);
@@ -90,7 +91,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 		get_page(page);	/* These pages are freed later */
 		bio->bi_private = *bio_chain;
 		*bio_chain = bio;
-		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+		submit_bio(bio_rw, bio);
 	}
 	return 0;
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 61245b8d0f16..7d97ff7c4478 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6939,20 +6939,26 @@ static void free_rootdomain(struct root_domain *rd)
 
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
+	struct root_domain *old_rd = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&rq->lock, flags);
 
 	if (rq->rd) {
-		struct root_domain *old_rd = rq->rd;
+		old_rd = rq->rd;
 
 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
 			set_rq_offline(rq);
 
 		cpumask_clear_cpu(rq->cpu, old_rd->span);
 
-		if (atomic_dec_and_test(&old_rd->refcount))
-			free_rootdomain(old_rd);
+		/*
+		 * If we dont want to free the old_rt yet then
+		 * set old_rd to NULL to skip the freeing later
+		 * in this function:
+		 */
+		if (!atomic_dec_and_test(&old_rd->refcount))
+			old_rd = NULL;
 	}
 
 	atomic_inc(&rd->refcount);
@@ -6963,6 +6969,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 		set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
+
+	if (old_rd)
+		free_rootdomain(old_rd);
 }
 
 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e2a4ff6fc3a6..34e707e5ab87 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -52,6 +52,7 @@ config FUNCTION_TRACER
 	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
+	select KALLSYMS
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	help
@@ -238,6 +239,7 @@ config STACK_TRACER
 	depends on DEBUG_KERNEL
 	select FUNCTION_TRACER
 	select STACKTRACE
+	select KALLSYMS
 	help
 	  This special tracer records the maximum stack footprint of the
 	  kernel and displays it in debugfs/tracing/stack_trace.
@@ -302,4 +304,27 @@ config FTRACE_STARTUP_TEST
 	  functioning properly. It will do tests on all the configured
 	  tracers of ftrace.
 
+config MMIOTRACE
+	bool "Memory mapped IO tracing"
+	depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
+	select TRACING
+	help
+	  Mmiotrace traces Memory Mapped I/O access and is meant for
+	  debugging and reverse engineering. It is called from the ioremap
+	  implementation and works via page faults. Tracing is disabled by
+	  default and can be enabled at run-time.
+
+	  See Documentation/tracers/mmiotrace.txt.
+	  If you are not helping to develop drivers, say N.
+
+config MMIOTRACE_TEST
+	tristate "Test module for mmiotrace"
+	depends on MMIOTRACE && m
+	help
+	  This is a dumb module for testing mmiotrace. It is very dangerous
+	  as it will write garbage to IO memory starting at a given address.
+	  However, it should be safe to use on e.g. unused portion of VRAM.
+
+	  Say N, unless you absolutely know what you are doing.
+
 endmenu
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9a236ffe2aa4..fdf913dfc7e8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2033,7 +2033,7 @@ free:
 static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
-	int ret;
+	int ret, cpu;
 
 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
 				sizeof(struct ftrace_ret_stack *),
@@ -2042,6 +2042,10 @@ static int start_graph_tracing(void)
 	if (!ret_stack_list)
 		return -ENOMEM;
 
+	/* The cpu_boot init_task->ret_stack will never be freed */
+	for_each_online_cpu(cpu)
+		ftrace_graph_init_task(idle_task(cpu));
+
 	do {
 		ret = alloc_retstack_tasklist(ret_stack_list);
 	} while (ret == -EAGAIN);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fffcb069f1dc..80e503ef6136 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>
 
 #include "trace.h"
 
@@ -19,6 +20,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -121,11 +123,11 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	unsigned long cnt = 0;
+	unsigned long cnt = atomic_xchg(&dropped_count, 0);
 	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
 
 	if (over > prev_overruns)
-		cnt = over - prev_overruns;
+		cnt += over - prev_overruns;
 	prev_overruns = over;
 	return cnt;
 }
@@ -310,8 +312,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					   &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_RW;
@@ -338,8 +342,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					   &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_MAP;
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 88c8eb70f54a..bc8e80a86bca 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
+	unsigned int loops = 0;
 
 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 		entry = ring_buffer_event_data(event);
 
+		/*
+		 * The ring buffer is a size of trace_buf_size, if
+		 * we loop more than the size, there's something wrong
+		 * with the ring buffer.
+		 */
+		if (loops++ > trace_buf_size) {
+			printk(KERN_CONT ".. bad ring buffer ");
+			goto failed;
+		}
 		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
 				entry->type);
@@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
 	cnt = ring_buffer_entries(tr->buffer);
 
+	/*
+	 * The trace_test_buffer_cpu runs a while loop to consume all data.
+	 * If the calling tracer is broken, and is constantly filling
+	 * the buffer, this will run forever, and hard lock the box.
+	 * We disable the ring buffer while we do this test to prevent
+	 * a hard lock up.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
+	tracing_on();
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
 