Diffstat (limited to 'kernel')
 kernel/irq/manage.c   | 12 ++++++++++++
 kernel/lockdep.c      | 36 +++++++++++++++++++++++-------------
 kernel/lockdep_proc.c | 28 +++++++++++++++++++++-----
 kernel/mutex.c        | 10 +++++-----
 kernel/sched.c        |  2 +-
 5 files changed, 64 insertions(+), 24 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 801addda3c43..e9d1c8205a3b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -673,6 +673,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	struct irq_desc *desc;
 	int retval;
 
+	/*
+	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
+	 * the _first_ irqaction (sigh). That can cause oopsing, but
+	 * the behavior is classified as "will not fix" so we need to
+	 * start nudging drivers away from using that idiom.
+	 */
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+			== (IRQF_SHARED|IRQF_DISABLED))
+		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+				"guaranteed on shared IRQs\n",
+				irq, devname);
+
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * Lockdep wants atomic interrupt handlers:
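
Note: the hunk above only warns about a driver idiom rather than changing
behaviour. A minimal sketch of the combination that now triggers the
warning (driver, handler, and device names here are hypothetical):

	/*
	 * On a shared line, handle_IRQ_event() honours IRQF_DISABLED
	 * only for the first irqaction, so this handler may actually
	 * run with IRQs enabled.  request_irq() now warns instead of
	 * silently accepting the flags.
	 */
	ret = request_irq(dev->irq, my_isr,
			  IRQF_SHARED | IRQF_DISABLED,	/* -> pr_warning() */
			  "mydev", dev);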
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 46a404173db2..90f3fb64dbce 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -136,16 +136,16 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
-static int lock_contention_point(struct lock_class *class, unsigned long ip)
+static int lock_point(unsigned long points[], unsigned long ip)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
-		if (class->contention_point[i] == 0) {
-			class->contention_point[i] = ip;
+	for (i = 0; i < LOCKSTAT_POINTS; i++) {
+		if (points[i] == 0) {
+			points[i] = ip;
 			break;
 		}
-		if (class->contention_point[i] == ip)
+		if (points[i] == ip)
 			break;
 	}
 
@@ -185,6 +185,9 @@ struct lock_class_stats lock_stats(struct lock_class *class)
 		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
 			stats.contention_point[i] += pcs->contention_point[i];
 
+		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
+			stats.contending_point[i] += pcs->contending_point[i];
+
 		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
 		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 
@@ -209,6 +212,7 @@ void clear_lock_stats(struct lock_class *class)
 		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
 	}
 	memset(class->contention_point, 0, sizeof(class->contention_point));
+	memset(class->contending_point, 0, sizeof(class->contending_point));
 }
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
@@ -576,7 +580,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 /*
  * printk all lock dependencies starting at <entry>:
  */
-static void print_lock_dependencies(struct lock_class *class, int depth)
+static void __used
+print_lock_dependencies(struct lock_class *class, int depth)
 {
 	struct lock_list *entry;
 
@@ -2999,7 +3004,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
-	int i, point;
+	int i, contention_point, contending_point;
 
 	depth = curr->lockdep_depth;
 	if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -3023,18 +3028,22 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 found_it:
 	hlock->waittime_stamp = sched_clock();
 
-	point = lock_contention_point(hlock_class(hlock), ip);
+	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
+	contending_point = lock_point(hlock_class(hlock)->contending_point,
+				      lock->ip);
 
 	stats = get_lock_stats(hlock_class(hlock));
-	if (point < ARRAY_SIZE(stats->contention_point))
-		stats->contention_point[point]++;
+	if (contention_point < LOCKSTAT_POINTS)
+		stats->contention_point[contention_point]++;
+	if (contending_point < LOCKSTAT_POINTS)
+		stats->contending_point[contending_point]++;
 	if (lock->cpu != smp_processor_id())
 		stats->bounces[bounce_contended + !!hlock->read]++;
 	put_lock_stats(stats);
 }
 
 static void
-__lock_acquired(struct lockdep_map *lock)
+__lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	struct task_struct *curr = current;
 	struct held_lock *hlock, *prev_hlock;
@@ -3083,6 +3092,7 @@ found_it:
 	put_lock_stats(stats);
 
 	lock->cpu = cpu;
+	lock->ip = ip;
 }
 
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
@@ -3104,7 +3114,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
-void lock_acquired(struct lockdep_map *lock)
+void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
@@ -3117,7 +3127,7 @@ void lock_acquired(struct lockdep_map *lock)
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion = 1;
-	__lock_acquired(lock);
+	__lock_acquired(lock, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
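
Note: the lockdep.c changes above split one statistic into two. A sketch of
the intended semantics, assuming the usual mutex slowpath callers (names
are illustrative): contention_point records where a task blocked, while
contending_point records where the current holder acquired the lock, via
the IP that lock_acquired() now stores in lock->ip.

	/* CPU0 wins the lock; lock_acquired(&m.dep_map, ip) remembers
	 * this call site in m.dep_map.ip.
	 */
	mutex_lock(&m);			/* acquisition site A */

	/* CPU1 blocks; __lock_contended() now records both sides:
	 *   contention_point[] <- CPU1's ip    (site B: where we waited)
	 *   contending_point[] <- m.dep_map.ip (site A: whom we waited on)
	 */
	mutex_lock(&m);			/* contention site B */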
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 20dbcbf9c7dd..13716b813896 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -470,11 +470,12 @@ static void seq_line(struct seq_file *m, char c, int offset, int length)
 
 static void snprint_time(char *buf, size_t bufsiz, s64 nr)
 {
-	unsigned long rem;
+	s64 div;
+	s32 rem;
 
 	nr += 5; /* for display rounding */
-	rem = do_div(nr, 1000); /* XXX: do_div_signed */
-	snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, (int)rem/10);
+	div = div_s64_rem(nr, 1000, &rem);
+	snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
 }
 
 static void seq_time(struct seq_file *m, s64 time)
@@ -556,7 +557,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 	if (stats->read_holdtime.nr)
 		namelen += 2;
 
-	for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
+	for (i = 0; i < LOCKSTAT_POINTS; i++) {
 		char sym[KSYM_SYMBOL_LEN];
 		char ip[32];
 
@@ -573,6 +574,23 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 				stats->contention_point[i],
 				ip, sym);
 	}
+	for (i = 0; i < LOCKSTAT_POINTS; i++) {
+		char sym[KSYM_SYMBOL_LEN];
+		char ip[32];
+
+		if (class->contending_point[i] == 0)
+			break;
+
+		if (!i)
+			seq_line(m, '-', 40-namelen, namelen);
+
+		sprint_symbol(sym, class->contending_point[i]);
+		snprintf(ip, sizeof(ip), "[<%p>]",
+				(void *)class->contending_point[i]);
+		seq_printf(m, "%40s %14lu %29s %s\n", name,
+				stats->contending_point[i],
+				ip, sym);
+	}
 	if (i) {
 		seq_puts(m, "\n");
 		seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
@@ -582,7 +600,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 
 static void seq_header(struct seq_file *m)
 {
-	seq_printf(m, "lock_stat version 0.2\n");
+	seq_printf(m, "lock_stat version 0.3\n");
 	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
 	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
 			"%14s %14s\n",
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 12c779dc65d4..4f45d4b658ef 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void noinline __sched
+static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
@@ -96,7 +96,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -184,7 +184,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 
 done:
-	lock_acquired(&lock->dep_map);
+	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
 	debug_mutex_set_owner(lock, task_thread_info(task));
@@ -268,7 +268,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static noinline void
+static __used noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -313,7 +313,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static noinline void __sched
+static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
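
Note: every mutex slowpath above gains __used. The slowpaths are reached
through the arch __mutex_fastpath_*() helpers, which on several
architectures are inline assembly that merely names the slowpath; the
compiler cannot see that as a reference, and newer gccs would otherwise
discard the "unreferenced" static functions. A sketch of the call shape,
following the existing mutex_lock() fastpath:

	static __used noinline void __sched
	__mutex_lock_slowpath(atomic_t *lock_count);

	void __sched mutex_lock(struct mutex *lock)
	{
		might_sleep();
		/* the fastpath helper may only *name* the slowpath from
		 * inline asm; __used keeps gcc from dropping it
		 */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	}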
diff --git a/kernel/sched.c b/kernel/sched.c
index b7480fb5c3dc..1b8f8c3aecc4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4339,7 +4339,7 @@ void __kprobes sub_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
 		return;
 	/*
 	 * Is the spinlock portion underflowing?
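
Note: the sched.c hunk tightens the preempt-count underflow check.
Assuming the spinlock-based BKL, kernel_locked() being true means the BKL
holds one unit of preempt_count() that no caller may legitimately
subtract, so the warning now fires one step earlier:

	/* preempt_count() == 2 with the BKL held:
	 *   old: sub_preempt_count(2): 2 > 2         -> no warning
	 *   new: sub_preempt_count(2): 2 > 2 - 1 = 1 -> warning fires
	 */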