Diffstat (limited to 'kernel')
-rw-r--r--  kernel/dma-coherent.c      |  10
-rw-r--r--  kernel/lockdep.c           | 295
-rw-r--r--  kernel/lockdep_internals.h |   6
-rw-r--r--  kernel/lockdep_proc.c      |  37
-rw-r--r--  kernel/posix-timers.c      |  19
-rw-r--r--  kernel/sched.c             |  21
-rw-r--r--  kernel/sched_rt.c          |   8
-rw-r--r--  kernel/signal.c            |   1
-rw-r--r--  kernel/smp.c               |  54
-rw-r--r--  kernel/spinlock.c          |  11
-rw-r--r--  kernel/workqueue.c         |  24
11 files changed, 356 insertions, 130 deletions
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index 91e96950cd52..c1d4d5b4c61c 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -92,7 +92,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
 /**
- * Try to allocate memory from the per-device coherent area.
+ * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
  *
  * @dev:	device from which we allocate memory
  * @size:	size of requested memory area
@@ -100,11 +100,11 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
- * This function should be only called from per-arch %dma_alloc_coherent()
+ * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return %ret.
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
@@ -126,7 +126,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 }
 
 /**
- * Try to free the memory allocated from per-device coherent memory pool.
+ * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
@@ -135,7 +135,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
- * %dma_release_coherent() should proceed with releasing memory from
+ * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
 int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d38a64362973..1aa91fd6b06e 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -124,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 unsigned long nr_lock_classes;
 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
+static inline struct lock_class *hlock_class(struct held_lock *hlock)
+{
+	if (!hlock->class_idx) {
+		DEBUG_LOCKS_WARN_ON(1);
+		return NULL;
+	}
+	return lock_classes + hlock->class_idx - 1;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
@@ -222,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock)
 
 	holdtime = sched_clock() - hlock->holdtime_stamp;
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
 		lock_time_inc(&stats->read_holdtime, holdtime);
 	else
@@ -372,6 +381,19 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
+static unsigned int lockdep_dependency_gen_id;
+
+static bool lockdep_dependency_visit(struct lock_class *source,
+				     unsigned int depth)
+{
+	if (!depth)
+		lockdep_dependency_gen_id++;
+	if (source->dep_gen_id == lockdep_dependency_gen_id)
+		return true;
+	source->dep_gen_id = lockdep_dependency_gen_id;
+	return false;
+}
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
@@ -505,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
 
 static void print_lock(struct held_lock *hlock)
 {
-	print_lock_name(hlock->class);
+	print_lock_name(hlock_class(hlock));
 	printk(", at: ");
 	print_ip_sym(hlock->acquire_ip);
 }
@@ -558,6 +580,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 {
 	struct lock_list *entry;
 
+	if (lockdep_dependency_visit(class, depth))
+		return;
+
 	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
 		return;
 
@@ -932,7 +957,7 @@ static noinline int print_circular_bug_tail(void)
 	if (debug_locks_silent)
 		return 0;
 
-	this.class = check_source->class;
+	this.class = hlock_class(check_source);
 	if (!save_trace(&this.trace))
 		return 0;
 
@@ -959,6 +984,67 @@ static int noinline print_infinite_recursion_bug(void)
 	return 0;
 }
 
+unsigned long __lockdep_count_forward_deps(struct lock_class *class,
+					   unsigned int depth)
+{
+	struct lock_list *entry;
+	unsigned long ret = 1;
+
+	if (lockdep_dependency_visit(class, depth))
+		return 0;
+
+	/*
+	 * Recurse this class's dependency list:
+	 */
+	list_for_each_entry(entry, &class->locks_after, entry)
+		ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+
+	return ret;
+}
+
+unsigned long lockdep_count_forward_deps(struct lock_class *class)
+{
+	unsigned long ret, flags;
+
+	local_irq_save(flags);
+	__raw_spin_lock(&lockdep_lock);
+	ret = __lockdep_count_forward_deps(class, 0);
+	__raw_spin_unlock(&lockdep_lock);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+unsigned long __lockdep_count_backward_deps(struct lock_class *class,
+					    unsigned int depth)
+{
+	struct lock_list *entry;
+	unsigned long ret = 1;
+
+	if (lockdep_dependency_visit(class, depth))
+		return 0;
+	/*
+	 * Recurse this class's dependency list:
+	 */
+	list_for_each_entry(entry, &class->locks_before, entry)
+		ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+
+	return ret;
+}
+
+unsigned long lockdep_count_backward_deps(struct lock_class *class)
+{
+	unsigned long ret, flags;
+
+	local_irq_save(flags);
+	__raw_spin_lock(&lockdep_lock);
+	ret = __lockdep_count_backward_deps(class, 0);
+	__raw_spin_unlock(&lockdep_lock);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 /*
  * Prove that the dependency graph starting at <entry> can not
  * lead to <target>. Print an error and return 0 if it does.
@@ -968,6 +1054,9 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 {
 	struct lock_list *entry;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	debug_atomic_inc(&nr_cyclic_check_recursions);
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
@@ -977,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	 * Check this lock's dependency list:
 	 */
 	list_for_each_entry(entry, &source->locks_after, entry) {
-		if (entry->class == check_target->class)
+		if (entry->class == hlock_class(check_target))
 			return print_circular_bug_header(entry, depth+1);
 		debug_atomic_inc(&nr_cyclic_checks);
 		if (!check_noncircular(entry->class, depth+1))
@@ -1011,6 +1100,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
 	if (depth >= RECURSION_LIMIT)
@@ -1050,6 +1142,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	if (!__raw_spin_is_locked(&lockdep_lock))
 		return DEBUG_LOCKS_WARN_ON(1);
 
@@ -1064,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 		return 2;
 	}
 
+	if (!source && debug_locks_off_graph_unlock()) {
+		WARN_ON(1);
+		return 0;
+	}
+
 	/*
 	 * Check this lock's dependency list:
 	 */
@@ -1103,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr,
 	printk("\nand this task is already holding:\n");
 	print_lock(prev);
 	printk("which would create a new lock dependency:\n");
-	print_lock_name(prev->class);
+	print_lock_name(hlock_class(prev));
 	printk(" ->");
-	print_lock_name(next->class);
+	print_lock_name(hlock_class(next));
 	printk("\n");
 
 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
@@ -1146,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 	find_usage_bit = bit_backwards;
 	/* fills in <backwards_match> */
-	ret = find_usage_backwards(prev->class, 0);
+	ret = find_usage_backwards(hlock_class(prev), 0);
 	if (!ret || ret == 1)
 		return ret;
 
 	find_usage_bit = bit_forwards;
-	ret = find_usage_forwards(next->class, 0);
+	ret = find_usage_forwards(hlock_class(next), 0);
 	if (!ret || ret == 1)
 		return ret;
 	/* ret == 2 */
@@ -1272,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
 {
 	struct held_lock *prev;
+	struct held_lock *nest = NULL;
 	int i;
 
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		prev = curr->held_locks + i;
-		if (prev->class != next->class)
+
+		if (prev->instance == next->nest_lock)
+			nest = prev;
+
+		if (hlock_class(prev) != hlock_class(next))
 			continue;
+
 		/*
 		 * Allow read-after-read recursion of the same
 		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
 		 */
 		if ((read == 2) && prev->read)
 			return 2;
+
+		/*
+		 * We're holding the nest_lock, which serializes this lock's
+		 * nesting behaviour.
+		 */
+		if (nest)
+			return 2;
+
 		return print_deadlock_bug(curr, prev, next);
 	}
 	return 1;
@@ -1329,7 +1443,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
	 */
 	check_source = next;
 	check_target = prev;
-	if (!(check_noncircular(next->class, 0)))
+	if (!(check_noncircular(hlock_class(next), 0)))
 		return print_circular_bug_tail();
 
 	if (!check_prev_add_irq(curr, prev, next))
@@ -1353,8 +1467,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
	 * chains - the second one will be new, but L1 already has
	 * L2 added to its dependency list, due to the first chain.)
	 */
-	list_for_each_entry(entry, &prev->class->locks_after, entry) {
-		if (entry->class == next->class) {
+	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+		if (entry->class == hlock_class(next)) {
 			if (distance == 1)
 				entry->distance = 1;
 			return 2;
@@ -1365,26 +1479,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
-	ret = add_lock_to_list(prev->class, next->class,
-			       &prev->class->locks_after, next->acquire_ip, distance);
+	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
+			       &hlock_class(prev)->locks_after,
+			       next->acquire_ip, distance);
 
 	if (!ret)
 		return 0;
 
-	ret = add_lock_to_list(next->class, prev->class,
-			       &next->class->locks_before, next->acquire_ip, distance);
+	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
+			       &hlock_class(next)->locks_before,
+			       next->acquire_ip, distance);
 	if (!ret)
 		return 0;
 
 	/*
	 * Debugging printouts:
	 */
-	if (verbose(prev->class) || verbose(next->class)) {
+	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
 		graph_unlock();
 		printk("\n new dependency: ");
-		print_lock_name(prev->class);
+		print_lock_name(hlock_class(prev));
 		printk(" => ");
-		print_lock_name(next->class);
+		print_lock_name(hlock_class(next));
 		printk("\n");
 		dump_stack();
 		return graph_lock();
@@ -1481,7 +1597,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
					  struct held_lock *hlock,
					  u64 chain_key)
 {
-	struct lock_class *class = hlock->class;
+	struct lock_class *class = hlock_class(hlock);
 	struct list_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	struct held_lock *hlock_curr, *hlock_next;
@@ -1554,7 +1670,7 @@ cache_hit:
 	if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
 		chain->base = cn;
 		for (j = 0; j < chain->depth - 1; j++, i++) {
-			int lock_id = curr->held_locks[i].class - lock_classes;
+			int lock_id = curr->held_locks[i].class_idx - 1;
 			chain_hlocks[chain->base + j] = lock_id;
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
@@ -1650,7 +1766,7 @@ static void check_chain_key(struct task_struct *curr)
			WARN_ON(1);
			return;
		}
-		id = hlock->class - lock_classes;
+		id = hlock->class_idx - 1;
		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
			return;
 
@@ -1695,7 +1811,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(this->class->usage_traces + prev_bit, 1);
+	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	printk("\nother info that might help us debug this:\n");
@@ -1714,7 +1830,7 @@ static inline int
 valid_state(struct task_struct *curr, struct held_lock *this,
	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
 {
-	if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
 		return print_usage_bug(curr, this, bad_bit, new_bit);
 	return 1;
 }
@@ -1753,7 +1869,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 	lockdep_print_held_locks(curr);
 
 	printk("\nthe first lock's dependencies:\n");
-	print_lock_dependencies(this->class, 0);
+	print_lock_dependencies(hlock_class(this), 0);
 
 	printk("\nthe second lock's dependencies:\n");
 	print_lock_dependencies(other, 0);
@@ -1776,7 +1892,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 
 	find_usage_bit = bit;
 	/* fills in <forwards_match> */
-	ret = find_usage_forwards(this->class, 0);
+	ret = find_usage_forwards(hlock_class(this), 0);
 	if (!ret || ret == 1)
 		return ret;
 
@@ -1795,7 +1911,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
 	find_usage_bit = bit;
 	/* fills in <backwards_match> */
-	ret = find_usage_backwards(this->class, 0);
+	ret = find_usage_backwards(hlock_class(this), 0);
 	if (!ret || ret == 1)
 		return ret;
 
@@ -1861,7 +1977,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
				LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_USED_IN_SOFTIRQ:
@@ -1886,7 +2002,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
				LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_USED_IN_HARDIRQ_READ:
@@ -1899,7 +2015,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_HARDIRQS, "hard"))
			return 0;
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_USED_IN_SOFTIRQ_READ:
@@ -1912,7 +2028,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_SOFTIRQS, "soft"))
			return 0;
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_HARDIRQS:
@@ -1938,7 +2054,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
				LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_SOFTIRQS:
@@ -1964,7 +2080,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
				LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_HARDIRQS_READ:
@@ -1979,7 +2095,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
				LOCK_USED_IN_HARDIRQ, "hard"))
			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_SOFTIRQS_READ:
@@ -1994,7 +2110,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
				LOCK_USED_IN_SOFTIRQ, "soft"))
			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	default:
@@ -2310,7 +2426,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
-	if (likely(this->class->usage_mask & new_mask))
+	if (likely(hlock_class(this)->usage_mask & new_mask))
		return 1;
 
	if (!graph_lock())
@@ -2318,14 +2434,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
	/*
	 * Make sure we didnt race:
	 */
-	if (unlikely(this->class->usage_mask & new_mask)) {
+	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
		graph_unlock();
		return 1;
	}
 
-	this->class->usage_mask |= new_mask;
+	hlock_class(this)->usage_mask |= new_mask;
 
-	if (!save_trace(this->class->usage_traces + new_bit))
+	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
		return 0;
 
	switch (new_bit) {
@@ -2405,7 +2521,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
 */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check, int hardirqs_off,
-			  unsigned long ip)
+			  struct lockdep_map *nest_lock, unsigned long ip)
 {
	struct task_struct *curr = current;
	struct lock_class *class = NULL;
@@ -2459,10 +2575,12 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
		return 0;
 
	hlock = curr->held_locks + depth;
-
-	hlock->class = class;
+	if (DEBUG_LOCKS_WARN_ON(!class))
+		return 0;
+	hlock->class_idx = class - lock_classes + 1;
	hlock->acquire_ip = ip;
	hlock->instance = lock;
+	hlock->nest_lock = nest_lock;
	hlock->trylock = trylock;
	hlock->read = read;
	hlock->check = check;
@@ -2574,6 +2692,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
	return 1;
 }
 
+static int
+__lock_set_subclass(struct lockdep_map *lock,
+		    unsigned int subclass, unsigned long ip)
+{
+	struct task_struct *curr = current;
+	struct held_lock *hlock, *prev_hlock;
+	struct lock_class *class;
+	unsigned int depth;
+	int i;
+
+	depth = curr->lockdep_depth;
+	if (DEBUG_LOCKS_WARN_ON(!depth))
+		return 0;
+
+	prev_hlock = NULL;
+	for (i = depth-1; i >= 0; i--) {
+		hlock = curr->held_locks + i;
+		/*
+		 * We must not cross into another context:
+		 */
+		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+			break;
+		if (hlock->instance == lock)
+			goto found_it;
+		prev_hlock = hlock;
+	}
+	return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+	class = register_lock_class(lock, subclass, 0);
+	hlock->class_idx = class - lock_classes + 1;
+
+	curr->lockdep_depth = i;
+	curr->curr_chain_key = hlock->prev_chain_key;
+
+	for (; i < depth; i++) {
+		hlock = curr->held_locks + i;
+		if (!__lock_acquire(hlock->instance,
+				hlock_class(hlock)->subclass, hlock->trylock,
+				hlock->read, hlock->check, hlock->hardirqs_off,
+				hlock->nest_lock, hlock->acquire_ip))
+			return 0;
+	}
+
+	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+		return 0;
+	return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
@@ -2624,9 +2791,9 @@ found_it:
	for (i++; i < depth; i++) {
		hlock = curr->held_locks + i;
		if (!__lock_acquire(hlock->instance,
-				hlock->class->subclass, hlock->trylock,
+				hlock_class(hlock)->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
-				hlock->acquire_ip))
+				hlock->nest_lock, hlock->acquire_ip))
			return 0;
	}
 
@@ -2669,7 +2836,7 @@ static int lock_release_nested(struct task_struct *curr,
 
 #ifdef CONFIG_DEBUG_LOCKDEP
	hlock->prev_chain_key = 0;
-	hlock->class = NULL;
+	hlock->class_idx = 0;
	hlock->acquire_ip = 0;
	hlock->irq_context = 0;
 #endif
@@ -2738,18 +2905,36 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
+void
+lock_set_subclass(struct lockdep_map *lock,
+		  unsigned int subclass, unsigned long ip)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	current->lockdep_recursion = 1;
+	check_flags(flags);
+	if (__lock_set_subclass(lock, subclass, ip))
+		check_chain_key(current);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
  */
 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-			  int trylock, int read, int check, unsigned long ip)
+			  int trylock, int read, int check,
+			  struct lockdep_map *nest_lock, unsigned long ip)
 {
	unsigned long flags;
 
-	if (unlikely(!lock_stat && !prove_locking))
-		return;
-
	if (unlikely(current->lockdep_recursion))
		return;
 
@@ -2758,7 +2943,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
	current->lockdep_recursion = 1;
	__lock_acquire(lock, subclass, trylock, read, check,
-		       irqs_disabled_flags(flags), ip);
+		       irqs_disabled_flags(flags), nest_lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
 }
@@ -2770,9 +2955,6 @@ void lock_release(struct lockdep_map *lock, int nested,
 {
	unsigned long flags;
 
-	if (unlikely(!lock_stat && !prove_locking))
-		return;
-
	if (unlikely(current->lockdep_recursion))
		return;
 
@@ -2845,9 +3027,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 found_it:
	hlock->waittime_stamp = sched_clock();
 
-	point = lock_contention_point(hlock->class, ip);
+	point = lock_contention_point(hlock_class(hlock), ip);
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
	if (point < ARRAY_SIZE(stats->contention_point))
		stats->contention_point[i]++;
	if (lock->cpu != smp_processor_id())
@@ -2893,7 +3075,7 @@ found_it:
		hlock->holdtime_stamp = now;
	}
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
	if (waittime) {
		if (hlock->read)
			lock_time_inc(&stats->read_waittime, waittime);
@@ -2988,6 +3170,7 @@ static void zap_class(struct lock_class *class)
	list_del_rcu(&class->hash_entry);
	list_del_rcu(&class->lock_entry);
 
+	class->key = NULL;
 }
 
 static inline int within(const void *addr, void *start, unsigned long size)
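The lockdep_dependency_visit() change above caps dependency-graph walks by stamping each lock_class with a generation id: bumping one global counter invalidates every previous "visited" mark, so no per-walk clearing pass over all classes is needed. Below is a minimal, self-contained user-space sketch of that marking technique; the node and graph types are hypothetical stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_EDGES 8

struct node {
	const char *name;
	unsigned int dep_gen_id;	/* generation that last visited us */
	struct node *after[MAX_EDGES];	/* forward dependencies */
	int nr_after;
};

static unsigned int dependency_gen_id;

static bool visit(struct node *n, unsigned int depth)
{
	if (!depth)
		dependency_gen_id++;	/* new walk: old stamps all stale */
	if (n->dep_gen_id == dependency_gen_id)
		return true;		/* already seen during this walk */
	n->dep_gen_id = dependency_gen_id;
	return false;
}

/* Count nodes reachable from n (itself included), each at most once. */
static unsigned long count_forward_deps(struct node *n, unsigned int depth)
{
	unsigned long ret = 1;
	int i;

	if (visit(n, depth))
		return 0;
	for (i = 0; i < n->nr_after; i++)
		ret += count_forward_deps(n->after[i], depth + 1);
	return ret;
}

int main(void)
{
	struct node a = { .name = "A" }, b = { .name = "B" }, c = { .name = "C" };

	/* A -> B, A -> C, B -> C: C is reachable twice, counted once. */
	a.after[a.nr_after++] = &b;
	a.after[a.nr_after++] = &c;
	b.after[b.nr_after++] = &c;

	printf("%lu\n", count_forward_deps(&a, 0));	/* prints 3 */
	return 0;
}

Without the generation stamp, the old count_forward_deps() in lockdep_proc.c counted paths rather than nodes, which blows up exponentially on dense graphs; the stamp makes the walk linear in edges.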
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index c3600a091a28..55db193d366d 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -17,9 +17,6 @@
 */
 #define MAX_LOCKDEP_ENTRIES	8192UL
 
-#define MAX_LOCKDEP_KEYS_BITS	11
-#define MAX_LOCKDEP_KEYS	(1UL << MAX_LOCKDEP_KEYS_BITS)
-
 #define MAX_LOCKDEP_CHAINS_BITS	14
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
@@ -53,6 +50,9 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned long lockdep_count_forward_deps(struct lock_class *);
+extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * Various lockdep statistics:
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 9b0e940e2545..fa19aee604c2 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v)
 {
 }
 
-static unsigned long count_forward_deps(struct lock_class *class)
-{
-	struct lock_list *entry;
-	unsigned long ret = 1;
-
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_after, entry)
-		ret += count_forward_deps(entry->class);
-
-	return ret;
-}
-
-static unsigned long count_backward_deps(struct lock_class *class)
-{
-	struct lock_list *entry;
-	unsigned long ret = 1;
-
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_before, entry)
-		ret += count_backward_deps(entry->class);
-
-	return ret;
-}
-
 static void print_name(struct seq_file *m, struct lock_class *class)
 {
 	char str[128];
@@ -124,10 +96,10 @@ static int l_show(struct seq_file *m, void *v)
 #ifdef CONFIG_DEBUG_LOCKDEP
	seq_printf(m, " OPS:%8ld", class->ops);
 #endif
-	nr_forward_deps = count_forward_deps(class);
+	nr_forward_deps = lockdep_count_forward_deps(class);
	seq_printf(m, " FD:%5ld", nr_forward_deps);
 
-	nr_backward_deps = count_backward_deps(class);
+	nr_backward_deps = lockdep_count_backward_deps(class);
	seq_printf(m, " BD:%5ld", nr_backward_deps);
 
	get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -229,6 +201,9 @@ static int lc_show(struct seq_file *m, void *v)
 
	for (i = 0; i < chain->depth; i++) {
		class = lock_chain_get_class(chain, i);
+		if (!class->key)
+			continue;
+
		seq_printf(m, "[%p] ", class->key);
		print_name(m, class);
		seq_puts(m, "\n");
@@ -350,7 +325,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
			nr_hardirq_read_unsafe++;
 
-		sum_forward_deps += count_forward_deps(class);
+		sum_forward_deps += lockdep_count_forward_deps(class);
	}
 #ifdef CONFIG_DEBUG_LOCKDEP
	DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 9a21681aa80f..e36d5798cbff 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -289,21 +289,29 @@ void do_schedule_next_timer(struct siginfo *info)
		else
			schedule_next_timer(timr);
 
-		info->si_overrun = timr->it_overrun_last;
+		info->si_overrun += timr->it_overrun_last;
	}
 
	if (timr)
		unlock_timer(timr, flags);
 }
 
-int posix_timer_event(struct k_itimer *timr,int si_private)
+int posix_timer_event(struct k_itimer *timr, int si_private)
 {
-	memset(&timr->sigq->info, 0, sizeof(siginfo_t));
+	/*
+	 * FIXME: if ->sigq is queued we can race with
+	 * dequeue_signal()->do_schedule_next_timer().
+	 *
+	 * If dequeue_signal() sees the "right" value of
+	 * si_sys_private it calls do_schedule_next_timer().
+	 * We re-queue ->sigq and drop ->it_lock().
+	 * do_schedule_next_timer() locks the timer
+	 * and re-schedules it while ->sigq is pending.
+	 * Not really bad, but not that we want.
+	 */
	timr->sigq->info.si_sys_private = si_private;
-	/* Send signal to the process that owns this timer.*/
 
	timr->sigq->info.si_signo = timr->it_sigev_signo;
-	timr->sigq->info.si_errno = 0;
	timr->sigq->info.si_code = SI_TIMER;
	timr->sigq->info.si_tid = timr->it_id;
	timr->sigq->info.si_value = timr->it_sigev_value;
@@ -435,6 +443,7 @@ static struct k_itimer * alloc_posix_timer(void)
		kmem_cache_free(posix_timers_cache, tmr);
		tmr = NULL;
	}
+	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
	return tmr;
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index f0141947c7d5..d601fb0406ca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -600,7 +600,6 @@ struct rq {
	/* BKL stats */
	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
	} else {
		if (rq1 < rq2) {
			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
	update_rq_clock(rq1);
@@ -2805,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
		if (busiest < this_rq) {
			spin_unlock(&this_rq->lock);
			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
	}
	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3637,7 +3643,7 @@ redo:
		ld_moved = move_tasks(this_rq, this_cpu, busiest,
					imbalance, sd, CPU_NEWLY_IDLE,
					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
		if (unlikely(all_pinned)) {
			cpu_clear(cpu_of(busiest), *cpus);
@@ -3752,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
		else
			schedstat_inc(sd, alb_failed);
	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -8000,7 +8006,6 @@ void __init sched_init(void)
 
		rq = cpu_rq(i);
		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
		rq->nr_running = 0;
		init_cfs_rq(&rq->cfs, rq);
		init_rt_rq(&rq->rt, rq);
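double_rq_lock() and double_lock_balance() above encode the usual ABBA-avoidance rule: when two runqueue locks of the same class must be held, take the lower-address one first, and mark the second acquisition as intentional nesting with spin_lock_nested(..., SINGLE_DEPTH_NESTING); double_unlock_balance() then uses lock_set_subclass() to drop the remaining lock back to subclass 0. A small pthread sketch of the address-ordering rule itself (no lockdep annotations; the helper names here are made up for illustration):

#include <pthread.h>
#include <stdio.h>

static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {			/* same lock: take it once */
		pthread_mutex_lock(a);
	} else if (a < b) {		/* consistent global order by address */
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}

static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}

static pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	/* The two threads pass the locks in opposite order; no deadlock. */
	int swap = *(int *)arg;

	double_lock(swap ? &m2 : &m1, swap ? &m1 : &m2);
	puts(swap ? "thread B holds both" : "thread A holds both");
	double_unlock(&m1, &m2);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	int zero = 0, one = 1;

	pthread_create(&a, NULL, worker, &zero);
	pthread_create(&b, NULL, worker, &one);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

The kernel side needs the extra lockdep annotations because both rq locks now share one class (the per-rq rq_lock_key is removed above), so without the nested/subclass hints lockdep would report taking two locks of the same class as a self-deadlock.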
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 908c04f9dad0..6163e4cf885b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
			break;
 
		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}
 
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 
	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
	ret = 1;
 out:
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 
		}
  skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
	}
 
	return ret;
diff --git a/kernel/signal.c b/kernel/signal.c
index 954f77d7e3bc..c539f60c6f41 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1304,6 +1304,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1304 q->info.si_overrun++; 1304 q->info.si_overrun++;
1305 goto out; 1305 goto out;
1306 } 1306 }
1307 q->info.si_overrun = 0;
1307 1308
1308 signalfd_notify(t, sig); 1309 signalfd_notify(t, sig);
1309 pending = group ? &t->signal->shared_pending : &t->pending; 1310 pending = group ? &t->signal->shared_pending : &t->pending;
diff --git a/kernel/smp.c b/kernel/smp.c
index 96fc7c0edc59..e6084f6efb4d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -260,6 +260,41 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
260 generic_exec_single(cpu, data); 260 generic_exec_single(cpu, data);
261} 261}
262 262
263/* Dummy function */
264static void quiesce_dummy(void *unused)
265{
266}
267
268/*
269 * Ensure stack based data used in call function mask is safe to free.
270 *
271 * This is needed by smp_call_function_mask when using on-stack data, because
272 * a single call function queue is shared by all CPUs, and any CPU may pick up
273 * the data item on the queue at any time before it is deleted. So we need to
274 * ensure that all CPUs have transitioned through a quiescent state after
275 * this call.
276 *
277 * This is a very slow function, implemented by sending synchronous IPIs to
278 * all possible CPUs. For this reason, we have to alloc data rather than use
279 * stack based data even in the case of synchronous calls. The stack based
280 * data is then just used for deadlock/oom fallback which will be very rare.
281 *
282 * If a faster scheme can be made, we could go back to preferring stack based
283 * data -- the data allocation/free is non-zero cost.
284 */
285static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
286{
287 struct call_single_data data;
288 int cpu;
289
290 data.func = quiesce_dummy;
291 data.info = NULL;
292 data.flags = CSD_FLAG_WAIT;
293
294 for_each_cpu_mask(cpu, mask)
295 generic_exec_single(cpu, &data);
296}
297
263/** 298/**
264 * smp_call_function_mask(): Run a function on a set of other CPUs. 299 * smp_call_function_mask(): Run a function on a set of other CPUs.
265 * @mask: The set of cpus to run on. 300 * @mask: The set of cpus to run on.
@@ -285,6 +320,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
285 cpumask_t allbutself; 320 cpumask_t allbutself;
286 unsigned long flags; 321 unsigned long flags;
287 int cpu, num_cpus; 322 int cpu, num_cpus;
323 int slowpath = 0;
288 324
289 /* Can deadlock when called with interrupts disabled */ 325 /* Can deadlock when called with interrupts disabled */
290 WARN_ON(irqs_disabled()); 326 WARN_ON(irqs_disabled());
@@ -306,15 +342,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
306 return smp_call_function_single(cpu, func, info, wait); 342 return smp_call_function_single(cpu, func, info, wait);
307 } 343 }
308 344
309 if (!wait) { 345 data = kmalloc(sizeof(*data), GFP_ATOMIC);
310 data = kmalloc(sizeof(*data), GFP_ATOMIC); 346 if (data) {
311 if (data) 347 data->csd.flags = CSD_FLAG_ALLOC;
312 data->csd.flags = CSD_FLAG_ALLOC; 348 if (wait)
313 } 349 data->csd.flags |= CSD_FLAG_WAIT;
314 if (!data) { 350 } else {
315 data = &d; 351 data = &d;
316 data->csd.flags = CSD_FLAG_WAIT; 352 data->csd.flags = CSD_FLAG_WAIT;
317 wait = 1; 353 wait = 1;
354 slowpath = 1;
318 } 355 }
319 356
320 spin_lock_init(&data->lock); 357 spin_lock_init(&data->lock);
@@ -331,8 +368,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
331 arch_send_call_function_ipi(mask); 368 arch_send_call_function_ipi(mask);
332 369
333 /* optionally wait for the CPUs to complete */ 370 /* optionally wait for the CPUs to complete */
334 if (wait) 371 if (wait) {
335 csd_flag_wait(&data->csd); 372 csd_flag_wait(&data->csd);
373 if (unlikely(slowpath))
374 smp_call_function_mask_quiesce_stack(allbutself);
375 }
336 376
337 return 0; 377 return 0;
338} 378}
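The smp.c change above rests on a lifetime rule: on-stack call data handed to other CPUs must stay live until every CPU is done touching it, and the quiesce IPI round guarantees that before the caller's stack frame dies. A rough user-space sketch of the same rule, with a wait flag inside the request playing the role of CSD_FLAG_WAIT (a hypothetical single-slot queue, not the kernel's call_single_data machinery):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct request {
	void (*func)(void *);
	void *info;
	atomic_int done;	/* consumer sets this when finished */
};

static struct request *_Atomic slot;	/* single-entry "queue" */

static void *consumer(void *unused)
{
	struct request *req;

	while ((req = atomic_exchange(&slot, NULL)) == NULL)
		;		/* spin until a request is posted */
	req->func(req->info);
	/* Last touch of *req: after this store the stack frame may die. */
	atomic_store(&req->done, 1);
	return NULL;
}

static void say_hi(void *info)
{
	printf("hello from consumer: %s\n", (const char *)info);
}

int main(void)
{
	pthread_t t;
	struct request req = { .func = say_hi, .info = "on-stack data", .done = 0 };

	pthread_create(&t, NULL, consumer, NULL);
	atomic_store(&slot, &req);

	/*
	 * Returning before 'done' is set would hand the consumer a dead
	 * stack frame -- waiting here is the whole point.
	 */
	while (!atomic_load(&req.done))
		;
	pthread_join(t, NULL);
	return 0;
}

The kernel case is subtler because the shared queue linkage itself may be touched after the callback runs, which is why the patch prefers heap-allocated data and reserves the on-stack fallback (plus the slow quiesce pass) for allocation failure.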
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index a1fb54c93cdd..44baeea94ab9 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 
 EXPORT_SYMBOL(_spin_lock_nested);
+
 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 {
	unsigned long flags;
@@ -314,6 +315,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
 
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+				     struct lockdep_map *nest_lock)
+{
+	preempt_disable();
+	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+EXPORT_SYMBOL(_spin_lock_nest_lock);
+
 #endif
 
 void __lockfunc _spin_unlock(spinlock_t *lock)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4a26a1382df0..4048e92aa04f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
-		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+		lock_map_acquire(&cwq->wq->lockdep_map);
+		lock_map_acquire(&lockdep_map);
		f(work);
-		lock_release(&lockdep_map, 1, _THIS_IP_);
-		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+		lock_map_release(&lockdep_map);
+		lock_map_release(&cwq->wq->lockdep_map);
 
		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
	int cpu;
 
	might_sleep();
-	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
	if (!cwq)
		return 0;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
	prev = NULL;
	spin_lock_irq(&cwq->lock);
@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
 
	might_sleep();
 
-	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
 
	cwq = get_wq_data(work);
	if (!cwq)
@@ -872,8 +872,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
	if (cwq->thread == NULL)
		return;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
	flush_cpu_workqueue(cwq);
	/*