path: root/lib
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig             |   3
-rw-r--r--  lib/Kconfig.debug       |  13
-rw-r--r--  lib/Makefile            |   4
-rwxr-xr-x  lib/build_OID_registry  |   2
-rw-r--r--  lib/debug_locks.c       |   2
-rw-r--r--  lib/fault-inject.c      |  21
-rw-r--r--  lib/kobject.c           |   2
-rw-r--r--  lib/locking-selftest.c  | 720
-rw-r--r--  lib/net_utils.c         |  26
-rw-r--r--  lib/percpu-refcount.c   | 158
10 files changed, 908 insertions(+), 43 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index fe01d418b09a..d246a3bbd6ef 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -22,6 +22,9 @@ config GENERIC_STRNCPY_FROM_USER
 config GENERIC_STRNLEN_USER
 	bool
 
+config GENERIC_NET_UTILS
+	bool
+
 config GENERIC_FIND_FIRST_BIT
 	bool
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 566cf2bc08ea..7154f799541a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -547,6 +547,19 @@ config DEBUG_MUTEXES
 	 This feature allows mutex semantics violations to be detected and
 	 reported.
 
+config DEBUG_WW_MUTEX_SLOWPATH
+	bool "Wait/wound mutex debugging: Slowpath testing"
+	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	select DEBUG_LOCK_ALLOC
+	select DEBUG_SPINLOCK
+	select DEBUG_MUTEXES
+	help
+	 This feature enables slowpath testing for w/w mutex users by
+	 injecting additional -EDEADLK wound/backoff cases. Together with
+	 the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
+	 will test all possible w/w mutex interface abuse with the
+	 exception of simply not acquiring all the required locks.
+
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
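
For context, the backoff dance this option stresses looks roughly like the sketch below. This is a condensed illustration, not part of the patch: lock_objs(), my_ww_class and the two-lock shape are hypothetical placeholders.

	static DEFINE_WW_CLASS(my_ww_class);

	static void lock_objs(struct ww_mutex *a, struct ww_mutex *b)
	{
		struct ww_acquire_ctx ctx;
		int ret;

		ww_acquire_init(&ctx, &my_ww_class);

		ret = ww_mutex_lock(a, &ctx);
		if (ret == -EDEADLK)	/* nothing held yet: just sleep on it */
			ww_mutex_lock_slow(a, &ctx);

		ret = ww_mutex_lock(b, &ctx);
		while (ret == -EDEADLK) {
			/*
			 * Wounded: drop what we hold, sleep on the contended
			 * lock, then retry the other one. The acquire context
			 * (and its stamp) is kept across the backoff, so this
			 * transaction eventually wins.
			 */
			ww_mutex_unlock(a);
			ww_mutex_lock_slow(b, &ctx);
			swap(a, b);
			ret = ww_mutex_lock(b, &ctx);
		}
		ww_acquire_done(&ctx);

		/* ... work on both objects ... */

		ww_mutex_unlock(a);
		ww_mutex_unlock(b);
		ww_acquire_fini(&ctx);
	}

With DEBUG_WW_MUTEX_SLOWPATH enabled, -EDEADLK is injected even without real contention, so the unlock/lock_slow path above gets exercised on every run.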
diff --git a/lib/Makefile b/lib/Makefile
index c55a037a354e..8f8d385187f2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o
+	 earlycpio.o percpu-refcount.o
 
 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
@@ -137,6 +137,8 @@ obj-$(CONFIG_DDR) += jedec_ddr_data.o
 obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
 obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
 
+obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
+
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o
diff --git a/lib/build_OID_registry b/lib/build_OID_registry
index dfbdaab81bc8..5d9827217360 100755
--- a/lib/build_OID_registry
+++ b/lib/build_OID_registry
@@ -50,8 +50,6 @@ my @indices = ();
 my @lengths = ();
 my $total_length = 0;
 
-print "Compiling ", $#names + 1, " OIDs\n";
-
 for (my $i = 0; $i <= $#names; $i++) {
     my $name = $names[$i];
     my $oid = $oids[$i];
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index f2fa60c59343..96c4c633d95e 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(debug_locks);
  * a locking bug is detected.
  */
 int debug_locks_silent;
+EXPORT_SYMBOL_GPL(debug_locks_silent);
 
 /*
  * Generic 'turn off all lock debugging' function:
@@ -44,3 +45,4 @@ int debug_locks_off(void)
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(debug_locks_off);
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index c5c7a762b850..d7d501ea856d 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -182,27 +182,6 @@ static struct dentry *debugfs_create_stacktrace_depth(
 
 #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
 
-static int debugfs_atomic_t_set(void *data, u64 val)
-{
-	atomic_set((atomic_t *)data, val);
-	return 0;
-}
-
-static int debugfs_atomic_t_get(void *data, u64 *val)
-{
-	*val = atomic_read((atomic_t *)data);
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
-			debugfs_atomic_t_set, "%lld\n");
-
-static struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
-			struct dentry *parent, atomic_t *value)
-{
-	return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
-}
-
 struct dentry *fault_create_debugfs_attr(const char *name,
 			struct dentry *parent, struct fault_attr *attr)
 {
diff --git a/lib/kobject.c b/lib/kobject.c
index b7e29a6056d3..4a1f33d43548 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -805,7 +805,7 @@ static struct kset *kset_create(const char *name,
 	kset = kzalloc(sizeof(*kset), GFP_KERNEL);
 	if (!kset)
 		return NULL;
-	retval = kobject_set_name(&kset->kobj, name);
+	retval = kobject_set_name(&kset->kobj, "%s", name);
 	if (retval) {
 		kfree(kset);
 		return NULL;
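
The one-liner above closes a classic format-string hole: kobject_set_name() treats its second argument as a printf-style format, so a name is passed as data, never as the format. Illustration only, not part of the patch:

	kobject_set_name(&kset->kobj, name);		/* a name like "eth%d" would be expanded */
	kobject_set_name(&kset->kobj, "%s", name);	/* name is used verbatim, as intended */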
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index c3eb261a7df3..aad024dde3c4 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -26,6 +26,8 @@
  */
 static unsigned int debug_locks_verbose;
 
+static DEFINE_WW_CLASS(ww_lockdep);
+
 static int __init setup_debug_locks_verbose(char *str)
 {
 	get_option(&str, &debug_locks_verbose);
@@ -42,6 +44,10 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
 #define LOCKTYPE_RWLOCK	0x2
 #define LOCKTYPE_MUTEX	0x4
 #define LOCKTYPE_RWSEM	0x8
+#define LOCKTYPE_WW	0x10
+
+static struct ww_acquire_ctx t, t2;
+static struct ww_mutex o, o2, o3;
 
 /*
  * Normal standalone locks, for the circular and irq-context
@@ -193,6 +199,20 @@ static void init_shared_classes(void)
 #define RSU(x)			up_read(&rwsem_##x)
 #define RWSI(x)			init_rwsem(&rwsem_##x)
 
+#ifndef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+#define WWAI(x)			ww_acquire_init(x, &ww_lockdep)
+#else
+#define WWAI(x)			do { ww_acquire_init(x, &ww_lockdep); (x)->deadlock_inject_countdown = ~0U; } while (0)
+#endif
+#define WWAD(x)			ww_acquire_done(x)
+#define WWAF(x)			ww_acquire_fini(x)
+
+#define WWL(x, c)		ww_mutex_lock(x, c)
+#define WWT(x)			ww_mutex_trylock(x)
+#define WWL1(x)			ww_mutex_lock(x, NULL)
+#define WWU(x)			ww_mutex_unlock(x)
+
+
 #define LOCK_UNLOCK_2(x,y)	LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
 
 /*
@@ -894,11 +914,13 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
 # define I_RWLOCK(x)	lockdep_reset_lock(&rwlock_##x.dep_map)
 # define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
 # define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
+# define I_WW(x)	lockdep_reset_lock(&x.dep_map)
 #else
 # define I_SPINLOCK(x)
 # define I_RWLOCK(x)
 # define I_MUTEX(x)
 # define I_RWSEM(x)
+# define I_WW(x)
 #endif
 
 #define I1(x)					\
@@ -920,11 +942,20 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
 static void reset_locks(void)
 {
 	local_irq_disable();
+	lockdep_free_key_range(&ww_lockdep.acquire_key, 1);
+	lockdep_free_key_range(&ww_lockdep.mutex_key, 1);
+
 	I1(A); I1(B); I1(C); I1(D);
 	I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
+	I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
 	lockdep_reset();
 	I2(A); I2(B); I2(C); I2(D);
 	init_shared_classes();
+
+	ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
+	memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
+	memset(&ww_lockdep.acquire_key, 0, sizeof(ww_lockdep.acquire_key));
+	memset(&ww_lockdep.mutex_key, 0, sizeof(ww_lockdep.mutex_key));
 	local_irq_enable();
 }
 
@@ -938,7 +969,6 @@ static int unexpected_testcase_failures;
 static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 {
 	unsigned long saved_preempt_count = preempt_count();
-	int expected_failure = 0;
 
 	WARN_ON(irqs_disabled());
 
@@ -947,25 +977,17 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 	 * Filter out expected failures:
 	 */
 #ifndef CONFIG_PROVE_LOCKING
-	if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected)
-		expected_failure = 1;
-	if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected)
-		expected_failure = 1;
-	if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected)
-		expected_failure = 1;
-	if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected)
-		expected_failure = 1;
+	if (expected == FAILURE && debug_locks) {
+		expected_testcase_failures++;
+		printk("failed|");
+	}
+	else
 #endif
 	if (debug_locks != expected) {
-		if (expected_failure) {
-			expected_testcase_failures++;
-			printk("failed|");
-		} else {
-			unexpected_testcase_failures++;
-
-			printk("FAILED|");
-			dump_stack();
-		}
+		unexpected_testcase_failures++;
+		printk("FAILED|");
+
+		dump_stack();
 	} else {
 		testcase_successes++;
 		printk(" ok |");
@@ -1108,6 +1130,666 @@ static inline void print_testname(const char *testname)
 	DO_TESTCASE_6IRW(desc, name, 312);		\
 	DO_TESTCASE_6IRW(desc, name, 321);
 
+static void ww_test_fail_acquire(void)
+{
+	int ret;
+
+	WWAI(&t);
+	t.stamp++;
+
+	ret = WWL(&o, &t);
+
+	if (WARN_ON(!o.ctx) ||
+	    WARN_ON(ret))
+		return;
+
+	/* No lockdep test, pure API */
+	ret = WWL(&o, &t);
+	WARN_ON(ret != -EALREADY);
+
+	ret = WWT(&o);
+	WARN_ON(ret);
+
+	t2 = t;
+	t2.stamp++;
+	ret = WWL(&o, &t2);
+	WARN_ON(ret != -EDEADLK);
+	WWU(&o);
+
+	if (WWT(&o))
+		WWU(&o);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	else
+		DEBUG_LOCKS_WARN_ON(1);
+#endif
+}
+
+static void ww_test_normal(void)
+{
+	int ret;
+
+	WWAI(&t);
+
+	/*
+	 * None of the ww_mutex codepaths should be taken in the 'normal'
+	 * mutex calls. The easiest way to verify this is by using the
+	 * normal mutex calls, and making sure o.ctx is unmodified.
+	 */
+
+	/* mutex_lock (and indirectly, mutex_lock_nested) */
+	o.ctx = (void *)~0UL;
+	mutex_lock(&o.base);
+	mutex_unlock(&o.base);
+	WARN_ON(o.ctx != (void *)~0UL);
+
+	/* mutex_lock_interruptible (and *_nested) */
+	o.ctx = (void *)~0UL;
+	ret = mutex_lock_interruptible(&o.base);
+	if (!ret)
+		mutex_unlock(&o.base);
+	else
+		WARN_ON(1);
+	WARN_ON(o.ctx != (void *)~0UL);
+
+	/* mutex_lock_killable (and *_nested) */
+	o.ctx = (void *)~0UL;
+	ret = mutex_lock_killable(&o.base);
+	if (!ret)
+		mutex_unlock(&o.base);
+	else
+		WARN_ON(1);
+	WARN_ON(o.ctx != (void *)~0UL);
+
+	/* trylock, succeeding */
+	o.ctx = (void *)~0UL;
+	ret = mutex_trylock(&o.base);
+	WARN_ON(!ret);
+	if (ret)
+		mutex_unlock(&o.base);
+	else
+		WARN_ON(1);
+	WARN_ON(o.ctx != (void *)~0UL);
+
+	/* trylock, failing */
+	o.ctx = (void *)~0UL;
+	mutex_lock(&o.base);
+	ret = mutex_trylock(&o.base);
+	WARN_ON(ret);
+	mutex_unlock(&o.base);
+	WARN_ON(o.ctx != (void *)~0UL);
+
+	/* nest_lock */
+	o.ctx = (void *)~0UL;
+	mutex_lock_nest_lock(&o.base, &t);
+	mutex_unlock(&o.base);
+	WARN_ON(o.ctx != (void *)~0UL);
+}
+
+static void ww_test_two_contexts(void)
+{
+	WWAI(&t);
+	WWAI(&t2);
+}
+
+static void ww_test_diff_class(void)
+{
+	WWAI(&t);
+#ifdef CONFIG_DEBUG_MUTEXES
+	t.ww_class = NULL;
+#endif
+	WWL(&o, &t);
+}
+
+static void ww_test_context_done_twice(void)
+{
+	WWAI(&t);
+	WWAD(&t);
+	WWAD(&t);
+	WWAF(&t);
+}
+
+static void ww_test_context_unlock_twice(void)
+{
+	WWAI(&t);
+	WWAD(&t);
+	WWAF(&t);
+	WWAF(&t);
+}
+
+static void ww_test_context_fini_early(void)
+{
+	WWAI(&t);
+	WWL(&o, &t);
+	WWAD(&t);
+	WWAF(&t);
+}
+
+static void ww_test_context_lock_after_done(void)
+{
+	WWAI(&t);
+	WWAD(&t);
+	WWL(&o, &t);
+}
+
+static void ww_test_object_unlock_twice(void)
+{
+	WWL1(&o);
+	WWU(&o);
+	WWU(&o);
+}
+
+static void ww_test_object_lock_unbalanced(void)
+{
+	WWAI(&t);
+	WWL(&o, &t);
+	t.acquired = 0;
+	WWU(&o);
+	WWAF(&t);
+}
+
+static void ww_test_object_lock_stale_context(void)
+{
+	WWAI(&t);
+	o.ctx = &t2;
+	WWL(&o, &t);
+}
+
+static void ww_test_edeadlk_normal(void)
+{
+	int ret;
+
+	mutex_lock(&o2.base);
+	o2.ctx = &t2;
+	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+
+	WWAI(&t);
+	t2 = t;
+	t2.stamp--;
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret != -EDEADLK);
+
+	o2.ctx = NULL;
+	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+	mutex_unlock(&o2.base);
+	WWU(&o);
+
+	WWL(&o2, &t);
+}
+
+static void ww_test_edeadlk_normal_slow(void)
+{
+	int ret;
+
+	mutex_lock(&o2.base);
+	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+	o2.ctx = &t2;
+
+	WWAI(&t);
+	t2 = t;
+	t2.stamp--;
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret != -EDEADLK);
+
+	o2.ctx = NULL;
+	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+	mutex_unlock(&o2.base);
+	WWU(&o);
+
+	ww_mutex_lock_slow(&o2, &t);
+}
+
+static void ww_test_edeadlk_no_unlock(void)
+{
+	int ret;
+
+	mutex_lock(&o2.base);
+	o2.ctx = &t2;
+	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+
+	WWAI(&t);
+	t2 = t;
+	t2.stamp--;
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret != -EDEADLK);
+
+	o2.ctx = NULL;
+	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+	mutex_unlock(&o2.base);
+
+	WWL(&o2, &t);
+}
+
+static void ww_test_edeadlk_no_unlock_slow(void)
+{
+	int ret;
+
+	mutex_lock(&o2.base);
+	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+	o2.ctx = &t2;
+
+	WWAI(&t);
+	t2 = t;
+	t2.stamp--;
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret != -EDEADLK);
+
+	o2.ctx = NULL;
+	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+	mutex_unlock(&o2.base);
+
+	ww_mutex_lock_slow(&o2, &t);
+}
+
+static void ww_test_edeadlk_acquire_more(void)
+{
+	int ret;
+
+	mutex_lock(&o2.base);
+	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+	o2.ctx = &t2;
+
+	WWAI(&t);
+	t2 = t;
+	t2.stamp--;
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret != -EDEADLK);
+
+	ret = WWL(&o3, &t);
+}
+
+static void ww_test_edeadlk_acquire_more_slow(void)
+{
+	int ret;
+
+	mutex_lock(&o2.base);
+	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+	o2.ctx = &t2;
+
+	WWAI(&t);
+	t2 = t;
+	t2.stamp--;
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret != -EDEADLK);
+
+	ww_mutex_lock_slow(&o3, &t);
+}
+
+static void ww_test_edeadlk_acquire_more_edeadlk(void)
+{
+	int ret;
+
+	mutex_lock(&o2.base);
+	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+	o2.ctx = &t2;
+
+	mutex_lock(&o3.base);
+	mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
+	o3.ctx = &t2;
+
+	WWAI(&t);
+	t2 = t;
+	t2.stamp--;
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret != -EDEADLK);
+
+	ret = WWL(&o3, &t);
+	WARN_ON(ret != -EDEADLK);
+}
+
+static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
+{
+	int ret;
+
+	mutex_lock(&o2.base);
+	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+	o2.ctx = &t2;
+
+	mutex_lock(&o3.base);
+	mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
+	o3.ctx = &t2;
+
+	WWAI(&t);
+	t2 = t;
+	t2.stamp--;
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret != -EDEADLK);
+
+	ww_mutex_lock_slow(&o3, &t);
+}
+
+static void ww_test_edeadlk_acquire_wrong(void)
+{
+	int ret;
+
+	mutex_lock(&o2.base);
+	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+	o2.ctx = &t2;
+
+	WWAI(&t);
+	t2 = t;
+	t2.stamp--;
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret != -EDEADLK);
+	if (!ret)
+		WWU(&o2);
+
+	WWU(&o);
+
+	ret = WWL(&o3, &t);
+}
+
+static void ww_test_edeadlk_acquire_wrong_slow(void)
+{
+	int ret;
+
+	mutex_lock(&o2.base);
+	mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+	o2.ctx = &t2;
+
+	WWAI(&t);
+	t2 = t;
+	t2.stamp--;
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret != -EDEADLK);
+	if (!ret)
+		WWU(&o2);
+
+	WWU(&o);
+
+	ww_mutex_lock_slow(&o3, &t);
+}
+
+static void ww_test_spin_nest_unlocked(void)
+{
+	raw_spin_lock_nest_lock(&lock_A, &o.base);
+	U(A);
+}
+
+static void ww_test_unneeded_slow(void)
+{
+	WWAI(&t);
+
+	ww_mutex_lock_slow(&o, &t);
+}
+
+static void ww_test_context_block(void)
+{
+	int ret;
+
+	WWAI(&t);
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+	WWL1(&o2);
+}
+
+static void ww_test_context_try(void)
+{
+	int ret;
+
+	WWAI(&t);
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWT(&o2);
+	WARN_ON(!ret);
+	WWU(&o2);
+	WWU(&o);
+}
+
+static void ww_test_context_context(void)
+{
+	int ret;
+
+	WWAI(&t);
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret);
+
+	WWU(&o2);
+	WWU(&o);
+}
+
+static void ww_test_try_block(void)
+{
+	bool ret;
+
+	ret = WWT(&o);
+	WARN_ON(!ret);
+
+	WWL1(&o2);
+	WWU(&o2);
+	WWU(&o);
+}
+
+static void ww_test_try_try(void)
+{
+	bool ret;
+
+	ret = WWT(&o);
+	WARN_ON(!ret);
+	ret = WWT(&o2);
+	WARN_ON(!ret);
+	WWU(&o2);
+	WWU(&o);
+}
+
+static void ww_test_try_context(void)
+{
+	int ret;
+
+	ret = WWT(&o);
+	WARN_ON(!ret);
+
+	WWAI(&t);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret);
+}
+
+static void ww_test_block_block(void)
+{
+	WWL1(&o);
+	WWL1(&o2);
+}
+
+static void ww_test_block_try(void)
+{
+	bool ret;
+
+	WWL1(&o);
+	ret = WWT(&o2);
+	WARN_ON(!ret);
+}
+
+static void ww_test_block_context(void)
+{
+	int ret;
+
+	WWL1(&o);
+	WWAI(&t);
+
+	ret = WWL(&o2, &t);
+	WARN_ON(ret);
+}
+
+static void ww_test_spin_block(void)
+{
+	L(A);
+	U(A);
+
+	WWL1(&o);
+	L(A);
+	U(A);
+	WWU(&o);
+
+	L(A);
+	WWL1(&o);
+	WWU(&o);
+	U(A);
+}
+
+static void ww_test_spin_try(void)
+{
+	bool ret;
+
+	L(A);
+	U(A);
+
+	ret = WWT(&o);
+	WARN_ON(!ret);
+	L(A);
+	U(A);
+	WWU(&o);
+
+	L(A);
+	ret = WWT(&o);
+	WARN_ON(!ret);
+	WWU(&o);
+	U(A);
+}
+
+static void ww_test_spin_context(void)
+{
+	int ret;
+
+	L(A);
+	U(A);
+
+	WWAI(&t);
+
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+	L(A);
+	U(A);
+	WWU(&o);
+
+	L(A);
+	ret = WWL(&o, &t);
+	WARN_ON(ret);
+	WWU(&o);
+	U(A);
+}
+
+static void ww_tests(void)
+{
+	printk("  --------------------------------------------------------------------------\n");
+	printk("  | Wound/wait tests |\n");
+	printk("  ---------------------\n");
+
+	print_testname("ww api failures");
+	dotest(ww_test_fail_acquire, SUCCESS, LOCKTYPE_WW);
+	dotest(ww_test_normal, SUCCESS, LOCKTYPE_WW);
+	dotest(ww_test_unneeded_slow, FAILURE, LOCKTYPE_WW);
+	printk("\n");
+
+	print_testname("ww contexts mixing");
+	dotest(ww_test_two_contexts, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_diff_class, FAILURE, LOCKTYPE_WW);
+	printk("\n");
+
+	print_testname("finishing ww context");
+	dotest(ww_test_context_done_twice, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_context_unlock_twice, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_context_fini_early, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_context_lock_after_done, FAILURE, LOCKTYPE_WW);
+	printk("\n");
+
+	print_testname("locking mismatches");
+	dotest(ww_test_object_unlock_twice, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_object_lock_unbalanced, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_object_lock_stale_context, FAILURE, LOCKTYPE_WW);
+	printk("\n");
+
+	print_testname("EDEADLK handling");
+	dotest(ww_test_edeadlk_normal, SUCCESS, LOCKTYPE_WW);
+	dotest(ww_test_edeadlk_normal_slow, SUCCESS, LOCKTYPE_WW);
+	dotest(ww_test_edeadlk_no_unlock, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_edeadlk_no_unlock_slow, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_edeadlk_acquire_more, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_edeadlk_acquire_more_slow, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_edeadlk_acquire_more_edeadlk, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_edeadlk_acquire_more_edeadlk_slow, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_edeadlk_acquire_wrong, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW);
+	printk("\n");
+
+	print_testname("spinlock nest unlocked");
+	dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
+	printk("\n");
+
+	printk("  -----------------------------------------------------\n");
+	printk("                                 |block | try  |context|\n");
+	printk("  -----------------------------------------------------\n");
+
+	print_testname("context");
+	dotest(ww_test_context_block, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_context_try, SUCCESS, LOCKTYPE_WW);
+	dotest(ww_test_context_context, SUCCESS, LOCKTYPE_WW);
+	printk("\n");
+
+	print_testname("try");
+	dotest(ww_test_try_block, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_try_try, SUCCESS, LOCKTYPE_WW);
+	dotest(ww_test_try_context, FAILURE, LOCKTYPE_WW);
+	printk("\n");
+
+	print_testname("block");
+	dotest(ww_test_block_block, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_block_try, SUCCESS, LOCKTYPE_WW);
+	dotest(ww_test_block_context, FAILURE, LOCKTYPE_WW);
+	printk("\n");
+
+	print_testname("spinlock");
+	dotest(ww_test_spin_block, FAILURE, LOCKTYPE_WW);
+	dotest(ww_test_spin_try, SUCCESS, LOCKTYPE_WW);
+	dotest(ww_test_spin_context, FAILURE, LOCKTYPE_WW);
+	printk("\n");
+}
 
 void locking_selftest(void)
 {
@@ -1188,6 +1870,8 @@ void locking_selftest(void)
 	DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
 //	DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
 
+	ww_tests();
+
 	if (unexpected_testcase_failures) {
 		printk("-----------------------------------------------------------------\n");
 		debug_locks = 0;
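
For orientation, each case above follows the same shape: a small function drives the ww API through the WWAI/WWL/WWU shorthand, and dotest() checks whether lockdep tripped. A sketch (ww_test_example is hypothetical; t, o, SUCCESS/FAILURE and LOCKTYPE_WW come from the diff):

	static void ww_test_example(void)
	{
		WWAI(&t);	/* ww_acquire_init on the shared context */
		WWL(&o, &t);	/* take o in context t */
		WWU(&o);
		WWAF(&t);
	}

	/* SUCCESS: debug_locks must stay on; FAILURE: lockdep must trip */
	dotest(ww_test_example, SUCCESS, LOCKTYPE_WW);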
diff --git a/lib/net_utils.c b/lib/net_utils.c
new file mode 100644
index 000000000000..2e3c52c8d050
--- /dev/null
+++ b/lib/net_utils.c
@@ -0,0 +1,26 @@
+#include <linux/string.h>
+#include <linux/if_ether.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+
+int mac_pton(const char *s, u8 *mac)
+{
+	int i;
+
+	/* XX:XX:XX:XX:XX:XX */
+	if (strlen(s) < 3 * ETH_ALEN - 1)
+		return 0;
+
+	/* Don't dirty result unless string is valid MAC. */
+	for (i = 0; i < ETH_ALEN; i++) {
+		if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
+			return 0;
+		if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
+			return 0;
+	}
+	for (i = 0; i < ETH_ALEN; i++) {
+		mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
+	}
+	return 1;
+}
+EXPORT_SYMBOL(mac_pton);
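
Note that mac_pton() returns 1 on success and 0 on failure, and writes to mac[] only after the whole string has validated. A minimal usage sketch (illustrative; buf is a hypothetical NUL-terminated option string):

	u8 addr[ETH_ALEN];

	if (!mac_pton(buf, addr))
		return -EINVAL;	/* addr[] is untouched on parse failure */
	/* addr[] now holds the six parsed octets */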
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
new file mode 100644
index 000000000000..7deeb6297a48
--- /dev/null
+++ b/lib/percpu-refcount.c
@@ -0,0 +1,158 @@
+#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+
+#include <linux/kernel.h>
+#include <linux/percpu-refcount.h>
+
+/*
+ * Initially, a percpu refcount is just a set of percpu counters. Initially, we
+ * don't try to detect the ref hitting 0 - which means that get/put can just
+ * increment or decrement the local counter. Note that the counter on a
+ * particular cpu can (and will) wrap - this is fine, when we go to shutdown the
+ * percpu counters will all sum to the correct value
+ *
+ * (More precisely: because modular arithmetic is commutative the sum of all the
+ * pcpu_count vars will be equal to what it would have been if all the gets and
+ * puts were done to a single integer, even if some of the percpu integers
+ * overflow or underflow).
+ *
+ * The real trick to implementing percpu refcounts is shutdown. We can't detect
+ * the ref hitting 0 on every put - this would require global synchronization
+ * and defeat the whole purpose of using percpu refs.
+ *
+ * What we do is require the user to keep track of the initial refcount; we know
+ * the ref can't hit 0 before the user drops the initial ref, so as long as we
+ * convert to non percpu mode before the initial ref is dropped everything
+ * works.
+ *
+ * Converting to non percpu mode is done with some RCUish stuff in
+ * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
+ * can't hit 0 before we've added up all the percpu refs.
+ */
+
+#define PCPU_COUNT_BIAS		(1U << 31)
+
+/**
+ * percpu_ref_init - initialize a percpu refcount
+ * @ref: percpu_ref to initialize
+ * @release: function which will be called when refcount hits 0
+ *
+ * Initializes the refcount in single atomic counter mode with a refcount of 1;
+ * analogous to atomic_set(ref, 1).
+ *
+ * Note that @release must not sleep - it may potentially be called from RCU
+ * callback context by percpu_ref_kill().
+ */
+int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
+{
+	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+
+	ref->pcpu_count = alloc_percpu(unsigned);
+	if (!ref->pcpu_count)
+		return -ENOMEM;
+
+	ref->release = release;
+	return 0;
+}
+
+/**
+ * percpu_ref_cancel_init - cancel percpu_ref_init()
+ * @ref: percpu_ref to cancel init for
+ *
+ * Once a percpu_ref is initialized, its destruction is initiated by
+ * percpu_ref_kill() and completes asynchronously, which can be painful to
+ * do when destroying a half-constructed object in init failure path.
+ *
+ * This function destroys @ref without invoking @ref->release and the
+ * memory area containing it can be freed immediately on return. To
+ * prevent accidental misuse, it's required that @ref has finished
+ * percpu_ref_init(), whether successful or not, but never used.
+ *
+ * The weird name and usage restriction are to prevent people from using
+ * this function by mistake for normal shutdown instead of
+ * percpu_ref_kill().
+ */
+void percpu_ref_cancel_init(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	int cpu;
+
+	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
+
+	if (pcpu_count) {
+		for_each_possible_cpu(cpu)
+			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
+		free_percpu(ref->pcpu_count);
+	}
+}
+
+static void percpu_ref_kill_rcu(struct rcu_head *rcu)
+{
+	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned count = 0;
+	int cpu;
+
+	/* Mask out PCPU_REF_DEAD */
+	pcpu_count = (unsigned __percpu *)
+		(((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);
+
+	for_each_possible_cpu(cpu)
+		count += *per_cpu_ptr(pcpu_count, cpu);
+
+	free_percpu(pcpu_count);
+
+	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
+
+	/*
+	 * It's crucial that we sum the percpu counters _before_ adding the sum
+	 * to &ref->count; since gets could be happening on one cpu while puts
+	 * happen on another, adding a single cpu's count could cause
+	 * @ref->count to hit 0 before we've got a consistent value - but the
+	 * sum of all the counts will be consistent and correct.
+	 *
+	 * Subtracting the bias value then has to happen _after_ adding count to
+	 * &ref->count; we need the bias value to prevent &ref->count from
+	 * reaching 0 before we add the percpu counts. But doing it at the same
+	 * time is equivalent and saves us atomic operations:
+	 */
+
+	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
+
+	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
+	if (ref->confirm_kill)
+		ref->confirm_kill(ref);
+
+	/*
+	 * Now we're in single atomic_t mode with a consistent refcount, so it's
+	 * safe to drop our initial ref:
+	 */
+	percpu_ref_put(ref);
+}
+
+/**
+ * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
+ * @ref: percpu_ref to kill
+ * @confirm_kill: optional confirmation callback
+ *
+ * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
+ * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
+ * called after @ref is seen as dead from all CPUs - all further
+ * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
+ * for more details.
+ *
+ * Due to the way percpu_ref is implemented, @confirm_kill will be called
+ * after at least one full RCU grace period has passed but this is an
+ * implementation detail and callers must not depend on it.
+ */
+void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_kill)
+{
+	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
+		  "percpu_ref_kill() called more than once!\n");
+
+	ref->pcpu_count = (unsigned __percpu *)
+		(((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
+	ref->confirm_kill = confirm_kill;
+
+	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
+}
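
A minimal lifecycle sketch for the new API (illustrative, not part of the patch; my_obj and my_obj_release are hypothetical, and percpu_ref_get/put/kill are the header-side helpers this file backs):

	struct my_obj {
		struct percpu_ref ref;
		/* ... */
	};

	static void my_obj_release(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, ref);

		kfree(obj);	/* last reference is gone */
	}

	/* setup: refcount starts at 1, in cheap percpu mode */
	if (percpu_ref_init(&obj->ref, my_obj_release))
		return -ENOMEM;

	/* hot path: no shared-cacheline traffic */
	percpu_ref_get(&obj->ref);
	percpu_ref_put(&obj->ref);

	/* teardown: drop the initial ref; my_obj_release() runs (after an
	 * RCU grace period) once every outstanding ref has been put */
	percpu_ref_kill(&obj->ref);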