Diffstat (limited to 'lib')
41 files changed, 2618 insertions, 468 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 0c8b78a9ae2e..6762529ad9e4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -158,6 +158,14 @@ config CRC32_BIT
 
 endchoice
 
+config CRC4
+	tristate "CRC4 functions"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC4 functions, but a module built outside
+	  the kernel tree does. Such modules that use library CRC4
+	  functions require M here.
+
 config CRC7
 	tristate "CRC7 functions"
 	help
@@ -548,6 +556,9 @@ config ARCH_HAS_SG_CHAIN
 config ARCH_HAS_PMEM_API
 	bool
 
+config ARCH_HAS_UACCESS_FLUSHCACHE
+	bool
+
 config ARCH_HAS_MMIO_FLUSH
 	bool
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e4587ebe52c7..789c6e9e5e01 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -286,7 +286,7 @@ config DEBUG_FS
 	  write to these files.
 
 	  For detailed documentation on the debugfs API, see
-	  Documentation/DocBook/filesystems.
+	  Documentation/filesystems/.
 
 	  If unsure, say N.
 
@@ -778,34 +778,45 @@ config DEBUG_SHIRQ
 menu "Debug Lockups and Hangs"
 
 config LOCKUP_DETECTOR
-	bool "Detect Hard and Soft Lockups"
+	bool
+
+config SOFTLOCKUP_DETECTOR
+	bool "Detect Soft Lockups"
 	depends on DEBUG_KERNEL && !S390
+	select LOCKUP_DETECTOR
 	help
 	  Say Y here to enable the kernel to act as a watchdog to detect
-	  hard and soft lockups.
+	  soft lockups.
 
 	  Softlockups are bugs that cause the kernel to loop in kernel
 	  mode for more than 20 seconds, without giving other tasks a
 	  chance to run. The current stack trace is displayed upon
 	  detection and the system will stay locked up.
 
+config HARDLOCKUP_DETECTOR_PERF
+	bool
+	select SOFTLOCKUP_DETECTOR
+
+#
+# arch/ can define HAVE_HARDLOCKUP_DETECTOR_ARCH to provide their own hard
+# lockup detector rather than the perf based detector.
+#
+config HARDLOCKUP_DETECTOR
+	bool "Detect Hard Lockups"
+	depends on DEBUG_KERNEL && !S390
+	depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_ARCH
+	select LOCKUP_DETECTOR
+	select HARDLOCKUP_DETECTOR_PERF if HAVE_HARDLOCKUP_DETECTOR_PERF
+	select HARDLOCKUP_DETECTOR_ARCH if HAVE_HARDLOCKUP_DETECTOR_ARCH
+	help
+	  Say Y here to enable the kernel to act as a watchdog to detect
+	  hard lockups.
+
 	  Hardlockups are bugs that cause the CPU to loop in kernel mode
 	  for more than 10 seconds, without letting other interrupts have a
 	  chance to run. The current stack trace is displayed upon detection
 	  and the system will stay locked up.
 
-	  The overhead should be minimal. A periodic hrtimer runs to
-	  generate interrupts and kick the watchdog task every 4 seconds.
-	  An NMI is generated every 10 seconds or so to check for hardlockups.
-
-	  The frequency of hrtimer and NMI events and the soft and hard lockup
-	  thresholds can be controlled through the sysctl watchdog_thresh.
-
-config HARDLOCKUP_DETECTOR
-	def_bool y
-	depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
-	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
-
 config BOOTPARAM_HARDLOCKUP_PANIC
 	bool "Panic (Reboot) On Hard Lockups"
 	depends on HARDLOCKUP_DETECTOR
@@ -826,7 +837,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
 
 config BOOTPARAM_SOFTLOCKUP_PANIC
 	bool "Panic (Reboot) On Soft Lockups"
-	depends on LOCKUP_DETECTOR
+	depends on SOFTLOCKUP_DETECTOR
 	help
 	  Say Y here to enable the kernel to panic on "soft lockups",
 	  which are bugs that cause the kernel to loop in kernel
@@ -843,7 +854,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
 
 config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
 	int
-	depends on LOCKUP_DETECTOR
+	depends on SOFTLOCKUP_DETECTOR
 	range 0 1
 	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
 	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
@@ -851,7 +862,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
 config DETECT_HUNG_TASK
 	bool "Detect Hung Tasks"
 	depends on DEBUG_KERNEL
-	default LOCKUP_DETECTOR
+	default SOFTLOCKUP_DETECTOR
 	help
 	  Say Y here to enable the kernel to detect "hung tasks",
 	  which are bugs that cause the task to be stuck in
@@ -1052,6 +1063,7 @@ config DEBUG_LOCK_ALLOC
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select LOCKDEP
 	help
 	  This feature will check whether any held lock (spinlock, rwlock,
@@ -1067,6 +1079,7 @@ config PROVE_LOCKING
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
 	select TRACE_IRQFLAGS
 	default n
@@ -1121,6 +1134,7 @@ config LOCK_STAT
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
 	default n
 	help
@@ -1301,189 +1315,7 @@ config DEBUG_CREDENTIALS
 
 	  If unsure, say N.
 
-menu "RCU Debugging"
-
-config PROVE_RCU
-	def_bool PROVE_LOCKING
-
-config PROVE_RCU_REPEATEDLY
-	bool "RCU debugging: don't disable PROVE_RCU on first splat"
-	depends on PROVE_RCU
-	default n
-	help
-	  By itself, PROVE_RCU will disable checking upon issuing the
-	  first warning (or "splat"). This feature prevents such
-	  disabling, allowing multiple RCU-lockdep warnings to be printed
-	  on a single reboot.
-
-	  Say Y to allow multiple RCU-lockdep warnings per boot.
-
-	  Say N if you are unsure.
-
-config SPARSE_RCU_POINTER
-	bool "RCU debugging: sparse-based checks for pointer usage"
-	default n
-	help
-	  This feature enables the __rcu sparse annotation for
-	  RCU-protected pointers. This annotation will cause sparse
-	  to flag any non-RCU use of annotated pointers. This can be
-	  helpful when debugging RCU usage. Please note that this feature
-	  is not intended to enforce code cleanliness; it is instead merely
-	  a debugging aid.
-
-	  Say Y to make sparse flag questionable use of RCU-protected pointers.
-
-	  Say N if you are unsure.
-
-config TORTURE_TEST
-	tristate
-	default n
-
-config RCU_PERF_TEST
-	tristate "performance tests for RCU"
-	depends on DEBUG_KERNEL
-	select TORTURE_TEST
-	select SRCU
-	select TASKS_RCU
-	default n
-	help
-	  This option provides a kernel module that runs performance
-	  tests on the RCU infrastructure. The kernel module may be built
-	  after the fact on the running kernel to be tested, if desired.
-
-	  Say Y here if you want RCU performance tests to be built into
-	  the kernel.
-	  Say M if you want the RCU performance tests to build as a module.
-	  Say N if you are unsure.
-
-config RCU_TORTURE_TEST
-	tristate "torture tests for RCU"
-	depends on DEBUG_KERNEL
-	select TORTURE_TEST
-	select SRCU
-	select TASKS_RCU
-	default n
-	help
-	  This option provides a kernel module that runs torture tests
-	  on the RCU infrastructure. The kernel module may be built
-	  after the fact on the running kernel to be tested, if desired.
-
-	  Say Y here if you want RCU torture tests to be built into
-	  the kernel.
-	  Say M if you want the RCU torture tests to build as a module.
-	  Say N if you are unsure.
-
-config RCU_TORTURE_TEST_SLOW_PREINIT
-	bool "Slow down RCU grace-period pre-initialization to expose races"
-	depends on RCU_TORTURE_TEST
-	help
-	  This option delays grace-period pre-initialization (the
-	  propagation of CPU-hotplug changes up the rcu_node combining
-	  tree) for a few jiffies between initializing each pair of
-	  consecutive rcu_node structures. This helps to expose races
-	  involving grace-period pre-initialization, in other words, it
-	  makes your kernel less stable. It can also greatly increase
-	  grace-period latency, especially on systems with large numbers
-	  of CPUs. This is useful when torture-testing RCU, but in
-	  almost no other circumstance.
-
-	  Say Y here if you want your system to crash and hang more often.
-	  Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_PREINIT_DELAY
-	int "How much to slow down RCU grace-period pre-initialization"
-	range 0 5
-	default 3
-	depends on RCU_TORTURE_TEST_SLOW_PREINIT
-	help
-	  This option specifies the number of jiffies to wait between
-	  each rcu_node structure pre-initialization step.
-
-config RCU_TORTURE_TEST_SLOW_INIT
-	bool "Slow down RCU grace-period initialization to expose races"
-	depends on RCU_TORTURE_TEST
-	help
-	  This option delays grace-period initialization for a few
-	  jiffies between initializing each pair of consecutive
-	  rcu_node structures. This helps to expose races involving
-	  grace-period initialization, in other words, it makes your
-	  kernel less stable. It can also greatly increase grace-period
-	  latency, especially on systems with large numbers of CPUs.
-	  This is useful when torture-testing RCU, but in almost no
-	  other circumstance.
-
-	  Say Y here if you want your system to crash and hang more often.
-	  Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_INIT_DELAY
-	int "How much to slow down RCU grace-period initialization"
-	range 0 5
-	default 3
-	depends on RCU_TORTURE_TEST_SLOW_INIT
-	help
-	  This option specifies the number of jiffies to wait between
-	  each rcu_node structure initialization.
-
-config RCU_TORTURE_TEST_SLOW_CLEANUP
-	bool "Slow down RCU grace-period cleanup to expose races"
-	depends on RCU_TORTURE_TEST
-	help
-	  This option delays grace-period cleanup for a few jiffies
-	  between cleaning up each pair of consecutive rcu_node
-	  structures. This helps to expose races involving grace-period
-	  cleanup, in other words, it makes your kernel less stable.
-	  It can also greatly increase grace-period latency, especially
-	  on systems with large numbers of CPUs. This is useful when
-	  torture-testing RCU, but in almost no other circumstance.
-
-	  Say Y here if you want your system to crash and hang more often.
-	  Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY
-	int "How much to slow down RCU grace-period cleanup"
-	range 0 5
-	default 3
-	depends on RCU_TORTURE_TEST_SLOW_CLEANUP
-	help
-	  This option specifies the number of jiffies to wait between
-	  each rcu_node structure cleanup operation.
-
-config RCU_CPU_STALL_TIMEOUT
-	int "RCU CPU stall timeout in seconds"
-	depends on RCU_STALL_COMMON
-	range 3 300
-	default 21
-	help
-	  If a given RCU grace period extends more than the specified
-	  number of seconds, a CPU stall warning is printed. If the
-	  RCU grace period persists, additional CPU stall warnings are
-	  printed at more widely spaced intervals.
-
-config RCU_TRACE
-	bool "Enable tracing for RCU"
-	depends on DEBUG_KERNEL
-	default y if TREE_RCU
-	select TRACE_CLOCK
-	help
-	  This option provides tracing in RCU which presents stats
-	  in debugfs for debugging RCU implementation. It also enables
-	  additional tracepoints for ftrace-style event tracing.
-
-	  Say Y here if you want to enable RCU tracing.
-	  Say N if you are unsure.
-
-config RCU_EQS_DEBUG
-	bool "Provide debugging asserts for adding NO_HZ support to an arch"
-	depends on DEBUG_KERNEL
-	help
-	  This option provides consistency checks in RCU's handling of
-	  NO_HZ. These checks have proven quite helpful in detecting
-	  bugs in arch-specific NO_HZ code.
-
-	  Say N here if you need ultimate kernel/user switch latencies.
-	  Say Y if you are unsure.
-
-endmenu # "RCU Debugging"
+source "kernel/rcu/Kconfig.debug"
 
 config DEBUG_WQ_FORCE_RR_CPU
 	bool "Force round-robin CPU selection for unbound work items"
@@ -1773,7 +1605,7 @@ config RBTREE_TEST
 
 config INTERVAL_TREE_TEST
 	tristate "Interval tree test"
-	depends on m && DEBUG_KERNEL
+	depends on DEBUG_KERNEL
 	select INTERVAL_TREE
 	help
 	  A benchmark measuring the performance of the interval tree library
@@ -1964,6 +1796,17 @@ config TEST_FIRMWARE
 
 	  If unsure, say N.
 
+config TEST_SYSCTL
+	tristate "sysctl test driver"
+	default n
+	depends on PROC_SYSCTL
+	help
+	  This builds the "test_sysctl" module, which enables testing of
+	  the proc sysctl interfaces available to drivers safely, without
+	  affecting production knobs which might alter system functionality.
+
+	  If unsure, say N.
+
 config TEST_UDELAY
 	tristate "udelay test driver"
 	default n
@@ -2004,6 +1847,33 @@ config BUG_ON_DATA_CORRUPTION
 
 	  If unsure, say N.
 
+config TEST_KMOD
+	tristate "kmod stress tester"
+	default n
+	depends on m
+	depends on BLOCK && (64BIT || LBDAF)	# for XFS, BTRFS
+	depends on NETDEVICES && NET_CORE && INET # for TUN
+	select TEST_LKM
+	select XFS_FS
+	select TUN
+	select BTRFS_FS
+	help
+	  Test the kernel's module loading mechanism: kmod. kmod implements
+	  support to load modules using the Linux kernel's usermode helper.
+	  This test provides a series of tests against kmod.
+
+	  Although test_kmod could technically also be built into the
+	  kernel, we only allow building it as a module: it stress tests
+	  request_module(), and doing that from built-in code would very
+	  likely take over the limited pool of threads available to other
+	  module load requests, which could ultimately be fatal.
+
+	  To run the tests, run:
+
+	    tools/testing/selftests/kmod/kmod.sh --help
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 533f912638ed..ab4ff0eea776 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -13,7 +13,7 @@ menuconfig KGDB
 	  CONFIG_FRAME_POINTER to aid in producing more reliable stack
 	  backtraces in the external debugger. Documentation of
 	  kernel debugger is available at http://kgdb.sourceforge.net
-	  as well as in DocBook form in Documentation/DocBook/. If
+	  as well as in Documentation/dev-tools/kgdb.rst. If
 	  unsure, say N.
 
 if KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 0166fbc0fa81..40c18372b301 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,9 +25,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 earlycpio.o seq_buf.o siphash.o \
 	 nmi_backtrace.o nodemask.o win_minmax.o
 
-CFLAGS_radix-tree.o += -DCONFIG_SPARSE_RCU_POINTER
-CFLAGS_idr.o += -DCONFIG_SPARSE_RCU_POINTER
-
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
@@ -41,7 +38,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o refcount.o usercopy.o
+	 once.o refcount.o usercopy.o errseq.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
@@ -49,6 +46,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
 obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
 obj-$(CONFIG_TEST_KASAN) += test_kasan.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
@@ -63,6 +61,7 @@ obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
 obj-$(CONFIG_TEST_PARMAN) += test_parman.o
+obj-$(CONFIG_TEST_KMOD) += test_kmod.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -99,6 +98,7 @@ obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
 obj-$(CONFIG_CRC_ITU_T)	+= crc-itu-t.o
 obj-$(CONFIG_CRC32)	+= crc32.o
 obj-$(CONFIG_CRC32_SELFTEST)	+= crc32test.o
+obj-$(CONFIG_CRC4)	+= crc4.o
 obj-$(CONFIG_CRC7)	+= crc7.o
 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
 obj-$(CONFIG_CRC8)	+= crc8.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index fd70c0e0e673..62ab629f51ca 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -153,8 +153,10 @@ static __init void test_atomic64(void)
 	long long v0 = 0xaaa31337c001d00dLL;
 	long long v1 = 0xdeadbeefdeafcafeLL;
 	long long v2 = 0xfaceabadf00df001LL;
+	long long v3 = 0x8000000000000000LL;
 	long long onestwos = 0x1111111122222222LL;
 	long long one = 1LL;
+	int r_int;
 
 	atomic64_t v = ATOMIC64_INIT(v0);
 	long long r = v0;
@@ -240,6 +242,11 @@ static __init void test_atomic64(void)
 	BUG_ON(!atomic64_inc_not_zero(&v));
 	r += one;
 	BUG_ON(v.counter != r);
+
+	/* Confirm the return value fits in an int, even if the value doesn't */
+	INIT(v3);
+	r_int = atomic64_inc_not_zero(&v);
+	BUG_ON(!r_int);
 }
 
 static __init int test_atomics_init(void)
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 08c6ef3a2b6f..9a532805364b 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -251,7 +251,7 @@ int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
 }
 EXPORT_SYMBOL(__bitmap_weight);
 
-void bitmap_set(unsigned long *map, unsigned int start, int len)
+void __bitmap_set(unsigned long *map, unsigned int start, int len)
 {
 	unsigned long *p = map + BIT_WORD(start);
 	const unsigned int size = start + len;
@@ -270,9 +270,9 @@ void bitmap_set(unsigned long *map, unsigned int start, int len)
 		*p |= mask_to_set;
 	}
 }
-EXPORT_SYMBOL(bitmap_set);
+EXPORT_SYMBOL(__bitmap_set);
 
-void bitmap_clear(unsigned long *map, unsigned int start, int len)
+void __bitmap_clear(unsigned long *map, unsigned int start, int len)
 {
 	unsigned long *p = map + BIT_WORD(start);
 	const unsigned int size = start + len;
@@ -291,7 +291,7 @@ void bitmap_clear(unsigned long *map, unsigned int start, int len)
 		*p &= ~mask_to_clear;
 	}
 }
-EXPORT_SYMBOL(bitmap_clear);
+EXPORT_SYMBOL(__bitmap_clear);
 
 /**
  * bitmap_find_next_zero_area_off - find a contiguous aligned zero area
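A double-underscore rename like this usually means the unprefixed names become inline wrappers (presumably in include/linux/bitmap.h, a hunk not shown here). A sketch of that pattern, with illustrative logic only:

    /*
     * Hypothetical inline wrapper motivating the rename: trivial cases
     * are handled inline, the general case falls back to __bitmap_set().
     */
    static inline void bitmap_set(unsigned long *map, unsigned int start,
                                  unsigned int nbits)
    {
            if (__builtin_constant_p(nbits) && nbits == 1)
                    __set_bit(start, map);           /* single-bit fast path */
            else
                    __bitmap_set(map, start, nbits); /* general case */
    }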
diff --git a/lib/bsearch.c b/lib/bsearch.c
index e33c179089db..18b445b010c3 100644
--- a/lib/bsearch.c
+++ b/lib/bsearch.c
@@ -33,19 +33,21 @@
 void *bsearch(const void *key, const void *base, size_t num, size_t size,
 	      int (*cmp)(const void *key, const void *elt))
 {
-	size_t start = 0, end = num;
+	const char *pivot;
 	int result;
 
-	while (start < end) {
-		size_t mid = start + (end - start) / 2;
+	while (num > 0) {
+		pivot = base + (num >> 1) * size;
+		result = cmp(key, pivot);
 
-		result = cmp(key, base + mid * size);
-		if (result < 0)
-			end = mid;
-		else if (result > 0)
-			start = mid + 1;
-		else
-			return (void *)base + mid * size;
+		if (result == 0)
+			return (void *)pivot;
+
+		if (result > 0) {
+			base = pivot + size;
+			num--;
+		}
+		num >>= 1;
 	}
 
 	return NULL;
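For reference, a minimal caller of bsearch() might look like the sketch below; cmp_int and primes are illustrative names, not part of the patch:

    #include <linux/bsearch.h>
    #include <linux/kernel.h>

    static const int primes[] = { 2, 3, 5, 7, 11, 13 };	/* must be sorted */

    /* Comparator contract: negative if key sorts before elt, positive after */
    static int cmp_int(const void *key, const void *elt)
    {
            int k = *(const int *)key, e = *(const int *)elt;

            return (k > e) - (k < e);
    }

    static bool in_table(int needle)
    {
            return bsearch(&needle, primes, ARRAY_SIZE(primes),
                           sizeof(primes[0]), cmp_int) != NULL;
    }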
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 3c6432df7e63..4c0888c4a68d 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -23,14 +23,14 @@
  *	the values[M, M+1, ..., N] into the ints array in get_options.
  */
 
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
 {
 	int x, inc_counter, upper_range;
 
 	(*str)++;
 	upper_range = simple_strtol((*str), NULL, 0);
 	inc_counter = upper_range - *pint;
-	for (x = *pint; x < upper_range; x++)
+	for (x = *pint; n && x < upper_range; x++, n--)
 		*pint++ = x;
 	return inc_counter;
 }
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
 			break;
 		if (res == 3) {
 			int range_nums;
-			range_nums = get_range((char **)&str, ints + i);
+			range_nums = get_range((char **)&str, ints + i, nints - i);
 			if (range_nums < 0)
 				break;
 			/*
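The new bound matters because get_range() expands "M-N" notation directly into the caller's array. A usage sketch (array size and input string are illustrative):

    /*
     * get_options() stores the number of parsed values in ints[0] and the
     * values themselves from ints[1] onward. With the fix above, a range
     * such as "0-99999" can no longer write past the end of a small array.
     */
    int ints[5];	/* one count slot plus up to four values */

    get_options("1-3,5", ARRAY_SIZE(ints), ints);
    /* ints[0] == 4, ints[1..4] == { 1, 2, 3, 5 } */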
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 81dedaab36cc..4731a0895760 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 }
 EXPORT_SYMBOL(cpumask_any_but);
 
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+	int next;
+
+again:
+	next = cpumask_next(n, mask);
+
+	if (wrap && n < start && next >= start) {
+		return nr_cpumask_bits;
+
+	} else if (next >= nr_cpumask_bits) {
+		wrap = true;
+		n = -1;
+		goto again;
+	}
+
+	return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**
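As the kernel-doc says, cpumask_next_wrap() backs a for_each_cpu_wrap() iterator. A usage sketch, assuming that macro (it is not part of this hunk):

    /*
     * Visit every online CPU exactly once, starting at the current CPU
     * and wrapping around the mask, e.g. to spread work fairly instead
     * of always beginning the scan at CPU 0.
     */
    int cpu;

    for_each_cpu_wrap(cpu, cpu_online_mask, raw_smp_processor_id())
            pr_info("considering cpu %d\n", cpu);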
diff --git a/lib/crc4.c b/lib/crc4.c
new file mode 100644
index 000000000000..cf6db46661be
--- /dev/null
+++ b/lib/crc4.c
@@ -0,0 +1,46 @@
+/*
+ * crc4.c - simple crc-4 calculations.
+ *
+ * This source code is licensed under the GNU General Public License, Version
+ * 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc4.h>
+#include <linux/module.h>
+
+static const uint8_t crc4_tab[] = {
+	0x0, 0x7, 0xe, 0x9, 0xb, 0xc, 0x5, 0x2,
+	0x1, 0x6, 0xf, 0x8, 0xa, 0xd, 0x4, 0x3,
+};
+
+/**
+ * crc4 - calculate the 4-bit crc of a value.
+ * @c: starting crc4
+ * @x: value to checksum
+ * @bits: number of bits in @x to checksum
+ *
+ * Returns the crc4 value of @x, using polynomial 0b10111.
+ *
+ * The @x value is treated as left-aligned, and bits above @bits are ignored
+ * in the crc calculations.
+ */
+uint8_t crc4(uint8_t c, uint64_t x, int bits)
+{
+	int i;
+
+	/* mask off anything above the requested number of bits */
+	x &= (1ull << bits) - 1;
+
+	/* Align to 4-bits */
+	bits = (bits + 3) & ~0x3;
+
+	/* Calculate crc4 over four-bit nibbles, starting at the MSbit */
+	for (i = bits - 4; i >= 0; i -= 4)
+		c = crc4_tab[c ^ ((x >> i) & 0xf)];
+
+	return c;
+}
+EXPORT_SYMBOL_GPL(crc4);
+
+MODULE_DESCRIPTION("CRC4 calculations");
+MODULE_LICENSE("GPL");
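A usage sketch for the new helper (the values are illustrative):

    #include <linux/crc4.h>

    /*
     * Checksum the low 12 bits of a message word, starting from an
     * initial CRC of 0. The polynomial is x^4 + x^2 + x + 1 (0b10111).
     */
    uint8_t crc = crc4(0, 0xabc, 12);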
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index de26c8b68f34..acc4190e2731 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
+#include <linux/pfn.h>
 
 static void *dma_noop_alloc(struct device *dev, size_t size,
 			    dma_addr_t *dma_handle, gfp_t gfp,
@@ -16,7 +17,8 @@ static void *dma_noop_alloc(struct device *dev, size_t size,
 
 	ret = (void *)__get_free_pages(gfp, get_order(size));
 	if (ret)
-		*dma_handle = virt_to_phys(ret);
+		*dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
+
 	return ret;
 }
 
@@ -32,7 +34,7 @@ static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
 				      enum dma_data_direction dir,
 				      unsigned long attrs)
 {
-	return page_to_phys(page) + offset;
+	return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset);
 }
 
 static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
@@ -43,34 +45,23 @@ static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	struct scatterlist *sg;
 
 	for_each_sg(sgl, sg, nents, i) {
+		dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
 		void *va;
 
 		BUG_ON(!sg_page(sg));
 		va = sg_virt(sg);
-		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
+		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset;
 		sg_dma_len(sg) = sg->length;
 	}
 
 	return nents;
 }
 
-static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static int dma_noop_supported(struct device *dev, u64 mask)
-{
-	return 1;
-}
-
 const struct dma_map_ops dma_noop_ops = {
 	.alloc = dma_noop_alloc,
 	.free = dma_noop_free,
 	.map_page = dma_noop_map_page,
 	.map_sg = dma_noop_map_sg,
-	.mapping_error = dma_noop_mapping_error,
-	.dma_supported = dma_noop_supported,
 };
 
 EXPORT_SYMBOL(dma_noop_ops);
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
index dcd4df1f7174..5c4f11329721 100644
--- a/lib/dma-virt.c
+++ b/lib/dma-virt.c
@@ -51,22 +51,10 @@ static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
-static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return false;
-}
-
-static int dma_virt_supported(struct device *dev, u64 mask)
-{
-	return true;
-}
-
 const struct dma_map_ops dma_virt_ops = {
 	.alloc = dma_virt_alloc,
 	.free = dma_virt_free,
 	.map_page = dma_virt_map_page,
 	.map_sg = dma_virt_map_sg,
-	.mapping_error = dma_virt_mapping_error,
-	.dma_supported = dma_virt_supported,
 };
 EXPORT_SYMBOL(dma_virt_ops);
diff --git a/lib/errseq.c b/lib/errseq.c
new file mode 100644
index 000000000000..841fa24e6e00
--- /dev/null
+++ b/lib/errseq.c
@@ -0,0 +1,208 @@
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/atomic.h>
+#include <linux/errseq.h>
+
+/*
+ * An errseq_t is a way of recording errors in one place, and allowing any
+ * number of "subscribers" to tell whether it has changed since a previous
+ * point where it was sampled.
+ *
+ * It's implemented as an unsigned 32-bit value. The low order bits are
+ * designated to hold an error code (between 0 and -MAX_ERRNO). The upper bits
+ * are used as a counter. This is done with atomics instead of locking so that
+ * these functions can be called from any context.
+ *
+ * The general idea is for consumers to sample an errseq_t value. That value
+ * can later be used to tell whether any new errors have occurred since that
+ * sampling was done.
+ *
+ * Note that there is a risk of collisions if new errors are being recorded
+ * frequently, since we have so few bits to use as a counter.
+ *
+ * To mitigate this, one bit is used as a flag to tell whether the value has
+ * been sampled since a new value was recorded. That allows us to avoid bumping
+ * the counter if no one has sampled it since the last time an error was
+ * recorded.
+ *
+ * A new errseq_t should always be zeroed out. An errseq_t value of all zeroes
+ * is the special (but common) case where there has never been an error. An all
+ * zero value thus serves as the "epoch" if one wishes to know whether there
+ * has ever been an error set since it was first initialized.
+ */
+
+/* The low bits are designated for error code (max of MAX_ERRNO) */
+#define ERRSEQ_SHIFT		ilog2(MAX_ERRNO + 1)
+
+/* This bit is used as a flag to indicate whether the value has been seen */
+#define ERRSEQ_SEEN		(1 << ERRSEQ_SHIFT)
+
+/* The lowest bit of the counter */
+#define ERRSEQ_CTR_INC		(1 << (ERRSEQ_SHIFT + 1))
+
+/**
+ * __errseq_set - set an errseq_t for later reporting
+ * @eseq: errseq_t field that should be set
+ * @err: error to set
+ *
+ * This function sets the error in *eseq, and increments the sequence counter
+ * if the last sequence was sampled at some point in the past.
+ *
+ * Any error set will always overwrite an existing error.
+ *
+ * Most callers will want to use the errseq_set inline wrapper to efficiently
+ * handle the common case where err is 0.
+ *
+ * We do return an errseq_t here, primarily for debugging purposes. The return
+ * value should not be used as a previously sampled value in later calls as it
+ * will not have the SEEN flag set.
+ */
+errseq_t __errseq_set(errseq_t *eseq, int err)
+{
+	errseq_t cur, old;
+
+	/* MAX_ERRNO must be able to serve as a mask */
+	BUILD_BUG_ON_NOT_POWER_OF_2(MAX_ERRNO + 1);
+
+	/*
+	 * Ensure the error code actually fits where we want it to go. If it
+	 * doesn't then just throw a warning and don't record anything. We
+	 * also don't accept zero here as that would effectively clear a
+	 * previous error.
+	 */
+	old = READ_ONCE(*eseq);
+
+	if (WARN(unlikely(err == 0 || (unsigned int)-err > MAX_ERRNO),
+				"err = %d\n", err))
+		return old;
+
+	for (;;) {
+		errseq_t new;
+
+		/* Clear out error bits and set new error */
+		new = (old & ~(MAX_ERRNO|ERRSEQ_SEEN)) | -err;
+
+		/* Only increment if someone has looked at it */
+		if (old & ERRSEQ_SEEN)
+			new += ERRSEQ_CTR_INC;
+
+		/* If there would be no change, then call it done */
+		if (new == old) {
+			cur = new;
+			break;
+		}
+
+		/* Try to swap the new value into place */
+		cur = cmpxchg(eseq, old, new);
+
+		/*
+		 * Call it success if we did the swap or someone else beat us
+		 * to it for the same value.
+		 */
+		if (likely(cur == old || cur == new))
+			break;
+
+		/* Raced with an update, try again */
+		old = cur;
+	}
+	return cur;
+}
+EXPORT_SYMBOL(__errseq_set);
+
+/**
+ * errseq_sample - grab current errseq_t value
+ * @eseq: pointer to errseq_t to be sampled
+ *
+ * This function allows callers to sample an errseq_t value, marking it as
+ * "seen" if required.
+ */
+errseq_t errseq_sample(errseq_t *eseq)
+{
+	errseq_t old = READ_ONCE(*eseq);
+	errseq_t new = old;
+
+	/*
+	 * For the common case of no errors ever having been set, we can skip
+	 * marking the SEEN bit. Once an error has been set, the value will
+	 * never go back to zero.
+	 */
+	if (old != 0) {
+		new |= ERRSEQ_SEEN;
+		if (old != new)
+			cmpxchg(eseq, old, new);
+	}
+	return new;
+}
+EXPORT_SYMBOL(errseq_sample);
+
+/**
+ * errseq_check - has an error occurred since a particular sample point?
+ * @eseq: pointer to errseq_t value to be checked
+ * @since: previously-sampled errseq_t from which to check
+ *
+ * Grab the value that eseq points to, and see if it has changed "since"
+ * the given value was sampled. The "since" value is not advanced, so there
+ * is no need to mark the value as seen.
+ *
+ * Returns the latest error set in the errseq_t or 0 if it hasn't changed.
+ */
+int errseq_check(errseq_t *eseq, errseq_t since)
+{
+	errseq_t cur = READ_ONCE(*eseq);
+
+	if (likely(cur == since))
+		return 0;
+	return -(cur & MAX_ERRNO);
+}
+EXPORT_SYMBOL(errseq_check);
+
+/**
+ * errseq_check_and_advance - check an errseq_t and advance to current value
+ * @eseq: pointer to value being checked and reported
+ * @since: pointer to previously-sampled errseq_t to check against and advance
+ *
+ * Grab the eseq value, and see whether it matches the value that "since"
+ * points to. If it does, then just return 0.
+ *
+ * If it doesn't, then the value has changed. Set the "seen" flag, and try to
+ * swap it into place as the new eseq value. Then, set that value as the new
+ * "since" value, and return whatever the error portion is set to.
+ *
+ * Note that no locking is provided here for concurrent updates to the "since"
+ * value. The caller must provide that if necessary. Because of this, callers
+ * may want to do a lockless errseq_check before taking the lock and calling
+ * this.
+ */
+int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
+{
+	int err = 0;
+	errseq_t old, new;
+
+	/*
+	 * Most callers will want to use the inline wrapper to check this,
+	 * so that the common case of no error is handled without needing
+	 * to take the lock that protects the "since" value.
+	 */
+	old = READ_ONCE(*eseq);
+	if (old != *since) {
+		/*
+		 * Set the flag and try to swap it into place if it has
+		 * changed.
+		 *
+		 * We don't care about the outcome of the swap here. If the
+		 * swap doesn't occur, then it has either been updated by a
+		 * writer who is altering the value in some way (updating
+		 * counter or resetting the error), or another reader who is
+		 * just setting the "seen" flag. Either outcome is OK, and we
+		 * can advance "since" and return an error based on what we
+		 * have.
+		 */
+		new = old | ERRSEQ_SEEN;
+		if (new != old)
+			cmpxchg(eseq, old, new);
+		*since = new;
+		err = -(new & MAX_ERRNO);
+	}
+	return err;
+}
+EXPORT_SYMBOL(errseq_check_and_advance);
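A usage sketch of the API (the errseq_set() inline wrapper is the one referenced in the comments above; the names here are illustrative):

    errseq_t wb_err = 0;	/* shared; all-zero means "no error yet" */

    /* Writer side: record an error. */
    errseq_set(&wb_err, -EIO);

    /* Reader side: remember a point in time... */
    errseq_t since = errseq_sample(&wb_err);

    /* ...and later ask whether anything has gone wrong since then. */
    if (errseq_check(&wb_err, since))
            pr_err("an error was recorded since the last sample\n");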
diff --git a/lib/extable.c b/lib/extable.c
index 62968daa66a9..f54996fdd0b8 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -9,6 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/bsearch.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sort.h>
@@ -51,7 +52,7 @@ static void swap_ex(void *a, void *b, int size)
  * This is used both for the kernel exception table and for
  * the exception tables of modules that get loaded.
  */
-static int cmp_ex(const void *a, const void *b)
+static int cmp_ex_sort(const void *a, const void *b)
 {
 	const struct exception_table_entry *x = a, *y = b;
 
@@ -67,7 +68,7 @@ void sort_extable(struct exception_table_entry *start,
 		  struct exception_table_entry *finish)
 {
 	sort(start, finish - start, sizeof(struct exception_table_entry),
-	     cmp_ex, swap_ex);
+	     cmp_ex_sort, swap_ex);
 }
 
 #ifdef CONFIG_MODULES
@@ -93,6 +94,20 @@ void trim_init_extable(struct module *m)
 #endif /* !ARCH_HAS_SORT_EXTABLE */
 
 #ifndef ARCH_HAS_SEARCH_EXTABLE
+
+static int cmp_ex_search(const void *key, const void *elt)
+{
+	const struct exception_table_entry *_elt = elt;
+	unsigned long _key = *(unsigned long *)key;
+
+	/* avoid overflow */
+	if (_key > ex_to_insn(_elt))
+		return 1;
+	if (_key < ex_to_insn(_elt))
+		return -1;
+	return 0;
+}
+
 /*
  * Search one exception table for an entry corresponding to the
  * given instruction address, and return the address of the entry,
@@ -101,25 +116,11 @@ void trim_init_extable(struct module *m)
  * already sorted.
  */
 const struct exception_table_entry *
-search_extable(const struct exception_table_entry *first,
-	       const struct exception_table_entry *last,
+search_extable(const struct exception_table_entry *base,
+	       const size_t num,
 	       unsigned long value)
 {
-	while (first <= last) {
-		const struct exception_table_entry *mid;
-
-		mid = ((last - first) >> 1) + first;
-		/*
-		 * careful, the distance between value and insn
-		 * can be larger than MAX_LONG:
-		 */
-		if (ex_to_insn(mid) < value)
-			first = mid + 1;
-		else if (ex_to_insn(mid) > value)
-			last = mid - 1;
-		else
-			return mid;
-	}
-	return NULL;
+	return bsearch(&value, base, num,
+		       sizeof(struct exception_table_entry), cmp_ex_search);
}
 #endif
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 4ff157159a0d..7d315fdb9f13 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -107,6 +107,15 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
 
 bool should_fail(struct fault_attr *attr, ssize_t size)
 {
+	if (in_task()) {
+		unsigned int fail_nth = READ_ONCE(current->fail_nth);
+
+		if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))
+			goto fail;
+
+		return false;
+	}
+
 	/* No need to check any other properties if the probability is 0 */
 	if (attr->probability == 0)
 		return false;
@@ -134,6 +143,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
 	if (!fail_stacktrace(attr))
 		return false;
 
+fail:
 	fail_dump(attr);
 
 	if (atomic_read(&attr->times) != -1)
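The per-task fail_nth counter gives tests a deterministic "fail the Nth injectable call" mode, presumably driven from a procfs knob added elsewhere in this series. A hypothetical userspace sketch (the procfs path and the "<tid>" placeholder are assumptions, not taken from this hunk):

    #include <fcntl.h>
    #include <unistd.h>

    /*
     * Ask the kernel to fail the 5th fault-injection site this task hits,
     * then run the operation under test. Path is illustrative only.
     */
    int fd = open("/proc/self/task/<tid>/fail-nth", O_WRONLY);

    if (fd >= 0) {
            write(fd, "5", 1);
            /* ... issue the syscall being tested ... */
            close(fd);
    }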
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index a71cf1bdd4c9..2cc1f94e03a1 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -207,7 +207,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 		if (val < (nr_cpu_ids * PROP_BATCH))
 			val = percpu_counter_sum(&pl->events);
 
-		__percpu_counter_add(&pl->events,
+		percpu_counter_add_batch(&pl->events,
 			-val + (val >> (period-pl->period)), PROP_BATCH);
 	} else
 		percpu_counter_set(&pl->events, 0);
@@ -219,7 +219,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
 {
 	fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
 
@@ -267,6 +267,6 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
 		return;
 	} else
 		fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index 245900b98c8e..df495fe81421 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -1,27 +1,38 @@
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/interval_tree.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <asm/timex.h>
 
-#define NODES 100
-#define PERF_LOOPS 100000
-#define SEARCHES 100
-#define SEARCH_LOOPS 10000
+#define __param(type, name, init, msg)		\
+	static type name = init;		\
+	module_param(name, type, 0444);		\
+	MODULE_PARM_DESC(name, msg);
+
+__param(int, nnodes, 100, "Number of nodes in the interval tree");
+__param(int, perf_loops, 100000, "Number of iterations modifying the tree");
+
+__param(int, nsearches, 100, "Number of searches to the interval tree");
+__param(int, search_loops, 10000, "Number of iterations searching the tree");
+__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
+
+__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
 
 static struct rb_root root = RB_ROOT;
-static struct interval_tree_node nodes[NODES];
-static u32 queries[SEARCHES];
+static struct interval_tree_node *nodes = NULL;
+static u32 *queries = NULL;
 
 static struct rnd_state rnd;
 
 static inline unsigned long
-search(unsigned long query, struct rb_root *root)
+search(struct rb_root *root, unsigned long start, unsigned long last)
 {
 	struct interval_tree_node *node;
 	unsigned long results = 0;
 
-	for (node = interval_tree_iter_first(root, query, query); node;
-	     node = interval_tree_iter_next(node, query, query))
+	for (node = interval_tree_iter_first(root, start, last); node;
+	     node = interval_tree_iter_next(node, start, last))
 		results++;
 	return results;
 }
@@ -29,19 +40,22 @@ search(unsigned long query, struct rb_root *root)
 static void init(void)
 {
 	int i;
-	for (i = 0; i < NODES; i++) {
-		u32 a = prandom_u32_state(&rnd);
-		u32 b = prandom_u32_state(&rnd);
-		if (a <= b) {
-			nodes[i].start = a;
-			nodes[i].last = b;
-		} else {
-			nodes[i].start = b;
-			nodes[i].last = a;
-		}
+
+	for (i = 0; i < nnodes; i++) {
+		u32 b = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+		u32 a = (prandom_u32_state(&rnd) >> 4) % b;
+
+		nodes[i].start = a;
+		nodes[i].last = b;
 	}
-	for (i = 0; i < SEARCHES; i++)
-		queries[i] = prandom_u32_state(&rnd);
+
+	/*
+	 * Limit the search scope to what the user defined.
+	 * Otherwise we are merely measuring empty walks,
+	 * which is pointless.
+	 */
+	for (i = 0; i < nsearches; i++)
+		queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
 }
 
 static int interval_tree_test_init(void)
@@ -50,6 +64,16 @@ static int interval_tree_test_init(void)
 	unsigned long results;
 	cycles_t time1, time2, time;
 
+	nodes = kmalloc(nnodes * sizeof(struct interval_tree_node), GFP_KERNEL);
+	if (!nodes)
+		return -ENOMEM;
+
+	queries = kmalloc(nsearches * sizeof(int), GFP_KERNEL);
+	if (!queries) {
+		kfree(nodes);
+		return -ENOMEM;
+	}
+
 	printk(KERN_ALERT "interval tree insert/remove");
 
 	prandom_seed_state(&rnd, 3141592653589793238ULL);
@@ -57,39 +81,46 @@ static int interval_tree_test_init(void)
 
 	time1 = get_cycles();
 
-	for (i = 0; i < PERF_LOOPS; i++) {
-		for (j = 0; j < NODES; j++)
+	for (i = 0; i < perf_loops; i++) {
+		for (j = 0; j < nnodes; j++)
 			interval_tree_insert(nodes + j, &root);
-		for (j = 0; j < NODES; j++)
+		for (j = 0; j < nnodes; j++)
 			interval_tree_remove(nodes + j, &root);
 	}
 
 	time2 = get_cycles();
 	time = time2 - time1;
 
-	time = div_u64(time, PERF_LOOPS);
+	time = div_u64(time, perf_loops);
 	printk(" -> %llu cycles\n", (unsigned long long)time);
 
 	printk(KERN_ALERT "interval tree search");
| 73 | printk(KERN_ALERT "interval tree search"); | 97 | printk(KERN_ALERT "interval tree search"); |
| 74 | 98 | ||
| 75 | for (j = 0; j < NODES; j++) | 99 | for (j = 0; j < nnodes; j++) |
| 76 | interval_tree_insert(nodes + j, &root); | 100 | interval_tree_insert(nodes + j, &root); |
| 77 | 101 | ||
| 78 | time1 = get_cycles(); | 102 | time1 = get_cycles(); |
| 79 | 103 | ||
| 80 | results = 0; | 104 | results = 0; |
| 81 | for (i = 0; i < SEARCH_LOOPS; i++) | 105 | for (i = 0; i < search_loops; i++) |
| 82 | for (j = 0; j < SEARCHES; j++) | 106 | for (j = 0; j < nsearches; j++) { |
| 83 | results += search(queries[j], &root); | 107 | unsigned long start = search_all ? 0 : queries[j]; |
| 108 | unsigned long last = search_all ? max_endpoint : queries[j]; | ||
| 109 | |||
| 110 | results += search(&root, start, last); | ||
| 111 | } | ||
| 84 | 112 | ||
| 85 | time2 = get_cycles(); | 113 | time2 = get_cycles(); |
| 86 | time = time2 - time1; | 114 | time = time2 - time1; |
| 87 | 115 | ||
| 88 | time = div_u64(time, SEARCH_LOOPS); | 116 | time = div_u64(time, search_loops); |
| 89 | results = div_u64(results, SEARCH_LOOPS); | 117 | results = div_u64(results, search_loops); |
| 90 | printk(" -> %llu cycles (%lu results)\n", | 118 | printk(" -> %llu cycles (%lu results)\n", |
| 91 | (unsigned long long)time, results); | 119 | (unsigned long long)time, results); |
| 92 | 120 | ||
| 121 | kfree(queries); | ||
| 122 | kfree(nodes); | ||
| 123 | |||
| 93 | return -EAGAIN; /* failing init directly unloads the module */ | 124 | return -EAGAIN; /* failing init directly unloads the module */ |
| 94 | } | 125 | } |
| 95 | 126 | ||
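The interval tree test drops its hard-coded NODES/PERF_LOOPS/SEARCHES constants in favor of module parameters, so runs can be sized at load time without recompiling. The __param() helper is only a bundling macro; one expansion comes out roughly as:

    #include <linux/moduleparam.h>

    static int nnodes = 100;
    module_param(nnodes, int, 0444);        /* visible read-only in sysfs */
    MODULE_PARM_DESC(nnodes, "Number of nodes in the interval tree");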
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index f835964c9485..52c8dd6d8e82 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c | |||
| @@ -130,6 +130,24 @@ | |||
| 130 | } \ | 130 | } \ |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | static int copyout(void __user *to, const void *from, size_t n) | ||
| 134 | { | ||
| 135 | if (access_ok(VERIFY_WRITE, to, n)) { | ||
| 136 | kasan_check_read(from, n); | ||
| 137 | n = raw_copy_to_user(to, from, n); | ||
| 138 | } | ||
| 139 | return n; | ||
| 140 | } | ||
| 141 | |||
| 142 | static int copyin(void *to, const void __user *from, size_t n) | ||
| 143 | { | ||
| 144 | if (access_ok(VERIFY_READ, from, n)) { | ||
| 145 | kasan_check_write(to, n); | ||
| 146 | n = raw_copy_from_user(to, from, n); | ||
| 147 | } | ||
| 148 | return n; | ||
| 149 | } | ||
| 150 | |||
| 133 | static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, | 151 | static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, |
| 134 | struct iov_iter *i) | 152 | struct iov_iter *i) |
| 135 | { | 153 | { |
| @@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b | |||
| 144 | if (unlikely(!bytes)) | 162 | if (unlikely(!bytes)) |
| 145 | return 0; | 163 | return 0; |
| 146 | 164 | ||
| 165 | might_fault(); | ||
| 147 | wanted = bytes; | 166 | wanted = bytes; |
| 148 | iov = i->iov; | 167 | iov = i->iov; |
| 149 | skip = i->iov_offset; | 168 | skip = i->iov_offset; |
| @@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b | |||
| 155 | from = kaddr + offset; | 174 | from = kaddr + offset; |
| 156 | 175 | ||
| 157 | /* first chunk, usually the only one */ | 176 | /* first chunk, usually the only one */ |
| 158 | left = __copy_to_user_inatomic(buf, from, copy); | 177 | left = copyout(buf, from, copy); |
| 159 | copy -= left; | 178 | copy -= left; |
| 160 | skip += copy; | 179 | skip += copy; |
| 161 | from += copy; | 180 | from += copy; |
| @@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b | |||
| 165 | iov++; | 184 | iov++; |
| 166 | buf = iov->iov_base; | 185 | buf = iov->iov_base; |
| 167 | copy = min(bytes, iov->iov_len); | 186 | copy = min(bytes, iov->iov_len); |
| 168 | left = __copy_to_user_inatomic(buf, from, copy); | 187 | left = copyout(buf, from, copy); |
| 169 | copy -= left; | 188 | copy -= left; |
| 170 | skip = copy; | 189 | skip = copy; |
| 171 | from += copy; | 190 | from += copy; |
| @@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b | |||
| 184 | 203 | ||
| 185 | kaddr = kmap(page); | 204 | kaddr = kmap(page); |
| 186 | from = kaddr + offset; | 205 | from = kaddr + offset; |
| 187 | left = __copy_to_user(buf, from, copy); | 206 | left = copyout(buf, from, copy); |
| 188 | copy -= left; | 207 | copy -= left; |
| 189 | skip += copy; | 208 | skip += copy; |
| 190 | from += copy; | 209 | from += copy; |
| @@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b | |||
| 193 | iov++; | 212 | iov++; |
| 194 | buf = iov->iov_base; | 213 | buf = iov->iov_base; |
| 195 | copy = min(bytes, iov->iov_len); | 214 | copy = min(bytes, iov->iov_len); |
| 196 | left = __copy_to_user(buf, from, copy); | 215 | left = copyout(buf, from, copy); |
| 197 | copy -= left; | 216 | copy -= left; |
| 198 | skip = copy; | 217 | skip = copy; |
| 199 | from += copy; | 218 | from += copy; |
| @@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t | |||
| 227 | if (unlikely(!bytes)) | 246 | if (unlikely(!bytes)) |
| 228 | return 0; | 247 | return 0; |
| 229 | 248 | ||
| 249 | might_fault(); | ||
| 230 | wanted = bytes; | 250 | wanted = bytes; |
| 231 | iov = i->iov; | 251 | iov = i->iov; |
| 232 | skip = i->iov_offset; | 252 | skip = i->iov_offset; |
| @@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t | |||
| 238 | to = kaddr + offset; | 258 | to = kaddr + offset; |
| 239 | 259 | ||
| 240 | /* first chunk, usually the only one */ | 260 | /* first chunk, usually the only one */ |
| 241 | left = __copy_from_user_inatomic(to, buf, copy); | 261 | left = copyin(to, buf, copy); |
| 242 | copy -= left; | 262 | copy -= left; |
| 243 | skip += copy; | 263 | skip += copy; |
| 244 | to += copy; | 264 | to += copy; |
| @@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t | |||
| 248 | iov++; | 268 | iov++; |
| 249 | buf = iov->iov_base; | 269 | buf = iov->iov_base; |
| 250 | copy = min(bytes, iov->iov_len); | 270 | copy = min(bytes, iov->iov_len); |
| 251 | left = __copy_from_user_inatomic(to, buf, copy); | 271 | left = copyin(to, buf, copy); |
| 252 | copy -= left; | 272 | copy -= left; |
| 253 | skip = copy; | 273 | skip = copy; |
| 254 | to += copy; | 274 | to += copy; |
| @@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t | |||
| 267 | 287 | ||
| 268 | kaddr = kmap(page); | 288 | kaddr = kmap(page); |
| 269 | to = kaddr + offset; | 289 | to = kaddr + offset; |
| 270 | left = __copy_from_user(to, buf, copy); | 290 | left = copyin(to, buf, copy); |
| 271 | copy -= left; | 291 | copy -= left; |
| 272 | skip += copy; | 292 | skip += copy; |
| 273 | to += copy; | 293 | to += copy; |
| @@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t | |||
| 276 | iov++; | 296 | iov++; |
| 277 | buf = iov->iov_base; | 297 | buf = iov->iov_base; |
| 278 | copy = min(bytes, iov->iov_len); | 298 | copy = min(bytes, iov->iov_len); |
| 279 | left = __copy_from_user(to, buf, copy); | 299 | left = copyin(to, buf, copy); |
| 280 | copy -= left; | 300 | copy -= left; |
| 281 | skip = copy; | 301 | skip = copy; |
| 282 | to += copy; | 302 | to += copy; |
| @@ -535,14 +555,15 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes, | |||
| 535 | return bytes; | 555 | return bytes; |
| 536 | } | 556 | } |
| 537 | 557 | ||
| 538 | size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) | 558 | size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) |
| 539 | { | 559 | { |
| 540 | const char *from = addr; | 560 | const char *from = addr; |
| 541 | if (unlikely(i->type & ITER_PIPE)) | 561 | if (unlikely(i->type & ITER_PIPE)) |
| 542 | return copy_pipe_to_iter(addr, bytes, i); | 562 | return copy_pipe_to_iter(addr, bytes, i); |
| 563 | if (iter_is_iovec(i)) | ||
| 564 | might_fault(); | ||
| 543 | iterate_and_advance(i, bytes, v, | 565 | iterate_and_advance(i, bytes, v, |
| 544 | __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len, | 566 | copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len), |
| 545 | v.iov_len), | ||
| 546 | memcpy_to_page(v.bv_page, v.bv_offset, | 567 | memcpy_to_page(v.bv_page, v.bv_offset, |
| 547 | (from += v.bv_len) - v.bv_len, v.bv_len), | 568 | (from += v.bv_len) - v.bv_len, v.bv_len), |
| 548 | memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len) | 569 | memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len) |
| @@ -550,18 +571,19 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) | |||
| 550 | 571 | ||
| 551 | return bytes; | 572 | return bytes; |
| 552 | } | 573 | } |
| 553 | EXPORT_SYMBOL(copy_to_iter); | 574 | EXPORT_SYMBOL(_copy_to_iter); |
| 554 | 575 | ||
| 555 | size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) | 576 | size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) |
| 556 | { | 577 | { |
| 557 | char *to = addr; | 578 | char *to = addr; |
| 558 | if (unlikely(i->type & ITER_PIPE)) { | 579 | if (unlikely(i->type & ITER_PIPE)) { |
| 559 | WARN_ON(1); | 580 | WARN_ON(1); |
| 560 | return 0; | 581 | return 0; |
| 561 | } | 582 | } |
| 583 | if (iter_is_iovec(i)) | ||
| 584 | might_fault(); | ||
| 562 | iterate_and_advance(i, bytes, v, | 585 | iterate_and_advance(i, bytes, v, |
| 563 | __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base, | 586 | copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), |
| 564 | v.iov_len), | ||
| 565 | memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, | 587 | memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, |
| 566 | v.bv_offset, v.bv_len), | 588 | v.bv_offset, v.bv_len), |
| 567 | memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) | 589 | memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) |
| @@ -569,9 +591,9 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) | |||
| 569 | 591 | ||
| 570 | return bytes; | 592 | return bytes; |
| 571 | } | 593 | } |
| 572 | EXPORT_SYMBOL(copy_from_iter); | 594 | EXPORT_SYMBOL(_copy_from_iter); |
| 573 | 595 | ||
| 574 | bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) | 596 | bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) |
| 575 | { | 597 | { |
| 576 | char *to = addr; | 598 | char *to = addr; |
| 577 | if (unlikely(i->type & ITER_PIPE)) { | 599 | if (unlikely(i->type & ITER_PIPE)) { |
| @@ -581,8 +603,10 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) | |||
| 581 | if (unlikely(i->count < bytes)) | 603 | if (unlikely(i->count < bytes)) |
| 582 | return false; | 604 | return false; |
| 583 | 605 | ||
| 606 | if (iter_is_iovec(i)) | ||
| 607 | might_fault(); | ||
| 584 | iterate_all_kinds(i, bytes, v, ({ | 608 | iterate_all_kinds(i, bytes, v, ({ |
| 585 | if (__copy_from_user((to += v.iov_len) - v.iov_len, | 609 | if (copyin((to += v.iov_len) - v.iov_len, |
| 586 | v.iov_base, v.iov_len)) | 610 | v.iov_base, v.iov_len)) |
| 587 | return false; | 611 | return false; |
| 588 | 0;}), | 612 | 0;}), |
| @@ -594,9 +618,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) | |||
| 594 | iov_iter_advance(i, bytes); | 618 | iov_iter_advance(i, bytes); |
| 595 | return true; | 619 | return true; |
| 596 | } | 620 | } |
| 597 | EXPORT_SYMBOL(copy_from_iter_full); | 621 | EXPORT_SYMBOL(_copy_from_iter_full); |
| 598 | 622 | ||
| 599 | size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) | 623 | size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) |
| 600 | { | 624 | { |
| 601 | char *to = addr; | 625 | char *to = addr; |
| 602 | if (unlikely(i->type & ITER_PIPE)) { | 626 | if (unlikely(i->type & ITER_PIPE)) { |
| @@ -613,9 +637,31 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) | |||
| 613 | 637 | ||
| 614 | return bytes; | 638 | return bytes; |
| 615 | } | 639 | } |
| 616 | EXPORT_SYMBOL(copy_from_iter_nocache); | 640 | EXPORT_SYMBOL(_copy_from_iter_nocache); |
| 617 | 641 | ||
| 618 | bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) | 642 | #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE |
| 643 | size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) | ||
| 644 | { | ||
| 645 | char *to = addr; | ||
| 646 | if (unlikely(i->type & ITER_PIPE)) { | ||
| 647 | WARN_ON(1); | ||
| 648 | return 0; | ||
| 649 | } | ||
| 650 | iterate_and_advance(i, bytes, v, | ||
| 651 | __copy_from_user_flushcache((to += v.iov_len) - v.iov_len, | ||
| 652 | v.iov_base, v.iov_len), | ||
| 653 | memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page, | ||
| 654 | v.bv_offset, v.bv_len), | ||
| 655 | memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base, | ||
| 656 | v.iov_len) | ||
| 657 | ) | ||
| 658 | |||
| 659 | return bytes; | ||
| 660 | } | ||
| 661 | EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); | ||
| 662 | #endif | ||
| 663 | |||
| 664 | bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) | ||
| 619 | { | 665 | { |
| 620 | char *to = addr; | 666 | char *to = addr; |
| 621 | if (unlikely(i->type & ITER_PIPE)) { | 667 | if (unlikely(i->type & ITER_PIPE)) { |
| @@ -637,11 +683,22 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) | |||
| 637 | iov_iter_advance(i, bytes); | 683 | iov_iter_advance(i, bytes); |
| 638 | return true; | 684 | return true; |
| 639 | } | 685 | } |
| 640 | EXPORT_SYMBOL(copy_from_iter_full_nocache); | 686 | EXPORT_SYMBOL(_copy_from_iter_full_nocache); |
| 687 | |||
| 688 | static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) | ||
| 689 | { | ||
| 690 | size_t v = n + offset; | ||
| 691 | if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page)))) | ||
| 692 | return true; | ||
| 693 | WARN_ON(1); | ||
| 694 | return false; | ||
| 695 | } | ||
| 641 | 696 | ||
| 642 | size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, | 697 | size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, |
| 643 | struct iov_iter *i) | 698 | struct iov_iter *i) |
| 644 | { | 699 | { |
| 700 | if (unlikely(!page_copy_sane(page, offset, bytes))) | ||
| 701 | return 0; | ||
| 645 | if (i->type & (ITER_BVEC|ITER_KVEC)) { | 702 | if (i->type & (ITER_BVEC|ITER_KVEC)) { |
| 646 | void *kaddr = kmap_atomic(page); | 703 | void *kaddr = kmap_atomic(page); |
| 647 | size_t wanted = copy_to_iter(kaddr + offset, bytes, i); | 704 | size_t wanted = copy_to_iter(kaddr + offset, bytes, i); |
| @@ -657,13 +714,15 @@ EXPORT_SYMBOL(copy_page_to_iter); | |||
| 657 | size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, | 714 | size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, |
| 658 | struct iov_iter *i) | 715 | struct iov_iter *i) |
| 659 | { | 716 | { |
| 717 | if (unlikely(!page_copy_sane(page, offset, bytes))) | ||
| 718 | return 0; | ||
| 660 | if (unlikely(i->type & ITER_PIPE)) { | 719 | if (unlikely(i->type & ITER_PIPE)) { |
| 661 | WARN_ON(1); | 720 | WARN_ON(1); |
| 662 | return 0; | 721 | return 0; |
| 663 | } | 722 | } |
| 664 | if (i->type & (ITER_BVEC|ITER_KVEC)) { | 723 | if (i->type & (ITER_BVEC|ITER_KVEC)) { |
| 665 | void *kaddr = kmap_atomic(page); | 724 | void *kaddr = kmap_atomic(page); |
| 666 | size_t wanted = copy_from_iter(kaddr + offset, bytes, i); | 725 | size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); |
| 667 | kunmap_atomic(kaddr); | 726 | kunmap_atomic(kaddr); |
| 668 | return wanted; | 727 | return wanted; |
| 669 | } else | 728 | } else |
| @@ -700,7 +759,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i) | |||
| 700 | if (unlikely(i->type & ITER_PIPE)) | 759 | if (unlikely(i->type & ITER_PIPE)) |
| 701 | return pipe_zero(bytes, i); | 760 | return pipe_zero(bytes, i); |
| 702 | iterate_and_advance(i, bytes, v, | 761 | iterate_and_advance(i, bytes, v, |
| 703 | __clear_user(v.iov_base, v.iov_len), | 762 | clear_user(v.iov_base, v.iov_len), |
| 704 | memzero_page(v.bv_page, v.bv_offset, v.bv_len), | 763 | memzero_page(v.bv_page, v.bv_offset, v.bv_len), |
| 705 | memset(v.iov_base, 0, v.iov_len) | 764 | memset(v.iov_base, 0, v.iov_len) |
| 706 | ) | 765 | ) |
| @@ -713,14 +772,17 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, | |||
| 713 | struct iov_iter *i, unsigned long offset, size_t bytes) | 772 | struct iov_iter *i, unsigned long offset, size_t bytes) |
| 714 | { | 773 | { |
| 715 | char *kaddr = kmap_atomic(page), *p = kaddr + offset; | 774 | char *kaddr = kmap_atomic(page), *p = kaddr + offset; |
| 775 | if (unlikely(!page_copy_sane(page, offset, bytes))) { | ||
| 776 | kunmap_atomic(kaddr); | ||
| 777 | return 0; | ||
| 778 | } | ||
| 716 | if (unlikely(i->type & ITER_PIPE)) { | 779 | if (unlikely(i->type & ITER_PIPE)) { |
| 717 | kunmap_atomic(kaddr); | 780 | kunmap_atomic(kaddr); |
| 718 | WARN_ON(1); | 781 | WARN_ON(1); |
| 719 | return 0; | 782 | return 0; |
| 720 | } | 783 | } |
| 721 | iterate_all_kinds(i, bytes, v, | 784 | iterate_all_kinds(i, bytes, v, |
| 722 | __copy_from_user_inatomic((p += v.iov_len) - v.iov_len, | 785 | copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), |
| 723 | v.iov_base, v.iov_len), | ||
| 724 | memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page, | 786 | memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page, |
| 725 | v.bv_offset, v.bv_len), | 787 | v.bv_offset, v.bv_len), |
| 726 | memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) | 788 | memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) |
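Two themes run through the iov_iter.c hunk: user-space copies are funneled through the new copyout()/copyin() helpers, which pair access_ok() with the KASAN annotations and the raw copy (returning the number of bytes left uncopied), and the exported entry points gain a leading underscore, presumably so header-level inline wrappers can perform object-size checks before calling the out-of-line worker. A hedged sketch of such a wrapper -- the real one would live in include/linux/uio.h, and check_copy_size() is an assumption here:

    static __always_inline
    size_t copy_to_iter_checked(const void *addr, size_t bytes, struct iov_iter *i)
    {
            if (!check_copy_size(addr, bytes, true))  /* hardened-usercopy gate */
                    return 0;
            return _copy_to_iter(addr, bytes, i);
    }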
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 9a2b811966eb..719c155fce20 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | #include <linux/socket.h> | 23 | #include <linux/socket.h> |
| 24 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
| 25 | #include <linux/netlink.h> | 25 | #include <linux/netlink.h> |
| 26 | #include <linux/uuid.h> | ||
| 27 | #include <linux/ctype.h> | ||
| 26 | #include <net/sock.h> | 28 | #include <net/sock.h> |
| 27 | #include <net/net_namespace.h> | 29 | #include <net/net_namespace.h> |
| 28 | 30 | ||
| @@ -52,19 +54,13 @@ static const char *kobject_actions[] = { | |||
| 52 | [KOBJ_OFFLINE] = "offline", | 54 | [KOBJ_OFFLINE] = "offline", |
| 53 | }; | 55 | }; |
| 54 | 56 | ||
| 55 | /** | 57 | static int kobject_action_type(const char *buf, size_t count, |
| 56 | * kobject_action_type - translate action string to numeric type | 58 | enum kobject_action *type, |
| 57 | * | 59 | const char **args) |
| 58 | * @buf: buffer containing the action string, newline is ignored | ||
| 59 | * @count: length of buffer | ||
| 60 | * @type: pointer to the location to store the action type | ||
| 61 | * | ||
| 62 | * Returns 0 if the action string was recognized. | ||
| 63 | */ | ||
| 64 | int kobject_action_type(const char *buf, size_t count, | ||
| 65 | enum kobject_action *type) | ||
| 66 | { | 60 | { |
| 67 | enum kobject_action action; | 61 | enum kobject_action action; |
| 62 | size_t count_first; | ||
| 63 | const char *args_start; | ||
| 68 | int ret = -EINVAL; | 64 | int ret = -EINVAL; |
| 69 | 65 | ||
| 70 | if (count && (buf[count-1] == '\n' || buf[count-1] == '\0')) | 66 | if (count && (buf[count-1] == '\n' || buf[count-1] == '\0')) |
| @@ -73,11 +69,20 @@ int kobject_action_type(const char *buf, size_t count, | |||
| 73 | if (!count) | 69 | if (!count) |
| 74 | goto out; | 70 | goto out; |
| 75 | 71 | ||
| 72 | args_start = strnchr(buf, count, ' '); | ||
| 73 | if (args_start) { | ||
| 74 | count_first = args_start - buf; | ||
| 75 | args_start = args_start + 1; | ||
| 76 | } else | ||
| 77 | count_first = count; | ||
| 78 | |||
| 76 | for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) { | 79 | for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) { |
| 77 | if (strncmp(kobject_actions[action], buf, count) != 0) | 80 | if (strncmp(kobject_actions[action], buf, count_first) != 0) |
| 78 | continue; | 81 | continue; |
| 79 | if (kobject_actions[action][count] != '\0') | 82 | if (kobject_actions[action][count_first] != '\0') |
| 80 | continue; | 83 | continue; |
| 84 | if (args) | ||
| 85 | *args = args_start; | ||
| 81 | *type = action; | 86 | *type = action; |
| 82 | ret = 0; | 87 | ret = 0; |
| 83 | break; | 88 | break; |
| @@ -86,6 +91,142 @@ out: | |||
| 86 | return ret; | 91 | return ret; |
| 87 | } | 92 | } |
| 88 | 93 | ||
| 94 | static const char *action_arg_word_end(const char *buf, const char *buf_end, | ||
| 95 | char delim) | ||
| 96 | { | ||
| 97 | const char *next = buf; | ||
| 98 | |||
| 99 | while (next <= buf_end && *next != delim) | ||
| 100 | if (!isalnum(*next++)) | ||
| 101 | return NULL; | ||
| 102 | |||
| 103 | if (next == buf) | ||
| 104 | return NULL; | ||
| 105 | |||
| 106 | return next; | ||
| 107 | } | ||
| 108 | |||
| 109 | static int kobject_action_args(const char *buf, size_t count, | ||
| 110 | struct kobj_uevent_env **ret_env) | ||
| 111 | { | ||
| 112 | struct kobj_uevent_env *env = NULL; | ||
| 113 | const char *next, *buf_end, *key; | ||
| 114 | int key_len; | ||
| 115 | int r = -EINVAL; | ||
| 116 | |||
| 117 | if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0')) | ||
| 118 | count--; | ||
| 119 | |||
| 120 | if (!count) | ||
| 121 | return -EINVAL; | ||
| 122 | |||
| 123 | env = kzalloc(sizeof(*env), GFP_KERNEL); | ||
| 124 | if (!env) | ||
| 125 | return -ENOMEM; | ||
| 126 | |||
| 127 | /* first arg is UUID */ | ||
| 128 | if (count < UUID_STRING_LEN || !uuid_is_valid(buf) || | ||
| 129 | add_uevent_var(env, "SYNTH_UUID=%.*s", UUID_STRING_LEN, buf)) | ||
| 130 | goto out; | ||
| 131 | |||
| 132 | /* | ||
| 133 | * the rest are custom environment variables in KEY=VALUE | ||
| 134 | * format with ' ' delimiter between each KEY=VALUE pair | ||
| 135 | */ | ||
| 136 | next = buf + UUID_STRING_LEN; | ||
| 137 | buf_end = buf + count - 1; | ||
| 138 | |||
| 139 | while (next <= buf_end) { | ||
| 140 | if (*next != ' ') | ||
| 141 | goto out; | ||
| 142 | |||
| 143 | /* skip the ' ', key must follow */ | ||
| 144 | key = ++next; | ||
| 145 | if (key > buf_end) | ||
| 146 | goto out; | ||
| 147 | |||
| 148 | buf = next; | ||
| 149 | next = action_arg_word_end(buf, buf_end, '='); | ||
| 150 | if (!next || next > buf_end || *next != '=') | ||
| 151 | goto out; | ||
| 152 | key_len = next - buf; | ||
| 153 | |||
| 154 | /* skip the '=', value must follow */ | ||
| 155 | if (++next > buf_end) | ||
| 156 | goto out; | ||
| 157 | |||
| 158 | buf = next; | ||
| 159 | next = action_arg_word_end(buf, buf_end, ' '); | ||
| 160 | if (!next) | ||
| 161 | goto out; | ||
| 162 | |||
| 163 | if (add_uevent_var(env, "SYNTH_ARG_%.*s=%.*s", | ||
| 164 | key_len, key, (int) (next - buf), buf)) | ||
| 165 | goto out; | ||
| 166 | } | ||
| 167 | |||
| 168 | r = 0; | ||
| 169 | out: | ||
| 170 | if (r) | ||
| 171 | kfree(env); | ||
| 172 | else | ||
| 173 | *ret_env = env; | ||
| 174 | return r; | ||
| 175 | } | ||
| 176 | |||
| 177 | /** | ||
| 178 | * kobject_synth_uevent - send synthetic uevent with arguments | ||
| 179 | * | ||
| 180 | * @kobj: struct kobject for which synthetic uevent is to be generated | ||
| 181 | * @buf: buffer containing action type and action args, newline is ignored | ||
| 182 | * @count: length of buffer | ||
| 183 | * | ||
| 184 | * Returns 0 if kobject_synth_uevent() completes successfully or the | ||
| 185 | * corresponding error when it fails. | ||
| 186 | */ | ||
| 187 | int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count) | ||
| 188 | { | ||
| 189 | char *no_uuid_envp[] = { "SYNTH_UUID=0", NULL }; | ||
| 190 | enum kobject_action action; | ||
| 191 | const char *action_args; | ||
| 192 | struct kobj_uevent_env *env; | ||
| 193 | const char *msg = NULL, *devpath; | ||
| 194 | int r; | ||
| 195 | |||
| 196 | r = kobject_action_type(buf, count, &action, &action_args); | ||
| 197 | if (r) { | ||
| 198 | msg = "unknown uevent action string\n"; | ||
| 199 | goto out; | ||
| 200 | } | ||
| 201 | |||
| 202 | if (!action_args) { | ||
| 203 | r = kobject_uevent_env(kobj, action, no_uuid_envp); | ||
| 204 | goto out; | ||
| 205 | } | ||
| 206 | |||
| 207 | r = kobject_action_args(action_args, | ||
| 208 | count - (action_args - buf), &env); | ||
| 209 | if (r == -EINVAL) { | ||
| 210 | msg = "incorrect uevent action arguments\n"; | ||
| 211 | goto out; | ||
| 212 | } | ||
| 213 | |||
| 214 | if (r) | ||
| 215 | goto out; | ||
| 216 | |||
| 217 | r = kobject_uevent_env(kobj, action, env->envp); | ||
| 218 | kfree(env); | ||
| 219 | out: | ||
| 220 | if (r) { | ||
| 221 | devpath = kobject_get_path(kobj, GFP_KERNEL); | ||
| 222 | printk(KERN_WARNING "synth uevent: %s: %s", | ||
| 223 | devpath ?: "unknown device", | ||
| 224 | msg ?: "failed to send uevent"); | ||
| 225 | kfree(devpath); | ||
| 226 | } | ||
| 227 | return r; | ||
| 228 | } | ||
| 229 | |||
| 89 | #ifdef CONFIG_NET | 230 | #ifdef CONFIG_NET |
| 90 | static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) | 231 | static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) |
| 91 | { | 232 | { |
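The kobject_uevent.c additions let user space synthesize a uevent carrying a tracking UUID plus arbitrary variables, e.g. writing "add 11111111-2222-3333-4444-555555555555 KEY=value" (values hypothetical) to a sysfs uevent file; listeners then see SYNTH_UUID= and SYNTH_ARG_KEY= in the event environment. A sketch of how a sysfs store method would hand the write to the new helper:

    #include <linux/device.h>
    #include <linux/kobject.h>

    static ssize_t uevent_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
    {
            int rc = kobject_synth_uevent(&dev->kobj, buf, count);

            return rc ? rc : count;
    }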
diff --git a/lib/kstrtox.c b/lib/kstrtox.c index bf85e05ce858..720144075c1e 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c | |||
| @@ -51,13 +51,15 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long | |||
| 51 | 51 | ||
| 52 | res = 0; | 52 | res = 0; |
| 53 | rv = 0; | 53 | rv = 0; |
| 54 | while (*s) { | 54 | while (1) { |
| 55 | unsigned int c = *s; | ||
| 56 | unsigned int lc = c | 0x20; /* don't tolower() this line */ | ||
| 55 | unsigned int val; | 57 | unsigned int val; |
| 56 | 58 | ||
| 57 | if ('0' <= *s && *s <= '9') | 59 | if ('0' <= c && c <= '9') |
| 58 | val = *s - '0'; | 60 | val = c - '0'; |
| 59 | else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') | 61 | else if ('a' <= lc && lc <= 'f') |
| 60 | val = _tolower(*s) - 'a' + 10; | 62 | val = lc - 'a' + 10; |
| 61 | else | 63 | else |
| 62 | break; | 64 | break; |
| 63 | 65 | ||
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c index 74a54b7f2562..9f79547d1b97 100644 --- a/lib/libcrc32c.c +++ b/lib/libcrc32c.c | |||
| @@ -43,7 +43,7 @@ static struct crypto_shash *tfm; | |||
| 43 | u32 crc32c(u32 crc, const void *address, unsigned int length) | 43 | u32 crc32c(u32 crc, const void *address, unsigned int length) |
| 44 | { | 44 | { |
| 45 | SHASH_DESC_ON_STACK(shash, tfm); | 45 | SHASH_DESC_ON_STACK(shash, tfm); |
| 46 | u32 *ctx = (u32 *)shash_desc_ctx(shash); | 46 | u32 ret, *ctx = (u32 *)shash_desc_ctx(shash); |
| 47 | int err; | 47 | int err; |
| 48 | 48 | ||
| 49 | shash->tfm = tfm; | 49 | shash->tfm = tfm; |
| @@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length) | |||
| 53 | err = crypto_shash_update(shash, address, length); | 53 | err = crypto_shash_update(shash, address, length); |
| 54 | BUG_ON(err); | 54 | BUG_ON(err); |
| 55 | 55 | ||
| 56 | return *ctx; | 56 | ret = *ctx; |
| 57 | barrier_data(ctx); | ||
| 58 | return ret; | ||
| 57 | } | 59 | } |
| 58 | 60 | ||
| 59 | EXPORT_SYMBOL(crc32c); | 61 | EXPORT_SYMBOL(crc32c); |
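The crc32c() change copies the digest out of the on-stack shash context and only then returns, with barrier_data() in between. The rationale is not visible in this hunk, but barrier_data() is the usual way to make the compiler treat an on-stack buffer as observed, so accesses to the descriptor cannot be elided or reordered past the read. The pattern in isolation:

    static u32 read_state(u32 *ctx)
    {
            u32 ret = *ctx;

            barrier_data(ctx);      /* compiler must consider ctx read here */
            return ret;
    }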
diff --git a/lib/locking-selftest-rtmutex.h b/lib/locking-selftest-rtmutex.h new file mode 100644 index 000000000000..e3cb83989d16 --- /dev/null +++ b/lib/locking-selftest-rtmutex.h | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | #undef LOCK | ||
| 2 | #define LOCK RTL | ||
| 3 | |||
| 4 | #undef UNLOCK | ||
| 5 | #define UNLOCK RTU | ||
| 6 | |||
| 7 | #undef RLOCK | ||
| 8 | #undef WLOCK | ||
| 9 | |||
| 10 | #undef INIT | ||
| 11 | #define INIT RTI | ||
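The new header plugs into the existing locking-selftest-*.h template scheme: each header rebinds LOCK/UNLOCK/INIT, and the shared testcase bodies in locking-selftest.c are re-expanded once per lock flavor. Roughly (simplified; GENERATE_TESTCASE wraps the body E() in a function):

    #define E()                     \
            LOCK(A);                \
            LOCK(A);        /* second acquisition of the same lock: AA deadlock */

    #include "locking-selftest-rtmutex.h"   /* LOCK -> RTL -> rt_mutex_lock() */
    GENERATE_TESTCASE(AA_rtmutex)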
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index f3a217ea0388..6f2b135dc5e8 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
| 22 | #include <linux/debug_locks.h> | 22 | #include <linux/debug_locks.h> |
| 23 | #include <linux/irqflags.h> | 23 | #include <linux/irqflags.h> |
| 24 | #include <linux/rtmutex.h> | ||
| 24 | 25 | ||
| 25 | /* | 26 | /* |
| 26 | * Change this to 1 if you want to see the failure printouts: | 27 | * Change this to 1 if you want to see the failure printouts: |
| @@ -46,6 +47,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose); | |||
| 46 | #define LOCKTYPE_MUTEX 0x4 | 47 | #define LOCKTYPE_MUTEX 0x4 |
| 47 | #define LOCKTYPE_RWSEM 0x8 | 48 | #define LOCKTYPE_RWSEM 0x8 |
| 48 | #define LOCKTYPE_WW 0x10 | 49 | #define LOCKTYPE_WW 0x10 |
| 50 | #define LOCKTYPE_RTMUTEX 0x20 | ||
| 49 | 51 | ||
| 50 | static struct ww_acquire_ctx t, t2; | 52 | static struct ww_acquire_ctx t, t2; |
| 51 | static struct ww_mutex o, o2, o3; | 53 | static struct ww_mutex o, o2, o3; |
| @@ -74,6 +76,15 @@ static DECLARE_RWSEM(rwsem_B); | |||
| 74 | static DECLARE_RWSEM(rwsem_C); | 76 | static DECLARE_RWSEM(rwsem_C); |
| 75 | static DECLARE_RWSEM(rwsem_D); | 77 | static DECLARE_RWSEM(rwsem_D); |
| 76 | 78 | ||
| 79 | #ifdef CONFIG_RT_MUTEXES | ||
| 80 | |||
| 81 | static DEFINE_RT_MUTEX(rtmutex_A); | ||
| 82 | static DEFINE_RT_MUTEX(rtmutex_B); | ||
| 83 | static DEFINE_RT_MUTEX(rtmutex_C); | ||
| 84 | static DEFINE_RT_MUTEX(rtmutex_D); | ||
| 85 | |||
| 86 | #endif | ||
| 87 | |||
| 77 | /* | 88 | /* |
| 78 | * Locks that we initialize dynamically as well so that | 89 | * Locks that we initialize dynamically as well so that |
| 79 | * e.g. X1 and X2 becomes two instances of the same class, | 90 | * e.g. X1 and X2 becomes two instances of the same class, |
| @@ -108,6 +119,17 @@ static DECLARE_RWSEM(rwsem_Y2); | |||
| 108 | static DECLARE_RWSEM(rwsem_Z1); | 119 | static DECLARE_RWSEM(rwsem_Z1); |
| 109 | static DECLARE_RWSEM(rwsem_Z2); | 120 | static DECLARE_RWSEM(rwsem_Z2); |
| 110 | 121 | ||
| 122 | #ifdef CONFIG_RT_MUTEXES | ||
| 123 | |||
| 124 | static DEFINE_RT_MUTEX(rtmutex_X1); | ||
| 125 | static DEFINE_RT_MUTEX(rtmutex_X2); | ||
| 126 | static DEFINE_RT_MUTEX(rtmutex_Y1); | ||
| 127 | static DEFINE_RT_MUTEX(rtmutex_Y2); | ||
| 128 | static DEFINE_RT_MUTEX(rtmutex_Z1); | ||
| 129 | static DEFINE_RT_MUTEX(rtmutex_Z2); | ||
| 130 | |||
| 131 | #endif | ||
| 132 | |||
| 111 | /* | 133 | /* |
| 112 | * non-inlined runtime initializers, to let separate locks share | 134 | * non-inlined runtime initializers, to let separate locks share |
| 113 | * the same lock-class: | 135 | * the same lock-class: |
| @@ -129,6 +151,17 @@ INIT_CLASS_FUNC(Z) | |||
| 129 | 151 | ||
| 130 | static void init_shared_classes(void) | 152 | static void init_shared_classes(void) |
| 131 | { | 153 | { |
| 154 | #ifdef CONFIG_RT_MUTEXES | ||
| 155 | static struct lock_class_key rt_X, rt_Y, rt_Z; | ||
| 156 | |||
| 157 | __rt_mutex_init(&rtmutex_X1, __func__, &rt_X); | ||
| 158 | __rt_mutex_init(&rtmutex_X2, __func__, &rt_X); | ||
| 159 | __rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y); | ||
| 160 | __rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y); | ||
| 161 | __rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z); | ||
| 162 | __rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z); | ||
| 163 | #endif | ||
| 164 | |||
| 132 | init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1); | 165 | init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1); |
| 133 | init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2); | 166 | init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2); |
| 134 | 167 | ||
| @@ -193,6 +226,10 @@ static void init_shared_classes(void) | |||
| 193 | #define MU(x) mutex_unlock(&mutex_##x) | 226 | #define MU(x) mutex_unlock(&mutex_##x) |
| 194 | #define MI(x) mutex_init(&mutex_##x) | 227 | #define MI(x) mutex_init(&mutex_##x) |
| 195 | 228 | ||
| 229 | #define RTL(x) rt_mutex_lock(&rtmutex_##x) | ||
| 230 | #define RTU(x) rt_mutex_unlock(&rtmutex_##x) | ||
| 231 | #define RTI(x) rt_mutex_init(&rtmutex_##x) | ||
| 232 | |||
| 196 | #define WSL(x) down_write(&rwsem_##x) | 233 | #define WSL(x) down_write(&rwsem_##x) |
| 197 | #define WSU(x) up_write(&rwsem_##x) | 234 | #define WSU(x) up_write(&rwsem_##x) |
| 198 | 235 | ||
| @@ -264,6 +301,11 @@ GENERATE_TESTCASE(AA_wsem) | |||
| 264 | #include "locking-selftest-rsem.h" | 301 | #include "locking-selftest-rsem.h" |
| 265 | GENERATE_TESTCASE(AA_rsem) | 302 | GENERATE_TESTCASE(AA_rsem) |
| 266 | 303 | ||
| 304 | #ifdef CONFIG_RT_MUTEXES | ||
| 305 | #include "locking-selftest-rtmutex.h" | ||
| 306 | GENERATE_TESTCASE(AA_rtmutex); | ||
| 307 | #endif | ||
| 308 | |||
| 267 | #undef E | 309 | #undef E |
| 268 | 310 | ||
| 269 | /* | 311 | /* |
| @@ -345,6 +387,11 @@ GENERATE_TESTCASE(ABBA_wsem) | |||
| 345 | #include "locking-selftest-rsem.h" | 387 | #include "locking-selftest-rsem.h" |
| 346 | GENERATE_TESTCASE(ABBA_rsem) | 388 | GENERATE_TESTCASE(ABBA_rsem) |
| 347 | 389 | ||
| 390 | #ifdef CONFIG_RT_MUTEXES | ||
| 391 | #include "locking-selftest-rtmutex.h" | ||
| 392 | GENERATE_TESTCASE(ABBA_rtmutex); | ||
| 393 | #endif | ||
| 394 | |||
| 348 | #undef E | 395 | #undef E |
| 349 | 396 | ||
| 350 | /* | 397 | /* |
| @@ -373,6 +420,11 @@ GENERATE_TESTCASE(ABBCCA_wsem) | |||
| 373 | #include "locking-selftest-rsem.h" | 420 | #include "locking-selftest-rsem.h" |
| 374 | GENERATE_TESTCASE(ABBCCA_rsem) | 421 | GENERATE_TESTCASE(ABBCCA_rsem) |
| 375 | 422 | ||
| 423 | #ifdef CONFIG_RT_MUTEXES | ||
| 424 | #include "locking-selftest-rtmutex.h" | ||
| 425 | GENERATE_TESTCASE(ABBCCA_rtmutex); | ||
| 426 | #endif | ||
| 427 | |||
| 376 | #undef E | 428 | #undef E |
| 377 | 429 | ||
| 378 | /* | 430 | /* |
| @@ -401,6 +453,11 @@ GENERATE_TESTCASE(ABCABC_wsem) | |||
| 401 | #include "locking-selftest-rsem.h" | 453 | #include "locking-selftest-rsem.h" |
| 402 | GENERATE_TESTCASE(ABCABC_rsem) | 454 | GENERATE_TESTCASE(ABCABC_rsem) |
| 403 | 455 | ||
| 456 | #ifdef CONFIG_RT_MUTEXES | ||
| 457 | #include "locking-selftest-rtmutex.h" | ||
| 458 | GENERATE_TESTCASE(ABCABC_rtmutex); | ||
| 459 | #endif | ||
| 460 | |||
| 404 | #undef E | 461 | #undef E |
| 405 | 462 | ||
| 406 | /* | 463 | /* |
| @@ -430,6 +487,11 @@ GENERATE_TESTCASE(ABBCCDDA_wsem) | |||
| 430 | #include "locking-selftest-rsem.h" | 487 | #include "locking-selftest-rsem.h" |
| 431 | GENERATE_TESTCASE(ABBCCDDA_rsem) | 488 | GENERATE_TESTCASE(ABBCCDDA_rsem) |
| 432 | 489 | ||
| 490 | #ifdef CONFIG_RT_MUTEXES | ||
| 491 | #include "locking-selftest-rtmutex.h" | ||
| 492 | GENERATE_TESTCASE(ABBCCDDA_rtmutex); | ||
| 493 | #endif | ||
| 494 | |||
| 433 | #undef E | 495 | #undef E |
| 434 | 496 | ||
| 435 | /* | 497 | /* |
| @@ -458,6 +520,11 @@ GENERATE_TESTCASE(ABCDBDDA_wsem) | |||
| 458 | #include "locking-selftest-rsem.h" | 520 | #include "locking-selftest-rsem.h" |
| 459 | GENERATE_TESTCASE(ABCDBDDA_rsem) | 521 | GENERATE_TESTCASE(ABCDBDDA_rsem) |
| 460 | 522 | ||
| 523 | #ifdef CONFIG_RT_MUTEXES | ||
| 524 | #include "locking-selftest-rtmutex.h" | ||
| 525 | GENERATE_TESTCASE(ABCDBDDA_rtmutex); | ||
| 526 | #endif | ||
| 527 | |||
| 461 | #undef E | 528 | #undef E |
| 462 | 529 | ||
| 463 | /* | 530 | /* |
| @@ -486,6 +553,11 @@ GENERATE_TESTCASE(ABCDBCDA_wsem) | |||
| 486 | #include "locking-selftest-rsem.h" | 553 | #include "locking-selftest-rsem.h" |
| 487 | GENERATE_TESTCASE(ABCDBCDA_rsem) | 554 | GENERATE_TESTCASE(ABCDBCDA_rsem) |
| 488 | 555 | ||
| 556 | #ifdef CONFIG_RT_MUTEXES | ||
| 557 | #include "locking-selftest-rtmutex.h" | ||
| 558 | GENERATE_TESTCASE(ABCDBCDA_rtmutex); | ||
| 559 | #endif | ||
| 560 | |||
| 489 | #undef E | 561 | #undef E |
| 490 | 562 | ||
| 491 | /* | 563 | /* |
| @@ -513,33 +585,10 @@ GENERATE_TESTCASE(double_unlock_wsem) | |||
| 513 | #include "locking-selftest-rsem.h" | 585 | #include "locking-selftest-rsem.h" |
| 514 | GENERATE_TESTCASE(double_unlock_rsem) | 586 | GENERATE_TESTCASE(double_unlock_rsem) |
| 515 | 587 | ||
| 516 | #undef E | 588 | #ifdef CONFIG_RT_MUTEXES |
| 517 | 589 | #include "locking-selftest-rtmutex.h" | |
| 518 | /* | 590 | GENERATE_TESTCASE(double_unlock_rtmutex); |
| 519 | * Bad unlock ordering: | 591 | #endif |
| 520 | */ | ||
| 521 | #define E() \ | ||
| 522 | \ | ||
| 523 | LOCK(A); \ | ||
| 524 | LOCK(B); \ | ||
| 525 | UNLOCK(A); /* fail */ \ | ||
| 526 | UNLOCK(B); | ||
| 527 | |||
| 528 | /* | ||
| 529 | * 6 testcases: | ||
| 530 | */ | ||
| 531 | #include "locking-selftest-spin.h" | ||
| 532 | GENERATE_TESTCASE(bad_unlock_order_spin) | ||
| 533 | #include "locking-selftest-wlock.h" | ||
| 534 | GENERATE_TESTCASE(bad_unlock_order_wlock) | ||
| 535 | #include "locking-selftest-rlock.h" | ||
| 536 | GENERATE_TESTCASE(bad_unlock_order_rlock) | ||
| 537 | #include "locking-selftest-mutex.h" | ||
| 538 | GENERATE_TESTCASE(bad_unlock_order_mutex) | ||
| 539 | #include "locking-selftest-wsem.h" | ||
| 540 | GENERATE_TESTCASE(bad_unlock_order_wsem) | ||
| 541 | #include "locking-selftest-rsem.h" | ||
| 542 | GENERATE_TESTCASE(bad_unlock_order_rsem) | ||
| 543 | 592 | ||
| 544 | #undef E | 593 | #undef E |
| 545 | 594 | ||
| @@ -567,6 +616,11 @@ GENERATE_TESTCASE(init_held_wsem) | |||
| 567 | #include "locking-selftest-rsem.h" | 616 | #include "locking-selftest-rsem.h" |
| 568 | GENERATE_TESTCASE(init_held_rsem) | 617 | GENERATE_TESTCASE(init_held_rsem) |
| 569 | 618 | ||
| 619 | #ifdef CONFIG_RT_MUTEXES | ||
| 620 | #include "locking-selftest-rtmutex.h" | ||
| 621 | GENERATE_TESTCASE(init_held_rtmutex); | ||
| 622 | #endif | ||
| 623 | |||
| 570 | #undef E | 624 | #undef E |
| 571 | 625 | ||
| 572 | /* | 626 | /* |
| @@ -916,6 +970,9 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) | |||
| 916 | # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map) | 970 | # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map) |
| 917 | # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map) | 971 | # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map) |
| 918 | # define I_WW(x) lockdep_reset_lock(&x.dep_map) | 972 | # define I_WW(x) lockdep_reset_lock(&x.dep_map) |
| 973 | #ifdef CONFIG_RT_MUTEXES | ||
| 974 | # define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map) | ||
| 975 | #endif | ||
| 919 | #else | 976 | #else |
| 920 | # define I_SPINLOCK(x) | 977 | # define I_SPINLOCK(x) |
| 921 | # define I_RWLOCK(x) | 978 | # define I_RWLOCK(x) |
| @@ -924,12 +981,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) | |||
| 924 | # define I_WW(x) | 981 | # define I_WW(x) |
| 925 | #endif | 982 | #endif |
| 926 | 983 | ||
| 984 | #ifndef I_RTMUTEX | ||
| 985 | # define I_RTMUTEX(x) | ||
| 986 | #endif | ||
| 987 | |||
| 988 | #ifdef CONFIG_RT_MUTEXES | ||
| 989 | #define I2_RTMUTEX(x) rt_mutex_init(&rtmutex_##x) | ||
| 990 | #else | ||
| 991 | #define I2_RTMUTEX(x) | ||
| 992 | #endif | ||
| 993 | |||
| 927 | #define I1(x) \ | 994 | #define I1(x) \ |
| 928 | do { \ | 995 | do { \ |
| 929 | I_SPINLOCK(x); \ | 996 | I_SPINLOCK(x); \ |
| 930 | I_RWLOCK(x); \ | 997 | I_RWLOCK(x); \ |
| 931 | I_MUTEX(x); \ | 998 | I_MUTEX(x); \ |
| 932 | I_RWSEM(x); \ | 999 | I_RWSEM(x); \ |
| 1000 | I_RTMUTEX(x); \ | ||
| 933 | } while (0) | 1001 | } while (0) |
| 934 | 1002 | ||
| 935 | #define I2(x) \ | 1003 | #define I2(x) \ |
| @@ -938,6 +1006,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) | |||
| 938 | rwlock_init(&rwlock_##x); \ | 1006 | rwlock_init(&rwlock_##x); \ |
| 939 | mutex_init(&mutex_##x); \ | 1007 | mutex_init(&mutex_##x); \ |
| 940 | init_rwsem(&rwsem_##x); \ | 1008 | init_rwsem(&rwsem_##x); \ |
| 1009 | I2_RTMUTEX(x); \ | ||
| 941 | } while (0) | 1010 | } while (0) |
| 942 | 1011 | ||
| 943 | static void reset_locks(void) | 1012 | static void reset_locks(void) |
| @@ -1013,6 +1082,12 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) | |||
| 1013 | reset_locks(); | 1082 | reset_locks(); |
| 1014 | } | 1083 | } |
| 1015 | 1084 | ||
| 1085 | #ifdef CONFIG_RT_MUTEXES | ||
| 1086 | #define dotest_rt(fn, e, m) dotest((fn), (e), (m)) | ||
| 1087 | #else | ||
| 1088 | #define dotest_rt(fn, e, m) | ||
| 1089 | #endif | ||
| 1090 | |||
| 1016 | static inline void print_testname(const char *testname) | 1091 | static inline void print_testname(const char *testname) |
| 1017 | { | 1092 | { |
| 1018 | printk("%33s:", testname); | 1093 | printk("%33s:", testname); |
| @@ -1050,6 +1125,7 @@ static inline void print_testname(const char *testname) | |||
| 1050 | dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ | 1125 | dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ |
| 1051 | dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ | 1126 | dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ |
| 1052 | dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ | 1127 | dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ |
| 1128 | dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \ | ||
| 1053 | pr_cont("\n"); | 1129 | pr_cont("\n"); |
| 1054 | 1130 | ||
| 1055 | #define DO_TESTCASE_6_SUCCESS(desc, name) \ | 1131 | #define DO_TESTCASE_6_SUCCESS(desc, name) \ |
| @@ -1060,6 +1136,7 @@ static inline void print_testname(const char *testname) | |||
| 1060 | dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \ | 1136 | dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \ |
| 1061 | dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \ | 1137 | dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \ |
| 1062 | dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \ | 1138 | dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \ |
| 1139 | dotest_rt(name##_rtmutex, SUCCESS, LOCKTYPE_RTMUTEX); \ | ||
| 1063 | pr_cont("\n"); | 1140 | pr_cont("\n"); |
| 1064 | 1141 | ||
| 1065 | /* | 1142 | /* |
| @@ -1073,6 +1150,7 @@ static inline void print_testname(const char *testname) | |||
| 1073 | dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ | 1150 | dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ |
| 1074 | dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ | 1151 | dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ |
| 1075 | dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ | 1152 | dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ |
| 1153 | dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \ | ||
| 1076 | pr_cont("\n"); | 1154 | pr_cont("\n"); |
| 1077 | 1155 | ||
| 1078 | #define DO_TESTCASE_2I(desc, name, nr) \ | 1156 | #define DO_TESTCASE_2I(desc, name, nr) \ |
| @@ -1825,7 +1903,6 @@ void locking_selftest(void) | |||
| 1825 | DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA); | 1903 | DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA); |
| 1826 | DO_TESTCASE_6("double unlock", double_unlock); | 1904 | DO_TESTCASE_6("double unlock", double_unlock); |
| 1827 | DO_TESTCASE_6("initialize held", init_held); | 1905 | DO_TESTCASE_6("initialize held", init_held); |
| 1828 | DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order); | ||
| 1829 | 1906 | ||
| 1830 | printk(" --------------------------------------------------------------------------\n"); | 1907 | printk(" --------------------------------------------------------------------------\n"); |
| 1831 | print_testname("recursive read-lock"); | 1908 | print_testname("recursive read-lock"); |
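For reference, the primitives behind the new RTL()/RTU()/RTI() macros are the plain rt_mutex API; a minimal usage sketch:

    #include <linux/rtmutex.h>

    static DEFINE_RT_MUTEX(lock_A);

    static void demo(void)
    {
            rt_mutex_lock(&lock_A);
            /* critical section; blocked higher-priority waiters boost us (PI) */
            rt_mutex_unlock(&lock_A);
    }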
diff --git a/lib/nlattr.c b/lib/nlattr.c index a7e0b16078df..fb52435be42d 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
| @@ -352,7 +352,7 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) | |||
| 352 | { | 352 | { |
| 353 | struct nlattr *nla; | 353 | struct nlattr *nla; |
| 354 | 354 | ||
| 355 | nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen)); | 355 | nla = skb_put(skb, nla_total_size(attrlen)); |
| 356 | nla->nla_type = attrtype; | 356 | nla->nla_type = attrtype; |
| 357 | nla->nla_len = nla_attr_size(attrlen); | 357 | nla->nla_len = nla_attr_size(attrlen); |
| 358 | 358 | ||
| @@ -398,12 +398,7 @@ EXPORT_SYMBOL(__nla_reserve_64bit); | |||
| 398 | */ | 398 | */ |
| 399 | void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen) | 399 | void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen) |
| 400 | { | 400 | { |
| 401 | void *start; | 401 | return skb_put_zero(skb, NLA_ALIGN(attrlen)); |
| 402 | |||
| 403 | start = skb_put(skb, NLA_ALIGN(attrlen)); | ||
| 404 | memset(start, 0, NLA_ALIGN(attrlen)); | ||
| 405 | |||
| 406 | return start; | ||
| 407 | } | 402 | } |
| 408 | EXPORT_SYMBOL(__nla_reserve_nohdr); | 403 | EXPORT_SYMBOL(__nla_reserve_nohdr); |
| 409 | 404 | ||
| @@ -617,7 +612,7 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data) | |||
| 617 | if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) | 612 | if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) |
| 618 | return -EMSGSIZE; | 613 | return -EMSGSIZE; |
| 619 | 614 | ||
| 620 | memcpy(skb_put(skb, attrlen), data, attrlen); | 615 | skb_put_data(skb, data, attrlen); |
| 621 | return 0; | 616 | return 0; |
| 622 | } | 617 | } |
| 623 | EXPORT_SYMBOL(nla_append); | 618 | EXPORT_SYMBOL(nla_append); |
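The nlattr.c hunks replace open-coded skb_put() plus memcpy()/memset() pairs with the newer combined helpers. The equivalences, side by side (sketch):

    #include <linux/skbuff.h>

    static void fill(struct sk_buff *skb, const void *data, unsigned int len)
    {
            /* was: memcpy(skb_put(skb, len), data, len); */
            skb_put_data(skb, data, len);

            /* was: memset(skb_put(skb, len), 0, len); */
            skb_put_zero(skb, len);
    }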
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c index 4e8a30d1c22f..0bc0a3535a8a 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c | |||
| @@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, | |||
| 86 | 86 | ||
| 87 | bool nmi_cpu_backtrace(struct pt_regs *regs) | 87 | bool nmi_cpu_backtrace(struct pt_regs *regs) |
| 88 | { | 88 | { |
| 89 | static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED; | ||
| 89 | int cpu = smp_processor_id(); | 90 | int cpu = smp_processor_id(); |
| 90 | 91 | ||
| 91 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { | 92 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { |
| 93 | arch_spin_lock(&lock); | ||
| 92 | if (regs && cpu_in_idle(instruction_pointer(regs))) { | 94 | if (regs && cpu_in_idle(instruction_pointer(regs))) { |
| 93 | pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n", | 95 | pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n", |
| 94 | cpu, instruction_pointer(regs)); | 96 | cpu, instruction_pointer(regs)); |
| @@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs) | |||
| 99 | else | 101 | else |
| 100 | dump_stack(); | 102 | dump_stack(); |
| 101 | } | 103 | } |
| 104 | arch_spin_unlock(&lock); | ||
| 102 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); | 105 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); |
| 103 | return true; | 106 | return true; |
| 104 | } | 107 | } |
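The new arch_spinlock_t serializes per-CPU backtrace printing so the output of several CPUs does not interleave; a raw arch spinlock is the right tool because this path runs in NMI context, where the instrumented spinlock paths (lockdep, preemption accounting) must be avoided. The pattern:

    static arch_spinlock_t dump_lock = __ARCH_SPIN_LOCK_UNLOCKED;

    static void serialized_dump(void)
    {
            arch_spin_lock(&dump_lock);     /* NMI-safe, no lockdep hooks */
            dump_stack();                   /* one CPU's trace at a time */
            arch_spin_unlock(&dump_lock);
    }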
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 9c21000df0b5..3bf4a9984f4c 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
| @@ -72,7 +72,14 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount) | |||
| 72 | } | 72 | } |
| 73 | EXPORT_SYMBOL(percpu_counter_set); | 73 | EXPORT_SYMBOL(percpu_counter_set); |
| 74 | 74 | ||
| 75 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) | 75 | /** |
| 76 | * This function is both preempt and irq safe. The former is due to explicit | ||
| 77 | * preemption disable. The latter is guaranteed by the fact that the slow path | ||
| 78 | * is explicitly protected by an irq-safe spinlock whereas the fast path uses | ||
| 79 | * this_cpu_add which is irq-safe by definition. Hence there is no need to | ||
| 80 | * muck with irq state before calling this one. | ||
| 81 | */ | ||
| 82 | void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) | ||
| 76 | { | 83 | { |
| 77 | s64 count; | 84 | s64 count; |
| 78 | 85 | ||
| @@ -89,7 +96,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) | |||
| 89 | } | 96 | } |
| 90 | preempt_enable(); | 97 | preempt_enable(); |
| 91 | } | 98 | } |
| 92 | EXPORT_SYMBOL(__percpu_counter_add); | 99 | EXPORT_SYMBOL(percpu_counter_add_batch); |
| 93 | 100 | ||
| 94 | /* | 101 | /* |
| 95 | * Add up all the per-cpu counts, return the result. This is a more accurate | 102 | * Add up all the per-cpu counts, return the result. This is a more accurate |
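After the rename, percpu_counter_add() presumably remains a thin default-batch wrapper in the header, along these lines (sketch, not the verbatim inline):

    static inline void percpu_counter_add_sketch(struct percpu_counter *fbc,
                                                 s64 amount)
    {
            percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
    }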
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c index 39787db588b0..e824d088f72c 100644 --- a/lib/raid6/mktables.c +++ b/lib/raid6/mktables.c | |||
| @@ -125,6 +125,26 @@ int main(int argc, char *argv[]) | |||
| 125 | printf("EXPORT_SYMBOL(raid6_gfexp);\n"); | 125 | printf("EXPORT_SYMBOL(raid6_gfexp);\n"); |
| 126 | printf("#endif\n"); | 126 | printf("#endif\n"); |
| 127 | 127 | ||
| 128 | /* Compute log-of-2 table */ | ||
| 129 | printf("\nconst u8 __attribute__((aligned(256)))\n" | ||
| 130 | "raid6_gflog[256] =\n" "{\n"); | ||
| 131 | for (i = 0; i < 256; i += 8) { | ||
| 132 | printf("\t"); | ||
| 133 | for (j = 0; j < 8; j++) { | ||
| 134 | v = 255; | ||
| 135 | for (k = 0; k < 256; k++) | ||
| 136 | if (exptbl[k] == (i + j)) { | ||
| 137 | v = k; | ||
| 138 | break; | ||
| 139 | } | ||
| 140 | printf("0x%02x,%c", v, (j == 7) ? '\n' : ' '); | ||
| 141 | } | ||
| 142 | } | ||
| 143 | printf("};\n"); | ||
| 144 | printf("#ifdef __KERNEL__\n"); | ||
| 145 | printf("EXPORT_SYMBOL(raid6_gflog);\n"); | ||
| 146 | printf("#endif\n"); | ||
| 147 | |||
| 128 | /* Compute inverse table x^-1 == x^254 */ | 148 | /* Compute inverse table x^-1 == x^254 */ |
| 129 | printf("\nconst u8 __attribute__((aligned(256)))\n" | 149 | printf("\nconst u8 __attribute__((aligned(256)))\n" |
| 130 | "raid6_gfinv[256] =\n" "{\n"); | 150 | "raid6_gfinv[256] =\n" "{\n"); |
diff --git a/lib/refcount.c b/lib/refcount.c index 9f906783987e..5d0582a9480c 100644 --- a/lib/refcount.c +++ b/lib/refcount.c | |||
| @@ -37,6 +37,8 @@ | |||
| 37 | #include <linux/refcount.h> | 37 | #include <linux/refcount.h> |
| 38 | #include <linux/bug.h> | 38 | #include <linux/bug.h> |
| 39 | 39 | ||
| 40 | #ifdef CONFIG_REFCOUNT_FULL | ||
| 41 | |||
| 40 | /** | 42 | /** |
| 41 | * refcount_add_not_zero - add a value to a refcount unless it is 0 | 43 | * refcount_add_not_zero - add a value to a refcount unless it is 0 |
| 42 | * @i: the value to add to the refcount | 44 | * @i: the value to add to the refcount |
| @@ -225,6 +227,7 @@ void refcount_dec(refcount_t *r) | |||
| 225 | WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); | 227 | WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); |
| 226 | } | 228 | } |
| 227 | EXPORT_SYMBOL(refcount_dec); | 229 | EXPORT_SYMBOL(refcount_dec); |
| 230 | #endif /* CONFIG_REFCOUNT_FULL */ | ||
| 228 | 231 | ||
| 229 | /** | 232 | /** |
| 230 | * refcount_dec_if_one - decrement a refcount if it is 1 | 233 | * refcount_dec_if_one - decrement a refcount if it is 1 |
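Gating the checked implementations behind CONFIG_REFCOUNT_FULL implies that with the option off the refcount API degrades to bare atomic_t operations, roughly this shape (sketch; the real fallbacks would live in the header):

    static inline void refcount_inc_unchecked(refcount_t *r)
    {
            atomic_inc(&r->refs);   /* no saturation or inc-from-zero checks */
    }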
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index d9e7274a04cd..42466c167257 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
| @@ -211,11 +211,10 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | |||
| 211 | int i; | 211 | int i; |
| 212 | 212 | ||
| 213 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); | 213 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); |
| 214 | if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) || | 214 | if (gfp != GFP_KERNEL) |
| 215 | gfp != GFP_KERNEL) | ||
| 216 | tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); | 215 | tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); |
| 217 | if (tbl == NULL && gfp == GFP_KERNEL) | 216 | else |
| 218 | tbl = vzalloc(size); | 217 | tbl = kvzalloc(size, gfp); |
| 219 | 218 | ||
| 220 | size = nbuckets; | 219 | size = nbuckets; |
| 221 | 220 | ||
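kvzalloc() absorbs the old kzalloc-then-vzalloc fallback dance; its behavior is approximately the following (sketch, ignoring the real implementation's size and flag policy):

    static void *kvzalloc_sketch(size_t size, gfp_t gfp)
    {
            void *p = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);

            return p ? p : vzalloc(size);
    }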
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index c6cf82242d65..be7b4dd6b68d 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
| @@ -751,3 +751,38 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, | |||
| 751 | return sg_copy_buffer(sgl, nents, buf, buflen, skip, true); | 751 | return sg_copy_buffer(sgl, nents, buf, buflen, skip, true); |
| 752 | } | 752 | } |
| 753 | EXPORT_SYMBOL(sg_pcopy_to_buffer); | 753 | EXPORT_SYMBOL(sg_pcopy_to_buffer); |
| 754 | |||
| 755 | /** | ||
| 756 | * sg_zero_buffer - Zero-out a part of a SG list | ||
| 757 | * @sgl: The SG list | ||
| 758 | * @nents: Number of SG entries | ||
| 759 | * @buflen: The number of bytes to zero out | ||
| 760 | * @skip: Number of bytes to skip before zeroing | ||
| 761 | * | ||
| 762 | * Returns the number of bytes zeroed. | ||
| 763 | **/ | ||
| 764 | size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, | ||
| 765 | size_t buflen, off_t skip) | ||
| 766 | { | ||
| 767 | unsigned int offset = 0; | ||
| 768 | struct sg_mapping_iter miter; | ||
| 769 | unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; | ||
| 770 | |||
| 771 | sg_miter_start(&miter, sgl, nents, sg_flags); | ||
| 772 | |||
| 773 | if (!sg_miter_skip(&miter, skip)) | ||
| 774 | return 0; | ||
| 775 | |||
| 776 | while (offset < buflen && sg_miter_next(&miter)) { | ||
| 777 | unsigned int len; | ||
| 778 | |||
| 779 | len = min(miter.length, buflen - offset); | ||
| 780 | memset(miter.addr, 0, len); | ||
| 781 | |||
| 782 | offset += len; | ||
| 783 | } | ||
| 784 | |||
| 785 | sg_miter_stop(&miter); | ||
| 786 | return offset; | ||
| 787 | } | ||
| 788 | EXPORT_SYMBOL(sg_zero_buffer); | ||
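Example use of the new helper, clearing part of a scatterlist (sizes hypothetical):

    /* Zero 512 bytes of the list, starting 64 bytes in. */
    static size_t wipe(struct scatterlist *sgl, unsigned int nents)
    {
            return sg_zero_buffer(sgl, nents, 512, 64);
    }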
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 690d75b132fa..2fb007be0212 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c | |||
| @@ -28,7 +28,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1, | |||
| 28 | /* | 28 | /* |
| 29 | * It is valid to assume CPU-locality during early bootup: | 29 | * It is valid to assume CPU-locality during early bootup: |
| 30 | */ | 30 | */ |
| 31 | if (system_state != SYSTEM_RUNNING) | 31 | if (system_state < SYSTEM_SCHEDULING) |
| 32 | goto out; | 32 | goto out; |
| 33 | 33 | ||
| 34 | /* | 34 | /* |
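The new test relies on the ordering of the system_state enum: only SYSTEM_BOOTING sorts below SYSTEM_SCHEDULING, so the CPU-locality assumption now applies strictly to early boot, whereas the old `!= SYSTEM_RUNNING` check also granted it during halt/poweroff/reboot. The relevant ordering, as defined in include/linux/kernel.h at the time:

    enum system_states {
        SYSTEM_BOOTING,
        SYSTEM_SCHEDULING,      /* scheduler is up; preemption rules apply */
        SYSTEM_RUNNING,
        SYSTEM_HALT,
        SYSTEM_POWER_OFF,
        SYSTEM_RESTART,
    };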
diff --git a/lib/string.c b/lib/string.c index 1c1fc9187b05..ebbb99c775bd 100644 --- a/lib/string.c +++ b/lib/string.c | |||
| @@ -978,3 +978,10 @@ char *strreplace(char *s, char old, char new) | |||
| 978 | return s; | 978 | return s; |
| 979 | } | 979 | } |
| 980 | EXPORT_SYMBOL(strreplace); | 980 | EXPORT_SYMBOL(strreplace); |
| 981 | |||
| 982 | void fortify_panic(const char *name) | ||
| 983 | { | ||
| 984 | pr_emerg("detected buffer overflow in %s\n", name); | ||
| 985 | BUG(); | ||
| 986 | } | ||
| 987 | EXPORT_SYMBOL(fortify_panic); | ||
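fortify_panic() is the terminal report for CONFIG_FORTIFY_SOURCE; a fortified helper reaches it roughly like this (an illustrative sketch with a hypothetical memcpy_checked(), not the actual include/linux/string.h wrappers):

    /* Sketch: when the compiler knows the destination object's size,
     * an overflowing copy can be rejected before it happens. */
    #define memcpy_checked(dst, src, len)                           \
    ({                                                              \
        size_t __bos = __builtin_object_size(dst, 0);               \
        if (__bos != (size_t)-1 && (len) > __bos)                   \
            fortify_panic(__func__);                                \
        memcpy(dst, src, len);                                      \
    })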
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 8e105ed4df12..a5f567747ced 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c | |||
| @@ -121,37 +121,3 @@ long strnlen_user(const char __user *str, long count) | |||
| 121 | return 0; | 121 | return 0; |
| 122 | } | 122 | } |
| 123 | EXPORT_SYMBOL(strnlen_user); | 123 | EXPORT_SYMBOL(strnlen_user); |
| 124 | |||
| 125 | /** | ||
| 126 | * strlen_user: - Get the size of a user string INCLUDING final NUL. | ||
| 127 | * @str: The string to measure. | ||
| 128 | * | ||
| 129 | * Context: User context only. This function may sleep if pagefaults are | ||
| 130 | * enabled. | ||
| 131 | * | ||
| 132 | * Get the size of a NUL-terminated string in user space. | ||
| 133 | * | ||
| 134 | * Returns the size of the string INCLUDING the terminating NUL. | ||
| 135 | * On exception, returns 0. | ||
| 136 | * | ||
| 137 | * If there is a limit on the length of a valid string, you may wish to | ||
| 138 | * consider using strnlen_user() instead. | ||
| 139 | */ | ||
| 140 | long strlen_user(const char __user *str) | ||
| 141 | { | ||
| 142 | unsigned long max_addr, src_addr; | ||
| 143 | |||
| 144 | max_addr = user_addr_max(); | ||
| 145 | src_addr = (unsigned long)str; | ||
| 146 | if (likely(src_addr < max_addr)) { | ||
| 147 | unsigned long max = max_addr - src_addr; | ||
| 148 | long retval; | ||
| 149 | |||
| 150 | user_access_begin(); | ||
| 151 | retval = do_strnlen_user(str, ~0ul, max); | ||
| 152 | user_access_end(); | ||
| 153 | return retval; | ||
| 154 | } | ||
| 155 | return 0; | ||
| 156 | } | ||
| 157 | EXPORT_SYMBOL(strlen_user); | ||
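With strlen_user() removed, callers must bound the walk explicitly via strnlen_user(). A typical conversion looks like this (hypothetical caller; PATH_MAX is just an example bound):

    long len = strnlen_user(str, PATH_MAX);

    if (len == 0)
        return -EFAULT;         /* faulted while reading user memory */
    if (len > PATH_MAX)
        return -ENAMETOOLONG;   /* no NUL found within the bound */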
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index e2cbd43d193c..2526a2975c51 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c | |||
| @@ -333,10 +333,39 @@ static void __init test_bitmap_u32_array_conversions(void) | |||
| 333 | } | 333 | } |
| 334 | } | 334 | } |
| 335 | 335 | ||
| 336 | static void noinline __init test_mem_optimisations(void) | ||
| 337 | { | ||
| 338 | DECLARE_BITMAP(bmap1, 1024); | ||
| 339 | DECLARE_BITMAP(bmap2, 1024); | ||
| 340 | unsigned int start, nbits; | ||
| 341 | |||
| 342 | for (start = 0; start < 1024; start += 8) { | ||
| 343 | memset(bmap1, 0x5a, sizeof(bmap1)); | ||
| 344 | memset(bmap2, 0x5a, sizeof(bmap2)); | ||
| 345 | for (nbits = 0; nbits < 1024 - start; nbits += 8) { | ||
| 346 | bitmap_set(bmap1, start, nbits); | ||
| 347 | __bitmap_set(bmap2, start, nbits); | ||
| 348 | if (!bitmap_equal(bmap1, bmap2, 1024)) | ||
| 349 | printk("set not equal %d %d\n", start, nbits); | ||
| 350 | if (!__bitmap_equal(bmap1, bmap2, 1024)) | ||
| 351 | printk("set not __equal %d %d\n", start, nbits); | ||
| 352 | |||
| 353 | bitmap_clear(bmap1, start, nbits); | ||
| 354 | __bitmap_clear(bmap2, start, nbits); | ||
| 355 | if (!bitmap_equal(bmap1, bmap2, 1024)) | ||
| 356 | printk("clear not equal %d %d\n", start, nbits); | ||
| 357 | if (!__bitmap_equal(bmap1, bmap2, 1024)) | ||
| 358 | printk("clear not __equal %d %d\n", start, | ||
| 359 | nbits); | ||
| 360 | } | ||
| 361 | } | ||
| 362 | } | ||
| 363 | |||
| 336 | static int __init test_bitmap_init(void) | 364 | static int __init test_bitmap_init(void) |
| 337 | { | 365 | { |
| 338 | test_zero_fill_copy(); | 366 | test_zero_fill_copy(); |
| 339 | test_bitmap_u32_array_conversions(); | 367 | test_bitmap_u32_array_conversions(); |
| 368 | test_mem_optimisations(); | ||
| 340 | 369 | ||
| 341 | if (failed_tests == 0) | 370 | if (failed_tests == 0) |
| 342 | pr_info("all %u tests passed\n", total_tests); | 371 | pr_info("all %u tests passed\n", total_tests); |
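test_mem_optimisations() cross-checks the inline bitmap_set()/bitmap_clear() fast paths against the out-of-line __bitmap_*() versions; the fast path of interest turns byte-aligned bit ranges into memset() calls. A minimal illustration of the equivalence being verified (little-endian byte layout assumed; the real inline helpers also guard alignment at compile time):

    /* Sketch: for byte-aligned start/nbits, setting a bit range is the
     * same as memset()ing the corresponding whole bytes of storage. */
    static void bitmap_set_bytewise(unsigned long *map,
                                    unsigned int start, unsigned int nbits)
    {
        if ((start % 8) == 0 && (nbits % 8) == 0)
            memset((char *)map + start / 8, 0xff, nbits / 8);
        else
            __bitmap_set(map, start, nbits);        /* generic path */
    }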
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index be88cbaadde3..d9d5a410955c 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
| @@ -84,6 +84,7 @@ struct bpf_test { | |||
| 84 | } test[MAX_SUBTESTS]; | 84 | } test[MAX_SUBTESTS]; |
| 85 | int (*fill_helper)(struct bpf_test *self); | 85 | int (*fill_helper)(struct bpf_test *self); |
| 86 | __u8 frag_data[MAX_DATA]; | 86 | __u8 frag_data[MAX_DATA]; |
| 87 | int stack_depth; /* for eBPF only, since tests don't call verifier */ | ||
| 87 | }; | 88 | }; |
| 88 | 89 | ||
| 89 | /* Large test cases need separate allocation and fill handler. */ | 90 | /* Large test cases need separate allocation and fill handler. */ |
| @@ -434,6 +435,30 @@ loop: | |||
| 434 | return 0; | 435 | return 0; |
| 435 | } | 436 | } |
| 436 | 437 | ||
| 438 | static int bpf_fill_jump_around_ld_abs(struct bpf_test *self) | ||
| 439 | { | ||
| 440 | unsigned int len = BPF_MAXINSNS; | ||
| 441 | struct bpf_insn *insn; | ||
| 442 | int i = 0; | ||
| 443 | |||
| 444 | insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); | ||
| 445 | if (!insn) | ||
| 446 | return -ENOMEM; | ||
| 447 | |||
| 448 | insn[i++] = BPF_MOV64_REG(R6, R1); | ||
| 449 | insn[i++] = BPF_LD_ABS(BPF_B, 0); | ||
| 450 | insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2); | ||
| 451 | i++; | ||
| 452 | while (i < len - 1) | ||
| 453 | insn[i++] = BPF_LD_ABS(BPF_B, 1); | ||
| 454 | insn[i] = BPF_EXIT_INSN(); | ||
| 455 | |||
| 456 | self->u.ptr.insns = insn; | ||
| 457 | self->u.ptr.len = len; | ||
| 458 | |||
| 459 | return 0; | ||
| 460 | } | ||
| 461 | |||
| 437 | static int __bpf_fill_stxdw(struct bpf_test *self, int size) | 462 | static int __bpf_fill_stxdw(struct bpf_test *self, int size) |
| 438 | { | 463 | { |
| 439 | unsigned int len = BPF_MAXINSNS; | 464 | unsigned int len = BPF_MAXINSNS; |
| @@ -455,6 +480,7 @@ static int __bpf_fill_stxdw(struct bpf_test *self, int size) | |||
| 455 | 480 | ||
| 456 | self->u.ptr.insns = insn; | 481 | self->u.ptr.insns = insn; |
| 457 | self->u.ptr.len = len; | 482 | self->u.ptr.len = len; |
| 483 | self->stack_depth = 40; | ||
| 458 | 484 | ||
| 459 | return 0; | 485 | return 0; |
| 460 | } | 486 | } |
| @@ -2317,7 +2343,8 @@ static struct bpf_test tests[] = { | |||
| 2317 | { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0, | 2343 | { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0, |
| 2318 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 2344 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
| 2319 | 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6}, | 2345 | 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6}, |
| 2320 | { { 38, 256 } } | 2346 | { { 38, 256 } }, |
| 2347 | .stack_depth = 64, | ||
| 2321 | }, | 2348 | }, |
| 2322 | /* BPF_ALU | BPF_MOV | BPF_X */ | 2349 | /* BPF_ALU | BPF_MOV | BPF_X */ |
| 2323 | { | 2350 | { |
| @@ -4169,6 +4196,7 @@ static struct bpf_test tests[] = { | |||
| 4169 | INTERNAL, | 4196 | INTERNAL, |
| 4170 | { }, | 4197 | { }, |
| 4171 | { { 0, 0xff } }, | 4198 | { { 0, 0xff } }, |
| 4199 | .stack_depth = 40, | ||
| 4172 | }, | 4200 | }, |
| 4173 | { | 4201 | { |
| 4174 | "ST_MEM_B: Store/Load byte: max positive", | 4202 | "ST_MEM_B: Store/Load byte: max positive", |
| @@ -4181,6 +4209,7 @@ static struct bpf_test tests[] = { | |||
| 4181 | INTERNAL, | 4209 | INTERNAL, |
| 4182 | { }, | 4210 | { }, |
| 4183 | { { 0, 0x7f } }, | 4211 | { { 0, 0x7f } }, |
| 4212 | .stack_depth = 40, | ||
| 4184 | }, | 4213 | }, |
| 4185 | { | 4214 | { |
| 4186 | "STX_MEM_B: Store/Load byte: max negative", | 4215 | "STX_MEM_B: Store/Load byte: max negative", |
| @@ -4194,6 +4223,7 @@ static struct bpf_test tests[] = { | |||
| 4194 | INTERNAL, | 4223 | INTERNAL, |
| 4195 | { }, | 4224 | { }, |
| 4196 | { { 0, 0xff } }, | 4225 | { { 0, 0xff } }, |
| 4226 | .stack_depth = 40, | ||
| 4197 | }, | 4227 | }, |
| 4198 | { | 4228 | { |
| 4199 | "ST_MEM_H: Store/Load half word: max negative", | 4229 | "ST_MEM_H: Store/Load half word: max negative", |
| @@ -4206,6 +4236,7 @@ static struct bpf_test tests[] = { | |||
| 4206 | INTERNAL, | 4236 | INTERNAL, |
| 4207 | { }, | 4237 | { }, |
| 4208 | { { 0, 0xffff } }, | 4238 | { { 0, 0xffff } }, |
| 4239 | .stack_depth = 40, | ||
| 4209 | }, | 4240 | }, |
| 4210 | { | 4241 | { |
| 4211 | "ST_MEM_H: Store/Load half word: max positive", | 4242 | "ST_MEM_H: Store/Load half word: max positive", |
| @@ -4218,6 +4249,7 @@ static struct bpf_test tests[] = { | |||
| 4218 | INTERNAL, | 4249 | INTERNAL, |
| 4219 | { }, | 4250 | { }, |
| 4220 | { { 0, 0x7fff } }, | 4251 | { { 0, 0x7fff } }, |
| 4252 | .stack_depth = 40, | ||
| 4221 | }, | 4253 | }, |
| 4222 | { | 4254 | { |
| 4223 | "STX_MEM_H: Store/Load half word: max negative", | 4255 | "STX_MEM_H: Store/Load half word: max negative", |
| @@ -4231,6 +4263,7 @@ static struct bpf_test tests[] = { | |||
| 4231 | INTERNAL, | 4263 | INTERNAL, |
| 4232 | { }, | 4264 | { }, |
| 4233 | { { 0, 0xffff } }, | 4265 | { { 0, 0xffff } }, |
| 4266 | .stack_depth = 40, | ||
| 4234 | }, | 4267 | }, |
| 4235 | { | 4268 | { |
| 4236 | "ST_MEM_W: Store/Load word: max negative", | 4269 | "ST_MEM_W: Store/Load word: max negative", |
| @@ -4243,6 +4276,7 @@ static struct bpf_test tests[] = { | |||
| 4243 | INTERNAL, | 4276 | INTERNAL, |
| 4244 | { }, | 4277 | { }, |
| 4245 | { { 0, 0xffffffff } }, | 4278 | { { 0, 0xffffffff } }, |
| 4279 | .stack_depth = 40, | ||
| 4246 | }, | 4280 | }, |
| 4247 | { | 4281 | { |
| 4248 | "ST_MEM_W: Store/Load word: max positive", | 4282 | "ST_MEM_W: Store/Load word: max positive", |
| @@ -4255,6 +4289,7 @@ static struct bpf_test tests[] = { | |||
| 4255 | INTERNAL, | 4289 | INTERNAL, |
| 4256 | { }, | 4290 | { }, |
| 4257 | { { 0, 0x7fffffff } }, | 4291 | { { 0, 0x7fffffff } }, |
| 4292 | .stack_depth = 40, | ||
| 4258 | }, | 4293 | }, |
| 4259 | { | 4294 | { |
| 4260 | "STX_MEM_W: Store/Load word: max negative", | 4295 | "STX_MEM_W: Store/Load word: max negative", |
| @@ -4268,6 +4303,7 @@ static struct bpf_test tests[] = { | |||
| 4268 | INTERNAL, | 4303 | INTERNAL, |
| 4269 | { }, | 4304 | { }, |
| 4270 | { { 0, 0xffffffff } }, | 4305 | { { 0, 0xffffffff } }, |
| 4306 | .stack_depth = 40, | ||
| 4271 | }, | 4307 | }, |
| 4272 | { | 4308 | { |
| 4273 | "ST_MEM_DW: Store/Load double word: max negative", | 4309 | "ST_MEM_DW: Store/Load double word: max negative", |
| @@ -4280,6 +4316,7 @@ static struct bpf_test tests[] = { | |||
| 4280 | INTERNAL, | 4316 | INTERNAL, |
| 4281 | { }, | 4317 | { }, |
| 4282 | { { 0, 0xffffffff } }, | 4318 | { { 0, 0xffffffff } }, |
| 4319 | .stack_depth = 40, | ||
| 4283 | }, | 4320 | }, |
| 4284 | { | 4321 | { |
| 4285 | "ST_MEM_DW: Store/Load double word: max negative 2", | 4322 | "ST_MEM_DW: Store/Load double word: max negative 2", |
| @@ -4297,6 +4334,7 @@ static struct bpf_test tests[] = { | |||
| 4297 | INTERNAL, | 4334 | INTERNAL, |
| 4298 | { }, | 4335 | { }, |
| 4299 | { { 0, 0x1 } }, | 4336 | { { 0, 0x1 } }, |
| 4337 | .stack_depth = 40, | ||
| 4300 | }, | 4338 | }, |
| 4301 | { | 4339 | { |
| 4302 | "ST_MEM_DW: Store/Load double word: max positive", | 4340 | "ST_MEM_DW: Store/Load double word: max positive", |
| @@ -4309,6 +4347,7 @@ static struct bpf_test tests[] = { | |||
| 4309 | INTERNAL, | 4347 | INTERNAL, |
| 4310 | { }, | 4348 | { }, |
| 4311 | { { 0, 0x7fffffff } }, | 4349 | { { 0, 0x7fffffff } }, |
| 4350 | .stack_depth = 40, | ||
| 4312 | }, | 4351 | }, |
| 4313 | { | 4352 | { |
| 4314 | "STX_MEM_DW: Store/Load double word: max negative", | 4353 | "STX_MEM_DW: Store/Load double word: max negative", |
| @@ -4322,6 +4361,7 @@ static struct bpf_test tests[] = { | |||
| 4322 | INTERNAL, | 4361 | INTERNAL, |
| 4323 | { }, | 4362 | { }, |
| 4324 | { { 0, 0xffffffff } }, | 4363 | { { 0, 0xffffffff } }, |
| 4364 | .stack_depth = 40, | ||
| 4325 | }, | 4365 | }, |
| 4326 | /* BPF_STX | BPF_XADD | BPF_W/DW */ | 4366 | /* BPF_STX | BPF_XADD | BPF_W/DW */ |
| 4327 | { | 4367 | { |
| @@ -4336,6 +4376,7 @@ static struct bpf_test tests[] = { | |||
| 4336 | INTERNAL, | 4376 | INTERNAL, |
| 4337 | { }, | 4377 | { }, |
| 4338 | { { 0, 0x22 } }, | 4378 | { { 0, 0x22 } }, |
| 4379 | .stack_depth = 40, | ||
| 4339 | }, | 4380 | }, |
| 4340 | { | 4381 | { |
| 4341 | "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22", | 4382 | "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22", |
| @@ -4351,6 +4392,7 @@ static struct bpf_test tests[] = { | |||
| 4351 | INTERNAL, | 4392 | INTERNAL, |
| 4352 | { }, | 4393 | { }, |
| 4353 | { { 0, 0 } }, | 4394 | { { 0, 0 } }, |
| 4395 | .stack_depth = 40, | ||
| 4354 | }, | 4396 | }, |
| 4355 | { | 4397 | { |
| 4356 | "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22", | 4398 | "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22", |
| @@ -4363,6 +4405,7 @@ static struct bpf_test tests[] = { | |||
| 4363 | INTERNAL, | 4405 | INTERNAL, |
| 4364 | { }, | 4406 | { }, |
| 4365 | { { 0, 0x12 } }, | 4407 | { { 0, 0x12 } }, |
| 4408 | .stack_depth = 40, | ||
| 4366 | }, | 4409 | }, |
| 4367 | { | 4410 | { |
| 4368 | "STX_XADD_W: X + 1 + 1 + 1 + ...", | 4411 | "STX_XADD_W: X + 1 + 1 + 1 + ...", |
| @@ -4384,6 +4427,7 @@ static struct bpf_test tests[] = { | |||
| 4384 | INTERNAL, | 4427 | INTERNAL, |
| 4385 | { }, | 4428 | { }, |
| 4386 | { { 0, 0x22 } }, | 4429 | { { 0, 0x22 } }, |
| 4430 | .stack_depth = 40, | ||
| 4387 | }, | 4431 | }, |
| 4388 | { | 4432 | { |
| 4389 | "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22", | 4433 | "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22", |
| @@ -4399,6 +4443,7 @@ static struct bpf_test tests[] = { | |||
| 4399 | INTERNAL, | 4443 | INTERNAL, |
| 4400 | { }, | 4444 | { }, |
| 4401 | { { 0, 0 } }, | 4445 | { { 0, 0 } }, |
| 4446 | .stack_depth = 40, | ||
| 4402 | }, | 4447 | }, |
| 4403 | { | 4448 | { |
| 4404 | "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22", | 4449 | "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22", |
| @@ -4411,6 +4456,7 @@ static struct bpf_test tests[] = { | |||
| 4411 | INTERNAL, | 4456 | INTERNAL, |
| 4412 | { }, | 4457 | { }, |
| 4413 | { { 0, 0x12 } }, | 4458 | { { 0, 0x12 } }, |
| 4459 | .stack_depth = 40, | ||
| 4414 | }, | 4460 | }, |
| 4415 | { | 4461 | { |
| 4416 | "STX_XADD_DW: X + 1 + 1 + 1 + ...", | 4462 | "STX_XADD_DW: X + 1 + 1 + 1 + ...", |
| @@ -5022,6 +5068,14 @@ static struct bpf_test tests[] = { | |||
| 5022 | { { ETH_HLEN, 0xbef } }, | 5068 | { { ETH_HLEN, 0xbef } }, |
| 5023 | .fill_helper = bpf_fill_ld_abs_vlan_push_pop, | 5069 | .fill_helper = bpf_fill_ld_abs_vlan_push_pop, |
| 5024 | }, | 5070 | }, |
| 5071 | { | ||
| 5072 | "BPF_MAXINSNS: jump around ld_abs", | ||
| 5073 | { }, | ||
| 5074 | INTERNAL, | ||
| 5075 | { 10, 11 }, | ||
| 5076 | { { 2, 10 } }, | ||
| 5077 | .fill_helper = bpf_fill_jump_around_ld_abs, | ||
| 5078 | }, | ||
| 5025 | /* | 5079 | /* |
| 5026 | * LD_IND / LD_ABS on fragmented SKBs | 5080 | * LD_IND / LD_ABS on fragmented SKBs |
| 5027 | */ | 5081 | */ |
| @@ -5663,7 +5717,7 @@ static struct sk_buff *populate_skb(char *buf, int size) | |||
| 5663 | if (!skb) | 5717 | if (!skb) |
| 5664 | return NULL; | 5718 | return NULL; |
| 5665 | 5719 | ||
| 5666 | memcpy(__skb_put(skb, size), buf, size); | 5720 | __skb_put_data(skb, buf, size); |
| 5667 | 5721 | ||
| 5668 | /* Initialize a fake skb with test pattern. */ | 5722 | /* Initialize a fake skb with test pattern. */ |
| 5669 | skb_reset_mac_header(skb); | 5723 | skb_reset_mac_header(skb); |
| @@ -5809,6 +5863,7 @@ static struct bpf_prog *generate_filter(int which, int *err) | |||
| 5809 | /* Type doesn't really matter here as long as it's not unspec. */ | 5863 | /* Type doesn't really matter here as long as it's not unspec. */ |
| 5810 | fp->type = BPF_PROG_TYPE_SOCKET_FILTER; | 5864 | fp->type = BPF_PROG_TYPE_SOCKET_FILTER; |
| 5811 | memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn)); | 5865 | memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn)); |
| 5866 | fp->aux->stack_depth = tests[which].stack_depth; | ||
| 5812 | 5867 | ||
| 5813 | /* We cannot error here as we don't need type compatibility | 5868 | /* We cannot error here as we don't need type compatibility |
| 5814 | * checks. | 5869 | * checks. |
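Because INTERNAL test programs bypass the verifier, which would normally compute stack usage for the JIT, each test now has to declare how much of the R10 stack it touches. A new entry follows the same shape as the store/load tests above (illustrative):

    {
        "ST_MEM_B: shape of a stack-using entry",
        .u.insns_int = {
            BPF_ST_MEM(BPF_B, R10, -40, 0xff),
            BPF_LDX_MEM(BPF_B, R0, R10, -40),
            BPF_EXIT_INSN(),
        },
        INTERNAL,
        { },
        { { 0, 0xff } },
        .stack_depth = 40,      /* bytes below R10 the program uses */
    },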
diff --git a/lib/test_kmod.c b/lib/test_kmod.c new file mode 100644 index 000000000000..6c1d678bcf8b --- /dev/null +++ b/lib/test_kmod.c | |||
| @@ -0,0 +1,1246 @@ | |||
| 1 | /* | ||
| 2 | * kmod stress test driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the Free | ||
| 8 | * Software Foundation; either version 2 of the License, or at your option any | ||
| 9 | * later version; or, when distributed separately from the Linux kernel or | ||
| 10 | * when incorporated into other software packages, subject to the following | ||
| 11 | * license: | ||
| 12 | * | ||
| 13 | * This program is free software; you can redistribute it and/or modify it | ||
| 14 | * under the terms of copyleft-next (version 0.3.1 or later) as published | ||
| 15 | * at http://copyleft-next.org/. | ||
| 16 | */ | ||
| 17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 18 | |||
| 19 | /* | ||
| 20 | * This driver provides an interface to trigger and test the kernel's | ||
| 21 | * module loader through a series of configurations and a few triggers. | ||
| 22 | * To test this driver use the following script as root: | ||
| 23 | * | ||
| 24 | * tools/testing/selftests/kmod/kmod.sh --help | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/kernel.h> | ||
| 28 | #include <linux/module.h> | ||
| 29 | #include <linux/kmod.h> | ||
| 30 | #include <linux/printk.h> | ||
| 31 | #include <linux/kthread.h> | ||
| 32 | #include <linux/sched.h> | ||
| 33 | #include <linux/fs.h> | ||
| 34 | #include <linux/miscdevice.h> | ||
| 35 | #include <linux/vmalloc.h> | ||
| 36 | #include <linux/slab.h> | ||
| 37 | #include <linux/device.h> | ||
| 38 | |||
| 39 | #define TEST_START_NUM_THREADS 50 | ||
| 40 | #define TEST_START_DRIVER "test_module" | ||
| 41 | #define TEST_START_TEST_FS "xfs" | ||
| 42 | #define TEST_START_TEST_CASE TEST_KMOD_DRIVER | ||
| 43 | |||
| 44 | |||
| 45 | static bool force_init_test = false; | ||
| 46 | module_param(force_init_test, bool_enable_only, 0644); | ||
| 47 | MODULE_PARM_DESC(force_init_test, | ||
| 48 | "Force kicking a test immediately after driver loads"); | ||
| 49 | |||
| 50 | /* | ||
| 51 | * For device allocation / registration | ||
| 52 | */ | ||
| 53 | static DEFINE_MUTEX(reg_dev_mutex); | ||
| 54 | static LIST_HEAD(reg_test_devs); | ||
| 55 | |||
| 56 | /* | ||
| 57 | * num_test_devs actually holds the ID that will be assigned to | ||
| 58 | * the next device we allow to be created. | ||
| 59 | */ | ||
| 60 | static int num_test_devs; | ||
| 61 | |||
| 62 | /** | ||
| 63 | * enum kmod_test_case - linker table test case | ||
| 64 | * | ||
| 65 | * If you add a test case, please be sure to review if you need to set | ||
| 66 | * @need_mod_put for your test case. | ||
| 67 | * | ||
| 68 | * @TEST_KMOD_DRIVER: stress tests request_module() | ||
| 69 | * @TEST_KMOD_FS_TYPE: stress tests get_fs_type() | ||
| 70 | */ | ||
| 71 | enum kmod_test_case { | ||
| 72 | __TEST_KMOD_INVALID = 0, | ||
| 73 | |||
| 74 | TEST_KMOD_DRIVER, | ||
| 75 | TEST_KMOD_FS_TYPE, | ||
| 76 | |||
| 77 | __TEST_KMOD_MAX, | ||
| 78 | }; | ||
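The INVALID/MAX sentinels double as the range check for the config_test_case sysfs knob below, so extending the driver means adding a new case between them and handling it in every switch (run_request(), tally_work_test(), test_kmod_put_module(), __trigger_config_run()); sketch with a hypothetical TEST_KMOD_FOO:

    enum kmod_test_case {
        __TEST_KMOD_INVALID = 0,

        TEST_KMOD_DRIVER,
        TEST_KMOD_FS_TYPE,
        TEST_KMOD_FOO,          /* hypothetical new case */

        __TEST_KMOD_MAX,
    };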
| 79 | |||
| 80 | struct test_config { | ||
| 81 | char *test_driver; | ||
| 82 | char *test_fs; | ||
| 83 | unsigned int num_threads; | ||
| 84 | enum kmod_test_case test_case; | ||
| 85 | int test_result; | ||
| 86 | }; | ||
| 87 | |||
| 88 | struct kmod_test_device; | ||
| 89 | |||
| 90 | /** | ||
| 91 | * kmod_test_device_info - thread info | ||
| 92 | * | ||
| 93 | * @ret_sync: return value if request_module() is used, sync request for | ||
| 94 | * @TEST_KMOD_DRIVER | ||
| 95 | * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE | ||
| 96 | * @thread_idx: thread ID | ||
| 97 | * @test_dev: test device test is being performed under | ||
| 98 | * @need_mod_put: Some tests (get_fs_type() is one) require putting the module | ||
| 99 | * (module_put(fs_sync->owner)) when done, otherwise you will not be able | ||
| 100 | * to unload the respective modules and re-test. We use this to keep | ||
| 101 | * accounting of when we need this and to help out in case we need to | ||
| 102 | * error out and deal with module_put() on error. | ||
| 103 | */ | ||
| 104 | struct kmod_test_device_info { | ||
| 105 | int ret_sync; | ||
| 106 | struct file_system_type *fs_sync; | ||
| 107 | struct task_struct *task_sync; | ||
| 108 | unsigned int thread_idx; | ||
| 109 | struct kmod_test_device *test_dev; | ||
| 110 | bool need_mod_put; | ||
| 111 | }; | ||
| 112 | |||
| 113 | /** | ||
| 114 | * kmod_test_device - test device to help test kmod | ||
| 115 | * | ||
| 116 | * @dev_idx: unique ID for test device | ||
| 117 | * @config: configuration for the test | ||
| 118 | * @misc_dev: we use a misc device under the hood | ||
| 119 | * @dev: pointer to misc_dev's own struct device | ||
| 120 | * @config_mutex: protects configuration of test | ||
| 121 | * @trigger_mutex: the test trigger can only be fired once at a time | ||
| 122 | * @thread_mutex: protects the @done count and the @info of each thread | ||
| 123 | * @done: number of threads which have completed or failed | ||
| 124 | * @test_is_oom: when we run out of memory, use this to halt moving forward | ||
| 125 | * @kthreads_done: completion used to signal when all work is done | ||
| 126 | * @list: needed to be part of the reg_test_devs | ||
| 127 | * @info: array of info for each thread | ||
| 128 | */ | ||
| 129 | struct kmod_test_device { | ||
| 130 | int dev_idx; | ||
| 131 | struct test_config config; | ||
| 132 | struct miscdevice misc_dev; | ||
| 133 | struct device *dev; | ||
| 134 | struct mutex config_mutex; | ||
| 135 | struct mutex trigger_mutex; | ||
| 136 | struct mutex thread_mutex; | ||
| 137 | |||
| 138 | unsigned int done; | ||
| 139 | |||
| 140 | bool test_is_oom; | ||
| 141 | struct completion kthreads_done; | ||
| 142 | struct list_head list; | ||
| 143 | |||
| 144 | struct kmod_test_device_info *info; | ||
| 145 | }; | ||
| 146 | |||
| 147 | static const char *test_case_str(enum kmod_test_case test_case) | ||
| 148 | { | ||
| 149 | switch (test_case) { | ||
| 150 | case TEST_KMOD_DRIVER: | ||
| 151 | return "TEST_KMOD_DRIVER"; | ||
| 152 | case TEST_KMOD_FS_TYPE: | ||
| 153 | return "TEST_KMOD_FS_TYPE"; | ||
| 154 | default: | ||
| 155 | return "invalid"; | ||
| 156 | } | ||
| 157 | } | ||
| 158 | |||
| 159 | static struct miscdevice *dev_to_misc_dev(struct device *dev) | ||
| 160 | { | ||
| 161 | return dev_get_drvdata(dev); | ||
| 162 | } | ||
| 163 | |||
| 164 | static struct kmod_test_device *misc_dev_to_test_dev(struct miscdevice *misc_dev) | ||
| 165 | { | ||
| 166 | return container_of(misc_dev, struct kmod_test_device, misc_dev); | ||
| 167 | } | ||
| 168 | |||
| 169 | static struct kmod_test_device *dev_to_test_dev(struct device *dev) | ||
| 170 | { | ||
| 171 | struct miscdevice *misc_dev; | ||
| 172 | |||
| 173 | misc_dev = dev_to_misc_dev(dev); | ||
| 174 | |||
| 175 | return misc_dev_to_test_dev(misc_dev); | ||
| 176 | } | ||
| 177 | |||
| 178 | /* Must run with thread_mutex held */ | ||
| 179 | static void kmod_test_done_check(struct kmod_test_device *test_dev, | ||
| 180 | unsigned int idx) | ||
| 181 | { | ||
| 182 | struct test_config *config = &test_dev->config; | ||
| 183 | |||
| 184 | test_dev->done++; | ||
| 185 | dev_dbg(test_dev->dev, "Done thread count: %u\n", test_dev->done); | ||
| 186 | |||
| 187 | if (test_dev->done == config->num_threads) { | ||
| 188 | dev_info(test_dev->dev, "Done: %u threads have all run now\n", | ||
| 189 | test_dev->done); | ||
| 190 | dev_info(test_dev->dev, "Last thread to run: %u\n", idx); | ||
| 191 | complete(&test_dev->kthreads_done); | ||
| 192 | } | ||
| 193 | } | ||
| 194 | |||
| 195 | static void test_kmod_put_module(struct kmod_test_device_info *info) | ||
| 196 | { | ||
| 197 | struct kmod_test_device *test_dev = info->test_dev; | ||
| 198 | struct test_config *config = &test_dev->config; | ||
| 199 | |||
| 200 | if (!info->need_mod_put) | ||
| 201 | return; | ||
| 202 | |||
| 203 | switch (config->test_case) { | ||
| 204 | case TEST_KMOD_DRIVER: | ||
| 205 | break; | ||
| 206 | case TEST_KMOD_FS_TYPE: | ||
| 207 | if (info->fs_sync && info->fs_sync->owner) | ||
| 208 | module_put(info->fs_sync->owner); | ||
| 209 | break; | ||
| 210 | default: | ||
| 211 | BUG(); | ||
| 212 | } | ||
| 213 | |||
| 214 | info->need_mod_put = false; | ||
| 215 | } | ||
| 216 | |||
| 217 | static int run_request(void *data) | ||
| 218 | { | ||
| 219 | struct kmod_test_device_info *info = data; | ||
| 220 | struct kmod_test_device *test_dev = info->test_dev; | ||
| 221 | struct test_config *config = &test_dev->config; | ||
| 222 | |||
| 223 | switch (config->test_case) { | ||
| 224 | case TEST_KMOD_DRIVER: | ||
| 225 | info->ret_sync = request_module("%s", config->test_driver); | ||
| 226 | break; | ||
| 227 | case TEST_KMOD_FS_TYPE: | ||
| 228 | info->fs_sync = get_fs_type(config->test_fs); | ||
| 229 | info->need_mod_put = true; | ||
| 230 | break; | ||
| 231 | default: | ||
| 232 | /* __trigger_config_run() already checked for test sanity */ | ||
| 233 | BUG(); | ||
| 234 | return -EINVAL; | ||
| 235 | } | ||
| 236 | |||
| 237 | dev_dbg(test_dev->dev, "Ran thread %u\n", info->thread_idx); | ||
| 238 | |||
| 239 | test_kmod_put_module(info); | ||
| 240 | |||
| 241 | mutex_lock(&test_dev->thread_mutex); | ||
| 242 | info->task_sync = NULL; | ||
| 243 | kmod_test_done_check(test_dev, info->thread_idx); | ||
| 244 | mutex_unlock(&test_dev->thread_mutex); | ||
| 245 | |||
| 246 | return 0; | ||
| 247 | } | ||
| 248 | |||
| 249 | static int tally_work_test(struct kmod_test_device_info *info) | ||
| 250 | { | ||
| 251 | struct kmod_test_device *test_dev = info->test_dev; | ||
| 252 | struct test_config *config = &test_dev->config; | ||
| 253 | int err_ret = 0; | ||
| 254 | |||
| 255 | switch (config->test_case) { | ||
| 256 | case TEST_KMOD_DRIVER: | ||
| 257 | /* | ||
| 258 | * Only capture errors, if one is found that's | ||
| 259 | * enough, for now. | ||
| 260 | */ | ||
| 261 | if (info->ret_sync != 0) | ||
| 262 | err_ret = info->ret_sync; | ||
| 263 | dev_info(test_dev->dev, | ||
| 264 | "Sync thread %d return status: %d\n", | ||
| 265 | info->thread_idx, info->ret_sync); | ||
| 266 | break; | ||
| 267 | case TEST_KMOD_FS_TYPE: | ||
| 268 | /* For now we make this simple */ | ||
| 269 | if (!info->fs_sync) | ||
| 270 | err_ret = -EINVAL; | ||
| 271 | dev_info(test_dev->dev, "Sync thread %u fs: %s\n", | ||
| 272 | info->thread_idx, info->fs_sync ? config->test_fs : | ||
| 273 | "NULL"); | ||
| 274 | break; | ||
| 275 | default: | ||
| 276 | BUG(); | ||
| 277 | } | ||
| 278 | |||
| 279 | return err_ret; | ||
| 280 | } | ||
| 281 | |||
| 282 | /* | ||
| 283 | * XXX: add result option to display if all errors did not match. | ||
| 284 | * For now we just keep any error code if one was found. | ||
| 285 | * | ||
| 286 | * If this ran it means *all* tasks were created fine and we | ||
| 287 | * are now just collecting results. | ||
| 288 | * | ||
| 289 | * Only propagate errors, do not override with a subsequent success case. | ||
| 290 | */ | ||
| 291 | static void tally_up_work(struct kmod_test_device *test_dev) | ||
| 292 | { | ||
| 293 | struct test_config *config = &test_dev->config; | ||
| 294 | struct kmod_test_device_info *info; | ||
| 295 | unsigned int idx; | ||
| 296 | int err_ret = 0; | ||
| 297 | int ret = 0; | ||
| 298 | |||
| 299 | mutex_lock(&test_dev->thread_mutex); | ||
| 300 | |||
| 301 | dev_info(test_dev->dev, "Results:\n"); | ||
| 302 | |||
| 303 | for (idx = 0; idx < config->num_threads; idx++) { | ||
| 304 | info = &test_dev->info[idx]; | ||
| 305 | ret = tally_work_test(info); | ||
| 306 | if (ret) | ||
| 307 | err_ret = ret; | ||
| 308 | } | ||
| 309 | |||
| 310 | /* | ||
| 311 | * Note: request_module() returns 256 for a module not found even | ||
| 312 | * though modprobe itself returns 1. | ||
| 313 | */ | ||
| 314 | config->test_result = err_ret; | ||
| 315 | |||
| 316 | mutex_unlock(&test_dev->thread_mutex); | ||
| 317 | } | ||
| 318 | |||
| 319 | static int try_one_request(struct kmod_test_device *test_dev, unsigned int idx) | ||
| 320 | { | ||
| 321 | struct kmod_test_device_info *info = &test_dev->info[idx]; | ||
| 322 | int fail_ret = -ENOMEM; | ||
| 323 | |||
| 324 | mutex_lock(&test_dev->thread_mutex); | ||
| 325 | |||
| 326 | info->thread_idx = idx; | ||
| 327 | info->test_dev = test_dev; | ||
| 328 | info->task_sync = kthread_run(run_request, info, "%s-%u", | ||
| 329 | KBUILD_MODNAME, idx); | ||
| 330 | |||
| 331 | if (!info->task_sync || IS_ERR(info->task_sync)) { | ||
| 332 | test_dev->test_is_oom = true; | ||
| 333 | dev_err(test_dev->dev, "Setting up thread %u failed\n", idx); | ||
| 334 | info->task_sync = NULL; | ||
| 335 | goto err_out; | ||
| 336 | } else | ||
| 337 | dev_dbg(test_dev->dev, "Kicked off thread %u\n", idx); | ||
| 338 | |||
| 339 | mutex_unlock(&test_dev->thread_mutex); | ||
| 340 | |||
| 341 | return 0; | ||
| 342 | |||
| 343 | err_out: | ||
| 344 | info->ret_sync = fail_ret; | ||
| 345 | mutex_unlock(&test_dev->thread_mutex); | ||
| 346 | |||
| 347 | return fail_ret; | ||
| 348 | } | ||
| 349 | |||
| 350 | static void test_dev_kmod_stop_tests(struct kmod_test_device *test_dev) | ||
| 351 | { | ||
| 352 | struct test_config *config = &test_dev->config; | ||
| 353 | struct kmod_test_device_info *info; | ||
| 354 | unsigned int i; | ||
| 355 | |||
| 356 | dev_info(test_dev->dev, "Ending request_module() tests\n"); | ||
| 357 | |||
| 358 | mutex_lock(&test_dev->thread_mutex); | ||
| 359 | |||
| 360 | for (i = 0; i < config->num_threads; i++) { | ||
| 361 | info = &test_dev->info[i]; | ||
| 362 | if (info->task_sync && !IS_ERR(info->task_sync)) { | ||
| 363 | dev_info(test_dev->dev, | ||
| 364 | "Stopping still-running thread %i\n", i); | ||
| 365 | kthread_stop(info->task_sync); | ||
| 366 | } | ||
| 367 | |||
| 368 | /* | ||
| 369 | * info->task_sync is well protected, it can only be | ||
| 370 | * NULL or a pointer to a struct. If it's NULL we either | ||
| 371 | * never ran, or we did and we completed the work. Completed | ||
| 372 | * tasks *always* put the module for us. This is a sanity | ||
| 373 | * check -- just in case. | ||
| 374 | */ | ||
| 375 | if (info->task_sync && info->need_mod_put) | ||
| 376 | test_kmod_put_module(info); | ||
| 377 | } | ||
| 378 | |||
| 379 | mutex_unlock(&test_dev->thread_mutex); | ||
| 380 | } | ||
| 381 | |||
| 382 | /* | ||
| 383 | * Only wait *iff* we did not run into any errors during all of our thread | ||
| 384 | * set up. If we run into any issues we stop threads and just bail out with | ||
| 385 | * an error to the trigger. This also means we don't need any tally work | ||
| 386 | * for any threads which fail. | ||
| 387 | */ | ||
| 388 | static int try_requests(struct kmod_test_device *test_dev) | ||
| 389 | { | ||
| 390 | struct test_config *config = &test_dev->config; | ||
| 391 | unsigned int idx; | ||
| 392 | int ret; | ||
| 393 | bool any_error = false; | ||
| 394 | |||
| 395 | for (idx = 0; idx < config->num_threads; idx++) { | ||
| 396 | if (test_dev->test_is_oom) { | ||
| 397 | any_error = true; | ||
| 398 | break; | ||
| 399 | } | ||
| 400 | |||
| 401 | ret = try_one_request(test_dev, idx); | ||
| 402 | if (ret) { | ||
| 403 | any_error = true; | ||
| 404 | break; | ||
| 405 | } | ||
| 406 | } | ||
| 407 | |||
| 408 | if (!any_error) { | ||
| 409 | test_dev->test_is_oom = false; | ||
| 410 | dev_info(test_dev->dev, | ||
| 411 | "No errors were found while initializing threads\n"); | ||
| 412 | wait_for_completion(&test_dev->kthreads_done); | ||
| 413 | tally_up_work(test_dev); | ||
| 414 | } else { | ||
| 415 | test_dev->test_is_oom = true; | ||
| 416 | dev_info(test_dev->dev, | ||
| 417 | "At least one thread failed to start, stop all work\n"); | ||
| 418 | test_dev_kmod_stop_tests(test_dev); | ||
| 419 | return -ENOMEM; | ||
| 420 | } | ||
| 421 | |||
| 422 | return 0; | ||
| 423 | } | ||
| 424 | |||
| 425 | static int run_test_driver(struct kmod_test_device *test_dev) | ||
| 426 | { | ||
| 427 | struct test_config *config = &test_dev->config; | ||
| 428 | |||
| 429 | dev_info(test_dev->dev, "Test case: %s (%u)\n", | ||
| 430 | test_case_str(config->test_case), | ||
| 431 | config->test_case); | ||
| 432 | dev_info(test_dev->dev, "Test driver to load: %s\n", | ||
| 433 | config->test_driver); | ||
| 434 | dev_info(test_dev->dev, "Number of threads to run: %u\n", | ||
| 435 | config->num_threads); | ||
| 436 | dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n", | ||
| 437 | config->num_threads - 1); | ||
| 438 | |||
| 439 | return try_requests(test_dev); | ||
| 440 | } | ||
| 441 | |||
| 442 | static int run_test_fs_type(struct kmod_test_device *test_dev) | ||
| 443 | { | ||
| 444 | struct test_config *config = &test_dev->config; | ||
| 445 | |||
| 446 | dev_info(test_dev->dev, "Test case: %s (%u)\n", | ||
| 447 | test_case_str(config->test_case), | ||
| 448 | config->test_case); | ||
| 449 | dev_info(test_dev->dev, "Test filesystem to load: %s\n", | ||
| 450 | config->test_fs); | ||
| 451 | dev_info(test_dev->dev, "Number of threads to run: %u\n", | ||
| 452 | config->num_threads); | ||
| 453 | dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n", | ||
| 454 | config->num_threads - 1); | ||
| 455 | |||
| 456 | return try_requests(test_dev); | ||
| 457 | } | ||
| 458 | |||
| 459 | static ssize_t config_show(struct device *dev, | ||
| 460 | struct device_attribute *attr, | ||
| 461 | char *buf) | ||
| 462 | { | ||
| 463 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 464 | struct test_config *config = &test_dev->config; | ||
| 465 | int len = 0; | ||
| 466 | |||
| 467 | mutex_lock(&test_dev->config_mutex); | ||
| 468 | |||
| 469 | len += snprintf(buf, PAGE_SIZE, | ||
| 470 | "Custom trigger configuration for: %s\n", | ||
| 471 | dev_name(dev)); | ||
| 472 | |||
| 473 | len += snprintf(buf+len, PAGE_SIZE - len, | ||
| 474 | "Number of threads:\t%u\n", | ||
| 475 | config->num_threads); | ||
| 476 | |||
| 477 | len += snprintf(buf+len, PAGE_SIZE - len, | ||
| 478 | "Test_case:\t%s (%u)\n", | ||
| 479 | test_case_str(config->test_case), | ||
| 480 | config->test_case); | ||
| 481 | |||
| 482 | if (config->test_driver) | ||
| 483 | len += snprintf(buf+len, PAGE_SIZE - len, | ||
| 484 | "driver:\t%s\n", | ||
| 485 | config->test_driver); | ||
| 486 | else | ||
| 487 | len += snprintf(buf+len, PAGE_SIZE - len, | ||
| 488 | "driver:\tEMTPY\n"); | ||
| 489 | |||
| 490 | if (config->test_fs) | ||
| 491 | len += snprintf(buf+len, PAGE_SIZE - len, | ||
| 492 | "fs:\t%s\n", | ||
| 493 | config->test_fs); | ||
| 494 | else | ||
| 495 | len += snprintf(buf+len, PAGE_SIZE - len, | ||
| 496 | "fs:\tEMTPY\n"); | ||
| 497 | |||
| 498 | mutex_unlock(&test_dev->config_mutex); | ||
| 499 | |||
| 500 | return len; | ||
| 501 | } | ||
| 502 | static DEVICE_ATTR_RO(config); | ||
| 503 | |||
| 504 | /* | ||
| 505 | * This ensures we don't allow kicking threads through if our configuration | ||
| 506 | * is faulty. | ||
| 507 | */ | ||
| 508 | static int __trigger_config_run(struct kmod_test_device *test_dev) | ||
| 509 | { | ||
| 510 | struct test_config *config = &test_dev->config; | ||
| 511 | |||
| 512 | test_dev->done = 0; | ||
| 513 | |||
| 514 | switch (config->test_case) { | ||
| 515 | case TEST_KMOD_DRIVER: | ||
| 516 | return run_test_driver(test_dev); | ||
| 517 | case TEST_KMOD_FS_TYPE: | ||
| 518 | return run_test_fs_type(test_dev); | ||
| 519 | default: | ||
| 520 | dev_warn(test_dev->dev, | ||
| 521 | "Invalid test case requested: %u\n", | ||
| 522 | config->test_case); | ||
| 523 | return -EINVAL; | ||
| 524 | } | ||
| 525 | } | ||
| 526 | |||
| 527 | static int trigger_config_run(struct kmod_test_device *test_dev) | ||
| 528 | { | ||
| 529 | struct test_config *config = &test_dev->config; | ||
| 530 | int ret; | ||
| 531 | |||
| 532 | mutex_lock(&test_dev->trigger_mutex); | ||
| 533 | mutex_lock(&test_dev->config_mutex); | ||
| 534 | |||
| 535 | ret = __trigger_config_run(test_dev); | ||
| 536 | if (ret < 0) | ||
| 537 | goto out; | ||
| 538 | dev_info(test_dev->dev, "General test result: %d\n", | ||
| 539 | config->test_result); | ||
| 540 | |||
| 541 | /* | ||
| 542 | * We must return 0 after a trigger event unless something went | ||
| 543 | * wrong with the setup of the test. If the test setup went fine | ||
| 544 | * then userspace must just check the result of config->test_result. | ||
| 545 | * One issue with relying on the return value of a call in the kernel | ||
| 546 | * is that if the kernel returns a positive value, this trigger | ||
| 547 | * cannot relay that value to userspace; it would be lost. | ||
| 548 | * | ||
| 549 | * By not relying on capturing the return value of the tests run | ||
| 550 | * through the trigger, this also allows us to run tests with set -e | ||
| 551 | * and only fail when something went wrong with the driver upon | ||
| 552 | * trigger requests. | ||
| 553 | */ | ||
| 554 | ret = 0; | ||
| 555 | |||
| 556 | out: | ||
| 557 | mutex_unlock(&test_dev->config_mutex); | ||
| 558 | mutex_unlock(&test_dev->trigger_mutex); | ||
| 559 | |||
| 560 | return ret; | ||
| 561 | } | ||
| 562 | |||
| 563 | static ssize_t | ||
| 564 | trigger_config_store(struct device *dev, | ||
| 565 | struct device_attribute *attr, | ||
| 566 | const char *buf, size_t count) | ||
| 567 | { | ||
| 568 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 569 | int ret; | ||
| 570 | |||
| 571 | if (test_dev->test_is_oom) | ||
| 572 | return -ENOMEM; | ||
| 573 | |||
| 574 | /* For all intents and purposes we don't care what userspace | ||
| 575 | * sent through this trigger; we care only that we were triggered. | ||
| 576 | * We treat the return value only as a way of capturing issues with | ||
| 577 | * the test setup. At this point all the test variables should | ||
| 578 | * have been allocated so typically this should never fail. | ||
| 579 | */ | ||
| 580 | ret = trigger_config_run(test_dev); | ||
| 581 | if (unlikely(ret < 0)) | ||
| 582 | goto out; | ||
| 583 | |||
| 584 | /* | ||
| 585 | * Note: any return > 0 will be treated as success | ||
| 586 | * and the error value will not be available to userspace. | ||
| 587 | * Do not rely on trying to send a test's return value to | ||
| 588 | * userspace, as positive return values will be lost. | ||
| 589 | */ | ||
| 590 | if (WARN_ON(ret > 0)) | ||
| 591 | return -EINVAL; | ||
| 592 | |||
| 593 | ret = count; | ||
| 594 | out: | ||
| 595 | return ret; | ||
| 596 | } | ||
| 597 | static DEVICE_ATTR_WO(trigger_config); | ||
| 598 | |||
| 599 | /* | ||
| 600 | * XXX: move to kstrncpy() once merged. | ||
| 601 | * | ||
| 602 | * Users should use kfree_const() when freeing these. | ||
| 603 | */ | ||
| 604 | static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp) | ||
| 605 | { | ||
| 606 | *dst = kstrndup(name, count, gfp); | ||
| 607 | if (!*dst) | ||
| 608 | return -ENOSPC; | ||
| 609 | return count; | ||
| 610 | } | ||
| 611 | |||
| 612 | static int config_copy_test_driver_name(struct test_config *config, | ||
| 613 | const char *name, | ||
| 614 | size_t count) | ||
| 615 | { | ||
| 616 | return __kstrncpy(&config->test_driver, name, count, GFP_KERNEL); | ||
| 617 | } | ||
| 618 | |||
| 619 | |||
| 620 | static int config_copy_test_fs(struct test_config *config, const char *name, | ||
| 621 | size_t count) | ||
| 622 | { | ||
| 623 | return __kstrncpy(&config->test_fs, name, count, GFP_KERNEL); | ||
| 624 | } | ||
| 625 | |||
| 626 | static void __kmod_config_free(struct test_config *config) | ||
| 627 | { | ||
| 628 | if (!config) | ||
| 629 | return; | ||
| 630 | |||
| 631 | kfree_const(config->test_driver); | ||
| 632 | config->test_driver = NULL; | ||
| 633 | |||
| 634 | kfree_const(config->test_fs); | ||
| 635 | config->test_fs = NULL; | ||
| 636 | } | ||
| 637 | |||
| 638 | static void kmod_config_free(struct kmod_test_device *test_dev) | ||
| 639 | { | ||
| 640 | struct test_config *config; | ||
| 641 | |||
| 642 | if (!test_dev) | ||
| 643 | return; | ||
| 644 | |||
| 645 | config = &test_dev->config; | ||
| 646 | |||
| 647 | mutex_lock(&test_dev->config_mutex); | ||
| 648 | __kmod_config_free(config); | ||
| 649 | mutex_unlock(&test_dev->config_mutex); | ||
| 650 | } | ||
| 651 | |||
| 652 | static ssize_t config_test_driver_store(struct device *dev, | ||
| 653 | struct device_attribute *attr, | ||
| 654 | const char *buf, size_t count) | ||
| 655 | { | ||
| 656 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 657 | struct test_config *config = &test_dev->config; | ||
| 658 | int copied; | ||
| 659 | |||
| 660 | mutex_lock(&test_dev->config_mutex); | ||
| 661 | |||
| 662 | kfree_const(config->test_driver); | ||
| 663 | config->test_driver = NULL; | ||
| 664 | |||
| 665 | copied = config_copy_test_driver_name(config, buf, count); | ||
| 666 | mutex_unlock(&test_dev->config_mutex); | ||
| 667 | |||
| 668 | return copied; | ||
| 669 | } | ||
| 670 | |||
| 671 | /* | ||
| 672 | * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE. | ||
| 673 | */ | ||
| 674 | static ssize_t config_test_show_str(struct mutex *config_mutex, | ||
| 675 | char *dst, | ||
| 676 | char *src) | ||
| 677 | { | ||
| 678 | int len; | ||
| 679 | |||
| 680 | mutex_lock(config_mutex); | ||
| 681 | len = snprintf(dst, PAGE_SIZE, "%s\n", src); | ||
| 682 | mutex_unlock(config_mutex); | ||
| 683 | |||
| 684 | return len; | ||
| 685 | } | ||
| 686 | |||
| 687 | static ssize_t config_test_driver_show(struct device *dev, | ||
| 688 | struct device_attribute *attr, | ||
| 689 | char *buf) | ||
| 690 | { | ||
| 691 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 692 | struct test_config *config = &test_dev->config; | ||
| 693 | |||
| 694 | return config_test_show_str(&test_dev->config_mutex, buf, | ||
| 695 | config->test_driver); | ||
| 696 | } | ||
| 697 | static DEVICE_ATTR(config_test_driver, 0644, config_test_driver_show, | ||
| 698 | config_test_driver_store); | ||
| 699 | |||
| 700 | static ssize_t config_test_fs_store(struct device *dev, | ||
| 701 | struct device_attribute *attr, | ||
| 702 | const char *buf, size_t count) | ||
| 703 | { | ||
| 704 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 705 | struct test_config *config = &test_dev->config; | ||
| 706 | int copied; | ||
| 707 | |||
| 708 | mutex_lock(&test_dev->config_mutex); | ||
| 709 | |||
| 710 | kfree_const(config->test_fs); | ||
| 711 | config->test_fs = NULL; | ||
| 712 | |||
| 713 | copied = config_copy_test_fs(config, buf, count); | ||
| 714 | mutex_unlock(&test_dev->config_mutex); | ||
| 715 | |||
| 716 | return copied; | ||
| 717 | } | ||
| 718 | |||
| 719 | static ssize_t config_test_fs_show(struct device *dev, | ||
| 720 | struct device_attribute *attr, | ||
| 721 | char *buf) | ||
| 722 | { | ||
| 723 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 724 | struct test_config *config = &test_dev->config; | ||
| 725 | |||
| 726 | return config_test_show_str(&test_dev->config_mutex, buf, | ||
| 727 | config->test_fs); | ||
| 728 | } | ||
| 729 | static DEVICE_ATTR(config_test_fs, 0644, config_test_fs_show, | ||
| 730 | config_test_fs_store); | ||
| 731 | |||
| 732 | static int trigger_config_run_type(struct kmod_test_device *test_dev, | ||
| 733 | enum kmod_test_case test_case, | ||
| 734 | const char *test_str) | ||
| 735 | { | ||
| 736 | int copied = 0; | ||
| 737 | struct test_config *config = &test_dev->config; | ||
| 738 | |||
| 739 | mutex_lock(&test_dev->config_mutex); | ||
| 740 | |||
| 741 | switch (test_case) { | ||
| 742 | case TEST_KMOD_DRIVER: | ||
| 743 | kfree_const(config->test_driver); | ||
| 744 | config->test_driver = NULL; | ||
| 745 | copied = config_copy_test_driver_name(config, test_str, | ||
| 746 | strlen(test_str)); | ||
| 747 | break; | ||
| 748 | case TEST_KMOD_FS_TYPE: | ||
| 749 | kfree_const(config->test_fs); | ||
| 750 | config->test_fs = NULL; | ||
| 751 | copied = config_copy_test_fs(config, test_str, | ||
| 752 | strlen(test_str)); | ||
| 753 | break; | ||
| 754 | default: | ||
| 755 | mutex_unlock(&test_dev->config_mutex); | ||
| 756 | return -EINVAL; | ||
| 757 | } | ||
| 758 | |||
| 759 | config->test_case = test_case; | ||
| 760 | |||
| 761 | mutex_unlock(&test_dev->config_mutex); | ||
| 762 | |||
| 763 | if (copied <= 0 || copied != strlen(test_str)) { | ||
| 764 | test_dev->test_is_oom = true; | ||
| 765 | return -ENOMEM; | ||
| 766 | } | ||
| 767 | |||
| 768 | test_dev->test_is_oom = false; | ||
| 769 | |||
| 770 | return trigger_config_run(test_dev); | ||
| 771 | } | ||
| 772 | |||
| 773 | static void free_test_dev_info(struct kmod_test_device *test_dev) | ||
| 774 | { | ||
| 775 | vfree(test_dev->info); | ||
| 776 | test_dev->info = NULL; | ||
| 777 | } | ||
| 778 | |||
| 779 | static int kmod_config_sync_info(struct kmod_test_device *test_dev) | ||
| 780 | { | ||
| 781 | struct test_config *config = &test_dev->config; | ||
| 782 | |||
| 783 | free_test_dev_info(test_dev); | ||
| 784 | test_dev->info = vzalloc(config->num_threads * | ||
| 785 | sizeof(struct kmod_test_device_info)); | ||
| 786 | if (!test_dev->info) { | ||
| 787 | dev_err(test_dev->dev, "Cannot alloc test_dev info\n"); | ||
| 788 | return -ENOMEM; | ||
| 789 | } | ||
| 790 | |||
| 791 | return 0; | ||
| 792 | } | ||
| 793 | |||
| 794 | /* | ||
| 795 | * Old kernels may not have this, if you want to port this code to | ||
| 796 | * test it on older kernels. | ||
| 797 | */ | ||
| 798 | #ifdef get_kmod_umh_limit | ||
| 799 | static unsigned int kmod_init_test_thread_limit(void) | ||
| 800 | { | ||
| 801 | return get_kmod_umh_limit(); | ||
| 802 | } | ||
| 803 | #else | ||
| 804 | static unsigned int kmod_init_test_thread_limit(void) | ||
| 805 | { | ||
| 806 | return TEST_START_NUM_THREADS; | ||
| 807 | } | ||
| 808 | #endif | ||
| 809 | |||
| 810 | static int __kmod_config_init(struct kmod_test_device *test_dev) | ||
| 811 | { | ||
| 812 | struct test_config *config = &test_dev->config; | ||
| 813 | int ret = -ENOMEM, copied; | ||
| 814 | |||
| 815 | __kmod_config_free(config); | ||
| 816 | |||
| 817 | copied = config_copy_test_driver_name(config, TEST_START_DRIVER, | ||
| 818 | strlen(TEST_START_DRIVER)); | ||
| 819 | if (copied != strlen(TEST_START_DRIVER)) | ||
| 820 | goto err_out; | ||
| 821 | |||
| 822 | copied = config_copy_test_fs(config, TEST_START_TEST_FS, | ||
| 823 | strlen(TEST_START_TEST_FS)); | ||
| 824 | if (copied != strlen(TEST_START_TEST_FS)) | ||
| 825 | goto err_out; | ||
| 826 | |||
| 827 | config->num_threads = kmod_init_test_thread_limit(); | ||
| 828 | config->test_result = 0; | ||
| 829 | config->test_case = TEST_START_TEST_CASE; | ||
| 830 | |||
| 831 | ret = kmod_config_sync_info(test_dev); | ||
| 832 | if (ret) | ||
| 833 | goto err_out; | ||
| 834 | |||
| 835 | test_dev->test_is_oom = false; | ||
| 836 | |||
| 837 | return 0; | ||
| 838 | |||
| 839 | err_out: | ||
| 840 | test_dev->test_is_oom = true; | ||
| 841 | WARN_ON(test_dev->test_is_oom); | ||
| 842 | |||
| 843 | __kmod_config_free(config); | ||
| 844 | |||
| 845 | return ret; | ||
| 846 | } | ||
| 847 | |||
| 848 | static ssize_t reset_store(struct device *dev, | ||
| 849 | struct device_attribute *attr, | ||
| 850 | const char *buf, size_t count) | ||
| 851 | { | ||
| 852 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 853 | int ret; | ||
| 854 | |||
| 855 | mutex_lock(&test_dev->trigger_mutex); | ||
| 856 | mutex_lock(&test_dev->config_mutex); | ||
| 857 | |||
| 858 | ret = __kmod_config_init(test_dev); | ||
| 859 | if (ret < 0) { | ||
| 860 | dev_err(dev, "could not alloc settings for config trigger: %d\n", | ||
| 861 | ret); | ||
| 863 | goto out; | ||
| 864 | } | ||
| 865 | |||
| 866 | dev_info(dev, "reset\n"); | ||
| 867 | ret = count; | ||
| 868 | |||
| 869 | out: | ||
| 870 | mutex_unlock(&test_dev->config_mutex); | ||
| 871 | mutex_unlock(&test_dev->trigger_mutex); | ||
| 872 | |||
| 873 | return ret; | ||
| 874 | } | ||
| 875 | static DEVICE_ATTR_WO(reset); | ||
| 876 | |||
| 877 | static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev, | ||
| 878 | const char *buf, size_t size, | ||
| 879 | unsigned int *config, | ||
| 880 | int (*test_sync)(struct kmod_test_device *test_dev)) | ||
| 881 | { | ||
| 882 | int ret; | ||
| 883 | long new; | ||
| 884 | unsigned int old_val; | ||
| 885 | |||
| 886 | ret = kstrtol(buf, 10, &new); | ||
| 887 | if (ret) | ||
| 888 | return ret; | ||
| 889 | |||
| 890 | if (new < 0 || new > UINT_MAX) | ||
| 891 | return -EINVAL; | ||
| 892 | |||
| 893 | mutex_lock(&test_dev->config_mutex); | ||
| 894 | |||
| 895 | old_val = *config; | ||
| 896 | *(unsigned int *)config = new; | ||
| 897 | |||
| 898 | ret = test_sync(test_dev); | ||
| 899 | if (ret) { | ||
| 900 | *(unsigned int *)config = old_val; | ||
| 901 | |||
| 902 | ret = test_sync(test_dev); | ||
| 903 | WARN_ON(ret); | ||
| 904 | |||
| 905 | mutex_unlock(&test_dev->config_mutex); | ||
| 906 | return -EINVAL; | ||
| 907 | } | ||
| 908 | |||
| 909 | mutex_unlock(&test_dev->config_mutex); | ||
| 910 | /* Always return full write size even if we didn't consume all */ | ||
| 911 | return size; | ||
| 912 | } | ||
| 913 | |||
| 914 | static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev, | ||
| 915 | const char *buf, size_t size, | ||
| 916 | unsigned int *config, | ||
| 917 | unsigned int min, | ||
| 918 | unsigned int max) | ||
| 919 | { | ||
| 920 | int ret; | ||
| 921 | long new; | ||
| 922 | |||
| 923 | ret = kstrtol(buf, 10, &new); | ||
| 924 | if (ret) | ||
| 925 | return ret; | ||
| 926 | |||
| 927 | if (new < min || new > max || new > UINT_MAX) | ||
| 928 | return -EINVAL; | ||
| 929 | |||
| 930 | mutex_lock(&test_dev->config_mutex); | ||
| 931 | *config = new; | ||
| 932 | mutex_unlock(&test_dev->config_mutex); | ||
| 933 | |||
| 934 | /* Always return full write size even if we didn't consume all */ | ||
| 935 | return size; | ||
| 936 | } | ||
| 937 | |||
| 938 | static int test_dev_config_update_int(struct kmod_test_device *test_dev, | ||
| 939 | const char *buf, size_t size, | ||
| 940 | int *config) | ||
| 941 | { | ||
| 942 | int ret; | ||
| 943 | long new; | ||
| 944 | |||
| 945 | ret = kstrtol(buf, 10, &new); | ||
| 946 | if (ret) | ||
| 947 | return ret; | ||
| 948 | |||
| 949 | if (new > INT_MAX || new < INT_MIN) | ||
| 950 | return -EINVAL; | ||
| 951 | |||
| 952 | mutex_lock(&test_dev->config_mutex); | ||
| 953 | *config = new; | ||
| 954 | mutex_unlock(&test_dev->config_mutex); | ||
| 955 | /* Always return full write size even if we didn't consume all */ | ||
| 956 | return size; | ||
| 957 | } | ||
| 958 | |||
| 959 | static ssize_t test_dev_config_show_int(struct kmod_test_device *test_dev, | ||
| 960 | char *buf, | ||
| 961 | int config) | ||
| 962 | { | ||
| 963 | int val; | ||
| 964 | |||
| 965 | mutex_lock(&test_dev->config_mutex); | ||
| 966 | val = config; | ||
| 967 | mutex_unlock(&test_dev->config_mutex); | ||
| 968 | |||
| 969 | return snprintf(buf, PAGE_SIZE, "%d\n", val); | ||
| 970 | } | ||
| 971 | |||
| 972 | static ssize_t test_dev_config_show_uint(struct kmod_test_device *test_dev, | ||
| 973 | char *buf, | ||
| 974 | unsigned int config) | ||
| 975 | { | ||
| 976 | unsigned int val; | ||
| 977 | |||
| 978 | mutex_lock(&test_dev->config_mutex); | ||
| 979 | val = config; | ||
| 980 | mutex_unlock(&test_dev->config_mutex); | ||
| 981 | |||
| 982 | return snprintf(buf, PAGE_SIZE, "%u\n", val); | ||
| 983 | } | ||
| 984 | |||
| 985 | static ssize_t test_result_store(struct device *dev, | ||
| 986 | struct device_attribute *attr, | ||
| 987 | const char *buf, size_t count) | ||
| 988 | { | ||
| 989 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 990 | struct test_config *config = &test_dev->config; | ||
| 991 | |||
| 992 | return test_dev_config_update_int(test_dev, buf, count, | ||
| 993 | &config->test_result); | ||
| 994 | } | ||
| 995 | |||
| 996 | static ssize_t config_num_threads_store(struct device *dev, | ||
| 997 | struct device_attribute *attr, | ||
| 998 | const char *buf, size_t count) | ||
| 999 | { | ||
| 1000 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 1001 | struct test_config *config = &test_dev->config; | ||
| 1002 | |||
| 1003 | return test_dev_config_update_uint_sync(test_dev, buf, count, | ||
| 1004 | &config->num_threads, | ||
| 1005 | kmod_config_sync_info); | ||
| 1006 | } | ||
| 1007 | |||
| 1008 | static ssize_t config_num_threads_show(struct device *dev, | ||
| 1009 | struct device_attribute *attr, | ||
| 1010 | char *buf) | ||
| 1011 | { | ||
| 1012 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 1013 | struct test_config *config = &test_dev->config; | ||
| 1014 | |||
| 1015 | return test_dev_config_show_int(test_dev, buf, config->num_threads); | ||
| 1016 | } | ||
| 1017 | static DEVICE_ATTR(config_num_threads, 0644, config_num_threads_show, | ||
| 1018 | config_num_threads_store); | ||
| 1019 | |||
| 1020 | static ssize_t config_test_case_store(struct device *dev, | ||
| 1021 | struct device_attribute *attr, | ||
| 1022 | const char *buf, size_t count) | ||
| 1023 | { | ||
| 1024 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 1025 | struct test_config *config = &test_dev->config; | ||
| 1026 | |||
| 1027 | return test_dev_config_update_uint_range(test_dev, buf, count, | ||
| 1028 | &config->test_case, | ||
| 1029 | __TEST_KMOD_INVALID + 1, | ||
| 1030 | __TEST_KMOD_MAX - 1); | ||
| 1031 | } | ||
| 1032 | |||
| 1033 | static ssize_t config_test_case_show(struct device *dev, | ||
| 1034 | struct device_attribute *attr, | ||
| 1035 | char *buf) | ||
| 1036 | { | ||
| 1037 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 1038 | struct test_config *config = &test_dev->config; | ||
| 1039 | |||
| 1040 | return test_dev_config_show_uint(test_dev, buf, config->test_case); | ||
| 1041 | } | ||
| 1042 | static DEVICE_ATTR(config_test_case, 0644, config_test_case_show, | ||
| 1043 | config_test_case_store); | ||
| 1044 | |||
| 1045 | static ssize_t test_result_show(struct device *dev, | ||
| 1046 | struct device_attribute *attr, | ||
| 1047 | char *buf) | ||
| 1048 | { | ||
| 1049 | struct kmod_test_device *test_dev = dev_to_test_dev(dev); | ||
| 1050 | struct test_config *config = &test_dev->config; | ||
| 1051 | |||
| 1052 | return test_dev_config_show_int(test_dev, buf, config->test_result); | ||
| 1053 | } | ||
| 1054 | static DEVICE_ATTR(test_result, 0644, test_result_show, test_result_store); | ||
| 1055 | |||
| 1056 | #define TEST_KMOD_DEV_ATTR(name) &dev_attr_##name.attr | ||
| 1057 | |||
| 1058 | static struct attribute *test_dev_attrs[] = { | ||
| 1059 | TEST_KMOD_DEV_ATTR(trigger_config), | ||
| 1060 | TEST_KMOD_DEV_ATTR(config), | ||
| 1061 | TEST_KMOD_DEV_ATTR(reset), | ||
| 1062 | |||
| 1063 | TEST_KMOD_DEV_ATTR(config_test_driver), | ||
| 1064 | TEST_KMOD_DEV_ATTR(config_test_fs), | ||
| 1065 | TEST_KMOD_DEV_ATTR(config_num_threads), | ||
| 1066 | TEST_KMOD_DEV_ATTR(config_test_case), | ||
| 1067 | TEST_KMOD_DEV_ATTR(test_result), | ||
| 1068 | |||
| 1069 | NULL, | ||
| 1070 | }; | ||
| 1071 | |||
| 1072 | ATTRIBUTE_GROUPS(test_dev); | ||
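ATTRIBUTE_GROUPS(test_dev) generates the sysfs boilerplate that misc_register() consumes via misc_dev->groups; the expansion is roughly:

    static const struct attribute_group test_dev_group = {
        .attrs = test_dev_attrs,
    };
    static const struct attribute_group *test_dev_groups[] = {
        &test_dev_group,
        NULL,
    };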
| 1073 | |||
| 1074 | static int kmod_config_init(struct kmod_test_device *test_dev) | ||
| 1075 | { | ||
| 1076 | int ret; | ||
| 1077 | |||
| 1078 | mutex_lock(&test_dev->config_mutex); | ||
| 1079 | ret = __kmod_config_init(test_dev); | ||
| 1080 | mutex_unlock(&test_dev->config_mutex); | ||
| 1081 | |||
| 1082 | return ret; | ||
| 1083 | } | ||
| 1084 | |||
| 1085 | static struct kmod_test_device *alloc_test_dev_kmod(int idx) | ||
| 1086 | { | ||
| 1087 | int ret; | ||
| 1088 | struct kmod_test_device *test_dev; | ||
| 1089 | struct miscdevice *misc_dev; | ||
| 1090 | |||
| 1091 | test_dev = vzalloc(sizeof(struct kmod_test_device)); | ||
| 1092 | if (!test_dev) { | ||
| 1093 | pr_err("Cannot alloc test_dev\n"); | ||
| 1094 | goto err_out; | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | mutex_init(&test_dev->config_mutex); | ||
| 1098 | mutex_init(&test_dev->trigger_mutex); | ||
| 1099 | mutex_init(&test_dev->thread_mutex); | ||
| 1100 | |||
| 1101 | init_completion(&test_dev->kthreads_done); | ||
| 1102 | |||
| 1103 | ret = kmod_config_init(test_dev); | ||
| 1104 | if (ret < 0) { | ||
| 1105 | pr_err("kmod_config_init() failed\n"); | ||
| 1106 | goto err_out_free; | ||
| 1107 | } | ||
| 1108 | |||
| 1109 | test_dev->dev_idx = idx; | ||
| 1110 | misc_dev = &test_dev->misc_dev; | ||
| 1111 | |||
| 1112 | misc_dev->minor = MISC_DYNAMIC_MINOR; | ||
| 1113 | misc_dev->name = kasprintf(GFP_KERNEL, "test_kmod%d", idx); | ||
| 1114 | if (!misc_dev->name) { | ||
| 1115 | pr_err("Cannot alloc misc_dev->name\n"); | ||
| 1116 | goto err_out_free_config; | ||
| 1117 | } | ||
| 1118 | misc_dev->groups = test_dev_groups; | ||
| 1119 | |||
| 1120 | return test_dev; | ||
| 1121 | |||
| 1122 | err_out_free_config: | ||
| 1123 | free_test_dev_info(test_dev); | ||
| 1124 | kmod_config_free(test_dev); | ||
| 1125 | err_out_free: | ||
| 1126 | vfree(test_dev); | ||
| 1127 | test_dev = NULL; | ||
| 1128 | err_out: | ||
| 1129 | return NULL; | ||
| 1130 | } | ||
| 1131 | |||
| 1132 | static void free_test_dev_kmod(struct kmod_test_device *test_dev) | ||
| 1133 | { | ||
| 1134 | if (test_dev) { | ||
| 1135 | kfree_const(test_dev->misc_dev.name); | ||
| 1136 | test_dev->misc_dev.name = NULL; | ||
| 1137 | free_test_dev_info(test_dev); | ||
| 1138 | kmod_config_free(test_dev); | ||
| 1139 | vfree(test_dev); | ||
| 1140 | test_dev = NULL; | ||
| 1141 | } | ||
| 1142 | } | ||
| 1143 | |||
| 1144 | static struct kmod_test_device *register_test_dev_kmod(void) | ||
| 1145 | { | ||
| 1146 | struct kmod_test_device *test_dev = NULL; | ||
| 1147 | int ret; | ||
| 1148 | |||
| 1149 | mutex_lock(&reg_dev_mutex); | ||
| 1150 | |||
| 1151 | /* int should suffice for number of devices, test for wrap */ | ||
| 1152 | if (unlikely(num_test_devs + 1 < 0)) { | ||
| 1153 | pr_err("reached limit of number of test devices\n"); | ||
| 1154 | goto out; | ||
| 1155 | } | ||
| 1156 | |||
| 1157 | test_dev = alloc_test_dev_kmod(num_test_devs); | ||
| 1158 | if (!test_dev) | ||
| 1159 | goto out; | ||
| 1160 | |||
| 1161 | ret = misc_register(&test_dev->misc_dev); | ||
| 1162 | if (ret) { | ||
| 1163 | pr_err("could not register misc device: %d\n", ret); | ||
| 1164 | free_test_dev_kmod(test_dev); | ||
| 1165 | goto out; | ||
| 1166 | } | ||
| 1167 | |||
| 1168 | test_dev->dev = test_dev->misc_dev.this_device; | ||
| 1169 | list_add_tail(&test_dev->list, ®_test_devs); | ||
| 1170 | dev_info(test_dev->dev, "interface ready\n"); | ||
| 1171 | |||
| 1172 | num_test_devs++; | ||
| 1173 | |||
| 1174 | out: | ||
| 1175 | mutex_unlock(®_dev_mutex); | ||
| 1176 | |||
| 1177 | return test_dev; | ||
| 1178 | |||
| 1179 | } | ||
| 1180 | |||
| 1181 | static int __init test_kmod_init(void) | ||
| 1182 | { | ||
| 1183 | struct kmod_test_device *test_dev; | ||
| 1184 | int ret; | ||
| 1185 | |||
| 1186 | test_dev = register_test_dev_kmod(); | ||
| 1187 | if (!test_dev) { | ||
| 1188 | pr_err("Cannot add first test kmod device\n"); | ||
| 1189 | return -ENODEV; | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | /* | ||
| 1193 | * With some work we might be able to gracefully enable | ||
| 1194 | * testing with this driver built-in; for now this seems | ||
| 1195 | * rather risky. For those willing to try, have at it | ||
| 1196 | * and enable the below. Good luck! If that works, try | ||
| 1197 | * lowering the init level for more fun. | ||
| 1198 | */ | ||
| 1199 | if (force_init_test) { | ||
| 1200 | ret = trigger_config_run_type(test_dev, | ||
| 1201 | TEST_KMOD_DRIVER, "tun"); | ||
| 1202 | if (WARN_ON(ret)) | ||
| 1203 | return ret; | ||
| 1204 | ret = trigger_config_run_type(test_dev, | ||
| 1205 | TEST_KMOD_FS_TYPE, "btrfs"); | ||
| 1206 | if (WARN_ON(ret)) | ||
| 1207 | return ret; | ||
| 1208 | } | ||
| 1209 | |||
| 1210 | return 0; | ||
| 1211 | } | ||
| 1212 | late_initcall(test_kmod_init); | ||
| 1213 | |||
| 1214 | static void unregister_test_dev_kmod(struct kmod_test_device *test_dev) | ||
| 1216 | { | ||
| 1217 | mutex_lock(&test_dev->trigger_mutex); | ||
| 1218 | mutex_lock(&test_dev->config_mutex); | ||
| 1219 | |||
| 1220 | test_dev_kmod_stop_tests(test_dev); | ||
| 1221 | |||
| 1222 | dev_info(test_dev->dev, "removing interface\n"); | ||
| 1223 | misc_deregister(&test_dev->misc_dev); | ||
| 1224 | /* misc_dev.name is freed by free_test_dev_kmod() */ | ||
| 1225 | |||
| 1226 | mutex_unlock(&test_dev->config_mutex); | ||
| 1227 | mutex_unlock(&test_dev->trigger_mutex); | ||
| 1228 | |||
| 1229 | free_test_dev_kmod(test_dev); | ||
| 1230 | } | ||
| 1231 | |||
| 1232 | static void __exit test_kmod_exit(void) | ||
| 1233 | { | ||
| 1234 | struct kmod_test_device *test_dev, *tmp; | ||
| 1235 | |||
| 1236 | mutex_lock(®_dev_mutex); | ||
| 1237 | list_for_each_entry_safe(test_dev, tmp, ®_test_devs, list) { | ||
| 1238 | list_del(&test_dev->list); | ||
| 1239 | unregister_test_dev_kmod(test_dev); | ||
| 1240 | } | ||
| 1241 | mutex_unlock(®_dev_mutex); | ||
| 1242 | } | ||
| 1243 | module_exit(test_kmod_exit); | ||
| 1244 | |||
| 1245 | MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>"); | ||
| 1246 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c new file mode 100644 index 000000000000..3dd801c1c85b --- /dev/null +++ b/lib/test_sysctl.c | |||
| @@ -0,0 +1,148 @@ | |||
| 1 | /* | ||
| 2 | * proc sysctl test driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the Free | ||
| 8 | * Software Foundation; either version 2 of the License, or at your option any | ||
| 9 | * later version; or, when distributed separately from the Linux kernel or | ||
| 10 | * when incorporated into other software packages, subject to the following | ||
| 11 | * license: | ||
| 12 | * | ||
| 13 | * This program is free software; you can redistribute it and/or modify it | ||
| 14 | * under the terms of copyleft-next (version 0.3.1 or later) as published | ||
| 15 | * at http://copyleft-next.org/. | ||
| 16 | */ | ||
| 17 | |||
| 18 | /* | ||
| 19 | * This module provides an interface to the proc sysctl interfaces. This | ||
| 20 | * driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the | ||
| 21 | * system unless explicitly requested by name. You can also build this driver | ||
| 22 | * into your kernel. | ||
| 23 | */ | ||
| 24 | |||
| 25 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 26 | |||
| 27 | #include <linux/init.h> | ||
| 28 | #include <linux/list.h> | ||
| 29 | #include <linux/module.h> | ||
| 30 | #include <linux/printk.h> | ||
| 31 | #include <linux/fs.h> | ||
| 32 | #include <linux/miscdevice.h> | ||
| 33 | #include <linux/slab.h> | ||
| 34 | #include <linux/uaccess.h> | ||
| 35 | #include <linux/async.h> | ||
| 36 | #include <linux/delay.h> | ||
| 37 | #include <linux/vmalloc.h> | ||
| 38 | |||
| 39 | static int i_zero; | ||
| 40 | static int i_one_hundred = 100; | ||
| 41 | |||
| 42 | struct test_sysctl_data { | ||
| 43 | int int_0001; | ||
| 44 | int int_0002; | ||
| 45 | int int_0003[4]; | ||
| 46 | |||
| 47 | unsigned int uint_0001; | ||
| 48 | |||
| 49 | char string_0001[65]; | ||
| 50 | }; | ||
| 51 | |||
| 52 | static struct test_sysctl_data test_data = { | ||
| 53 | .int_0001 = 60, | ||
| 54 | .int_0002 = 1, | ||
| 55 | |||
| 56 | .int_0003[0] = 0, | ||
| 57 | .int_0003[1] = 1, | ||
| 58 | .int_0003[2] = 2, | ||
| 59 | .int_0003[3] = 3, | ||
| 60 | |||
| 61 | .uint_0001 = 314, | ||
| 62 | |||
| 63 | .string_0001 = "(none)", | ||
| 64 | }; | ||
| 65 | |||
| 66 | /* These are all under /proc/sys/debug/test_sysctl/ */ | ||
| 67 | static struct ctl_table test_table[] = { | ||
| 68 | { | ||
| 69 | .procname = "int_0001", | ||
| 70 | .data = &test_data.int_0001, | ||
| 71 | .maxlen = sizeof(int), | ||
| 72 | .mode = 0644, | ||
| 73 | .proc_handler = proc_dointvec_minmax, | ||
| 74 | .extra1 = &i_zero, | ||
| 75 | .extra2 = &i_one_hundred, | ||
| 76 | }, | ||
| 77 | { | ||
| 78 | .procname = "int_0002", | ||
| 79 | .data = &test_data.int_0002, | ||
| 80 | .maxlen = sizeof(int), | ||
| 81 | .mode = 0644, | ||
| 82 | .proc_handler = proc_dointvec, | ||
| 83 | }, | ||
| 84 | { | ||
| 85 | .procname = "int_0003", | ||
| 86 | .data = &test_data.int_0003, | ||
| 87 | .maxlen = sizeof(test_data.int_0003), | ||
| 88 | .mode = 0644, | ||
| 89 | .proc_handler = proc_dointvec, | ||
| 90 | }, | ||
| 91 | { | ||
| 92 | .procname = "uint_0001", | ||
| 93 | .data = &test_data.uint_0001, | ||
| 94 | .maxlen = sizeof(unsigned int), | ||
| 95 | .mode = 0644, | ||
| 96 | .proc_handler = proc_douintvec, | ||
| 97 | }, | ||
| 98 | { | ||
| 99 | .procname = "string_0001", | ||
| 100 | .data = &test_data.string_0001, | ||
| 101 | .maxlen = sizeof(test_data.string_0001), | ||
| 102 | .mode = 0644, | ||
| 103 | .proc_handler = proc_dostring, | ||
| 104 | }, | ||
| 105 | { } | ||
| 106 | }; | ||
| 107 | |||
| 108 | static struct ctl_table test_sysctl_table[] = { | ||
| 109 | { | ||
| 110 | .procname = "test_sysctl", | ||
| 111 | .maxlen = 0, | ||
| 112 | .mode = 0555, | ||
| 113 | .child = test_table, | ||
| 114 | }, | ||
| 115 | { } | ||
| 116 | }; | ||
| 117 | |||
| 118 | static struct ctl_table test_sysctl_root_table[] = { | ||
| 119 | { | ||
| 120 | .procname = "debug", | ||
| 121 | .maxlen = 0, | ||
| 122 | .mode = 0555, | ||
| 123 | .child = test_sysctl_table, | ||
| 124 | }, | ||
| 125 | { } | ||
| 126 | }; | ||
| 127 | |||
| 128 | static struct ctl_table_header *test_sysctl_header; | ||
| 129 | |||
| 130 | static int __init test_sysctl_init(void) | ||
| 131 | { | ||
| 132 | test_sysctl_header = register_sysctl_table(test_sysctl_root_table); | ||
| 133 | if (!test_sysctl_header) | ||
| 134 | return -ENOMEM; | ||
| 135 | return 0; | ||
| 136 | } | ||
| 137 | late_initcall(test_sysctl_init); | ||
| 138 | |||
| 139 | static void __exit test_sysctl_exit(void) | ||
| 140 | { | ||
| 141 | if (test_sysctl_header) | ||
| 142 | unregister_sysctl_table(test_sysctl_header); | ||
| 143 | } | ||
| 144 | |||
| 145 | module_exit(test_sysctl_exit); | ||
| 146 | |||
| 147 | MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>"); | ||
| 148 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/test_uuid.c b/lib/test_uuid.c index 547d3127a3cf..478c049630b5 100644 --- a/lib/test_uuid.c +++ b/lib/test_uuid.c | |||
| @@ -11,25 +11,25 @@ | |||
| 11 | 11 | ||
| 12 | struct test_uuid_data { | 12 | struct test_uuid_data { |
| 13 | const char *uuid; | 13 | const char *uuid; |
| 14 | uuid_le le; | 14 | guid_t le; |
| 15 | uuid_be be; | 15 | uuid_t be; |
| 16 | }; | 16 | }; |
| 17 | 17 | ||
| 18 | static const struct test_uuid_data test_uuid_test_data[] = { | 18 | static const struct test_uuid_data test_uuid_test_data[] = { |
| 19 | { | 19 | { |
| 20 | .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576", | 20 | .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576", |
| 21 | .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76), | 21 | .le = GUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76), |
| 22 | .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76), | 22 | .be = UUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76), |
| 23 | }, | 23 | }, |
| 24 | { | 24 | { |
| 25 | .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b", | 25 | .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b", |
| 26 | .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b), | 26 | .le = GUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b), |
| 27 | .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b), | 27 | .be = UUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b), |
| 28 | }, | 28 | }, |
| 29 | { | 29 | { |
| 30 | .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84", | 30 | .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84", |
| 31 | .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84), | 31 | .le = GUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84), |
| 32 | .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84), | 32 | .be = UUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84), |
| 33 | }, | 33 | }, |
| 34 | }; | 34 | }; |
| 35 | 35 | ||
| @@ -61,28 +61,28 @@ static void __init test_uuid_failed(const char *prefix, bool wrong, bool be, | |||
| 61 | 61 | ||
| 62 | static void __init test_uuid_test(const struct test_uuid_data *data) | 62 | static void __init test_uuid_test(const struct test_uuid_data *data) |
| 63 | { | 63 | { |
| 64 | uuid_le le; | 64 | guid_t le; |
| 65 | uuid_be be; | 65 | uuid_t be; |
| 66 | char buf[48]; | 66 | char buf[48]; |
| 67 | 67 | ||
| 68 | /* LE */ | 68 | /* LE */ |
| 69 | total_tests++; | 69 | total_tests++; |
| 70 | if (uuid_le_to_bin(data->uuid, &le)) | 70 | if (guid_parse(data->uuid, &le)) |
| 71 | test_uuid_failed("conversion", false, false, data->uuid, NULL); | 71 | test_uuid_failed("conversion", false, false, data->uuid, NULL); |
| 72 | 72 | ||
| 73 | total_tests++; | 73 | total_tests++; |
| 74 | if (uuid_le_cmp(data->le, le)) { | 74 | if (!guid_equal(&data->le, &le)) { |
| 75 | sprintf(buf, "%pUl", &le); | 75 | sprintf(buf, "%pUl", &le); |
| 76 | test_uuid_failed("cmp", false, false, data->uuid, buf); | 76 | test_uuid_failed("cmp", false, false, data->uuid, buf); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | /* BE */ | 79 | /* BE */ |
| 80 | total_tests++; | 80 | total_tests++; |
| 81 | if (uuid_be_to_bin(data->uuid, &be)) | 81 | if (uuid_parse(data->uuid, &be)) |
| 82 | test_uuid_failed("conversion", false, true, data->uuid, NULL); | 82 | test_uuid_failed("conversion", false, true, data->uuid, NULL); |
| 83 | 83 | ||
| 84 | total_tests++; | 84 | total_tests++; |
| 85 | if (uuid_be_cmp(data->be, be)) { | 85 | if (!uuid_equal(&data->be, &be)) { |
| 86 | sprintf(buf, "%pUb", &be); | 86 | sprintf(buf, "%pUb", &be); |
| 87 | test_uuid_failed("cmp", false, true, data->uuid, buf); | 87 | test_uuid_failed("cmp", false, true, data->uuid, buf); |
| 88 | } | 88 | } |
| @@ -90,17 +90,17 @@ static void __init test_uuid_test(const struct test_uuid_data *data) | |||
| 90 | 90 | ||
| 91 | static void __init test_uuid_wrong(const char *data) | 91 | static void __init test_uuid_wrong(const char *data) |
| 92 | { | 92 | { |
| 93 | uuid_le le; | 93 | guid_t le; |
| 94 | uuid_be be; | 94 | uuid_t be; |
| 95 | 95 | ||
| 96 | /* LE */ | 96 | /* LE */ |
| 97 | total_tests++; | 97 | total_tests++; |
| 98 | if (!uuid_le_to_bin(data, &le)) | 98 | if (!guid_parse(data, &le)) |
| 99 | test_uuid_failed("negative", true, false, data, NULL); | 99 | test_uuid_failed("negative", true, false, data, NULL); |
| 100 | 100 | ||
| 101 | /* BE */ | 101 | /* BE */ |
| 102 | total_tests++; | 102 | total_tests++; |
| 103 | if (!uuid_be_to_bin(data, &be)) | 103 | if (!uuid_parse(data, &be)) |
| 104 | test_uuid_failed("negative", true, true, data, NULL); | 104 | test_uuid_failed("negative", true, true, data, NULL); |
| 105 | } | 105 | } |
| 106 | 106 | ||
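The conversions above follow the uuid rework in this series: uuid_le becomes guid_t (the little-endian GUID layout), uuid_be becomes uuid_t (big-endian RFC 4122), uuid_*_to_bin() becomes guid_parse()/uuid_parse(), and the memcmp-style uuid_*_cmp() (0 on match) is replaced by the boolean guid_equal()/uuid_equal() (true on match), which is why the tests invert their conditions. A small sketch of the new API (the demo helper is invented for illustration):

#include <linux/uuid.h>

static bool demo_uuid_matches(const char *str, const uuid_t *expected)
{
	uuid_t u;

	if (uuid_parse(str, &u))	/* 0 on success, -EINVAL on bad input */
		return false;
	return uuid_equal(expected, &u);
}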
diff --git a/lib/usercopy.c b/lib/usercopy.c index 1b6010a3beb8..f5d9f08ee032 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c | |||
| @@ -6,8 +6,11 @@ | |||
| 6 | unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) | 6 | unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) |
| 7 | { | 7 | { |
| 8 | unsigned long res = n; | 8 | unsigned long res = n; |
| 9 | if (likely(access_ok(VERIFY_READ, from, n))) | 9 | might_fault(); |
| 10 | if (likely(access_ok(VERIFY_READ, from, n))) { | ||
| 11 | kasan_check_write(to, n); | ||
| 10 | res = raw_copy_from_user(to, from, n); | 12 | res = raw_copy_from_user(to, from, n); |
| 13 | } | ||
| 11 | if (unlikely(res)) | 14 | if (unlikely(res)) |
| 12 | memset(to + (n - res), 0, res); | 15 | memset(to + (n - res), 0, res); |
| 13 | return res; | 16 | return res; |
| @@ -18,8 +21,11 @@ EXPORT_SYMBOL(_copy_from_user); | |||
| 18 | #ifndef INLINE_COPY_TO_USER | 21 | #ifndef INLINE_COPY_TO_USER |
| 19 | unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) | 22 | unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) |
| 20 | { | 23 | { |
| 21 | if (likely(access_ok(VERIFY_WRITE, to, n))) | 24 | might_fault(); |
| 25 | if (likely(access_ok(VERIFY_WRITE, to, n))) { | ||
| 26 | kasan_check_read(from, n); | ||
| 22 | n = raw_copy_to_user(to, from, n); | 27 | n = raw_copy_to_user(to, from, n); |
| 28 | } | ||
| 23 | return n; | 29 | return n; |
| 24 | } | 30 | } |
| 25 | EXPORT_SYMBOL(_copy_to_user); | 31 | EXPORT_SYMBOL(_copy_to_user); |
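Besides the new might_fault() and KASAN annotations, the contract here is that _copy_from_user() returns the number of bytes it could not copy and zero-fills that uncopied tail, so a caller never sees stale kernel memory after a short copy. A hedged caller-side sketch (demo names invented):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct demo_args {
	u32 flags;
	u64 addr;
};

static long demo_fetch_args(struct demo_args *karg, const void __user *uarg)
{
	/* nonzero return == short copy; the tail of *karg is already zeroed */
	if (copy_from_user(karg, uarg, sizeof(*karg)))
		return -EFAULT;
	return 0;
}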
diff --git a/lib/uuid.c b/lib/uuid.c index 37687af77ff8..680b9fb9ba09 100644 --- a/lib/uuid.c +++ b/lib/uuid.c | |||
| @@ -21,10 +21,13 @@ | |||
| 21 | #include <linux/uuid.h> | 21 | #include <linux/uuid.h> |
| 22 | #include <linux/random.h> | 22 | #include <linux/random.h> |
| 23 | 23 | ||
| 24 | const u8 uuid_le_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15}; | 24 | const guid_t guid_null; |
| 25 | EXPORT_SYMBOL(uuid_le_index); | 25 | EXPORT_SYMBOL(guid_null); |
| 26 | const u8 uuid_be_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; | 26 | const uuid_t uuid_null; |
| 27 | EXPORT_SYMBOL(uuid_be_index); | 27 | EXPORT_SYMBOL(uuid_null); |
| 28 | |||
| 29 | const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15}; | ||
| 30 | const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; | ||
| 28 | 31 | ||
| 29 | /*************************************************************** | 32 | /*************************************************************** |
| 30 | * Random UUID interface | 33 | * Random UUID interface |
| @@ -53,21 +56,21 @@ static void __uuid_gen_common(__u8 b[16]) | |||
| 53 | b[8] = (b[8] & 0x3F) | 0x80; | 56 | b[8] = (b[8] & 0x3F) | 0x80; |
| 54 | } | 57 | } |
| 55 | 58 | ||
| 56 | void uuid_le_gen(uuid_le *lu) | 59 | void guid_gen(guid_t *lu) |
| 57 | { | 60 | { |
| 58 | __uuid_gen_common(lu->b); | 61 | __uuid_gen_common(lu->b); |
| 59 | /* version 4 : random generation */ | 62 | /* version 4 : random generation */ |
| 60 | lu->b[7] = (lu->b[7] & 0x0F) | 0x40; | 63 | lu->b[7] = (lu->b[7] & 0x0F) | 0x40; |
| 61 | } | 64 | } |
| 62 | EXPORT_SYMBOL_GPL(uuid_le_gen); | 65 | EXPORT_SYMBOL_GPL(guid_gen); |
| 63 | 66 | ||
| 64 | void uuid_be_gen(uuid_be *bu) | 67 | void uuid_gen(uuid_t *bu) |
| 65 | { | 68 | { |
| 66 | __uuid_gen_common(bu->b); | 69 | __uuid_gen_common(bu->b); |
| 67 | /* version 4 : random generation */ | 70 | /* version 4 : random generation */ |
| 68 | bu->b[6] = (bu->b[6] & 0x0F) | 0x40; | 71 | bu->b[6] = (bu->b[6] & 0x0F) | 0x40; |
| 69 | } | 72 | } |
| 70 | EXPORT_SYMBOL_GPL(uuid_be_gen); | 73 | EXPORT_SYMBOL_GPL(uuid_gen); |
| 71 | 74 | ||
| 72 | /** | 75 | /** |
| 73 | * uuid_is_valid - checks if UUID string valid | 76 | * uuid_is_valid - checks if UUID string valid |
| @@ -97,7 +100,7 @@ bool uuid_is_valid(const char *uuid) | |||
| 97 | } | 100 | } |
| 98 | EXPORT_SYMBOL(uuid_is_valid); | 101 | EXPORT_SYMBOL(uuid_is_valid); |
| 99 | 102 | ||
| 100 | static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16]) | 103 | static int __uuid_parse(const char *uuid, __u8 b[16], const u8 ei[16]) |
| 101 | { | 104 | { |
| 102 | static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34}; | 105 | static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34}; |
| 103 | unsigned int i; | 106 | unsigned int i; |
| @@ -115,14 +118,14 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16]) | |||
| 115 | return 0; | 118 | return 0; |
| 116 | } | 119 | } |
| 117 | 120 | ||
| 118 | int uuid_le_to_bin(const char *uuid, uuid_le *u) | 121 | int guid_parse(const char *uuid, guid_t *u) |
| 119 | { | 122 | { |
| 120 | return __uuid_to_bin(uuid, u->b, uuid_le_index); | 123 | return __uuid_parse(uuid, u->b, guid_index); |
| 121 | } | 124 | } |
| 122 | EXPORT_SYMBOL(uuid_le_to_bin); | 125 | EXPORT_SYMBOL(guid_parse); |
| 123 | 126 | ||
| 124 | int uuid_be_to_bin(const char *uuid, uuid_be *u) | 127 | int uuid_parse(const char *uuid, uuid_t *u) |
| 125 | { | 128 | { |
| 126 | return __uuid_to_bin(uuid, u->b, uuid_be_index); | 129 | return __uuid_parse(uuid, u->b, uuid_index); |
| 127 | } | 130 | } |
| 128 | EXPORT_SYMBOL(uuid_be_to_bin); | 131 | EXPORT_SYMBOL(uuid_parse); |
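The generation side keeps the same byte-twiddling: both renamed helpers produce random version-4 values, differing only in which byte carries the version nibble because of the mixed-endian GUID layout. A sketch (not from the patch) pairing uuid_gen() with the existing %pUb printk extension:

#include <linux/printk.h>
#include <linux/uuid.h>

static void demo_uuid_gen(void)
{
	uuid_t u;

	uuid_gen(&u);	/* random bytes, version nibble forced to 4 */
	pr_info("new uuid: %pUb\n", &u);
}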
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 2d41de3f98a1..86c3385b9eb3 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <linux/dcache.h> | 31 | #include <linux/dcache.h> |
| 32 | #include <linux/cred.h> | 32 | #include <linux/cred.h> |
| 33 | #include <linux/uuid.h> | 33 | #include <linux/uuid.h> |
| 34 | #include <linux/of.h> | ||
| 34 | #include <net/addrconf.h> | 35 | #include <net/addrconf.h> |
| 35 | #ifdef CONFIG_BLOCK | 36 | #ifdef CONFIG_BLOCK |
| 36 | #include <linux/blkdev.h> | 37 | #include <linux/blkdev.h> |
| @@ -1308,14 +1309,14 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
| 1308 | char uuid[UUID_STRING_LEN + 1]; | 1309 | char uuid[UUID_STRING_LEN + 1]; |
| 1309 | char *p = uuid; | 1310 | char *p = uuid; |
| 1310 | int i; | 1311 | int i; |
| 1311 | const u8 *index = uuid_be_index; | 1312 | const u8 *index = uuid_index; |
| 1312 | bool uc = false; | 1313 | bool uc = false; |
| 1313 | 1314 | ||
| 1314 | switch (*(++fmt)) { | 1315 | switch (*(++fmt)) { |
| 1315 | case 'L': | 1316 | case 'L': |
| 1316 | uc = true; /* fall-through */ | 1317 | uc = true; /* fall-through */ |
| 1317 | case 'l': | 1318 | case 'l': |
| 1318 | index = uuid_le_index; | 1319 | index = guid_index; |
| 1319 | break; | 1320 | break; |
| 1320 | case 'B': | 1321 | case 'B': |
| 1321 | uc = true; | 1322 | uc = true; |
| @@ -1470,6 +1471,126 @@ char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt) | |||
| 1470 | return format_flags(buf, end, flags, names); | 1471 | return format_flags(buf, end, flags, names); |
| 1471 | } | 1472 | } |
| 1472 | 1473 | ||
| 1474 | static const char *device_node_name_for_depth(const struct device_node *np, int depth) | ||
| 1475 | { | ||
| 1476 | for ( ; np && depth; depth--) | ||
| 1477 | np = np->parent; | ||
| 1478 | |||
| 1479 | return kbasename(np->full_name); | ||
| 1480 | } | ||
| 1481 | |||
| 1482 | static noinline_for_stack | ||
| 1483 | char *device_node_gen_full_name(const struct device_node *np, char *buf, char *end) | ||
| 1484 | { | ||
| 1485 | int depth; | ||
| 1486 | const struct device_node *parent = np->parent; | ||
| 1487 | static const struct printf_spec strspec = { | ||
| 1488 | .field_width = -1, | ||
| 1489 | .precision = -1, | ||
| 1490 | }; | ||
| 1491 | |||
| 1492 | /* special case for root node */ | ||
| 1493 | if (!parent) | ||
| 1494 | return string(buf, end, "/", strspec); | ||
| 1495 | |||
| 1496 | for (depth = 0; parent->parent; depth++) | ||
| 1497 | parent = parent->parent; | ||
| 1498 | |||
| 1499 | for ( ; depth >= 0; depth--) { | ||
| 1500 | buf = string(buf, end, "/", strspec); | ||
| 1501 | buf = string(buf, end, device_node_name_for_depth(np, depth), | ||
| 1502 | strspec); | ||
| 1503 | } | ||
| 1504 | return buf; | ||
| 1505 | } | ||
| 1506 | |||
| 1507 | static noinline_for_stack | ||
| 1508 | char *device_node_string(char *buf, char *end, struct device_node *dn, | ||
| 1509 | struct printf_spec spec, const char *fmt) | ||
| 1510 | { | ||
| 1511 | char tbuf[sizeof("xxxx") + 1]; | ||
| 1512 | const char *p; | ||
| 1513 | int ret; | ||
| 1514 | char *buf_start = buf; | ||
| 1515 | struct property *prop; | ||
| 1516 | bool has_mult, pass; | ||
| 1517 | static const struct printf_spec num_spec = { | ||
| 1518 | .flags = SMALL, | ||
| 1519 | .field_width = -1, | ||
| 1520 | .precision = -1, | ||
| 1521 | .base = 10, | ||
| 1522 | }; | ||
| 1523 | |||
| 1524 | struct printf_spec str_spec = spec; | ||
| 1525 | str_spec.field_width = -1; | ||
| 1526 | |||
| 1527 | if (!IS_ENABLED(CONFIG_OF)) | ||
| 1528 | return string(buf, end, "(!OF)", spec); | ||
| 1529 | |||
| 1530 | if ((unsigned long)dn < PAGE_SIZE) | ||
| 1531 | return string(buf, end, "(null)", spec); | ||
| 1532 | |||
| 1533 | /* simple case without any further format specifiers */ | ||
| 1534 | fmt++; | ||
| 1535 | if (fmt[0] == '\0' || strcspn(fmt,"fnpPFcC") > 0) | ||
| 1536 | fmt = "f"; | ||
| 1537 | |||
| 1538 | for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) { | ||
| 1539 | if (pass) { | ||
| 1540 | if (buf < end) | ||
| 1541 | *buf = ':'; | ||
| 1542 | buf++; | ||
| 1543 | } | ||
| 1544 | |||
| 1545 | switch (*fmt) { | ||
| 1546 | case 'f': /* full_name */ | ||
| 1547 | buf = device_node_gen_full_name(dn, buf, end); | ||
| 1548 | break; | ||
| 1549 | case 'n': /* name */ | ||
| 1550 | buf = string(buf, end, dn->name, str_spec); | ||
| 1551 | break; | ||
| 1552 | case 'p': /* phandle */ | ||
| 1553 | buf = number(buf, end, (unsigned int)dn->phandle, num_spec); | ||
| 1554 | break; | ||
| 1555 | case 'P': /* path-spec */ | ||
| 1556 | p = kbasename(of_node_full_name(dn)); | ||
| 1557 | if (!p[1]) | ||
| 1558 | p = "/"; | ||
| 1559 | buf = string(buf, end, p, str_spec); | ||
| 1560 | break; | ||
| 1561 | case 'F': /* flags */ | ||
| 1562 | tbuf[0] = of_node_check_flag(dn, OF_DYNAMIC) ? 'D' : '-'; | ||
| 1563 | tbuf[1] = of_node_check_flag(dn, OF_DETACHED) ? 'd' : '-'; | ||
| 1564 | tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-'; | ||
| 1565 | tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-'; | ||
| 1566 | tbuf[4] = 0; | ||
| 1567 | buf = string(buf, end, tbuf, str_spec); | ||
| 1568 | break; | ||
| 1569 | case 'c': /* major compatible string */ | ||
| 1570 | ret = of_property_read_string(dn, "compatible", &p); | ||
| 1571 | if (!ret) | ||
| 1572 | buf = string(buf, end, p, str_spec); | ||
| 1573 | break; | ||
| 1574 | case 'C': /* full compatible string */ | ||
| 1575 | has_mult = false; | ||
| 1576 | of_property_for_each_string(dn, "compatible", prop, p) { | ||
| 1577 | if (has_mult) | ||
| 1578 | buf = string(buf, end, ",", str_spec); | ||
| 1579 | buf = string(buf, end, "\"", str_spec); | ||
| 1580 | buf = string(buf, end, p, str_spec); | ||
| 1581 | buf = string(buf, end, "\"", str_spec); | ||
| 1582 | |||
| 1583 | has_mult = true; | ||
| 1584 | } | ||
| 1585 | break; | ||
| 1586 | default: | ||
| 1587 | break; | ||
| 1588 | } | ||
| 1589 | } | ||
| 1590 | |||
| 1591 | return widen_string(buf, buf - buf_start, end, spec); | ||
| 1592 | } | ||
| 1593 | |||
| 1473 | int kptr_restrict __read_mostly; | 1594 | int kptr_restrict __read_mostly; |
| 1474 | 1595 | ||
| 1475 | /* | 1596 | /* |
| @@ -1566,6 +1687,16 @@ int kptr_restrict __read_mostly; | |||
| 1566 | * p page flags (see struct page) given as pointer to unsigned long | 1687 | * p page flags (see struct page) given as pointer to unsigned long |
| 1567 | * g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t | 1688 | * g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t |
| 1568 | * v vma flags (VM_*) given as pointer to unsigned long | 1689 | * v vma flags (VM_*) given as pointer to unsigned long |
| 1690 | * - 'O' For a kobject based struct. Must be one of the following: | ||
| 1691 | * - 'OF[fnpPcCF]' For a device tree object | ||
| 1692 | * Without any optional arguments prints the full_name | ||
| 1693 | * f device node full_name | ||
| 1694 | * n device node name | ||
| 1695 | * p device node phandle | ||
| 1696 | * P device node path spec (name + @unit) | ||
| 1697 | * F device node flags | ||
| 1698 | * c major compatible string | ||
| 1699 | * C full compatible string | ||
| 1569 | * | 1700 | * |
| 1570 | * ** Please update also Documentation/printk-formats.txt when making changes ** | 1701 | * ** Please update also Documentation/printk-formats.txt when making changes ** |
| 1571 | * | 1702 | * |
| @@ -1721,6 +1852,11 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 1721 | 1852 | ||
| 1722 | case 'G': | 1853 | case 'G': |
| 1723 | return flags_string(buf, end, ptr, fmt); | 1854 | return flags_string(buf, end, ptr, fmt); |
| 1855 | case 'O': | ||
| 1856 | switch (fmt[1]) { | ||
| 1857 | case 'F': | ||
| 1858 | return device_node_string(buf, end, ptr, spec, fmt + 1); | ||
| 1859 | } | ||
| 1724 | } | 1860 | } |
| 1725 | spec.flags |= SMALL; | 1861 | spec.flags |= SMALL; |
| 1726 | if (spec.field_width == -1) { | 1862 | if (spec.field_width == -1) { |
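With the 'O' case wired into pointer(), device-tree code can print node identity without open-coding of_node_full_name(). An illustrative caller (np must be a valid device_node; a near-NULL pointer prints "(null)" and kernels without CONFIG_OF print "(!OF)"):

#include <linux/of.h>
#include <linux/printk.h>

static void demo_print_of_node(struct device_node *np)
{
	pr_info("path:   %pOF\n", np);		/* defaults to 'f', e.g. /soc/i2c@40013000 */
	pr_info("name:   %pOFn\n", np);		/* node name only */
	pr_info("both:   %pOFfp\n", np);	/* full path, then ':' and the phandle */
	pr_info("compat: %pOFc\n", np);		/* first "compatible" string, if any */
}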
