Diffstat (limited to 'lib')
36 files changed, 1938 insertions, 343 deletions
diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c
index 8881dad2a6a0..a7f278d2ed8f 100644
--- a/lib/842/842_decompress.c
+++ b/lib/842/842_decompress.c
| @@ -69,7 +69,7 @@ struct sw842_param { | |||
| 69 | ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \ | 69 | ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \ |
| 70 | (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \ | 70 | (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \ |
| 71 | (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \ | 71 | (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \ |
| 72 | WARN(1, "pr_debug param err invalid size %x\n", s)) | 72 | 0) |
| 73 | 73 | ||
| 74 | static int next_bits(struct sw842_param *p, u64 *d, u8 n); | 74 | static int next_bits(struct sw842_param *p, u64 *d, u8 n); |
| 75 | 75 | ||
| @@ -202,10 +202,14 @@ static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize) | |||
| 202 | return -EINVAL; | 202 | return -EINVAL; |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n", | 205 | if (size != 2 && size != 4 && size != 8) |
| 206 | size, (unsigned long)index, (unsigned long)(index * size), | 206 | WARN(1, "__do_index invalid size %x\n", size); |
| 207 | (unsigned long)offset, (unsigned long)total, | 207 | else |
| 208 | (unsigned long)beN_to_cpu(&p->ostart[offset], size)); | 208 | pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n", |
| 209 | size, (unsigned long)index, | ||
| 210 | (unsigned long)(index * size), (unsigned long)offset, | ||
| 211 | (unsigned long)total, | ||
| 212 | (unsigned long)beN_to_cpu(&p->ostart[offset], size)); | ||
| 209 | 213 | ||
| 210 | memcpy(p->out, &p->ostart[offset], size); | 214 | memcpy(p->out, &p->ostart[offset], size); |
| 211 | p->out += size; | 215 | p->out += size; |
diff --git a/lib/Kconfig b/lib/Kconfig
index f0df318104e7..133ebc0c1773 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
| @@ -210,9 +210,11 @@ config RANDOM32_SELFTEST | |||
| 210 | # compression support is select'ed if needed | 210 | # compression support is select'ed if needed |
| 211 | # | 211 | # |
| 212 | config 842_COMPRESS | 212 | config 842_COMPRESS |
| 213 | select CRC32 | ||
| 213 | tristate | 214 | tristate |
| 214 | 215 | ||
| 215 | config 842_DECOMPRESS | 216 | config 842_DECOMPRESS |
| 217 | select CRC32 | ||
| 216 | tristate | 218 | tristate |
| 217 | 219 | ||
| 218 | config ZLIB_INFLATE | 220 | config ZLIB_INFLATE |
| @@ -475,6 +477,11 @@ config DDR | |||
| 475 | information. This data is useful for drivers handling | 477 | information. This data is useful for drivers handling |
| 476 | DDR SDRAM controllers. | 478 | DDR SDRAM controllers. |
| 477 | 479 | ||
| 480 | config IRQ_POLL | ||
| 481 | bool "IRQ polling library" | ||
| 482 | help | ||
| 483 | Helper library to poll interrupt mitigation using polling. | ||
| 484 | |||
| 478 | config MPILIB | 485 | config MPILIB |
| 479 | tristate | 486 | tristate |
| 480 | select CLZ_TAB | 487 | select CLZ_TAB |
| @@ -512,9 +519,9 @@ source "lib/fonts/Kconfig" | |||
| 512 | config SG_SPLIT | 519 | config SG_SPLIT |
| 513 | def_bool n | 520 | def_bool n |
| 514 | help | 521 | help |
| 515 | Provides a heler to split scatterlists into chunks, each chunk being a | 522 | Provides a helper to split scatterlists into chunks, each chunk being |
| 516 | scatterlist. This should be selected by a driver or an API which | 523 | a scatterlist. This should be selected by a driver or an API which |
| 517 | whishes to split a scatterlist amongst multiple DMA channel. | 524 | whishes to split a scatterlist amongst multiple DMA channels. |
| 518 | 525 | ||
| 519 | # | 526 | # |
| 520 | # sg chaining option | 527 | # sg chaining option |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8c15b29d5adc..ecb9e75614bf 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -580,6 +580,14 @@ config DEBUG_VM_RB | |||
| 580 | 580 | ||
| 581 | If unsure, say N. | 581 | If unsure, say N. |
| 582 | 582 | ||
| 583 | config DEBUG_VM_PGFLAGS | ||
| 584 | bool "Debug page-flags operations" | ||
| 585 | depends on DEBUG_VM | ||
| 586 | help | ||
| 587 | Enables extra validation on page flags operations. | ||
| 588 | |||
| 589 | If unsure, say N. | ||
| 590 | |||
| 583 | config DEBUG_VIRTUAL | 591 | config DEBUG_VIRTUAL |
| 584 | bool "Debug VM translations" | 592 | bool "Debug VM translations" |
| 585 | depends on DEBUG_KERNEL && X86 | 593 | depends on DEBUG_KERNEL && X86 |
| @@ -812,6 +820,17 @@ config BOOTPARAM_HUNG_TASK_PANIC_VALUE | |||
| 812 | default 0 if !BOOTPARAM_HUNG_TASK_PANIC | 820 | default 0 if !BOOTPARAM_HUNG_TASK_PANIC |
| 813 | default 1 if BOOTPARAM_HUNG_TASK_PANIC | 821 | default 1 if BOOTPARAM_HUNG_TASK_PANIC |
| 814 | 822 | ||
| 823 | config WQ_WATCHDOG | ||
| 824 | bool "Detect Workqueue Stalls" | ||
| 825 | depends on DEBUG_KERNEL | ||
| 826 | help | ||
| 827 | Say Y here to enable stall detection on workqueues. If a | ||
| 828 | worker pool doesn't make forward progress on a pending work | ||
| 829 | item for over a given amount of time, 30s by default, a | ||
| 830 | warning message is printed along with dump of workqueue | ||
| 831 | state. This can be configured through kernel parameter | ||
| 832 | "workqueue.watchdog_thresh" and its sysfs counterpart. | ||
| 833 | |||
| 815 | endmenu # "Debug lockups and hangs" | 834 | endmenu # "Debug lockups and hangs" |
| 816 | 835 | ||
| 817 | config PANIC_ON_OOPS | 836 | config PANIC_ON_OOPS |
| @@ -1484,6 +1503,29 @@ config OF_RECONFIG_NOTIFIER_ERROR_INJECT | |||
| 1484 | 1503 | ||
| 1485 | If unsure, say N. | 1504 | If unsure, say N. |
| 1486 | 1505 | ||
| 1506 | config NETDEV_NOTIFIER_ERROR_INJECT | ||
| 1507 | tristate "Netdev notifier error injection module" | ||
| 1508 | depends on NET && NOTIFIER_ERROR_INJECTION | ||
| 1509 | help | ||
| 1510 | This option provides the ability to inject artificial errors to | ||
| 1511 | netdevice notifier chain callbacks. It is controlled through debugfs | ||
| 1512 | interface /sys/kernel/debug/notifier-error-inject/netdev | ||
| 1513 | |||
| 1514 | If the notifier call chain should be failed with some events | ||
| 1515 | notified, write the error code to "actions/<notifier event>/error". | ||
| 1516 | |||
| 1517 | Example: Inject netdevice mtu change error (-22 = -EINVAL) | ||
| 1518 | |||
| 1519 | # cd /sys/kernel/debug/notifier-error-inject/netdev | ||
| 1520 | # echo -22 > actions/NETDEV_CHANGEMTU/error | ||
| 1521 | # ip link set eth0 mtu 1024 | ||
| 1522 | RTNETLINK answers: Invalid argument | ||
| 1523 | |||
| 1524 | To compile this code as a module, choose M here: the module will | ||
| 1525 | be called netdev-notifier-error-inject. | ||
| 1526 | |||
| 1527 | If unsure, say N. | ||
| 1528 | |||
| 1487 | config FAULT_INJECTION | 1529 | config FAULT_INJECTION |
| 1488 | bool "Fault-injection framework" | 1530 | bool "Fault-injection framework" |
| 1489 | depends on DEBUG_KERNEL | 1531 | depends on DEBUG_KERNEL |
| @@ -1523,8 +1565,7 @@ config FAIL_IO_TIMEOUT | |||
| 1523 | 1565 | ||
| 1524 | config FAIL_MMC_REQUEST | 1566 | config FAIL_MMC_REQUEST |
| 1525 | bool "Fault-injection capability for MMC IO" | 1567 | bool "Fault-injection capability for MMC IO" |
| 1526 | select DEBUG_FS | 1568 | depends on FAULT_INJECTION_DEBUG_FS && MMC |
| 1527 | depends on FAULT_INJECTION && MMC | ||
| 1528 | help | 1569 | help |
| 1529 | Provide fault-injection capability for MMC IO. | 1570 | Provide fault-injection capability for MMC IO. |
| 1530 | This will make the mmc core return data errors. This is | 1571 | This will make the mmc core return data errors. This is |
| @@ -1556,7 +1597,6 @@ config FAULT_INJECTION_STACKTRACE_FILTER | |||
| 1556 | 1597 | ||
| 1557 | config LATENCYTOP | 1598 | config LATENCYTOP |
| 1558 | bool "Latency measuring infrastructure" | 1599 | bool "Latency measuring infrastructure" |
| 1559 | depends on HAVE_LATENCYTOP_SUPPORT | ||
| 1560 | depends on DEBUG_KERNEL | 1600 | depends on DEBUG_KERNEL |
| 1561 | depends on STACKTRACE_SUPPORT | 1601 | depends on STACKTRACE_SUPPORT |
| 1562 | depends on PROC_FS | 1602 | depends on PROC_FS |
| @@ -1853,3 +1893,43 @@ source "samples/Kconfig" | |||
| 1853 | 1893 | ||
| 1854 | source "lib/Kconfig.kgdb" | 1894 | source "lib/Kconfig.kgdb" |
| 1855 | 1895 | ||
| 1896 | source "lib/Kconfig.ubsan" | ||
| 1897 | |||
| 1898 | config ARCH_HAS_DEVMEM_IS_ALLOWED | ||
| 1899 | bool | ||
| 1900 | |||
| 1901 | config STRICT_DEVMEM | ||
| 1902 | bool "Filter access to /dev/mem" | ||
| 1903 | depends on MMU | ||
| 1904 | depends on ARCH_HAS_DEVMEM_IS_ALLOWED | ||
| 1905 | default y if TILE || PPC | ||
| 1906 | ---help--- | ||
| 1907 | If this option is disabled, you allow userspace (root) access to all | ||
| 1908 | of memory, including kernel and userspace memory. Accidental | ||
| 1909 | access to this is obviously disastrous, but specific access can | ||
| 1910 | be used by people debugging the kernel. Note that with PAT support | ||
| 1911 | enabled, even in this case there are restrictions on /dev/mem | ||
| 1912 | use due to the cache aliasing requirements. | ||
| 1913 | |||
| 1914 | If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem | ||
| 1915 | file only allows userspace access to PCI space and the BIOS code and | ||
| 1916 | data regions. This is sufficient for dosemu and X and all common | ||
| 1917 | users of /dev/mem. | ||
| 1918 | |||
| 1919 | If in doubt, say Y. | ||
| 1920 | |||
| 1921 | config IO_STRICT_DEVMEM | ||
| 1922 | bool "Filter I/O access to /dev/mem" | ||
| 1923 | depends on STRICT_DEVMEM | ||
| 1924 | ---help--- | ||
| 1925 | If this option is disabled, you allow userspace (root) access to all | ||
| 1926 | io-memory regardless of whether a driver is actively using that | ||
| 1927 | range. Accidental access to this is obviously disastrous, but | ||
| 1928 | specific access can be used by people debugging kernel drivers. | ||
| 1929 | |||
| 1930 | If this option is switched on, the /dev/mem file only allows | ||
| 1931 | userspace access to *idle* io-memory ranges (see /proc/iomem) This | ||
| 1932 | may break traditional users of /dev/mem (dosemu, legacy X, etc...) | ||
| 1933 | if the driver using a given range cannot be disabled. | ||
| 1934 | |||
| 1935 | If in doubt, say Y. | ||
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
new file mode 100644
index 000000000000..49518fb48cab
--- /dev/null
+++ b/lib/Kconfig.ubsan
| @@ -0,0 +1,29 @@ | |||
| 1 | config ARCH_HAS_UBSAN_SANITIZE_ALL | ||
| 2 | bool | ||
| 3 | |||
| 4 | config UBSAN | ||
| 5 | bool "Undefined behaviour sanity checker" | ||
| 6 | help | ||
| 7 | This option enables undefined behaviour sanity checker | ||
| 8 | Compile-time instrumentation is used to detect various undefined | ||
| 9 | behaviours in runtime. Various types of checks may be enabled | ||
| 10 | via boot parameter ubsan_handle (see: Documentation/ubsan.txt). | ||
| 11 | |||
| 12 | config UBSAN_SANITIZE_ALL | ||
| 13 | bool "Enable instrumentation for the entire kernel" | ||
| 14 | depends on UBSAN | ||
| 15 | depends on ARCH_HAS_UBSAN_SANITIZE_ALL | ||
| 16 | default y | ||
| 17 | help | ||
| 18 | This option activates instrumentation for the entire kernel. | ||
| 19 | If you don't enable this option, you have to explicitly specify | ||
| 20 | UBSAN_SANITIZE := y for the files/directories you want to check for UB. | ||
| 21 | |||
| 22 | config UBSAN_ALIGNMENT | ||
| 23 | bool "Enable checking of pointers alignment" | ||
| 24 | depends on UBSAN | ||
| 25 | default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
| 26 | help | ||
| 27 | This option enables detection of unaligned memory accesses. | ||
| 28 | Enabling this option on architectures that support unalligned | ||
| 29 | accesses may produce a lot of false positives. | ||
diff --git a/lib/Makefile b/lib/Makefile
index 7f1de26613d2..a7c26a41a738 100644
--- a/lib/Makefile
+++ b/lib/Makefile
| @@ -31,7 +31,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | |||
| 31 | obj-y += string_helpers.o | 31 | obj-y += string_helpers.o |
| 32 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | 32 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o |
| 33 | obj-y += hexdump.o | 33 | obj-y += hexdump.o |
| 34 | obj-$(CONFIG_TEST_HEXDUMP) += test-hexdump.o | 34 | obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o |
| 35 | obj-y += kstrtox.o | 35 | obj-y += kstrtox.o |
| 36 | obj-$(CONFIG_TEST_BPF) += test_bpf.o | 36 | obj-$(CONFIG_TEST_BPF) += test_bpf.o |
| 37 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o | 37 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o |
| @@ -120,6 +120,7 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o | |||
| 120 | obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o | 120 | obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o |
| 121 | obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o | 121 | obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o |
| 122 | obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o | 122 | obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o |
| 123 | obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o | ||
| 123 | obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o | 124 | obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o |
| 124 | obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \ | 125 | obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \ |
| 125 | of-reconfig-notifier-error-inject.o | 126 | of-reconfig-notifier-error-inject.o |
| @@ -153,7 +154,7 @@ obj-$(CONFIG_GLOB) += glob.o | |||
| 153 | obj-$(CONFIG_MPILIB) += mpi/ | 154 | obj-$(CONFIG_MPILIB) += mpi/ |
| 154 | obj-$(CONFIG_SIGNATURE) += digsig.o | 155 | obj-$(CONFIG_SIGNATURE) += digsig.o |
| 155 | 156 | ||
| 156 | obj-$(CONFIG_CLZ_TAB) += clz_tab.o | 157 | lib-$(CONFIG_CLZ_TAB) += clz_tab.o |
| 157 | 158 | ||
| 158 | obj-$(CONFIG_DDR) += jedec_ddr_data.o | 159 | obj-$(CONFIG_DDR) += jedec_ddr_data.o |
| 159 | 160 | ||
| @@ -164,6 +165,7 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o | |||
| 164 | 165 | ||
| 165 | obj-$(CONFIG_SG_SPLIT) += sg_split.o | 166 | obj-$(CONFIG_SG_SPLIT) += sg_split.o |
| 166 | obj-$(CONFIG_STMP_DEVICE) += stmp_device.o | 167 | obj-$(CONFIG_STMP_DEVICE) += stmp_device.o |
| 168 | obj-$(CONFIG_IRQ_POLL) += irq_poll.o | ||
| 167 | 169 | ||
| 168 | libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ | 170 | libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ |
| 169 | fdt_empty_tree.o | 171 | fdt_empty_tree.o |
| @@ -208,3 +210,6 @@ quiet_cmd_build_OID_registry = GEN $@ | |||
| 208 | clean-files += oid_registry_data.c | 210 | clean-files += oid_registry_data.c |
| 209 | 211 | ||
| 210 | obj-$(CONFIG_UCS2_STRING) += ucs2_string.o | 212 | obj-$(CONFIG_UCS2_STRING) += ucs2_string.o |
| 213 | obj-$(CONFIG_UBSAN) += ubsan.o | ||
| 214 | |||
| 215 | UBSAN_SANITIZE_ubsan.o := n | ||
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 83c33a5bcffb..d62de8bf022d 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
| @@ -16,6 +16,10 @@ | |||
| 16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 17 | #include <linux/atomic.h> | 17 | #include <linux/atomic.h> |
| 18 | 18 | ||
| 19 | #ifdef CONFIG_X86 | ||
| 20 | #include <asm/processor.h> /* for boot_cpu_has below */ | ||
| 21 | #endif | ||
| 22 | |||
| 19 | #define TEST(bit, op, c_op, val) \ | 23 | #define TEST(bit, op, c_op, val) \ |
| 20 | do { \ | 24 | do { \ |
| 21 | atomic##bit##_set(&v, v0); \ | 25 | atomic##bit##_set(&v, v0); \ |
| @@ -27,6 +31,65 @@ do { \ | |||
| 27 | (unsigned long long)r); \ | 31 | (unsigned long long)r); \ |
| 28 | } while (0) | 32 | } while (0) |
| 29 | 33 | ||
| 34 | /* | ||
| 35 | * Test for a atomic operation family, | ||
| 36 | * @test should be a macro accepting parameters (bit, op, ...) | ||
| 37 | */ | ||
| 38 | |||
| 39 | #define FAMILY_TEST(test, bit, op, args...) \ | ||
| 40 | do { \ | ||
| 41 | test(bit, op, ##args); \ | ||
| 42 | test(bit, op##_acquire, ##args); \ | ||
| 43 | test(bit, op##_release, ##args); \ | ||
| 44 | test(bit, op##_relaxed, ##args); \ | ||
| 45 | } while (0) | ||
| 46 | |||
| 47 | #define TEST_RETURN(bit, op, c_op, val) \ | ||
| 48 | do { \ | ||
| 49 | atomic##bit##_set(&v, v0); \ | ||
| 50 | r = v0; \ | ||
| 51 | r c_op val; \ | ||
| 52 | BUG_ON(atomic##bit##_##op(val, &v) != r); \ | ||
| 53 | BUG_ON(atomic##bit##_read(&v) != r); \ | ||
| 54 | } while (0) | ||
| 55 | |||
| 56 | #define RETURN_FAMILY_TEST(bit, op, c_op, val) \ | ||
| 57 | do { \ | ||
| 58 | FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \ | ||
| 59 | } while (0) | ||
| 60 | |||
| 61 | #define TEST_ARGS(bit, op, init, ret, expect, args...) \ | ||
| 62 | do { \ | ||
| 63 | atomic##bit##_set(&v, init); \ | ||
| 64 | BUG_ON(atomic##bit##_##op(&v, ##args) != ret); \ | ||
| 65 | BUG_ON(atomic##bit##_read(&v) != expect); \ | ||
| 66 | } while (0) | ||
| 67 | |||
| 68 | #define XCHG_FAMILY_TEST(bit, init, new) \ | ||
| 69 | do { \ | ||
| 70 | FAMILY_TEST(TEST_ARGS, bit, xchg, init, init, new, new); \ | ||
| 71 | } while (0) | ||
| 72 | |||
| 73 | #define CMPXCHG_FAMILY_TEST(bit, init, new, wrong) \ | ||
| 74 | do { \ | ||
| 75 | FAMILY_TEST(TEST_ARGS, bit, cmpxchg, \ | ||
| 76 | init, init, new, init, new); \ | ||
| 77 | FAMILY_TEST(TEST_ARGS, bit, cmpxchg, \ | ||
| 78 | init, init, init, wrong, new); \ | ||
| 79 | } while (0) | ||
| 80 | |||
| 81 | #define INC_RETURN_FAMILY_TEST(bit, i) \ | ||
| 82 | do { \ | ||
| 83 | FAMILY_TEST(TEST_ARGS, bit, inc_return, \ | ||
| 84 | i, (i) + one, (i) + one); \ | ||
| 85 | } while (0) | ||
| 86 | |||
| 87 | #define DEC_RETURN_FAMILY_TEST(bit, i) \ | ||
| 88 | do { \ | ||
| 89 | FAMILY_TEST(TEST_ARGS, bit, dec_return, \ | ||
| 90 | i, (i) - one, (i) - one); \ | ||
| 91 | } while (0) | ||
| 92 | |||
| 30 | static __init void test_atomic(void) | 93 | static __init void test_atomic(void) |
| 31 | { | 94 | { |
| 32 | int v0 = 0xaaa31337; | 95 | int v0 = 0xaaa31337; |
| @@ -45,6 +108,18 @@ static __init void test_atomic(void) | |||
| 45 | TEST(, and, &=, v1); | 108 | TEST(, and, &=, v1); |
| 46 | TEST(, xor, ^=, v1); | 109 | TEST(, xor, ^=, v1); |
| 47 | TEST(, andnot, &= ~, v1); | 110 | TEST(, andnot, &= ~, v1); |
| 111 | |||
| 112 | RETURN_FAMILY_TEST(, add_return, +=, onestwos); | ||
| 113 | RETURN_FAMILY_TEST(, add_return, +=, -one); | ||
| 114 | RETURN_FAMILY_TEST(, sub_return, -=, onestwos); | ||
| 115 | RETURN_FAMILY_TEST(, sub_return, -=, -one); | ||
| 116 | |||
| 117 | INC_RETURN_FAMILY_TEST(, v0); | ||
| 118 | DEC_RETURN_FAMILY_TEST(, v0); | ||
| 119 | |||
| 120 | XCHG_FAMILY_TEST(, v0, v1); | ||
| 121 | CMPXCHG_FAMILY_TEST(, v0, v1, onestwos); | ||
| 122 | |||
| 48 | } | 123 | } |
| 49 | 124 | ||
| 50 | #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) | 125 | #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) |
| @@ -74,25 +149,10 @@ static __init void test_atomic64(void) | |||
| 74 | TEST(64, xor, ^=, v1); | 149 | TEST(64, xor, ^=, v1); |
| 75 | TEST(64, andnot, &= ~, v1); | 150 | TEST(64, andnot, &= ~, v1); |
| 76 | 151 | ||
| 77 | INIT(v0); | 152 | RETURN_FAMILY_TEST(64, add_return, +=, onestwos); |
| 78 | r += onestwos; | 153 | RETURN_FAMILY_TEST(64, add_return, +=, -one); |
| 79 | BUG_ON(atomic64_add_return(onestwos, &v) != r); | 154 | RETURN_FAMILY_TEST(64, sub_return, -=, onestwos); |
| 80 | BUG_ON(v.counter != r); | 155 | RETURN_FAMILY_TEST(64, sub_return, -=, -one); |
| 81 | |||
| 82 | INIT(v0); | ||
| 83 | r += -one; | ||
| 84 | BUG_ON(atomic64_add_return(-one, &v) != r); | ||
| 85 | BUG_ON(v.counter != r); | ||
| 86 | |||
| 87 | INIT(v0); | ||
| 88 | r -= onestwos; | ||
| 89 | BUG_ON(atomic64_sub_return(onestwos, &v) != r); | ||
| 90 | BUG_ON(v.counter != r); | ||
| 91 | |||
| 92 | INIT(v0); | ||
| 93 | r -= -one; | ||
| 94 | BUG_ON(atomic64_sub_return(-one, &v) != r); | ||
| 95 | BUG_ON(v.counter != r); | ||
| 96 | 156 | ||
| 97 | INIT(v0); | 157 | INIT(v0); |
| 98 | atomic64_inc(&v); | 158 | atomic64_inc(&v); |
| @@ -100,33 +160,15 @@ static __init void test_atomic64(void) | |||
| 100 | BUG_ON(v.counter != r); | 160 | BUG_ON(v.counter != r); |
| 101 | 161 | ||
| 102 | INIT(v0); | 162 | INIT(v0); |
| 103 | r += one; | ||
| 104 | BUG_ON(atomic64_inc_return(&v) != r); | ||
| 105 | BUG_ON(v.counter != r); | ||
| 106 | |||
| 107 | INIT(v0); | ||
| 108 | atomic64_dec(&v); | 163 | atomic64_dec(&v); |
| 109 | r -= one; | 164 | r -= one; |
| 110 | BUG_ON(v.counter != r); | 165 | BUG_ON(v.counter != r); |
| 111 | 166 | ||
| 112 | INIT(v0); | 167 | INC_RETURN_FAMILY_TEST(64, v0); |
| 113 | r -= one; | 168 | DEC_RETURN_FAMILY_TEST(64, v0); |
| 114 | BUG_ON(atomic64_dec_return(&v) != r); | ||
| 115 | BUG_ON(v.counter != r); | ||
| 116 | 169 | ||
| 117 | INIT(v0); | 170 | XCHG_FAMILY_TEST(64, v0, v1); |
| 118 | BUG_ON(atomic64_xchg(&v, v1) != v0); | 171 | CMPXCHG_FAMILY_TEST(64, v0, v1, v2); |
| 119 | r = v1; | ||
| 120 | BUG_ON(v.counter != r); | ||
| 121 | |||
| 122 | INIT(v0); | ||
| 123 | BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0); | ||
| 124 | r = v1; | ||
| 125 | BUG_ON(v.counter != r); | ||
| 126 | |||
| 127 | INIT(v0); | ||
| 128 | BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0); | ||
| 129 | BUG_ON(v.counter != r); | ||
| 130 | 172 | ||
| 131 | INIT(v0); | 173 | INIT(v0); |
| 132 | BUG_ON(atomic64_add_unless(&v, one, v0)); | 174 | BUG_ON(atomic64_add_unless(&v, one, v0)); |
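For readers unfamiliar with the new helpers: each RETURN_FAMILY_TEST() line above covers the plain operation plus its _acquire, _release and _relaxed variants. Roughly, the first 64-bit invocation expands along these lines (a sketch of the macro expansion, not code from the patch):

/* RETURN_FAMILY_TEST(64, add_return, +=, onestwos), approximately expanded: */
atomic64_set(&v, v0); r = v0; r += onestwos;
BUG_ON(atomic64_add_return(onestwos, &v) != r);
BUG_ON(atomic64_read(&v) != r);

atomic64_set(&v, v0); r = v0; r += onestwos;
BUG_ON(atomic64_add_return_acquire(onestwos, &v) != r);
BUG_ON(atomic64_read(&v) != r);

/* ...and the same pair again for atomic64_add_return_release() and
 * atomic64_add_return_relaxed(). */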
diff --git a/lib/btree.c b/lib/btree.c
index 4264871ea1a0..f93a945274af 100644
--- a/lib/btree.c
+++ b/lib/btree.c
| @@ -5,7 +5,7 @@ | |||
| 5 | * | 5 | * |
| 6 | * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> | 6 | * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> |
| 7 | * Bits and pieces stolen from Peter Zijlstra's code, which is | 7 | * Bits and pieces stolen from Peter Zijlstra's code, which is |
| 8 | * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com> | 8 | * Copyright 2007, Red Hat Inc. Peter Zijlstra |
| 9 | * GPLv2 | 9 | * GPLv2 |
| 10 | * | 10 | * |
| 11 | * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch | 11 | * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch |
diff --git a/lib/div64.c b/lib/div64.c
index 62a698a432bc..7f345259c32f 100644
--- a/lib/div64.c
+++ b/lib/div64.c
| @@ -13,7 +13,8 @@ | |||
| 13 | * | 13 | * |
| 14 | * Code generated for this function might be very inefficient | 14 | * Code generated for this function might be very inefficient |
| 15 | * for some CPUs. __div64_32() can be overridden by linking arch-specific | 15 | * for some CPUs. __div64_32() can be overridden by linking arch-specific |
| 16 | * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. | 16 | * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S |
| 17 | * or by defining a preprocessor macro in arch/include/asm/div64.h. | ||
| 17 | */ | 18 | */ |
| 18 | 19 | ||
| 19 | #include <linux/export.h> | 20 | #include <linux/export.h> |
| @@ -23,6 +24,7 @@ | |||
| 23 | /* Not needed on 64bit architectures */ | 24 | /* Not needed on 64bit architectures */ |
| 24 | #if BITS_PER_LONG == 32 | 25 | #if BITS_PER_LONG == 32 |
| 25 | 26 | ||
| 27 | #ifndef __div64_32 | ||
| 26 | uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) | 28 | uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) |
| 27 | { | 29 | { |
| 28 | uint64_t rem = *n; | 30 | uint64_t rem = *n; |
| @@ -55,8 +57,8 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) | |||
| 55 | *n = res; | 57 | *n = res; |
| 56 | return rem; | 58 | return rem; |
| 57 | } | 59 | } |
| 58 | |||
| 59 | EXPORT_SYMBOL(__div64_32); | 60 | EXPORT_SYMBOL(__div64_32); |
| 61 | #endif | ||
| 60 | 62 | ||
| 61 | #ifndef div_s64_rem | 63 | #ifndef div_s64_rem |
| 62 | s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) | 64 | s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) |
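The new #ifndef guard means a 32-bit architecture can now replace the C fallback by defining a __div64_32 macro in its asm/div64.h, rather than relying only on weak-symbol overriding. A hypothetical sketch of that hook (the helper name is made up):

/* arch/<arch>/include/asm/div64.h -- hypothetical override */
extern uint32_t my_div64_32(uint64_t *n, uint32_t base);	/* optimized asm helper */
#define __div64_32 my_div64_32	/* the #ifndef in lib/div64.c then skips the weak C version */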
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8855f019ebe8..4a1515f4b452 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
| @@ -1181,7 +1181,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end | |||
| 1181 | 1181 | ||
| 1182 | static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) | 1182 | static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) |
| 1183 | { | 1183 | { |
| 1184 | if (overlap(addr, len, _text, _etext) || | 1184 | if (overlap(addr, len, _stext, _etext) || |
| 1185 | overlap(addr, len, __start_rodata, __end_rodata)) | 1185 | overlap(addr, len, __start_rodata, __end_rodata)) |
| 1186 | err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); | 1186 | err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); |
| 1187 | } | 1187 | } |
| @@ -1464,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, | |||
| 1464 | entry->type = dma_debug_coherent; | 1464 | entry->type = dma_debug_coherent; |
| 1465 | entry->dev = dev; | 1465 | entry->dev = dev; |
| 1466 | entry->pfn = page_to_pfn(virt_to_page(virt)); | 1466 | entry->pfn = page_to_pfn(virt_to_page(virt)); |
| 1467 | entry->offset = (size_t) virt & PAGE_MASK; | 1467 | entry->offset = (size_t) virt & ~PAGE_MASK; |
| 1468 | entry->size = size; | 1468 | entry->size = size; |
| 1469 | entry->dev_addr = dma_addr; | 1469 | entry->dev_addr = dma_addr; |
| 1470 | entry->direction = DMA_BIDIRECTIONAL; | 1470 | entry->direction = DMA_BIDIRECTIONAL; |
| @@ -1480,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size, | |||
| 1480 | .type = dma_debug_coherent, | 1480 | .type = dma_debug_coherent, |
| 1481 | .dev = dev, | 1481 | .dev = dev, |
| 1482 | .pfn = page_to_pfn(virt_to_page(virt)), | 1482 | .pfn = page_to_pfn(virt_to_page(virt)), |
| 1483 | .offset = (size_t) virt & PAGE_MASK, | 1483 | .offset = (size_t) virt & ~PAGE_MASK, |
| 1484 | .dev_addr = addr, | 1484 | .dev_addr = addr, |
| 1485 | .size = size, | 1485 | .size = size, |
| 1486 | .direction = DMA_BIDIRECTIONAL, | 1486 | .direction = DMA_BIDIRECTIONAL, |
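The two PAGE_MASK changes above are the functional fix in this file: masking with PAGE_MASK keeps the page-aligned part of an address, so the old code recorded the page base rather than the offset within the page. A worked example on a 64-bit kernel with 4 KiB pages (the address value is illustrative):

/* PAGE_SIZE = 4096, so PAGE_MASK = ~0xfffUL */
unsigned long virt = 0xffff880012345678UL;

unsigned long page_base = virt & PAGE_MASK;	/* 0xffff880012345000: what the old code stored */
unsigned long offset    = virt & ~PAGE_MASK;	/* 0x678: the intra-page offset the entry needs */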
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e3952e9c8ec0..fe42b6ec3f0c 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
| @@ -657,14 +657,9 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf, | |||
| 657 | pr_warn("expected <%d bytes into control\n", USER_BUF_PAGE); | 657 | pr_warn("expected <%d bytes into control\n", USER_BUF_PAGE); |
| 658 | return -E2BIG; | 658 | return -E2BIG; |
| 659 | } | 659 | } |
| 660 | tmpbuf = kmalloc(len + 1, GFP_KERNEL); | 660 | tmpbuf = memdup_user_nul(ubuf, len); |
| 661 | if (!tmpbuf) | 661 | if (IS_ERR(tmpbuf)) |
| 662 | return -ENOMEM; | 662 | return PTR_ERR(tmpbuf); |
| 663 | if (copy_from_user(tmpbuf, ubuf, len)) { | ||
| 664 | kfree(tmpbuf); | ||
| 665 | return -EFAULT; | ||
| 666 | } | ||
| 667 | tmpbuf[len] = '\0'; | ||
| 668 | vpr_info("read %d bytes from userspace\n", (int)len); | 663 | vpr_info("read %d bytes from userspace\n", (int)len); |
| 669 | 664 | ||
| 670 | ret = ddebug_exec_queries(tmpbuf, NULL); | 665 | ret = ddebug_exec_queries(tmpbuf, NULL); |
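memdup_user_nul() folds the allocate, copy_from_user, NUL-terminate and error-unwind steps that the old code spelled out into a single call. The general shape of a write() handler using it looks roughly like this (the handler name is illustrative):

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t len, loff_t *offp)
{
	char *buf = memdup_user_nul(ubuf, len);	/* kmalloc(len + 1), copy, append '\0' */

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... parse the NUL-terminated command in buf ... */

	kfree(buf);
	return len;
}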
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 116a166b096f..0a1139644d32 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
| @@ -270,6 +270,25 @@ EXPORT_SYMBOL(gen_pool_destroy); | |||
| 270 | */ | 270 | */ |
| 271 | unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | 271 | unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) |
| 272 | { | 272 | { |
| 273 | return gen_pool_alloc_algo(pool, size, pool->algo, pool->data); | ||
| 274 | } | ||
| 275 | EXPORT_SYMBOL(gen_pool_alloc); | ||
| 276 | |||
| 277 | /** | ||
| 278 | * gen_pool_alloc_algo - allocate special memory from the pool | ||
| 279 | * @pool: pool to allocate from | ||
| 280 | * @size: number of bytes to allocate from the pool | ||
| 281 | * @algo: algorithm passed from caller | ||
| 282 | * @data: data passed to algorithm | ||
| 283 | * | ||
| 284 | * Allocate the requested number of bytes from the specified pool. | ||
| 285 | * Uses the pool allocation function (with first-fit algorithm by default). | ||
| 286 | * Can not be used in NMI handler on architectures without | ||
| 287 | * NMI-safe cmpxchg implementation. | ||
| 288 | */ | ||
| 289 | unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, | ||
| 290 | genpool_algo_t algo, void *data) | ||
| 291 | { | ||
| 273 | struct gen_pool_chunk *chunk; | 292 | struct gen_pool_chunk *chunk; |
| 274 | unsigned long addr = 0; | 293 | unsigned long addr = 0; |
| 275 | int order = pool->min_alloc_order; | 294 | int order = pool->min_alloc_order; |
| @@ -290,8 +309,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | |||
| 290 | 309 | ||
| 291 | end_bit = chunk_size(chunk) >> order; | 310 | end_bit = chunk_size(chunk) >> order; |
| 292 | retry: | 311 | retry: |
| 293 | start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits, | 312 | start_bit = algo(chunk->bits, end_bit, start_bit, |
| 294 | pool->data); | 313 | nbits, data, pool); |
| 295 | if (start_bit >= end_bit) | 314 | if (start_bit >= end_bit) |
| 296 | continue; | 315 | continue; |
| 297 | remain = bitmap_set_ll(chunk->bits, start_bit, nbits); | 316 | remain = bitmap_set_ll(chunk->bits, start_bit, nbits); |
| @@ -310,7 +329,7 @@ retry: | |||
| 310 | rcu_read_unlock(); | 329 | rcu_read_unlock(); |
| 311 | return addr; | 330 | return addr; |
| 312 | } | 331 | } |
| 313 | EXPORT_SYMBOL(gen_pool_alloc); | 332 | EXPORT_SYMBOL(gen_pool_alloc_algo); |
| 314 | 333 | ||
| 315 | /** | 334 | /** |
| 316 | * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage | 335 | * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage |
| @@ -501,15 +520,74 @@ EXPORT_SYMBOL(gen_pool_set_algo); | |||
| 501 | * @start: The bitnumber to start searching at | 520 | * @start: The bitnumber to start searching at |
| 502 | * @nr: The number of zeroed bits we're looking for | 521 | * @nr: The number of zeroed bits we're looking for |
| 503 | * @data: additional data - unused | 522 | * @data: additional data - unused |
| 523 | * @pool: pool to find the fit region memory from | ||
| 504 | */ | 524 | */ |
| 505 | unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, | 525 | unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, |
| 506 | unsigned long start, unsigned int nr, void *data) | 526 | unsigned long start, unsigned int nr, void *data, |
| 527 | struct gen_pool *pool) | ||
| 507 | { | 528 | { |
| 508 | return bitmap_find_next_zero_area(map, size, start, nr, 0); | 529 | return bitmap_find_next_zero_area(map, size, start, nr, 0); |
| 509 | } | 530 | } |
| 510 | EXPORT_SYMBOL(gen_pool_first_fit); | 531 | EXPORT_SYMBOL(gen_pool_first_fit); |
| 511 | 532 | ||
| 512 | /** | 533 | /** |
| 534 | * gen_pool_first_fit_align - find the first available region | ||
| 535 | * of memory matching the size requirement (alignment constraint) | ||
| 536 | * @map: The address to base the search on | ||
| 537 | * @size: The bitmap size in bits | ||
| 538 | * @start: The bitnumber to start searching at | ||
| 539 | * @nr: The number of zeroed bits we're looking for | ||
| 540 | * @data: data for alignment | ||
| 541 | * @pool: pool to get order from | ||
| 542 | */ | ||
| 543 | unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, | ||
| 544 | unsigned long start, unsigned int nr, void *data, | ||
| 545 | struct gen_pool *pool) | ||
| 546 | { | ||
| 547 | struct genpool_data_align *alignment; | ||
| 548 | unsigned long align_mask; | ||
| 549 | int order; | ||
| 550 | |||
| 551 | alignment = data; | ||
| 552 | order = pool->min_alloc_order; | ||
| 553 | align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1; | ||
| 554 | return bitmap_find_next_zero_area(map, size, start, nr, align_mask); | ||
| 555 | } | ||
| 556 | EXPORT_SYMBOL(gen_pool_first_fit_align); | ||
| 557 | |||
| 558 | /** | ||
| 559 | * gen_pool_fixed_alloc - reserve a specific region | ||
| 560 | * @map: The address to base the search on | ||
| 561 | * @size: The bitmap size in bits | ||
| 562 | * @start: The bitnumber to start searching at | ||
| 563 | * @nr: The number of zeroed bits we're looking for | ||
| 564 | * @data: data for alignment | ||
| 565 | * @pool: pool to get order from | ||
| 566 | */ | ||
| 567 | unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, | ||
| 568 | unsigned long start, unsigned int nr, void *data, | ||
| 569 | struct gen_pool *pool) | ||
| 570 | { | ||
| 571 | struct genpool_data_fixed *fixed_data; | ||
| 572 | int order; | ||
| 573 | unsigned long offset_bit; | ||
| 574 | unsigned long start_bit; | ||
| 575 | |||
| 576 | fixed_data = data; | ||
| 577 | order = pool->min_alloc_order; | ||
| 578 | offset_bit = fixed_data->offset >> order; | ||
| 579 | if (WARN_ON(fixed_data->offset & ((1UL << order) - 1))) | ||
| 580 | return size; | ||
| 581 | |||
| 582 | start_bit = bitmap_find_next_zero_area(map, size, | ||
| 583 | start + offset_bit, nr, 0); | ||
| 584 | if (start_bit != offset_bit) | ||
| 585 | start_bit = size; | ||
| 586 | return start_bit; | ||
| 587 | } | ||
| 588 | EXPORT_SYMBOL(gen_pool_fixed_alloc); | ||
| 589 | |||
| 590 | /** | ||
| 513 | * gen_pool_first_fit_order_align - find the first available region | 591 | * gen_pool_first_fit_order_align - find the first available region |
| 514 | * of memory matching the size requirement. The region will be aligned | 592 | * of memory matching the size requirement. The region will be aligned |
| 515 | * to the order of the size specified. | 593 | * to the order of the size specified. |
| @@ -518,10 +596,11 @@ EXPORT_SYMBOL(gen_pool_first_fit); | |||
| 518 | * @start: The bitnumber to start searching at | 596 | * @start: The bitnumber to start searching at |
| 519 | * @nr: The number of zeroed bits we're looking for | 597 | * @nr: The number of zeroed bits we're looking for |
| 520 | * @data: additional data - unused | 598 | * @data: additional data - unused |
| 599 | * @pool: pool to find the fit region memory from | ||
| 521 | */ | 600 | */ |
| 522 | unsigned long gen_pool_first_fit_order_align(unsigned long *map, | 601 | unsigned long gen_pool_first_fit_order_align(unsigned long *map, |
| 523 | unsigned long size, unsigned long start, | 602 | unsigned long size, unsigned long start, |
| 524 | unsigned int nr, void *data) | 603 | unsigned int nr, void *data, struct gen_pool *pool) |
| 525 | { | 604 | { |
| 526 | unsigned long align_mask = roundup_pow_of_two(nr) - 1; | 605 | unsigned long align_mask = roundup_pow_of_two(nr) - 1; |
| 527 | 606 | ||
| @@ -537,12 +616,14 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align); | |||
| 537 | * @start: The bitnumber to start searching at | 616 | * @start: The bitnumber to start searching at |
| 538 | * @nr: The number of zeroed bits we're looking for | 617 | * @nr: The number of zeroed bits we're looking for |
| 539 | * @data: additional data - unused | 618 | * @data: additional data - unused |
| 619 | * @pool: pool to find the fit region memory from | ||
| 540 | * | 620 | * |
| 541 | * Iterate over the bitmap to find the smallest free region | 621 | * Iterate over the bitmap to find the smallest free region |
| 542 | * which we can allocate the memory. | 622 | * which we can allocate the memory. |
| 543 | */ | 623 | */ |
| 544 | unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, | 624 | unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, |
| 545 | unsigned long start, unsigned int nr, void *data) | 625 | unsigned long start, unsigned int nr, void *data, |
| 626 | struct gen_pool *pool) | ||
| 546 | { | 627 | { |
| 547 | unsigned long start_bit = size; | 628 | unsigned long start_bit = size; |
| 548 | unsigned long len = size + 1; | 629 | unsigned long len = size + 1; |
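gen_pool_alloc() is now a thin wrapper around gen_pool_alloc_algo(), which lets a caller override the pool's default algorithm for a single allocation; gen_pool_fixed_alloc() and gen_pool_first_fit_align() are the new algorithms that make use of the extra @pool argument. A hedged sketch of the fixed-offset variant (the pool, the 256-byte size and the 0x100 offset are illustrative, not from the patch):

#include <linux/genalloc.h>

/* Reserve 256 bytes at exactly base + 0x100 inside an existing pool. */
static unsigned long reserve_fixed_region(struct gen_pool *pool)
{
	struct genpool_data_fixed fixed = {
		.offset = 0x100,	/* must be aligned to the pool's min_alloc_order */
	};

	/* Returns 0 if that region is already in use or the offset is misaligned. */
	return gen_pool_alloc_algo(pool, 256, gen_pool_fixed_alloc, &fixed);
}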
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index 4527e751b5e0..b8f1d6cbb200 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
| @@ -42,6 +42,27 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to, | |||
| 42 | EXPORT_SYMBOL_GPL(__iowrite32_copy); | 42 | EXPORT_SYMBOL_GPL(__iowrite32_copy); |
| 43 | 43 | ||
| 44 | /** | 44 | /** |
| 45 | * __ioread32_copy - copy data from MMIO space, in 32-bit units | ||
| 46 | * @to: destination (must be 32-bit aligned) | ||
| 47 | * @from: source, in MMIO space (must be 32-bit aligned) | ||
| 48 | * @count: number of 32-bit quantities to copy | ||
| 49 | * | ||
| 50 | * Copy data from MMIO space to kernel space, in units of 32 bits at a | ||
| 51 | * time. Order of access is not guaranteed, nor is a memory barrier | ||
| 52 | * performed afterwards. | ||
| 53 | */ | ||
| 54 | void __ioread32_copy(void *to, const void __iomem *from, size_t count) | ||
| 55 | { | ||
| 56 | u32 *dst = to; | ||
| 57 | const u32 __iomem *src = from; | ||
| 58 | const u32 __iomem *end = src + count; | ||
| 59 | |||
| 60 | while (src < end) | ||
| 61 | *dst++ = __raw_readl(src++); | ||
| 62 | } | ||
| 63 | EXPORT_SYMBOL_GPL(__ioread32_copy); | ||
| 64 | |||
| 65 | /** | ||
| 45 | * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units | 66 | * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units |
| 46 | * @to: destination, in MMIO space (must be 64-bit aligned) | 67 | * @to: destination, in MMIO space (must be 64-bit aligned) |
| 47 | * @from: source (must be 64-bit aligned) | 68 | * @from: source (must be 64-bit aligned) |
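__ioread32_copy() is the read-direction counterpart of the existing __iowrite32_copy(). A minimal usage sketch (the 64-word register window is an assumption):

#include <linux/io.h>

/* Snapshot a 64-word MMIO register window into a kernel buffer, one
 * 32-bit read at a time; no ordering or barrier guarantees are implied. */
static void snapshot_regs(const void __iomem *regs, u32 *buf)
{
	__ioread32_copy(buf, regs, 64);		/* count is in 32-bit words, not bytes */
}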
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 75232ad0a5e7..5fecddc32b1b 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
| @@ -369,7 +369,7 @@ static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t | |||
| 369 | kunmap_atomic(from); | 369 | kunmap_atomic(from); |
| 370 | } | 370 | } |
| 371 | 371 | ||
| 372 | static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len) | 372 | static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len) |
| 373 | { | 373 | { |
| 374 | char *to = kmap_atomic(page); | 374 | char *to = kmap_atomic(page); |
| 375 | memcpy(to + offset, from, len); | 375 | memcpy(to + offset, from, len); |
| @@ -383,9 +383,9 @@ static void memzero_page(struct page *page, size_t offset, size_t len) | |||
| 383 | kunmap_atomic(addr); | 383 | kunmap_atomic(addr); |
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i) | 386 | size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) |
| 387 | { | 387 | { |
| 388 | char *from = addr; | 388 | const char *from = addr; |
| 389 | if (unlikely(bytes > i->count)) | 389 | if (unlikely(bytes > i->count)) |
| 390 | bytes = i->count; | 390 | bytes = i->count; |
| 391 | 391 | ||
| @@ -704,10 +704,10 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, | |||
| 704 | } | 704 | } |
| 705 | EXPORT_SYMBOL(csum_and_copy_from_iter); | 705 | EXPORT_SYMBOL(csum_and_copy_from_iter); |
| 706 | 706 | ||
| 707 | size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, | 707 | size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, |
| 708 | struct iov_iter *i) | 708 | struct iov_iter *i) |
| 709 | { | 709 | { |
| 710 | char *from = addr; | 710 | const char *from = addr; |
| 711 | __wsum sum, next; | 711 | __wsum sum, next; |
| 712 | size_t off = 0; | 712 | size_t off = 0; |
| 713 | if (unlikely(bytes > i->count)) | 713 | if (unlikely(bytes > i->count)) |
| @@ -849,3 +849,4 @@ int import_single_range(int rw, void __user *buf, size_t len, | |||
| 849 | iov_iter_init(i, rw, iov, 1, len); | 849 | iov_iter_init(i, rw, iov, 1, len); |
| 850 | return 0; | 850 | return 0; |
| 851 | } | 851 | } |
| 852 | EXPORT_SYMBOL(import_single_range); | ||
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
new file mode 100644
index 000000000000..836f7db4e548
--- /dev/null
+++ b/lib/irq_poll.c
| @@ -0,0 +1,222 @@ | |||
| 1 | /* | ||
| 2 | * Functions related to interrupt-poll handling in the block layer. This | ||
| 3 | * is similar to NAPI for network devices. | ||
| 4 | */ | ||
| 5 | #include <linux/kernel.h> | ||
| 6 | #include <linux/module.h> | ||
| 7 | #include <linux/init.h> | ||
| 8 | #include <linux/bio.h> | ||
| 9 | #include <linux/interrupt.h> | ||
| 10 | #include <linux/cpu.h> | ||
| 11 | #include <linux/irq_poll.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | |||
| 14 | static unsigned int irq_poll_budget __read_mostly = 256; | ||
| 15 | |||
| 16 | static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll); | ||
| 17 | |||
| 18 | /** | ||
| 19 | * irq_poll_sched - Schedule a run of the iopoll handler | ||
| 20 | * @iop: The parent iopoll structure | ||
| 21 | * | ||
| 22 | * Description: | ||
| 23 | * Add this irq_poll structure to the pending poll list and trigger the | ||
| 24 | * raise of the blk iopoll softirq. | ||
| 25 | **/ | ||
| 26 | void irq_poll_sched(struct irq_poll *iop) | ||
| 27 | { | ||
| 28 | unsigned long flags; | ||
| 29 | |||
| 30 | if (test_bit(IRQ_POLL_F_DISABLE, &iop->state)) | ||
| 31 | return; | ||
| 32 | if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state)) | ||
| 33 | return; | ||
| 34 | |||
| 35 | local_irq_save(flags); | ||
| 36 | list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); | ||
| 37 | __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); | ||
| 38 | local_irq_restore(flags); | ||
| 39 | } | ||
| 40 | EXPORT_SYMBOL(irq_poll_sched); | ||
| 41 | |||
| 42 | /** | ||
| 43 | * __irq_poll_complete - Mark this @iop as un-polled again | ||
| 44 | * @iop: The parent iopoll structure | ||
| 45 | * | ||
| 46 | * Description: | ||
| 47 | * See irq_poll_complete(). This function must be called with interrupts | ||
| 48 | * disabled. | ||
| 49 | **/ | ||
| 50 | static void __irq_poll_complete(struct irq_poll *iop) | ||
| 51 | { | ||
| 52 | list_del(&iop->list); | ||
| 53 | smp_mb__before_atomic(); | ||
| 54 | clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state); | ||
| 55 | } | ||
| 56 | |||
| 57 | /** | ||
| 58 | * irq_poll_complete - Mark this @iop as un-polled again | ||
| 59 | * @iop: The parent iopoll structure | ||
| 60 | * | ||
| 61 | * Description: | ||
| 62 | * If a driver consumes less than the assigned budget in its run of the | ||
| 63 | * iopoll handler, it'll end the polled mode by calling this function. The | ||
| 64 | * iopoll handler will not be invoked again before irq_poll_sched() | ||
| 65 | * is called. | ||
| 66 | **/ | ||
| 67 | void irq_poll_complete(struct irq_poll *iop) | ||
| 68 | { | ||
| 69 | unsigned long flags; | ||
| 70 | |||
| 71 | local_irq_save(flags); | ||
| 72 | __irq_poll_complete(iop); | ||
| 73 | local_irq_restore(flags); | ||
| 74 | } | ||
| 75 | EXPORT_SYMBOL(irq_poll_complete); | ||
| 76 | |||
| 77 | static void irq_poll_softirq(struct softirq_action *h) | ||
| 78 | { | ||
| 79 | struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll); | ||
| 80 | int rearm = 0, budget = irq_poll_budget; | ||
| 81 | unsigned long start_time = jiffies; | ||
| 82 | |||
| 83 | local_irq_disable(); | ||
| 84 | |||
| 85 | while (!list_empty(list)) { | ||
| 86 | struct irq_poll *iop; | ||
| 87 | int work, weight; | ||
| 88 | |||
| 89 | /* | ||
| 90 | * If softirq window is exhausted then punt. | ||
| 91 | */ | ||
| 92 | if (budget <= 0 || time_after(jiffies, start_time)) { | ||
| 93 | rearm = 1; | ||
| 94 | break; | ||
| 95 | } | ||
| 96 | |||
| 97 | local_irq_enable(); | ||
| 98 | |||
| 99 | /* Even though interrupts have been re-enabled, this | ||
| 100 | * access is safe because interrupts can only add new | ||
| 101 | * entries to the tail of this list, and only ->poll() | ||
| 102 | * calls can remove this head entry from the list. | ||
| 103 | */ | ||
| 104 | iop = list_entry(list->next, struct irq_poll, list); | ||
| 105 | |||
| 106 | weight = iop->weight; | ||
| 107 | work = 0; | ||
| 108 | if (test_bit(IRQ_POLL_F_SCHED, &iop->state)) | ||
| 109 | work = iop->poll(iop, weight); | ||
| 110 | |||
| 111 | budget -= work; | ||
| 112 | |||
| 113 | local_irq_disable(); | ||
| 114 | |||
| 115 | /* | ||
| 116 | * Drivers must not modify the iopoll state, if they | ||
| 117 | * consume their assigned weight (or more, some drivers can't | ||
| 118 | * easily just stop processing, they have to complete an | ||
| 119 | * entire mask of commands).In such cases this code | ||
| 120 | * still "owns" the iopoll instance and therefore can | ||
| 121 | * move the instance around on the list at-will. | ||
| 122 | */ | ||
| 123 | if (work >= weight) { | ||
| 124 | if (test_bit(IRQ_POLL_F_DISABLE, &iop->state)) | ||
| 125 | __irq_poll_complete(iop); | ||
| 126 | else | ||
| 127 | list_move_tail(&iop->list, list); | ||
| 128 | } | ||
| 129 | } | ||
| 130 | |||
| 131 | if (rearm) | ||
| 132 | __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); | ||
| 133 | |||
| 134 | local_irq_enable(); | ||
| 135 | } | ||
| 136 | |||
| 137 | /** | ||
| 138 | * irq_poll_disable - Disable iopoll on this @iop | ||
| 139 | * @iop: The parent iopoll structure | ||
| 140 | * | ||
| 141 | * Description: | ||
| 142 | * Disable io polling and wait for any pending callbacks to have completed. | ||
| 143 | **/ | ||
| 144 | void irq_poll_disable(struct irq_poll *iop) | ||
| 145 | { | ||
| 146 | set_bit(IRQ_POLL_F_DISABLE, &iop->state); | ||
| 147 | while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state)) | ||
| 148 | msleep(1); | ||
| 149 | clear_bit(IRQ_POLL_F_DISABLE, &iop->state); | ||
| 150 | } | ||
| 151 | EXPORT_SYMBOL(irq_poll_disable); | ||
| 152 | |||
| 153 | /** | ||
| 154 | * irq_poll_enable - Enable iopoll on this @iop | ||
| 155 | * @iop: The parent iopoll structure | ||
| 156 | * | ||
| 157 | * Description: | ||
| 158 | * Enable iopoll on this @iop. Note that the handler run will not be | ||
| 159 | * scheduled, it will only mark it as active. | ||
| 160 | **/ | ||
| 161 | void irq_poll_enable(struct irq_poll *iop) | ||
| 162 | { | ||
| 163 | BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state)); | ||
| 164 | smp_mb__before_atomic(); | ||
| 165 | clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state); | ||
| 166 | } | ||
| 167 | EXPORT_SYMBOL(irq_poll_enable); | ||
| 168 | |||
| 169 | /** | ||
| 170 | * irq_poll_init - Initialize this @iop | ||
| 171 | * @iop: The parent iopoll structure | ||
| 172 | * @weight: The default weight (or command completion budget) | ||
| 173 | * @poll_fn: The handler to invoke | ||
| 174 | * | ||
| 175 | * Description: | ||
| 176 | * Initialize and enable this irq_poll structure. | ||
| 177 | **/ | ||
| 178 | void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn) | ||
| 179 | { | ||
| 180 | memset(iop, 0, sizeof(*iop)); | ||
| 181 | INIT_LIST_HEAD(&iop->list); | ||
| 182 | iop->weight = weight; | ||
| 183 | iop->poll = poll_fn; | ||
| 184 | } | ||
| 185 | EXPORT_SYMBOL(irq_poll_init); | ||
| 186 | |||
| 187 | static int irq_poll_cpu_notify(struct notifier_block *self, | ||
| 188 | unsigned long action, void *hcpu) | ||
| 189 | { | ||
| 190 | /* | ||
| 191 | * If a CPU goes away, splice its entries to the current CPU | ||
| 192 | * and trigger a run of the softirq | ||
| 193 | */ | ||
| 194 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { | ||
| 195 | int cpu = (unsigned long) hcpu; | ||
| 196 | |||
| 197 | local_irq_disable(); | ||
| 198 | list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), | ||
| 199 | this_cpu_ptr(&blk_cpu_iopoll)); | ||
| 200 | __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); | ||
| 201 | local_irq_enable(); | ||
| 202 | } | ||
| 203 | |||
| 204 | return NOTIFY_OK; | ||
| 205 | } | ||
| 206 | |||
| 207 | static struct notifier_block irq_poll_cpu_notifier = { | ||
| 208 | .notifier_call = irq_poll_cpu_notify, | ||
| 209 | }; | ||
| 210 | |||
| 211 | static __init int irq_poll_setup(void) | ||
| 212 | { | ||
| 213 | int i; | ||
| 214 | |||
| 215 | for_each_possible_cpu(i) | ||
| 216 | INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); | ||
| 217 | |||
| 218 | open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq); | ||
| 219 | register_hotcpu_notifier(&irq_poll_cpu_notifier); | ||
| 220 | return 0; | ||
| 221 | } | ||
| 222 | subsys_initcall(irq_poll_setup); | ||
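A driver is expected to initialize an irq_poll instance with a poll callback, schedule it from its hard interrupt handler, and call irq_poll_complete() once a poll run drains the hardware before exhausting its budget. The sketch below illustrates that flow; the device structure and the my_dev_* helpers are assumptions, not part of the patch.

#include <linux/interrupt.h>
#include <linux/irq_poll.h>

struct my_dev {
	struct irq_poll iop;
	/* ... device state ... */
};

/* Poll callback: handle up to "budget" completions, report how many were done. */
static int my_dev_poll(struct irq_poll *iop, int budget)
{
	struct my_dev *dev = container_of(iop, struct my_dev, iop);
	int done = my_dev_process_completions(dev, budget);	/* hypothetical helper */

	if (done < budget) {
		my_dev_unmask_irq(dev);		/* hypothetical: re-arm the interrupt */
		irq_poll_complete(iop);		/* hardware drained: leave polled mode */
	}
	return done;
}

static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	my_dev_mask_irq(dev);			/* hypothetical: quiesce the source */
	irq_poll_sched(&dev->iop);		/* defer completion handling to the softirq */
	return IRQ_HANDLED;
}

/* At probe time: irq_poll_init(&dev->iop, 32, my_dev_poll); */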
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index f194e6e593e1..7f6c506a4942 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
| @@ -13,19 +13,21 @@ | |||
| 13 | /* Simplified asprintf. */ | 13 | /* Simplified asprintf. */ |
| 14 | char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap) | 14 | char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap) |
| 15 | { | 15 | { |
| 16 | unsigned int len; | 16 | unsigned int first, second; |
| 17 | char *p; | 17 | char *p; |
| 18 | va_list aq; | 18 | va_list aq; |
| 19 | 19 | ||
| 20 | va_copy(aq, ap); | 20 | va_copy(aq, ap); |
| 21 | len = vsnprintf(NULL, 0, fmt, aq); | 21 | first = vsnprintf(NULL, 0, fmt, aq); |
| 22 | va_end(aq); | 22 | va_end(aq); |
| 23 | 23 | ||
| 24 | p = kmalloc_track_caller(len+1, gfp); | 24 | p = kmalloc_track_caller(first+1, gfp); |
| 25 | if (!p) | 25 | if (!p) |
| 26 | return NULL; | 26 | return NULL; |
| 27 | 27 | ||
| 28 | vsnprintf(p, len+1, fmt, ap); | 28 | second = vsnprintf(p, first+1, fmt, ap); |
| 29 | WARN(first != second, "different return values (%u and %u) from vsnprintf(\"%s\", ...)", | ||
| 30 | first, second, fmt); | ||
| 29 | 31 | ||
| 30 | return p; | 32 | return p; |
| 31 | } | 33 | } |
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 6a08ce7d6adc..74a54b7f2562 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
| 37 | #include <linux/kernel.h> | 37 | #include <linux/kernel.h> |
| 38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
| 39 | #include <linux/crc32c.h> | ||
| 39 | 40 | ||
| 40 | static struct crypto_shash *tfm; | 41 | static struct crypto_shash *tfm; |
| 41 | 42 | ||
| @@ -74,3 +75,4 @@ module_exit(libcrc32c_mod_fini); | |||
| 74 | MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); | 75 | MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); |
| 75 | MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations"); | 76 | MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations"); |
| 76 | MODULE_LICENSE("GPL"); | 77 | MODULE_LICENSE("GPL"); |
| 78 | MODULE_SOFTDEP("pre: crc32c"); | ||
diff --git a/lib/list_debug.c b/lib/list_debug.c
index c24c2f7e296f..3345a089ef7b 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
| @@ -12,6 +12,13 @@ | |||
| 12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
| 13 | #include <linux/rculist.h> | 13 | #include <linux/rculist.h> |
| 14 | 14 | ||
| 15 | static struct list_head force_poison; | ||
| 16 | void list_force_poison(struct list_head *entry) | ||
| 17 | { | ||
| 18 | entry->next = &force_poison; | ||
| 19 | entry->prev = &force_poison; | ||
| 20 | } | ||
| 21 | |||
| 15 | /* | 22 | /* |
| 16 | * Insert a new entry between two known consecutive entries. | 23 | * Insert a new entry between two known consecutive entries. |
| 17 | * | 24 | * |
| @@ -23,6 +30,8 @@ void __list_add(struct list_head *new, | |||
| 23 | struct list_head *prev, | 30 | struct list_head *prev, |
| 24 | struct list_head *next) | 31 | struct list_head *next) |
| 25 | { | 32 | { |
| 33 | WARN(new->next == &force_poison || new->prev == &force_poison, | ||
| 34 | "list_add attempted on force-poisoned entry\n"); | ||
| 26 | WARN(next->prev != prev, | 35 | WARN(next->prev != prev, |
| 27 | "list_add corruption. next->prev should be " | 36 | "list_add corruption. next->prev should be " |
| 28 | "prev (%p), but was %p. (next=%p).\n", | 37 | "prev (%p), but was %p. (next=%p).\n", |
| @@ -37,7 +46,7 @@ void __list_add(struct list_head *new, | |||
| 37 | next->prev = new; | 46 | next->prev = new; |
| 38 | new->next = next; | 47 | new->next = next; |
| 39 | new->prev = prev; | 48 | new->prev = prev; |
| 40 | prev->next = new; | 49 | WRITE_ONCE(prev->next, new); |
| 41 | } | 50 | } |
| 42 | EXPORT_SYMBOL(__list_add); | 51 | EXPORT_SYMBOL(__list_add); |
| 43 | 52 | ||
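list_force_poison() lets a caller mark a list_head that must not be linked again until it has been explicitly reinitialized; the new check in __list_add() turns any premature list_add() on such an entry into a WARN. A hedged sketch of that pattern (the surrounding structure and function names are made up):

struct tracked_entry {
	struct list_head node;
	/* ... */
};

static void retire_entry(struct tracked_entry *e)
{
	list_del(&e->node);
	list_force_poison(&e->node);	/* any list_add() before re-init now warns */
}

static void recycle_entry(struct list_head *head, struct tracked_entry *e)
{
	INIT_LIST_HEAD(&e->node);	/* clears the poison */
	list_add(&e->node, head);
}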
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 028f5d996eef..28ba40b99337 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
| @@ -238,7 +238,7 @@ void lc_reset(struct lru_cache *lc) | |||
| 238 | * @seq: the seq_file to print into | 238 | * @seq: the seq_file to print into |
| 239 | * @lc: the lru cache to print statistics of | 239 | * @lc: the lru cache to print statistics of |
| 240 | */ | 240 | */ |
| 241 | size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc) | 241 | void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc) |
| 242 | { | 242 | { |
| 243 | /* NOTE: | 243 | /* NOTE: |
| 244 | * total calls to lc_get are | 244 | * total calls to lc_get are |
| @@ -250,8 +250,6 @@ size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc) | |||
| 250 | seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n", | 250 | seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n", |
| 251 | lc->name, lc->used, lc->nr_elements, | 251 | lc->name, lc->used, lc->nr_elements, |
| 252 | lc->hits, lc->misses, lc->starving, lc->locked, lc->changed); | 252 | lc->hits, lc->misses, lc->starving, lc->locked, lc->changed); |
| 253 | |||
| 254 | return 0; | ||
| 255 | } | 253 | } |
| 256 | 254 | ||
| 257 | static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) | 255 | static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) |
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 3db76b8c1115..ec533a6c77b5 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
| @@ -135,7 +135,9 @@ EXPORT_SYMBOL_GPL(mpi_read_from_buffer); | |||
| 135 | * @buf: bufer to which the output will be written to. Needs to be at | 135 | * @buf: bufer to which the output will be written to. Needs to be at |
| 136 | * leaset mpi_get_size(a) long. | 136 | * leaset mpi_get_size(a) long. |
| 137 | * @buf_len: size of the buf. | 137 | * @buf_len: size of the buf. |
| 138 | * @nbytes: receives the actual length of the data written. | 138 | * @nbytes: receives the actual length of the data written on success and |
| 139 | * the data to-be-written on -EOVERFLOW in case buf_len was too | ||
| 140 | * small. | ||
| 139 | * @sign: if not NULL, it will be set to the sign of a. | 141 | * @sign: if not NULL, it will be set to the sign of a. |
| 140 | * | 142 | * |
| 141 | * Return: 0 on success or error code in case of error | 143 | * Return: 0 on success or error code in case of error |
| @@ -148,7 +150,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, | |||
| 148 | unsigned int n = mpi_get_size(a); | 150 | unsigned int n = mpi_get_size(a); |
| 149 | int i, lzeros = 0; | 151 | int i, lzeros = 0; |
| 150 | 152 | ||
| 151 | if (buf_len < n || !buf || !nbytes) | 153 | if (!buf || !nbytes) |
| 152 | return -EINVAL; | 154 | return -EINVAL; |
| 153 | 155 | ||
| 154 | if (sign) | 156 | if (sign) |
| @@ -163,6 +165,11 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, | |||
| 163 | break; | 165 | break; |
| 164 | } | 166 | } |
| 165 | 167 | ||
| 168 | if (buf_len < n - lzeros) { | ||
| 169 | *nbytes = n - lzeros; | ||
| 170 | return -EOVERFLOW; | ||
| 171 | } | ||
| 172 | |||
| 166 | p = buf; | 173 | p = buf; |
| 167 | *nbytes = n - lzeros; | 174 | *nbytes = n - lzeros; |
| 168 | 175 | ||
| @@ -332,7 +339,8 @@ EXPORT_SYMBOL_GPL(mpi_set_buffer); | |||
| 332 | * @nbytes: in/out param - it has the be set to the maximum number of | 339 | * @nbytes: in/out param - it has the be set to the maximum number of |
| 333 | * bytes that can be written to sgl. This has to be at least | 340 | * bytes that can be written to sgl. This has to be at least |
| 334 | * the size of the integer a. On return it receives the actual | 341 | * the size of the integer a. On return it receives the actual |
| 335 | * length of the data written. | 342 | * length of the data written on success or the data that would |
| 343 | * be written if buffer was too small. | ||
| 336 | * @sign: if not NULL, it will be set to the sign of a. | 344 | * @sign: if not NULL, it will be set to the sign of a. |
| 337 | * | 345 | * |
| 338 | * Return: 0 on success or error code in case of error | 346 | * Return: 0 on success or error code in case of error |
| @@ -345,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, | |||
| 345 | unsigned int n = mpi_get_size(a); | 353 | unsigned int n = mpi_get_size(a); |
| 346 | int i, x, y = 0, lzeros = 0, buf_len; | 354 | int i, x, y = 0, lzeros = 0, buf_len; |
| 347 | 355 | ||
| 348 | if (!nbytes || *nbytes < n) | 356 | if (!nbytes) |
| 349 | return -EINVAL; | 357 | return -EINVAL; |
| 350 | 358 | ||
| 351 | if (sign) | 359 | if (sign) |
| @@ -360,6 +368,11 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, | |||
| 360 | break; | 368 | break; |
| 361 | } | 369 | } |
| 362 | 370 | ||
| 371 | if (*nbytes < n - lzeros) { | ||
| 372 | *nbytes = n - lzeros; | ||
| 373 | return -EOVERFLOW; | ||
| 374 | } | ||
| 375 | |||
| 363 | *nbytes = n - lzeros; | 376 | *nbytes = n - lzeros; |
| 364 | buf_len = sgl->length; | 377 | buf_len = sgl->length; |
| 365 | p2 = sg_virt(sgl); | 378 | p2 = sg_virt(sgl); |
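With the two hunks above, mpi_read_buffer() and mpi_write_to_sgl() stop rejecting a short buffer up front; they instead report the required length through *nbytes and return -EOVERFLOW. A minimal caller sketch for the buffer variant (the retry and the kmalloc'd buffer are illustrative, not part of the patch):

	uint8_t small[16], *buf = small;
	unsigned int nbytes;
	int ret;

	ret = mpi_read_buffer(a, buf, sizeof(small), &nbytes, NULL);
	if (ret == -EOVERFLOW) {
		/* nbytes now holds the length the integer actually needs */
		buf = kmalloc(nbytes, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = mpi_read_buffer(a, buf, nbytes, &nbytes, NULL);
	}

On success *nbytes is the number of bytes actually written, so the leading-zero trimming done above stays invisible to the caller.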
diff --git a/lib/netdev-notifier-error-inject.c b/lib/netdev-notifier-error-inject.c new file mode 100644 index 000000000000..13e9c62e216f --- /dev/null +++ b/lib/netdev-notifier-error-inject.c | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 2 | #include <linux/module.h> | ||
| 3 | #include <linux/netdevice.h> | ||
| 4 | |||
| 5 | #include "notifier-error-inject.h" | ||
| 6 | |||
| 7 | static int priority; | ||
| 8 | module_param(priority, int, 0); | ||
| 9 | MODULE_PARM_DESC(priority, "specify netdevice notifier priority"); | ||
| 10 | |||
| 11 | static struct notifier_err_inject netdev_notifier_err_inject = { | ||
| 12 | .actions = { | ||
| 13 | { NOTIFIER_ERR_INJECT_ACTION(NETDEV_REGISTER) }, | ||
| 14 | { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEMTU) }, | ||
| 15 | { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGENAME) }, | ||
| 16 | { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_UP) }, | ||
| 17 | { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_TYPE_CHANGE) }, | ||
| 18 | { NOTIFIER_ERR_INJECT_ACTION(NETDEV_POST_INIT) }, | ||
| 19 | { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEMTU) }, | ||
| 20 | { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEUPPER) }, | ||
| 21 | { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEUPPER) }, | ||
| 22 | {} | ||
| 23 | } | ||
| 24 | }; | ||
| 25 | |||
| 26 | static struct dentry *dir; | ||
| 27 | |||
| 28 | static int netdev_err_inject_init(void) | ||
| 29 | { | ||
| 30 | int err; | ||
| 31 | |||
| 32 | dir = notifier_err_inject_init("netdev", notifier_err_inject_dir, | ||
| 33 | &netdev_notifier_err_inject, priority); | ||
| 34 | if (IS_ERR(dir)) | ||
| 35 | return PTR_ERR(dir); | ||
| 36 | |||
| 37 | err = register_netdevice_notifier(&netdev_notifier_err_inject.nb); | ||
| 38 | if (err) | ||
| 39 | debugfs_remove_recursive(dir); | ||
| 40 | |||
| 41 | return err; | ||
| 42 | } | ||
| 43 | |||
| 44 | static void netdev_err_inject_exit(void) | ||
| 45 | { | ||
| 46 | unregister_netdevice_notifier(&netdev_notifier_err_inject.nb); | ||
| 47 | debugfs_remove_recursive(dir); | ||
| 48 | } | ||
| 49 | |||
| 50 | module_init(netdev_err_inject_init); | ||
| 51 | module_exit(netdev_err_inject_exit); | ||
| 52 | |||
| 53 | MODULE_DESCRIPTION("Netdevice notifier error injection module"); | ||
| 54 | MODULE_LICENSE("GPL"); | ||
| 55 | MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>"); | ||
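The new module only lists which netdevice notifier events may be failed; the failure itself comes from the shared notifier-error-inject helper included above. That helper's notifier callback is assumed to match the event value against the actions table and convert the per-action debugfs value into a notifier return code, roughly along these lines (a sketch, not the exact code in lib/notifier-error-inject.c):

	static int notifier_err_inject_notify(struct notifier_block *nb,
					      unsigned long val, void *p)
	{
		struct notifier_err_inject *err_inject =
			container_of(nb, struct notifier_err_inject, nb);
		struct notifier_err_inject_action *action;

		for (action = err_inject->actions; action->name; action++)
			if (action->val == val)
				/* action->error is written via debugfs; 0 means "do not fail" */
				return notifier_from_errno(action->error);

		return NOTIFY_DONE;
	}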
diff --git a/lib/proportions.c b/lib/proportions.c index 6f724298f67a..efa54f259ea9 100644 --- a/lib/proportions.c +++ b/lib/proportions.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Floating proportions | 2 | * Floating proportions |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 4 | * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra |
| 5 | * | 5 | * |
| 6 | * Description: | 6 | * Description: |
| 7 | * | 7 | * |
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc index bec27fce7501..682aae8a1fef 100644 --- a/lib/raid6/altivec.uc +++ b/lib/raid6/altivec.uc | |||
| @@ -101,6 +101,7 @@ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs) | |||
| 101 | 101 | ||
| 102 | raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs); | 102 | raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs); |
| 103 | 103 | ||
| 104 | disable_kernel_altivec(); | ||
| 104 | preempt_enable(); | 105 | preempt_enable(); |
| 105 | } | 106 | } |
| 106 | 107 | ||
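The added call restores symmetry in the AltiVec wrapper: the syndrome computation runs with kernel AltiVec enabled, and that state has to be dropped again before preemption is re-enabled. A sketch of the resulting pattern, with the unrolling template marker ($#) dropped and the enable side assumed from the surrounding code that is not shown in this hunk:

	static void raid6_altivec_gen_syndrome(int disks, size_t bytes, void **ptrs)
	{
		preempt_disable();
		enable_kernel_altivec();

		raid6_altivec_gen_syndrome_real(disks, bytes, ptrs);

		disable_kernel_altivec();	/* the line added above */
		preempt_enable();
	}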
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 40e03ea2a967..2c5de86460c5 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
| @@ -49,7 +49,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
| 49 | if (rs->missed) | 49 | if (rs->missed) |
| 50 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", | 50 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", |
| 51 | func, rs->missed); | 51 | func, rs->missed); |
| 52 | rs->begin = 0; | 52 | rs->begin = jiffies; |
| 53 | rs->printed = 0; | 53 | rs->printed = 0; |
| 54 | rs->missed = 0; | 54 | rs->missed = 0; |
| 55 | } | 55 | } |
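Anchoring rs->begin at jiffies starts the next interval at the moment the previous one is closed out, rather than measuring it from time zero. A typical user of the fixed window, as a hedged usage sketch with illustrative names:

	/* allow at most 10 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(noisy_rs, 5 * HZ, 10);

	if (__ratelimit(&noisy_rs))
		pr_warn("noisy condition hit\n");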
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index a54ff8949f91..cc808707d1cf 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
| @@ -231,9 +231,6 @@ static int rhashtable_rehash_attach(struct rhashtable *ht, | |||
| 231 | */ | 231 | */ |
| 232 | rcu_assign_pointer(old_tbl->future_tbl, new_tbl); | 232 | rcu_assign_pointer(old_tbl->future_tbl, new_tbl); |
| 233 | 233 | ||
| 234 | /* Ensure the new table is visible to readers. */ | ||
| 235 | smp_wmb(); | ||
| 236 | |||
| 237 | spin_unlock_bh(old_tbl->locks); | 234 | spin_unlock_bh(old_tbl->locks); |
| 238 | 235 | ||
| 239 | return 0; | 236 | return 0; |
| @@ -389,33 +386,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht, | |||
| 389 | return false; | 386 | return false; |
| 390 | } | 387 | } |
| 391 | 388 | ||
| 392 | int rhashtable_insert_rehash(struct rhashtable *ht) | 389 | int rhashtable_insert_rehash(struct rhashtable *ht, |
| 390 | struct bucket_table *tbl) | ||
| 393 | { | 391 | { |
| 394 | struct bucket_table *old_tbl; | 392 | struct bucket_table *old_tbl; |
| 395 | struct bucket_table *new_tbl; | 393 | struct bucket_table *new_tbl; |
| 396 | struct bucket_table *tbl; | ||
| 397 | unsigned int size; | 394 | unsigned int size; |
| 398 | int err; | 395 | int err; |
| 399 | 396 | ||
| 400 | old_tbl = rht_dereference_rcu(ht->tbl, ht); | 397 | old_tbl = rht_dereference_rcu(ht->tbl, ht); |
| 401 | tbl = rhashtable_last_table(ht, old_tbl); | ||
| 402 | 398 | ||
| 403 | size = tbl->size; | 399 | size = tbl->size; |
| 404 | 400 | ||
| 401 | err = -EBUSY; | ||
| 402 | |||
| 405 | if (rht_grow_above_75(ht, tbl)) | 403 | if (rht_grow_above_75(ht, tbl)) |
| 406 | size *= 2; | 404 | size *= 2; |
| 407 | /* Do not schedule more than one rehash */ | 405 | /* Do not schedule more than one rehash */ |
| 408 | else if (old_tbl != tbl) | 406 | else if (old_tbl != tbl) |
| 409 | return -EBUSY; | 407 | goto fail; |
| 408 | |||
| 409 | err = -ENOMEM; | ||
| 410 | 410 | ||
| 411 | new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); | 411 | new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); |
| 412 | if (new_tbl == NULL) { | 412 | if (new_tbl == NULL) |
| 413 | /* Schedule async resize/rehash to try allocation | 413 | goto fail; |
| 414 | * non-atomic context. | ||
| 415 | */ | ||
| 416 | schedule_work(&ht->run_work); | ||
| 417 | return -ENOMEM; | ||
| 418 | } | ||
| 419 | 414 | ||
| 420 | err = rhashtable_rehash_attach(ht, tbl, new_tbl); | 415 | err = rhashtable_rehash_attach(ht, tbl, new_tbl); |
| 421 | if (err) { | 416 | if (err) { |
| @@ -426,12 +421,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht) | |||
| 426 | schedule_work(&ht->run_work); | 421 | schedule_work(&ht->run_work); |
| 427 | 422 | ||
| 428 | return err; | 423 | return err; |
| 424 | |||
| 425 | fail: | ||
| 426 | /* Do not fail the insert if someone else did a rehash. */ | ||
| 427 | if (likely(rcu_dereference_raw(tbl->future_tbl))) | ||
| 428 | return 0; | ||
| 429 | |||
| 430 | /* Schedule async rehash to retry allocation in process context. */ | ||
| 431 | if (err == -ENOMEM) | ||
| 432 | schedule_work(&ht->run_work); | ||
| 433 | |||
| 434 | return err; | ||
| 429 | } | 435 | } |
| 430 | EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); | 436 | EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); |
| 431 | 437 | ||
| 432 | int rhashtable_insert_slow(struct rhashtable *ht, const void *key, | 438 | struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, |
| 433 | struct rhash_head *obj, | 439 | const void *key, |
| 434 | struct bucket_table *tbl) | 440 | struct rhash_head *obj, |
| 441 | struct bucket_table *tbl) | ||
| 435 | { | 442 | { |
| 436 | struct rhash_head *head; | 443 | struct rhash_head *head; |
| 437 | unsigned int hash; | 444 | unsigned int hash; |
| @@ -467,7 +474,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key, | |||
| 467 | exit: | 474 | exit: |
| 468 | spin_unlock(rht_bucket_lock(tbl, hash)); | 475 | spin_unlock(rht_bucket_lock(tbl, hash)); |
| 469 | 476 | ||
| 470 | return err; | 477 | if (err == 0) |
| 478 | return NULL; | ||
| 479 | else if (err == -EAGAIN) | ||
| 480 | return tbl; | ||
| 481 | else | ||
| 482 | return ERR_PTR(err); | ||
| 471 | } | 483 | } |
| 472 | EXPORT_SYMBOL_GPL(rhashtable_insert_slow); | 484 | EXPORT_SYMBOL_GPL(rhashtable_insert_slow); |
| 473 | 485 | ||
| @@ -503,10 +515,11 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) | |||
| 503 | if (!iter->walker) | 515 | if (!iter->walker) |
| 504 | return -ENOMEM; | 516 | return -ENOMEM; |
| 505 | 517 | ||
| 506 | mutex_lock(&ht->mutex); | 518 | spin_lock(&ht->lock); |
| 507 | iter->walker->tbl = rht_dereference(ht->tbl, ht); | 519 | iter->walker->tbl = |
| 520 | rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); | ||
| 508 | list_add(&iter->walker->list, &iter->walker->tbl->walkers); | 521 | list_add(&iter->walker->list, &iter->walker->tbl->walkers); |
| 509 | mutex_unlock(&ht->mutex); | 522 | spin_unlock(&ht->lock); |
| 510 | 523 | ||
| 511 | return 0; | 524 | return 0; |
| 512 | } | 525 | } |
| @@ -520,10 +533,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init); | |||
| 520 | */ | 533 | */ |
| 521 | void rhashtable_walk_exit(struct rhashtable_iter *iter) | 534 | void rhashtable_walk_exit(struct rhashtable_iter *iter) |
| 522 | { | 535 | { |
| 523 | mutex_lock(&iter->ht->mutex); | 536 | spin_lock(&iter->ht->lock); |
| 524 | if (iter->walker->tbl) | 537 | if (iter->walker->tbl) |
| 525 | list_del(&iter->walker->list); | 538 | list_del(&iter->walker->list); |
| 526 | mutex_unlock(&iter->ht->mutex); | 539 | spin_unlock(&iter->ht->lock); |
| 527 | kfree(iter->walker); | 540 | kfree(iter->walker); |
| 528 | } | 541 | } |
| 529 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); | 542 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); |
| @@ -547,14 +560,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) | |||
| 547 | { | 560 | { |
| 548 | struct rhashtable *ht = iter->ht; | 561 | struct rhashtable *ht = iter->ht; |
| 549 | 562 | ||
| 550 | mutex_lock(&ht->mutex); | 563 | rcu_read_lock(); |
| 551 | 564 | ||
| 565 | spin_lock(&ht->lock); | ||
| 552 | if (iter->walker->tbl) | 566 | if (iter->walker->tbl) |
| 553 | list_del(&iter->walker->list); | 567 | list_del(&iter->walker->list); |
| 554 | 568 | spin_unlock(&ht->lock); | |
| 555 | rcu_read_lock(); | ||
| 556 | |||
| 557 | mutex_unlock(&ht->mutex); | ||
| 558 | 569 | ||
| 559 | if (!iter->walker->tbl) { | 570 | if (!iter->walker->tbl) { |
| 560 | iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); | 571 | iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); |
| @@ -723,9 +734,6 @@ int rhashtable_init(struct rhashtable *ht, | |||
| 723 | if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) | 734 | if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) |
| 724 | return -EINVAL; | 735 | return -EINVAL; |
| 725 | 736 | ||
| 726 | if (params->nelem_hint) | ||
| 727 | size = rounded_hashtable_size(params); | ||
| 728 | |||
| 729 | memset(ht, 0, sizeof(*ht)); | 737 | memset(ht, 0, sizeof(*ht)); |
| 730 | mutex_init(&ht->mutex); | 738 | mutex_init(&ht->mutex); |
| 731 | spin_lock_init(&ht->lock); | 739 | spin_lock_init(&ht->lock); |
| @@ -745,6 +753,9 @@ int rhashtable_init(struct rhashtable *ht, | |||
| 745 | 753 | ||
| 746 | ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); | 754 | ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); |
| 747 | 755 | ||
| 756 | if (params->nelem_hint) | ||
| 757 | size = rounded_hashtable_size(&ht->p); | ||
| 758 | |||
| 748 | /* The maximum (not average) chain length grows with the | 759 | /* The maximum (not average) chain length grows with the |
| 749 | * size of the hash table, at a rate of (log N)/(log log N). | 760 | * size of the hash table, at a rate of (log N)/(log log N). |
| 750 | * The value of 16 is selected so that even if the hash | 761 | * The value of 16 is selected so that even if the hash |
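rhashtable_insert_slow() now folds three outcomes into its return value: NULL when the insert completed, an ERR_PTR() for a hard failure, and the bucket table itself when the operation raced with a rehash and should simply be retried there. A sketch of how a caller is expected to consume that convention (assumed from the return statements above; the real consumer is the inline fast path in rhashtable.h):

	/* tbl is the table the fast path was holding when it fell back here */
	do {
		tbl = rhashtable_insert_slow(ht, key, obj, tbl);
	} while (!IS_ERR_OR_NULL(tbl));	/* a real table back means "retry there" */

	return PTR_ERR_OR_ZERO(tbl);	/* NULL: inserted; ERR_PTR: hard failure */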
diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 5c94e1012a91..cb18469e1f49 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c | |||
| @@ -306,10 +306,12 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt) | |||
| 306 | if (!cnt) | 306 | if (!cnt) |
| 307 | return 0; | 307 | return 0; |
| 308 | 308 | ||
| 309 | if (s->len <= s->readpos) | 309 | len = seq_buf_used(s); |
| 310 | |||
| 311 | if (len <= s->readpos) | ||
| 310 | return -EBUSY; | 312 | return -EBUSY; |
| 311 | 313 | ||
| 312 | len = seq_buf_used(s) - s->readpos; | 314 | len -= s->readpos; |
| 313 | if (cnt > len) | 315 | if (cnt > len) |
| 314 | cnt = len; | 316 | cnt = len; |
| 315 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); | 317 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); |
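The seq_buf fix leans on seq_buf_used() for the overflow case: when more was written than the buffer could hold, s->len exceeds s->size, and copying based on s->len would read past the end. The helper is assumed here to clamp to the allocated size, roughly:

	/* paraphrased from include/linux/seq_buf.h */
	static inline unsigned int seq_buf_used(struct seq_buf *s)
	{
		return min(s->len, s->size);
	}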
diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 5939f63d90cd..5c88204b6f1f 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c | |||
| @@ -43,50 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units, | |||
| 43 | [STRING_UNITS_10] = 1000, | 43 | [STRING_UNITS_10] = 1000, |
| 44 | [STRING_UNITS_2] = 1024, | 44 | [STRING_UNITS_2] = 1024, |
| 45 | }; | 45 | }; |
| 46 | int i, j; | 46 | static const unsigned int rounding[] = { 500, 50, 5 }; |
| 47 | u32 remainder = 0, sf_cap, exp; | 47 | int i = 0, j; |
| 48 | u32 remainder = 0, sf_cap; | ||
| 48 | char tmp[8]; | 49 | char tmp[8]; |
| 49 | const char *unit; | 50 | const char *unit; |
| 50 | 51 | ||
| 51 | tmp[0] = '\0'; | 52 | tmp[0] = '\0'; |
| 52 | i = 0; | 53 | |
| 53 | if (!size) | 54 | if (blk_size == 0) |
| 55 | size = 0; | ||
| 56 | if (size == 0) | ||
| 54 | goto out; | 57 | goto out; |
| 55 | 58 | ||
| 56 | while (blk_size >= divisor[units]) { | 59 | /* This is Napier's algorithm. Reduce the original block size to |
| 57 | remainder = do_div(blk_size, divisor[units]); | 60 | * |
| 61 | * coefficient * divisor[units]^i | ||
| 62 | * | ||
| 63 | * we do the reduction so both coefficients are just under 32 bits so | ||
| 64 | * that multiplying them together won't overflow 64 bits and we keep | ||
| 65 | * as much precision as possible in the numbers. | ||
| 66 | * | ||
| 67 | * Note: it's safe to throw away the remainders here because all the | ||
| 68 | * precision is in the coefficients. | ||
| 69 | */ | ||
| 70 | while (blk_size >> 32) { | ||
| 71 | do_div(blk_size, divisor[units]); | ||
| 58 | i++; | 72 | i++; |
| 59 | } | 73 | } |
| 60 | 74 | ||
| 61 | exp = divisor[units] / (u32)blk_size; | 75 | while (size >> 32) { |
| 62 | /* | 76 | do_div(size, divisor[units]); |
| 63 | * size must be strictly greater than exp here to ensure that remainder | ||
| 64 | * is greater than divisor[units] coming out of the if below. | ||
| 65 | */ | ||
| 66 | if (size > exp) { | ||
| 67 | remainder = do_div(size, divisor[units]); | ||
| 68 | remainder *= blk_size; | ||
| 69 | i++; | 77 | i++; |
| 70 | } else { | ||
| 71 | remainder *= size; | ||
| 72 | } | 78 | } |
| 73 | 79 | ||
| 80 | /* now perform the actual multiplication keeping i as the sum of the | ||
| 81 | * two logarithms */ | ||
| 74 | size *= blk_size; | 82 | size *= blk_size; |
| 75 | size += remainder / divisor[units]; | ||
| 76 | remainder %= divisor[units]; | ||
| 77 | 83 | ||
| 84 | /* and logarithmically reduce it until it's just under the divisor */ | ||
| 78 | while (size >= divisor[units]) { | 85 | while (size >= divisor[units]) { |
| 79 | remainder = do_div(size, divisor[units]); | 86 | remainder = do_div(size, divisor[units]); |
| 80 | i++; | 87 | i++; |
| 81 | } | 88 | } |
| 82 | 89 | ||
| 90 | /* work out in j how many digits of precision we need from the | ||
| 91 | * remainder */ | ||
| 83 | sf_cap = size; | 92 | sf_cap = size; |
| 84 | for (j = 0; sf_cap*10 < 1000; j++) | 93 | for (j = 0; sf_cap*10 < 1000; j++) |
| 85 | sf_cap *= 10; | 94 | sf_cap *= 10; |
| 86 | 95 | ||
| 87 | if (j) { | 96 | if (units == STRING_UNITS_2) { |
| 97 | /* express the remainder as a decimal. It's currently the | ||
| 98 | * numerator of a fraction whose denominator is | ||
| 99 | * divisor[units], which is 1 << 10 for STRING_UNITS_2 */ | ||
| 88 | remainder *= 1000; | 100 | remainder *= 1000; |
| 89 | remainder /= divisor[units]; | 101 | remainder >>= 10; |
| 102 | } | ||
| 103 | |||
| 104 | /* add a 5 to the digit below what will be printed to ensure | ||
| 105 | * an arithmetical round up and carry it through to size */ | ||
| 106 | remainder += rounding[j]; | ||
| 107 | if (remainder >= 1000) { | ||
| 108 | remainder -= 1000; | ||
| 109 | size += 1; | ||
| 110 | } | ||
| 111 | |||
| 112 | if (j) { | ||
| 90 | snprintf(tmp, sizeof(tmp), ".%03u", remainder); | 113 | snprintf(tmp, sizeof(tmp), ".%03u", remainder); |
| 91 | tmp[j+1] = '\0'; | 114 | tmp[j+1] = '\0'; |
| 92 | } | 115 | } |
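A worked example of the rewritten routine, assuming the usual binary unit strings: 2048 blocks of 512 bytes is exactly 1 MiB, the two reductions by 1024 leave a coefficient of 1 with remainder 0, rounding[j = 2] adds 5 one digit below the printed precision, and the result keeps two decimals.

	char buf[16];

	/* 2048 * 512 = 1048576 bytes = exactly 1 MiB */
	string_get_size(2048, 512, STRING_UNITS_2, buf, sizeof(buf));
	/* buf now holds "1.00 MiB" */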
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index e0af6ff73d14..33840324138c 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c | |||
| @@ -39,7 +39,7 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long | |||
| 39 | unsigned long c, data; | 39 | unsigned long c, data; |
| 40 | 40 | ||
| 41 | /* Fall back to byte-at-a-time if we get a page fault */ | 41 | /* Fall back to byte-at-a-time if we get a page fault */ |
| 42 | if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) | 42 | if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res)))) |
| 43 | break; | 43 | break; |
| 44 | *(unsigned long *)(dst+res) = c; | 44 | *(unsigned long *)(dst+res) = c; |
| 45 | if (has_zero(c, &data, &constants)) { | 45 | if (has_zero(c, &data, &constants)) { |
| @@ -55,7 +55,7 @@ byte_at_a_time: | |||
| 55 | while (max) { | 55 | while (max) { |
| 56 | char c; | 56 | char c; |
| 57 | 57 | ||
| 58 | if (unlikely(__get_user(c,src+res))) | 58 | if (unlikely(unsafe_get_user(c,src+res))) |
| 59 | return -EFAULT; | 59 | return -EFAULT; |
| 60 | dst[res] = c; | 60 | dst[res] = c; |
| 61 | if (!c) | 61 | if (!c) |
| @@ -107,7 +107,12 @@ long strncpy_from_user(char *dst, const char __user *src, long count) | |||
| 107 | src_addr = (unsigned long)src; | 107 | src_addr = (unsigned long)src; |
| 108 | if (likely(src_addr < max_addr)) { | 108 | if (likely(src_addr < max_addr)) { |
| 109 | unsigned long max = max_addr - src_addr; | 109 | unsigned long max = max_addr - src_addr; |
| 110 | return do_strncpy_from_user(dst, src, count, max); | 110 | long retval; |
| 111 | |||
| 112 | user_access_begin(); | ||
| 113 | retval = do_strncpy_from_user(dst, src, count, max); | ||
| 114 | user_access_end(); | ||
| 115 | return retval; | ||
| 111 | } | 116 | } |
| 112 | return -EFAULT; | 117 | return -EFAULT; |
| 113 | } | 118 | } |
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 3a5f2b366d84..2625943625d7 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c | |||
| @@ -45,7 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, | |||
| 45 | src -= align; | 45 | src -= align; |
| 46 | max += align; | 46 | max += align; |
| 47 | 47 | ||
| 48 | if (unlikely(__get_user(c,(unsigned long __user *)src))) | 48 | if (unlikely(unsafe_get_user(c,(unsigned long __user *)src))) |
| 49 | return 0; | 49 | return 0; |
| 50 | c |= aligned_byte_mask(align); | 50 | c |= aligned_byte_mask(align); |
| 51 | 51 | ||
| @@ -61,7 +61,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, | |||
| 61 | if (unlikely(max <= sizeof(unsigned long))) | 61 | if (unlikely(max <= sizeof(unsigned long))) |
| 62 | break; | 62 | break; |
| 63 | max -= sizeof(unsigned long); | 63 | max -= sizeof(unsigned long); |
| 64 | if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) | 64 | if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res)))) |
| 65 | return 0; | 65 | return 0; |
| 66 | } | 66 | } |
| 67 | res -= align; | 67 | res -= align; |
| @@ -112,7 +112,12 @@ long strnlen_user(const char __user *str, long count) | |||
| 112 | src_addr = (unsigned long)str; | 112 | src_addr = (unsigned long)str; |
| 113 | if (likely(src_addr < max_addr)) { | 113 | if (likely(src_addr < max_addr)) { |
| 114 | unsigned long max = max_addr - src_addr; | 114 | unsigned long max = max_addr - src_addr; |
| 115 | return do_strnlen_user(str, count, max); | 115 | long retval; |
| 116 | |||
| 117 | user_access_begin(); | ||
| 118 | retval = do_strnlen_user(str, count, max); | ||
| 119 | user_access_end(); | ||
| 120 | return retval; | ||
| 116 | } | 121 | } |
| 117 | return 0; | 122 | return 0; |
| 118 | } | 123 | } |
| @@ -141,7 +146,12 @@ long strlen_user(const char __user *str) | |||
| 141 | src_addr = (unsigned long)str; | 146 | src_addr = (unsigned long)str; |
| 142 | if (likely(src_addr < max_addr)) { | 147 | if (likely(src_addr < max_addr)) { |
| 143 | unsigned long max = max_addr - src_addr; | 148 | unsigned long max = max_addr - src_addr; |
| 144 | return do_strnlen_user(str, ~0ul, max); | 149 | long retval; |
| 150 | |||
| 151 | user_access_begin(); | ||
| 152 | retval = do_strnlen_user(str, ~0ul, max); | ||
| 153 | user_access_end(); | ||
| 154 | return retval; | ||
| 145 | } | 155 | } |
| 146 | return 0; | 156 | return 0; |
| 147 | } | 157 | } |
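Both string helpers now follow the same shape: open a single user-access window around the whole word-at-a-time loop instead of paying the per-access toggle that __get_user() implies (a STAC/CLAC pair per access on x86 with SMAP). The contract, as a minimal sketch with illustrative names (uptr is assumed):

	unsigned long word;
	int err = 0;

	user_access_begin();			/* e.g. STAC on x86 with SMAP */
	/* only unsafe_get_user() may run inside the window, and the code
	 * between begin/end should stay small and free of arbitrary calls */
	if (unlikely(unsafe_get_user(word, (unsigned long __user *)uptr)))
		err = -EFAULT;
	user_access_end();			/* e.g. CLAC */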
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 10cd1860e5b0..27a7a26b1ece 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
| @@ -1685,6 +1685,126 @@ static struct bpf_test tests[] = { | |||
| 1685 | { }, | 1685 | { }, |
| 1686 | { { 0, 0x35d97ef2 } } | 1686 | { { 0, 0x35d97ef2 } } |
| 1687 | }, | 1687 | }, |
| 1688 | { /* Mainly checking JIT here. */ | ||
| 1689 | "MOV REG64", | ||
| 1690 | .u.insns_int = { | ||
| 1691 | BPF_LD_IMM64(R0, 0xffffffffffffffffLL), | ||
| 1692 | BPF_MOV64_REG(R1, R0), | ||
| 1693 | BPF_MOV64_REG(R2, R1), | ||
| 1694 | BPF_MOV64_REG(R3, R2), | ||
| 1695 | BPF_MOV64_REG(R4, R3), | ||
| 1696 | BPF_MOV64_REG(R5, R4), | ||
| 1697 | BPF_MOV64_REG(R6, R5), | ||
| 1698 | BPF_MOV64_REG(R7, R6), | ||
| 1699 | BPF_MOV64_REG(R8, R7), | ||
| 1700 | BPF_MOV64_REG(R9, R8), | ||
| 1701 | BPF_ALU64_IMM(BPF_MOV, R0, 0), | ||
| 1702 | BPF_ALU64_IMM(BPF_MOV, R1, 0), | ||
| 1703 | BPF_ALU64_IMM(BPF_MOV, R2, 0), | ||
| 1704 | BPF_ALU64_IMM(BPF_MOV, R3, 0), | ||
| 1705 | BPF_ALU64_IMM(BPF_MOV, R4, 0), | ||
| 1706 | BPF_ALU64_IMM(BPF_MOV, R5, 0), | ||
| 1707 | BPF_ALU64_IMM(BPF_MOV, R6, 0), | ||
| 1708 | BPF_ALU64_IMM(BPF_MOV, R7, 0), | ||
| 1709 | BPF_ALU64_IMM(BPF_MOV, R8, 0), | ||
| 1710 | BPF_ALU64_IMM(BPF_MOV, R9, 0), | ||
| 1711 | BPF_ALU64_REG(BPF_ADD, R0, R0), | ||
| 1712 | BPF_ALU64_REG(BPF_ADD, R0, R1), | ||
| 1713 | BPF_ALU64_REG(BPF_ADD, R0, R2), | ||
| 1714 | BPF_ALU64_REG(BPF_ADD, R0, R3), | ||
| 1715 | BPF_ALU64_REG(BPF_ADD, R0, R4), | ||
| 1716 | BPF_ALU64_REG(BPF_ADD, R0, R5), | ||
| 1717 | BPF_ALU64_REG(BPF_ADD, R0, R6), | ||
| 1718 | BPF_ALU64_REG(BPF_ADD, R0, R7), | ||
| 1719 | BPF_ALU64_REG(BPF_ADD, R0, R8), | ||
| 1720 | BPF_ALU64_REG(BPF_ADD, R0, R9), | ||
| 1721 | BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe), | ||
| 1722 | BPF_EXIT_INSN(), | ||
| 1723 | }, | ||
| 1724 | INTERNAL, | ||
| 1725 | { }, | ||
| 1726 | { { 0, 0xfefe } } | ||
| 1727 | }, | ||
| 1728 | { /* Mainly checking JIT here. */ | ||
| 1729 | "MOV REG32", | ||
| 1730 | .u.insns_int = { | ||
| 1731 | BPF_LD_IMM64(R0, 0xffffffffffffffffLL), | ||
| 1732 | BPF_MOV64_REG(R1, R0), | ||
| 1733 | BPF_MOV64_REG(R2, R1), | ||
| 1734 | BPF_MOV64_REG(R3, R2), | ||
| 1735 | BPF_MOV64_REG(R4, R3), | ||
| 1736 | BPF_MOV64_REG(R5, R4), | ||
| 1737 | BPF_MOV64_REG(R6, R5), | ||
| 1738 | BPF_MOV64_REG(R7, R6), | ||
| 1739 | BPF_MOV64_REG(R8, R7), | ||
| 1740 | BPF_MOV64_REG(R9, R8), | ||
| 1741 | BPF_ALU32_IMM(BPF_MOV, R0, 0), | ||
| 1742 | BPF_ALU32_IMM(BPF_MOV, R1, 0), | ||
| 1743 | BPF_ALU32_IMM(BPF_MOV, R2, 0), | ||
| 1744 | BPF_ALU32_IMM(BPF_MOV, R3, 0), | ||
| 1745 | BPF_ALU32_IMM(BPF_MOV, R4, 0), | ||
| 1746 | BPF_ALU32_IMM(BPF_MOV, R5, 0), | ||
| 1747 | BPF_ALU32_IMM(BPF_MOV, R6, 0), | ||
| 1748 | BPF_ALU32_IMM(BPF_MOV, R7, 0), | ||
| 1749 | BPF_ALU32_IMM(BPF_MOV, R8, 0), | ||
| 1750 | BPF_ALU32_IMM(BPF_MOV, R9, 0), | ||
| 1751 | BPF_ALU64_REG(BPF_ADD, R0, R0), | ||
| 1752 | BPF_ALU64_REG(BPF_ADD, R0, R1), | ||
| 1753 | BPF_ALU64_REG(BPF_ADD, R0, R2), | ||
| 1754 | BPF_ALU64_REG(BPF_ADD, R0, R3), | ||
| 1755 | BPF_ALU64_REG(BPF_ADD, R0, R4), | ||
| 1756 | BPF_ALU64_REG(BPF_ADD, R0, R5), | ||
| 1757 | BPF_ALU64_REG(BPF_ADD, R0, R6), | ||
| 1758 | BPF_ALU64_REG(BPF_ADD, R0, R7), | ||
| 1759 | BPF_ALU64_REG(BPF_ADD, R0, R8), | ||
| 1760 | BPF_ALU64_REG(BPF_ADD, R0, R9), | ||
| 1761 | BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe), | ||
| 1762 | BPF_EXIT_INSN(), | ||
| 1763 | }, | ||
| 1764 | INTERNAL, | ||
| 1765 | { }, | ||
| 1766 | { { 0, 0xfefe } } | ||
| 1767 | }, | ||
| 1768 | { /* Mainly checking JIT here. */ | ||
| 1769 | "LD IMM64", | ||
| 1770 | .u.insns_int = { | ||
| 1771 | BPF_LD_IMM64(R0, 0xffffffffffffffffLL), | ||
| 1772 | BPF_MOV64_REG(R1, R0), | ||
| 1773 | BPF_MOV64_REG(R2, R1), | ||
| 1774 | BPF_MOV64_REG(R3, R2), | ||
| 1775 | BPF_MOV64_REG(R4, R3), | ||
| 1776 | BPF_MOV64_REG(R5, R4), | ||
| 1777 | BPF_MOV64_REG(R6, R5), | ||
| 1778 | BPF_MOV64_REG(R7, R6), | ||
| 1779 | BPF_MOV64_REG(R8, R7), | ||
| 1780 | BPF_MOV64_REG(R9, R8), | ||
| 1781 | BPF_LD_IMM64(R0, 0x0LL), | ||
| 1782 | BPF_LD_IMM64(R1, 0x0LL), | ||
| 1783 | BPF_LD_IMM64(R2, 0x0LL), | ||
| 1784 | BPF_LD_IMM64(R3, 0x0LL), | ||
| 1785 | BPF_LD_IMM64(R4, 0x0LL), | ||
| 1786 | BPF_LD_IMM64(R5, 0x0LL), | ||
| 1787 | BPF_LD_IMM64(R6, 0x0LL), | ||
| 1788 | BPF_LD_IMM64(R7, 0x0LL), | ||
| 1789 | BPF_LD_IMM64(R8, 0x0LL), | ||
| 1790 | BPF_LD_IMM64(R9, 0x0LL), | ||
| 1791 | BPF_ALU64_REG(BPF_ADD, R0, R0), | ||
| 1792 | BPF_ALU64_REG(BPF_ADD, R0, R1), | ||
| 1793 | BPF_ALU64_REG(BPF_ADD, R0, R2), | ||
| 1794 | BPF_ALU64_REG(BPF_ADD, R0, R3), | ||
| 1795 | BPF_ALU64_REG(BPF_ADD, R0, R4), | ||
| 1796 | BPF_ALU64_REG(BPF_ADD, R0, R5), | ||
| 1797 | BPF_ALU64_REG(BPF_ADD, R0, R6), | ||
| 1798 | BPF_ALU64_REG(BPF_ADD, R0, R7), | ||
| 1799 | BPF_ALU64_REG(BPF_ADD, R0, R8), | ||
| 1800 | BPF_ALU64_REG(BPF_ADD, R0, R9), | ||
| 1801 | BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe), | ||
| 1802 | BPF_EXIT_INSN(), | ||
| 1803 | }, | ||
| 1804 | INTERNAL, | ||
| 1805 | { }, | ||
| 1806 | { { 0, 0xfefe } } | ||
| 1807 | }, | ||
| 1688 | { | 1808 | { |
| 1689 | "INT: ALU MIX", | 1809 | "INT: ALU MIX", |
| 1690 | .u.insns_int = { | 1810 | .u.insns_int = { |
diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 86374c1c49a4..a3e8ec3fb1c5 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/printk.h> | 14 | #include <linux/printk.h> |
| 15 | #include <linux/completion.h> | ||
| 15 | #include <linux/firmware.h> | 16 | #include <linux/firmware.h> |
| 16 | #include <linux/device.h> | 17 | #include <linux/device.h> |
| 17 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
| @@ -54,10 +55,9 @@ static ssize_t trigger_request_store(struct device *dev, | |||
| 54 | int rc; | 55 | int rc; |
| 55 | char *name; | 56 | char *name; |
| 56 | 57 | ||
| 57 | name = kzalloc(count + 1, GFP_KERNEL); | 58 | name = kstrndup(buf, count, GFP_KERNEL); |
| 58 | if (!name) | 59 | if (!name) |
| 59 | return -ENOSPC; | 60 | return -ENOSPC; |
| 60 | memcpy(name, buf, count); | ||
| 61 | 61 | ||
| 62 | pr_info("loading '%s'\n", name); | 62 | pr_info("loading '%s'\n", name); |
| 63 | 63 | ||
| @@ -65,17 +65,73 @@ static ssize_t trigger_request_store(struct device *dev, | |||
| 65 | release_firmware(test_firmware); | 65 | release_firmware(test_firmware); |
| 66 | test_firmware = NULL; | 66 | test_firmware = NULL; |
| 67 | rc = request_firmware(&test_firmware, name, dev); | 67 | rc = request_firmware(&test_firmware, name, dev); |
| 68 | if (rc) | 68 | if (rc) { |
| 69 | pr_info("load of '%s' failed: %d\n", name, rc); | 69 | pr_info("load of '%s' failed: %d\n", name, rc); |
| 70 | pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0); | 70 | goto out; |
| 71 | } | ||
| 72 | pr_info("loaded: %zu\n", test_firmware->size); | ||
| 73 | rc = count; | ||
| 74 | |||
| 75 | out: | ||
| 71 | mutex_unlock(&test_fw_mutex); | 76 | mutex_unlock(&test_fw_mutex); |
| 72 | 77 | ||
| 73 | kfree(name); | 78 | kfree(name); |
| 74 | 79 | ||
| 75 | return count; | 80 | return rc; |
| 76 | } | 81 | } |
| 77 | static DEVICE_ATTR_WO(trigger_request); | 82 | static DEVICE_ATTR_WO(trigger_request); |
| 78 | 83 | ||
| 84 | static DECLARE_COMPLETION(async_fw_done); | ||
| 85 | |||
| 86 | static void trigger_async_request_cb(const struct firmware *fw, void *context) | ||
| 87 | { | ||
| 88 | test_firmware = fw; | ||
| 89 | complete(&async_fw_done); | ||
| 90 | } | ||
| 91 | |||
| 92 | static ssize_t trigger_async_request_store(struct device *dev, | ||
| 93 | struct device_attribute *attr, | ||
| 94 | const char *buf, size_t count) | ||
| 95 | { | ||
| 96 | int rc; | ||
| 97 | char *name; | ||
| 98 | |||
| 99 | name = kstrndup(buf, count, GFP_KERNEL); | ||
| 100 | if (!name) | ||
| 101 | return -ENOSPC; | ||
| 102 | |||
| 103 | pr_info("loading '%s'\n", name); | ||
| 104 | |||
| 105 | mutex_lock(&test_fw_mutex); | ||
| 106 | release_firmware(test_firmware); | ||
| 107 | test_firmware = NULL; | ||
| 108 | rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL, | ||
| 109 | NULL, trigger_async_request_cb); | ||
| 110 | if (rc) { | ||
| 111 | pr_info("async load of '%s' failed: %d\n", name, rc); | ||
| 112 | kfree(name); | ||
| 113 | goto out; | ||
| 114 | } | ||
| 115 | /* Free 'name' ASAP, to test for race conditions */ | ||
| 116 | kfree(name); | ||
| 117 | |||
| 118 | wait_for_completion(&async_fw_done); | ||
| 119 | |||
| 120 | if (test_firmware) { | ||
| 121 | pr_info("loaded: %zu\n", test_firmware->size); | ||
| 122 | rc = count; | ||
| 123 | } else { | ||
| 124 | pr_err("failed to async load firmware\n"); | ||
| 125 | rc = -ENODEV; | ||
| 126 | } | ||
| 127 | |||
| 128 | out: | ||
| 129 | mutex_unlock(&test_fw_mutex); | ||
| 130 | |||
| 131 | return rc; | ||
| 132 | } | ||
| 133 | static DEVICE_ATTR_WO(trigger_async_request); | ||
| 134 | |||
| 79 | static int __init test_firmware_init(void) | 135 | static int __init test_firmware_init(void) |
| 80 | { | 136 | { |
| 81 | int rc; | 137 | int rc; |
| @@ -92,9 +148,20 @@ static int __init test_firmware_init(void) | |||
| 92 | goto dereg; | 148 | goto dereg; |
| 93 | } | 149 | } |
| 94 | 150 | ||
| 151 | rc = device_create_file(test_fw_misc_device.this_device, | ||
| 152 | &dev_attr_trigger_async_request); | ||
| 153 | if (rc) { | ||
| 154 | pr_err("could not create async sysfs interface: %d\n", rc); | ||
| 155 | goto remove_file; | ||
| 156 | } | ||
| 157 | |||
| 95 | pr_warn("interface ready\n"); | 158 | pr_warn("interface ready\n"); |
| 96 | 159 | ||
| 97 | return 0; | 160 | return 0; |
| 161 | |||
| 162 | remove_file: | ||
| 163 | device_remove_file(test_fw_misc_device.this_device, | ||
| 164 | &dev_attr_trigger_async_request); | ||
| 98 | dereg: | 165 | dereg: |
| 99 | misc_deregister(&test_fw_misc_device); | 166 | misc_deregister(&test_fw_misc_device); |
| 100 | return rc; | 167 | return rc; |
| @@ -106,6 +173,8 @@ static void __exit test_firmware_exit(void) | |||
| 106 | { | 173 | { |
| 107 | release_firmware(test_firmware); | 174 | release_firmware(test_firmware); |
| 108 | device_remove_file(test_fw_misc_device.this_device, | 175 | device_remove_file(test_fw_misc_device.this_device, |
| 176 | &dev_attr_trigger_async_request); | ||
| 177 | device_remove_file(test_fw_misc_device.this_device, | ||
| 109 | &dev_attr_trigger_request); | 178 | &dev_attr_trigger_request); |
| 110 | misc_deregister(&test_fw_misc_device); | 179 | misc_deregister(&test_fw_misc_device); |
| 111 | pr_warn("removed interface\n"); | 180 | pr_warn("removed interface\n"); |
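The async path relies on the request_firmware_nowait() contract: the completion callback always runs, and it is handed a NULL firmware pointer when the load failed, which is why the code above re-checks test_firmware after waiting. A generic sketch of a consumer of that contract (the callback name is illustrative):

	static void my_fw_cb(const struct firmware *fw, void *context)
	{
		if (!fw) {
			pr_err("firmware load failed\n");
			return;
		}
		/* use fw->data / fw->size, then release_firmware(fw) when done */
	}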
diff --git a/lib/test-hexdump.c b/lib/test_hexdump.c index 5241df36eedf..3f415d8101f3 100644 --- a/lib/test-hexdump.c +++ b/lib/test_hexdump.c | |||
| @@ -42,19 +42,21 @@ static const char * const test_data_8_le[] __initconst = { | |||
| 42 | "e9ac0f9cad319ca6", "0cafb1439919d14c", | 42 | "e9ac0f9cad319ca6", "0cafb1439919d14c", |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | static void __init test_hexdump(size_t len, int rowsize, int groupsize, | 45 | #define FILL_CHAR '#' |
| 46 | bool ascii) | 46 | |
| 47 | static unsigned total_tests __initdata; | ||
| 48 | static unsigned failed_tests __initdata; | ||
| 49 | |||
| 50 | static void __init test_hexdump_prepare_test(size_t len, int rowsize, | ||
| 51 | int groupsize, char *test, | ||
| 52 | size_t testlen, bool ascii) | ||
| 47 | { | 53 | { |
| 48 | char test[32 * 3 + 2 + 32 + 1]; | ||
| 49 | char real[32 * 3 + 2 + 32 + 1]; | ||
| 50 | char *p; | 54 | char *p; |
| 51 | const char * const *result; | 55 | const char * const *result; |
| 52 | size_t l = len; | 56 | size_t l = len; |
| 53 | int gs = groupsize, rs = rowsize; | 57 | int gs = groupsize, rs = rowsize; |
| 54 | unsigned int i; | 58 | unsigned int i; |
| 55 | 59 | ||
| 56 | hex_dump_to_buffer(data_b, l, rs, gs, real, sizeof(real), ascii); | ||
| 57 | |||
| 58 | if (rs != 16 && rs != 32) | 60 | if (rs != 16 && rs != 32) |
| 59 | rs = 16; | 61 | rs = 16; |
| 60 | 62 | ||
| @@ -73,8 +75,6 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize, | |||
| 73 | else | 75 | else |
| 74 | result = test_data_1_le; | 76 | result = test_data_1_le; |
| 75 | 77 | ||
| 76 | memset(test, ' ', sizeof(test)); | ||
| 77 | |||
| 78 | /* hex dump */ | 78 | /* hex dump */ |
| 79 | p = test; | 79 | p = test; |
| 80 | for (i = 0; i < l / gs; i++) { | 80 | for (i = 0; i < l / gs; i++) { |
| @@ -82,24 +82,49 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize, | |||
| 82 | size_t amount = strlen(q); | 82 | size_t amount = strlen(q); |
| 83 | 83 | ||
| 84 | strncpy(p, q, amount); | 84 | strncpy(p, q, amount); |
| 85 | p += amount + 1; | 85 | p += amount; |
| 86 | |||
| 87 | *p++ = ' '; | ||
| 86 | } | 88 | } |
| 87 | if (i) | 89 | if (i) |
| 88 | p--; | 90 | p--; |
| 89 | 91 | ||
| 90 | /* ASCII part */ | 92 | /* ASCII part */ |
| 91 | if (ascii) { | 93 | if (ascii) { |
| 92 | p = test + rs * 2 + rs / gs + 1; | 94 | do { |
| 95 | *p++ = ' '; | ||
| 96 | } while (p < test + rs * 2 + rs / gs + 1); | ||
| 97 | |||
| 93 | strncpy(p, data_a, l); | 98 | strncpy(p, data_a, l); |
| 94 | p += l; | 99 | p += l; |
| 95 | } | 100 | } |
| 96 | 101 | ||
| 97 | *p = '\0'; | 102 | *p = '\0'; |
| 103 | } | ||
| 98 | 104 | ||
| 99 | if (strcmp(test, real)) { | 105 | #define TEST_HEXDUMP_BUF_SIZE (32 * 3 + 2 + 32 + 1) |
| 106 | |||
| 107 | static void __init test_hexdump(size_t len, int rowsize, int groupsize, | ||
| 108 | bool ascii) | ||
| 109 | { | ||
| 110 | char test[TEST_HEXDUMP_BUF_SIZE]; | ||
| 111 | char real[TEST_HEXDUMP_BUF_SIZE]; | ||
| 112 | |||
| 113 | total_tests++; | ||
| 114 | |||
| 115 | memset(real, FILL_CHAR, sizeof(real)); | ||
| 116 | hex_dump_to_buffer(data_b, len, rowsize, groupsize, real, sizeof(real), | ||
| 117 | ascii); | ||
| 118 | |||
| 119 | memset(test, FILL_CHAR, sizeof(test)); | ||
| 120 | test_hexdump_prepare_test(len, rowsize, groupsize, test, sizeof(test), | ||
| 121 | ascii); | ||
| 122 | |||
| 123 | if (memcmp(test, real, TEST_HEXDUMP_BUF_SIZE)) { | ||
| 100 | pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize); | 124 | pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize); |
| 101 | pr_err("Result: '%s'\n", real); | 125 | pr_err("Result: '%s'\n", real); |
| 102 | pr_err("Expect: '%s'\n", test); | 126 | pr_err("Expect: '%s'\n", test); |
| 127 | failed_tests++; | ||
| 103 | } | 128 | } |
| 104 | } | 129 | } |
| 105 | 130 | ||
| @@ -114,52 +139,72 @@ static void __init test_hexdump_set(int rowsize, bool ascii) | |||
| 114 | test_hexdump(len, rowsize, 1, ascii); | 139 | test_hexdump(len, rowsize, 1, ascii); |
| 115 | } | 140 | } |
| 116 | 141 | ||
| 117 | static void __init test_hexdump_overflow(bool ascii) | 142 | static void __init test_hexdump_overflow(size_t buflen, size_t len, |
| 143 | int rowsize, int groupsize, | ||
| 144 | bool ascii) | ||
| 118 | { | 145 | { |
| 119 | char buf[56]; | 146 | char test[TEST_HEXDUMP_BUF_SIZE]; |
| 120 | const char *t = test_data_1_le[0]; | 147 | char buf[TEST_HEXDUMP_BUF_SIZE]; |
| 121 | size_t l = get_random_int() % sizeof(buf); | 148 | int rs = rowsize, gs = groupsize; |
| 149 | int ae, he, e, f, r; | ||
| 122 | bool a; | 150 | bool a; |
| 123 | int e, r; | ||
| 124 | 151 | ||
| 125 | memset(buf, ' ', sizeof(buf)); | 152 | total_tests++; |
| 153 | |||
| 154 | memset(buf, FILL_CHAR, sizeof(buf)); | ||
| 126 | 155 | ||
| 127 | r = hex_dump_to_buffer(data_b, 1, 16, 1, buf, l, ascii); | 156 | r = hex_dump_to_buffer(data_b, len, rs, gs, buf, buflen, ascii); |
| 157 | |||
| 158 | /* | ||
| 159 | * Caller must provide the data length multiple of groupsize. The | ||
| 160 | * calculations below are made with that assumption in mind. | ||
| 161 | */ | ||
| 162 | ae = rs * 2 /* hex */ + rs / gs /* spaces */ + 1 /* space */ + len /* ascii */; | ||
| 163 | he = (gs * 2 /* hex */ + 1 /* space */) * len / gs - 1 /* no trailing space */; | ||
| 128 | 164 | ||
| 129 | if (ascii) | 165 | if (ascii) |
| 130 | e = 50; | 166 | e = ae; |
| 131 | else | 167 | else |
| 132 | e = 2; | 168 | e = he; |
| 133 | buf[e + 2] = '\0'; | 169 | |
| 134 | 170 | f = min_t(int, e + 1, buflen); | |
| 135 | if (!l) { | 171 | if (buflen) { |
| 136 | a = r == e && buf[0] == ' '; | 172 | test_hexdump_prepare_test(len, rs, gs, test, sizeof(test), ascii); |
| 137 | } else if (l < 3) { | 173 | test[f - 1] = '\0'; |
| 138 | a = r == e && buf[0] == '\0'; | ||
| 139 | } else if (l < 4) { | ||
| 140 | a = r == e && !strcmp(buf, t); | ||
| 141 | } else if (ascii) { | ||
| 142 | if (l < 51) | ||
| 143 | a = r == e && buf[l - 1] == '\0' && buf[l - 2] == ' '; | ||
| 144 | else | ||
| 145 | a = r == e && buf[50] == '\0' && buf[49] == '.'; | ||
| 146 | } else { | ||
| 147 | a = r == e && buf[e] == '\0'; | ||
| 148 | } | 174 | } |
| 175 | memset(test + f, FILL_CHAR, sizeof(test) - f); | ||
| 176 | |||
| 177 | a = r == e && !memcmp(test, buf, TEST_HEXDUMP_BUF_SIZE); | ||
| 178 | |||
| 179 | buf[sizeof(buf) - 1] = '\0'; | ||
| 149 | 180 | ||
| 150 | if (!a) { | 181 | if (!a) { |
| 151 | pr_err("Len: %zu rc: %u strlen: %zu\n", l, r, strlen(buf)); | 182 | pr_err("Len: %zu buflen: %zu strlen: %zu\n", |
| 152 | pr_err("Result: '%s'\n", buf); | 183 | len, buflen, strnlen(buf, sizeof(buf))); |
| 184 | pr_err("Result: %d '%s'\n", r, buf); | ||
| 185 | pr_err("Expect: %d '%s'\n", e, test); | ||
| 186 | failed_tests++; | ||
| 153 | } | 187 | } |
| 154 | } | 188 | } |
| 155 | 189 | ||
| 190 | static void __init test_hexdump_overflow_set(size_t buflen, bool ascii) | ||
| 191 | { | ||
| 192 | unsigned int i = 0; | ||
| 193 | int rs = (get_random_int() % 2 + 1) * 16; | ||
| 194 | |||
| 195 | do { | ||
| 196 | int gs = 1 << i; | ||
| 197 | size_t len = get_random_int() % rs + gs; | ||
| 198 | |||
| 199 | test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii); | ||
| 200 | } while (i++ < 3); | ||
| 201 | } | ||
| 202 | |||
| 156 | static int __init test_hexdump_init(void) | 203 | static int __init test_hexdump_init(void) |
| 157 | { | 204 | { |
| 158 | unsigned int i; | 205 | unsigned int i; |
| 159 | int rowsize; | 206 | int rowsize; |
| 160 | 207 | ||
| 161 | pr_info("Running tests...\n"); | ||
| 162 | |||
| 163 | rowsize = (get_random_int() % 2 + 1) * 16; | 208 | rowsize = (get_random_int() % 2 + 1) * 16; |
| 164 | for (i = 0; i < 16; i++) | 209 | for (i = 0; i < 16; i++) |
| 165 | test_hexdump_set(rowsize, false); | 210 | test_hexdump_set(rowsize, false); |
| @@ -168,13 +213,26 @@ static int __init test_hexdump_init(void) | |||
| 168 | for (i = 0; i < 16; i++) | 213 | for (i = 0; i < 16; i++) |
| 169 | test_hexdump_set(rowsize, true); | 214 | test_hexdump_set(rowsize, true); |
| 170 | 215 | ||
| 171 | for (i = 0; i < 16; i++) | 216 | for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++) |
| 172 | test_hexdump_overflow(false); | 217 | test_hexdump_overflow_set(i, false); |
| 173 | 218 | ||
| 174 | for (i = 0; i < 16; i++) | 219 | for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++) |
| 175 | test_hexdump_overflow(true); | 220 | test_hexdump_overflow_set(i, true); |
| 221 | |||
| 222 | if (failed_tests == 0) | ||
| 223 | pr_info("all %u tests passed\n", total_tests); | ||
| 224 | else | ||
| 225 | pr_err("failed %u out of %u tests\n", failed_tests, total_tests); | ||
| 176 | 226 | ||
| 177 | return -EINVAL; | 227 | return failed_tests ? -EINVAL : 0; |
| 178 | } | 228 | } |
| 179 | module_init(test_hexdump_init); | 229 | module_init(test_hexdump_init); |
| 230 | |||
| 231 | static void __exit test_hexdump_exit(void) | ||
| 232 | { | ||
| 233 | /* do nothing */ | ||
| 234 | } | ||
| 235 | module_exit(test_hexdump_exit); | ||
| 236 | |||
| 237 | MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); | ||
| 180 | MODULE_LICENSE("Dual BSD/GPL"); | 238 | MODULE_LICENSE("Dual BSD/GPL"); |
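The two expected-length formulas are easiest to sanity-check with concrete numbers; for a 16-byte row, group size 1 and an 8-byte dump they evaluate as below (this is only the arithmetic of the expressions above, not additional behaviour):

	/* rowsize = 16, groupsize = 1, len = 8 */
	ae = 16 * 2 + 16 / 1 + 1 + 8;	/* = 57: hex, spaces and the ASCII column */
	he = (1 * 2 + 1) * 8 / 1 - 1;	/* = 23: hex only, no trailing space */
	/* the test treats these as the value hex_dump_to_buffer() should return,
	 * so holding the full output plus its NUL needs at least e + 1 bytes */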
diff --git a/lib/test_printf.c b/lib/test_printf.c index c5a666af9ba5..4f6ae60433bc 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c | |||
| @@ -12,10 +12,13 @@ | |||
| 12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
| 13 | #include <linux/string.h> | 13 | #include <linux/string.h> |
| 14 | 14 | ||
| 15 | #include <linux/bitmap.h> | ||
| 16 | #include <linux/dcache.h> | ||
| 15 | #include <linux/socket.h> | 17 | #include <linux/socket.h> |
| 16 | #include <linux/in.h> | 18 | #include <linux/in.h> |
| 17 | 19 | ||
| 18 | #define BUF_SIZE 256 | 20 | #define BUF_SIZE 256 |
| 21 | #define PAD_SIZE 16 | ||
| 19 | #define FILL_CHAR '$' | 22 | #define FILL_CHAR '$' |
| 20 | 23 | ||
| 21 | #define PTR1 ((void*)0x01234567) | 24 | #define PTR1 ((void*)0x01234567) |
| @@ -39,6 +42,7 @@ | |||
| 39 | static unsigned total_tests __initdata; | 42 | static unsigned total_tests __initdata; |
| 40 | static unsigned failed_tests __initdata; | 43 | static unsigned failed_tests __initdata; |
| 41 | static char *test_buffer __initdata; | 44 | static char *test_buffer __initdata; |
| 45 | static char *alloced_buffer __initdata; | ||
| 42 | 46 | ||
| 43 | static int __printf(4, 0) __init | 47 | static int __printf(4, 0) __init |
| 44 | do_test(int bufsize, const char *expect, int elen, | 48 | do_test(int bufsize, const char *expect, int elen, |
| @@ -49,7 +53,7 @@ do_test(int bufsize, const char *expect, int elen, | |||
| 49 | 53 | ||
| 50 | total_tests++; | 54 | total_tests++; |
| 51 | 55 | ||
| 52 | memset(test_buffer, FILL_CHAR, BUF_SIZE); | 56 | memset(alloced_buffer, FILL_CHAR, BUF_SIZE + 2*PAD_SIZE); |
| 53 | va_copy(aq, ap); | 57 | va_copy(aq, ap); |
| 54 | ret = vsnprintf(test_buffer, bufsize, fmt, aq); | 58 | ret = vsnprintf(test_buffer, bufsize, fmt, aq); |
| 55 | va_end(aq); | 59 | va_end(aq); |
| @@ -60,8 +64,13 @@ do_test(int bufsize, const char *expect, int elen, | |||
| 60 | return 1; | 64 | return 1; |
| 61 | } | 65 | } |
| 62 | 66 | ||
| 67 | if (memchr_inv(alloced_buffer, FILL_CHAR, PAD_SIZE)) { | ||
| 68 | pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n", bufsize, fmt); | ||
| 69 | return 1; | ||
| 70 | } | ||
| 71 | |||
| 63 | if (!bufsize) { | 72 | if (!bufsize) { |
| 64 | if (memchr_inv(test_buffer, FILL_CHAR, BUF_SIZE)) { | 73 | if (memchr_inv(test_buffer, FILL_CHAR, BUF_SIZE + PAD_SIZE)) { |
| 65 | pr_warn("vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n", | 74 | pr_warn("vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n", |
| 66 | fmt); | 75 | fmt); |
| 67 | return 1; | 76 | return 1; |
| @@ -76,6 +85,12 @@ do_test(int bufsize, const char *expect, int elen, | |||
| 76 | return 1; | 85 | return 1; |
| 77 | } | 86 | } |
| 78 | 87 | ||
| 88 | if (memchr_inv(test_buffer + written + 1, FILL_CHAR, BUF_SIZE + PAD_SIZE - (written + 1))) { | ||
| 89 | pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n", | ||
| 90 | bufsize, fmt); | ||
| 91 | return 1; | ||
| 92 | } | ||
| 93 | |||
| 79 | if (memcmp(test_buffer, expect, written)) { | 94 | if (memcmp(test_buffer, expect, written)) { |
| 80 | pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n", | 95 | pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n", |
| 81 | bufsize, fmt, test_buffer, written, expect); | 96 | bufsize, fmt, test_buffer, written, expect); |
| @@ -91,7 +106,12 @@ __test(const char *expect, int elen, const char *fmt, ...) | |||
| 91 | int rand; | 106 | int rand; |
| 92 | char *p; | 107 | char *p; |
| 93 | 108 | ||
| 94 | BUG_ON(elen >= BUF_SIZE); | 109 | if (elen >= BUF_SIZE) { |
| 110 | pr_err("error in test suite: expected output length %d too long. Format was '%s'.\n", | ||
| 111 | elen, fmt); | ||
| 112 | failed_tests++; | ||
| 113 | return; | ||
| 114 | } | ||
| 95 | 115 | ||
| 96 | va_start(ap, fmt); | 116 | va_start(ap, fmt); |
| 97 | 117 | ||
| @@ -109,6 +129,7 @@ __test(const char *expect, int elen, const char *fmt, ...) | |||
| 109 | 129 | ||
| 110 | p = kvasprintf(GFP_KERNEL, fmt, ap); | 130 | p = kvasprintf(GFP_KERNEL, fmt, ap); |
| 111 | if (p) { | 131 | if (p) { |
| 132 | total_tests++; | ||
| 112 | if (memcmp(p, expect, elen+1)) { | 133 | if (memcmp(p, expect, elen+1)) { |
| 113 | pr_warn("kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n", | 134 | pr_warn("kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n", |
| 114 | fmt, p, expect); | 135 | fmt, p, expect); |
| @@ -140,6 +161,30 @@ test_number(void) | |||
| 140 | test("0x1234abcd ", "%#-12x", 0x1234abcd); | 161 | test("0x1234abcd ", "%#-12x", 0x1234abcd); |
| 141 | test(" 0x1234abcd", "%#12x", 0x1234abcd); | 162 | test(" 0x1234abcd", "%#12x", 0x1234abcd); |
| 142 | test("0|001| 12|+123| 1234|-123|-1234", "%d|%03d|%3d|%+d|% d|%+d|% d", 0, 1, 12, 123, 1234, -123, -1234); | 163 | test("0|001| 12|+123| 1234|-123|-1234", "%d|%03d|%3d|%+d|% d|%+d|% d", 0, 1, 12, 123, 1234, -123, -1234); |
| 164 | test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1); | ||
| 165 | test("0|1|1|-128|-1", "%hhd|%hhd|%hhd|%hhd|%hhd", 0, 1, 257, 128, -1); | ||
| 166 | test("2015122420151225", "%ho%ho%#ho", 1037, 5282, -11627); | ||
| 167 | /* | ||
| 168 | * POSIX/C99: »The result of converting zero with an explicit | ||
| 169 | * precision of zero shall be no characters.« Hence the output | ||
| 170 | * from the below test should really be "00|0||| ". However, | ||
| 171 | * the kernel's printf also produces a single 0 in that | ||
| 172 | * case. This test case simply documents the current | ||
| 173 | * behaviour. | ||
| 174 | */ | ||
| 175 | test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0); | ||
| 176 | #ifndef __CHAR_UNSIGNED__ | ||
| 177 | { | ||
| 178 | /* | ||
| 179 | * Passing a 'char' to a %02x specifier doesn't do | ||
| 180 | * what was presumably the intention when char is | ||
| 181 | * signed and the value is negative. One must either & | ||
| 182 | * with 0xff or cast to u8. | ||
| 183 | */ | ||
| 184 | char val = -16; | ||
| 185 | test("0xfffffff0|0xf0|0xf0", "%#02x|%#02x|%#02x", val, val & 0xff, (u8)val); | ||
| 186 | } | ||
| 187 | #endif | ||
| 143 | } | 188 | } |
| 144 | 189 | ||
| 145 | static void __init | 190 | static void __init |
| @@ -148,14 +193,23 @@ test_string(void) | |||
| 148 | test("", "%s%.0s", "", "123"); | 193 | test("", "%s%.0s", "", "123"); |
| 149 | test("ABCD|abc|123", "%s|%.3s|%.*s", "ABCD", "abcdef", 3, "123456"); | 194 | test("ABCD|abc|123", "%s|%.3s|%.*s", "ABCD", "abcdef", 3, "123456"); |
| 150 | test("1 | 2|3 | 4|5 ", "%-3s|%3s|%-*s|%*s|%*s", "1", "2", 3, "3", 3, "4", -3, "5"); | 195 | test("1 | 2|3 | 4|5 ", "%-3s|%3s|%-*s|%*s|%*s", "1", "2", 3, "3", 3, "4", -3, "5"); |
| 196 | test("1234 ", "%-10.4s", "123456"); | ||
| 197 | test(" 1234", "%10.4s", "123456"); | ||
| 151 | /* | 198 | /* |
| 152 | * POSIX and C99 say that a missing precision should be | 199 | * POSIX and C99 say that a negative precision (which is only |
| 153 | * treated as a precision of 0. However, the kernel's printf | 200 | * possible to pass via a * argument) should be treated as if |
| 154 | * implementation treats this case as if the . wasn't | 201 | * the precision wasn't present, and that if the precision is |
| 155 | * present. Let's add a test case documenting the current | 202 | * omitted (as in %.s), the precision should be taken to be |
| 156 | * behaviour; should anyone ever feel the need to follow the | 203 | * 0. However, the kernel's printf behave exactly opposite, |
| 157 | * standards more closely, this can be revisited. | 204 | * treating a negative precision as 0 and treating an omitted |
| 205 | * precision specifier as if no precision was given. | ||
| 206 | * | ||
| 207 | * These test cases document the current behaviour; should | ||
| 208 | * anyone ever feel the need to follow the standards more | ||
| 209 | * closely, this can be revisited. | ||
| 158 | */ | 210 | */ |
| 211 | test(" ", "%4.*s", -5, "123456"); | ||
| 212 | test("123456", "%.s", "123456"); | ||
| 159 | test("a||", "%.s|%.0s|%.*s", "a", "b", 0, "c"); | 213 | test("a||", "%.s|%.0s|%.*s", "a", "b", 0, "c"); |
| 160 | test("a | | ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c"); | 214 | test("a | | ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c"); |
| 161 | } | 215 | } |
| @@ -273,9 +327,35 @@ uuid(void) | |||
| 273 | test("03020100-0504-0706-0809-0A0B0C0D0E0F", "%pUL", uuid); | 327 | test("03020100-0504-0706-0809-0A0B0C0D0E0F", "%pUL", uuid); |
| 274 | } | 328 | } |
| 275 | 329 | ||
| 330 | static struct dentry test_dentry[4] __initdata = { | ||
| 331 | { .d_parent = &test_dentry[0], | ||
| 332 | .d_name = QSTR_INIT(test_dentry[0].d_iname, 3), | ||
| 333 | .d_iname = "foo" }, | ||
| 334 | { .d_parent = &test_dentry[0], | ||
| 335 | .d_name = QSTR_INIT(test_dentry[1].d_iname, 5), | ||
| 336 | .d_iname = "bravo" }, | ||
| 337 | { .d_parent = &test_dentry[1], | ||
| 338 | .d_name = QSTR_INIT(test_dentry[2].d_iname, 4), | ||
| 339 | .d_iname = "alfa" }, | ||
| 340 | { .d_parent = &test_dentry[2], | ||
| 341 | .d_name = QSTR_INIT(test_dentry[3].d_iname, 5), | ||
| 342 | .d_iname = "romeo" }, | ||
| 343 | }; | ||
| 344 | |||
| 276 | static void __init | 345 | static void __init |
| 277 | dentry(void) | 346 | dentry(void) |
| 278 | { | 347 | { |
| 348 | test("foo", "%pd", &test_dentry[0]); | ||
| 349 | test("foo", "%pd2", &test_dentry[0]); | ||
| 350 | |||
| 351 | test("romeo", "%pd", &test_dentry[3]); | ||
| 352 | test("alfa/romeo", "%pd2", &test_dentry[3]); | ||
| 353 | test("bravo/alfa/romeo", "%pd3", &test_dentry[3]); | ||
| 354 | test("/bravo/alfa/romeo", "%pd4", &test_dentry[3]); | ||
| 355 | test("/bravo/alfa", "%pd4", &test_dentry[2]); | ||
| 356 | |||
| 357 | test("bravo/alfa |bravo/alfa ", "%-12pd2|%*pd2", &test_dentry[2], -12, &test_dentry[2]); | ||
| 358 | test(" bravo/alfa| bravo/alfa", "%12pd2|%*pd2", &test_dentry[2], 12, &test_dentry[2]); | ||
| 279 | } | 359 | } |
| 280 | 360 | ||
| 281 | static void __init | 361 | static void __init |
| @@ -289,6 +369,20 @@ struct_clk(void) | |||
| 289 | } | 369 | } |
| 290 | 370 | ||
| 291 | static void __init | 371 | static void __init |
| 372 | large_bitmap(void) | ||
| 373 | { | ||
| 374 | const int nbits = 1 << 16; | ||
| 375 | unsigned long *bits = kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL); | ||
| 376 | if (!bits) | ||
| 377 | return; | ||
| 378 | |||
| 379 | bitmap_set(bits, 1, 20); | ||
| 380 | bitmap_set(bits, 60000, 15); | ||
| 381 | test("1-20,60000-60014", "%*pbl", nbits, bits); | ||
| 382 | kfree(bits); | ||
| 383 | } | ||
| 384 | |||
| 385 | static void __init | ||
| 292 | bitmap(void) | 386 | bitmap(void) |
| 293 | { | 387 | { |
| 294 | DECLARE_BITMAP(bits, 20); | 388 | DECLARE_BITMAP(bits, 20); |
| @@ -307,6 +401,8 @@ bitmap(void) | |||
| 307 | bitmap_fill(bits, 20); | 401 | bitmap_fill(bits, 20); |
| 308 | test("fffff|fffff", "%20pb|%*pb", bits, 20, bits); | 402 | test("fffff|fffff", "%20pb|%*pb", bits, 20, bits); |
| 309 | test("0-19|0-19", "%20pbl|%*pbl", bits, 20, bits); | 403 | test("0-19|0-19", "%20pbl|%*pbl", bits, 20, bits); |
| 404 | |||
| 405 | large_bitmap(); | ||
| 310 | } | 406 | } |
| 311 | 407 | ||
| 312 | static void __init | 408 | static void __init |
| @@ -337,16 +433,17 @@ test_pointer(void) | |||
| 337 | static int __init | 433 | static int __init |
| 338 | test_printf_init(void) | 434 | test_printf_init(void) |
| 339 | { | 435 | { |
| 340 | test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL); | 436 | alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL); |
| 341 | if (!test_buffer) | 437 | if (!alloced_buffer) |
| 342 | return -ENOMEM; | 438 | return -ENOMEM; |
| 439 | test_buffer = alloced_buffer + PAD_SIZE; | ||
| 343 | 440 | ||
| 344 | test_basic(); | 441 | test_basic(); |
| 345 | test_number(); | 442 | test_number(); |
| 346 | test_string(); | 443 | test_string(); |
| 347 | test_pointer(); | 444 | test_pointer(); |
| 348 | 445 | ||
| 349 | kfree(test_buffer); | 446 | kfree(alloced_buffer); |
| 350 | 447 | ||
| 351 | if (failed_tests == 0) | 448 | if (failed_tests == 0) |
| 352 | pr_info("all %u tests passed\n", total_tests); | 449 | pr_info("all %u tests passed\n", total_tests); |
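The dentry cases pin down the %pd family: plain %pd prints just the dentry's own name, while %pd<n> walks up through d_parent and prints the last <n> path components. A usage sketch derived from the expectations above, with dentry standing for &test_dentry[3] ("romeo"):

	pr_info("%pd\n",  dentry);	/* prints "romeo" */
	pr_info("%pd3\n", dentry);	/* prints "bravo/alfa/romeo" */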
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 8c1ad1ced72c..270bf7289b1e 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c | |||
| @@ -36,9 +36,9 @@ static int runs = 4; | |||
| 36 | module_param(runs, int, 0); | 36 | module_param(runs, int, 0); |
| 37 | MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)"); | 37 | MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)"); |
| 38 | 38 | ||
| 39 | static int max_size = 65536; | 39 | static int max_size = 0; |
| 40 | module_param(max_size, int, 0); | 40 | module_param(max_size, int, 0); |
| 41 | MODULE_PARM_DESC(max_size, "Maximum table size (default: 65536)"); | 41 | MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)"); |
| 42 | 42 | ||
| 43 | static bool shrinking = false; | 43 | static bool shrinking = false; |
| 44 | module_param(shrinking, bool, 0); | 44 | module_param(shrinking, bool, 0); |
| @@ -52,6 +52,10 @@ static int tcount = 10; | |||
| 52 | module_param(tcount, int, 0); | 52 | module_param(tcount, int, 0); |
| 53 | MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)"); | 53 | MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)"); |
| 54 | 54 | ||
| 55 | static bool enomem_retry = false; | ||
| 56 | module_param(enomem_retry, bool, 0); | ||
| 57 | MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)"); | ||
| 58 | |||
| 55 | struct test_obj { | 59 | struct test_obj { |
| 56 | int value; | 60 | int value; |
| 57 | struct rhash_head node; | 61 | struct rhash_head node; |
| @@ -76,6 +80,28 @@ static struct rhashtable_params test_rht_params = { | |||
| 76 | static struct semaphore prestart_sem; | 80 | static struct semaphore prestart_sem; |
| 77 | static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0); | 81 | static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0); |
| 78 | 82 | ||
| 83 | static int insert_retry(struct rhashtable *ht, struct rhash_head *obj, | ||
| 84 | const struct rhashtable_params params) | ||
| 85 | { | ||
| 86 | int err, retries = -1, enomem_retries = 0; | ||
| 87 | |||
| 88 | do { | ||
| 89 | retries++; | ||
| 90 | cond_resched(); | ||
| 91 | err = rhashtable_insert_fast(ht, obj, params); | ||
| 92 | if (err == -ENOMEM && enomem_retry) { | ||
| 93 | enomem_retries++; | ||
| 94 | err = -EBUSY; | ||
| 95 | } | ||
| 96 | } while (err == -EBUSY); | ||
| 97 | |||
| 98 | if (enomem_retries) | ||
| 99 | pr_info(" %u insertions retried after -ENOMEM\n", | ||
| 100 | enomem_retries); | ||
| 101 | |||
| 102 | return err ? : retries; | ||
| 103 | } | ||
| 104 | |||
| 79 | static int __init test_rht_lookup(struct rhashtable *ht) | 105 | static int __init test_rht_lookup(struct rhashtable *ht) |
| 80 | { | 106 | { |
| 81 | unsigned int i; | 107 | unsigned int i; |
| @@ -157,7 +183,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht) | |||
| 157 | { | 183 | { |
| 158 | struct test_obj *obj; | 184 | struct test_obj *obj; |
| 159 | int err; | 185 | int err; |
| 160 | unsigned int i, insert_fails = 0; | 186 | unsigned int i, insert_retries = 0; |
| 161 | s64 start, end; | 187 | s64 start, end; |
| 162 | 188 | ||
| 163 | /* | 189 | /* |
| @@ -170,22 +196,16 @@ static s64 __init test_rhashtable(struct rhashtable *ht) | |||
| 170 | struct test_obj *obj = &array[i]; | 196 | struct test_obj *obj = &array[i]; |
| 171 | 197 | ||
| 172 | obj->value = i * 2; | 198 | obj->value = i * 2; |
| 173 | 199 | err = insert_retry(ht, &obj->node, test_rht_params); | |
| 174 | err = rhashtable_insert_fast(ht, &obj->node, test_rht_params); | 200 | if (err > 0) |
| 175 | if (err == -ENOMEM || err == -EBUSY) { | 201 | insert_retries += err; |
| 176 | /* Mark failed inserts but continue */ | 202 | else if (err) |
| 177 | obj->value = TEST_INSERT_FAIL; | ||
| 178 | insert_fails++; | ||
| 179 | } else if (err) { | ||
| 180 | return err; | 203 | return err; |
| 181 | } | ||
| 182 | |||
| 183 | cond_resched(); | ||
| 184 | } | 204 | } |
| 185 | 205 | ||
| 186 | if (insert_fails) | 206 | if (insert_retries) |
| 187 | pr_info(" %u insertions failed due to memory pressure\n", | 207 | pr_info(" %u insertions retried due to memory pressure\n", |
| 188 | insert_fails); | 208 | insert_retries); |
| 189 | 209 | ||
| 190 | test_bucket_stats(ht); | 210 | test_bucket_stats(ht); |
| 191 | rcu_read_lock(); | 211 | rcu_read_lock(); |
| @@ -236,13 +256,15 @@ static int thread_lookup_test(struct thread_data *tdata) | |||
| 236 | obj->value, key); | 256 | obj->value, key); |
| 237 | err++; | 257 | err++; |
| 238 | } | 258 | } |
| 259 | |||
| 260 | cond_resched(); | ||
| 239 | } | 261 | } |
| 240 | return err; | 262 | return err; |
| 241 | } | 263 | } |
| 242 | 264 | ||
| 243 | static int threadfunc(void *data) | 265 | static int threadfunc(void *data) |
| 244 | { | 266 | { |
| 245 | int i, step, err = 0, insert_fails = 0; | 267 | int i, step, err = 0, insert_retries = 0; |
| 246 | struct thread_data *tdata = data; | 268 | struct thread_data *tdata = data; |
| 247 | 269 | ||
| 248 | up(&prestart_sem); | 270 | up(&prestart_sem); |
| @@ -251,20 +273,18 @@ static int threadfunc(void *data) | |||
| 251 | 273 | ||
| 252 | for (i = 0; i < entries; i++) { | 274 | for (i = 0; i < entries; i++) { |
| 253 | tdata->objs[i].value = (tdata->id << 16) | i; | 275 | tdata->objs[i].value = (tdata->id << 16) | i; |
| 254 | err = rhashtable_insert_fast(&ht, &tdata->objs[i].node, | 276 | err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params); |
| 255 | test_rht_params); | 277 | if (err > 0) { |
| 256 | if (err == -ENOMEM || err == -EBUSY) { | 278 | insert_retries += err; |
| 257 | tdata->objs[i].value = TEST_INSERT_FAIL; | ||
| 258 | insert_fails++; | ||
| 259 | } else if (err) { | 279 | } else if (err) { |
| 260 | pr_err(" thread[%d]: rhashtable_insert_fast failed\n", | 280 | pr_err(" thread[%d]: rhashtable_insert_fast failed\n", |
| 261 | tdata->id); | 281 | tdata->id); |
| 262 | goto out; | 282 | goto out; |
| 263 | } | 283 | } |
| 264 | } | 284 | } |
| 265 | if (insert_fails) | 285 | if (insert_retries) |
| 266 | pr_info(" thread[%d]: %d insert failures\n", | 286 | pr_info(" thread[%d]: %u insertions retried due to memory pressure\n", |
| 267 | tdata->id, insert_fails); | 287 | tdata->id, insert_retries); |
| 268 | 288 | ||
| 269 | err = thread_lookup_test(tdata); | 289 | err = thread_lookup_test(tdata); |
| 270 | if (err) { | 290 | if (err) { |
| @@ -285,6 +305,8 @@ static int threadfunc(void *data) | |||
| 285 | goto out; | 305 | goto out; |
| 286 | } | 306 | } |
| 287 | tdata->objs[i].value = TEST_INSERT_FAIL; | 307 | tdata->objs[i].value = TEST_INSERT_FAIL; |
| 308 | |||
| 309 | cond_resched(); | ||
| 288 | } | 310 | } |
| 289 | err = thread_lookup_test(tdata); | 311 | err = thread_lookup_test(tdata); |
| 290 | if (err) { | 312 | if (err) { |
| @@ -311,7 +333,7 @@ static int __init test_rht_init(void) | |||
| 311 | entries = min(entries, MAX_ENTRIES); | 333 | entries = min(entries, MAX_ENTRIES); |
| 312 | 334 | ||
| 313 | test_rht_params.automatic_shrinking = shrinking; | 335 | test_rht_params.automatic_shrinking = shrinking; |
| 314 | test_rht_params.max_size = max_size; | 336 | test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries); |
| 315 | test_rht_params.nelem_hint = size; | 337 | test_rht_params.nelem_hint = size; |
| 316 | 338 | ||
| 317 | pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n", | 339 | pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n", |
| @@ -357,6 +379,8 @@ static int __init test_rht_init(void) | |||
| 357 | return -ENOMEM; | 379 | return -ENOMEM; |
| 358 | } | 380 | } |
| 359 | 381 | ||
| 382 | test_rht_params.max_size = max_size ? : | ||
| 383 | roundup_pow_of_two(tcount * entries); | ||
| 360 | err = rhashtable_init(&ht, &test_rht_params); | 384 | err = rhashtable_init(&ht, &test_rht_params); |
| 361 | if (err < 0) { | 385 | if (err < 0) { |
| 362 | pr_warn("Test failed: Unable to initialize hashtable: %d\n", | 386 | pr_warn("Test failed: Unable to initialize hashtable: %d\n", |
diff --git a/lib/ubsan.c b/lib/ubsan.c new file mode 100644 index 000000000000..8799ae5e2e42 --- /dev/null +++ b/lib/ubsan.c | |||
| @@ -0,0 +1,456 @@ | |||
| 1 | /* | ||
| 2 | * UBSAN error reporting functions | ||
| 3 | * | ||
| 4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | ||
| 5 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/bitops.h> | ||
| 14 | #include <linux/bug.h> | ||
| 15 | #include <linux/ctype.h> | ||
| 16 | #include <linux/init.h> | ||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/types.h> | ||
| 19 | #include <linux/sched.h> | ||
| 20 | |||
| 21 | #include "ubsan.h" | ||
| 22 | |||
| 23 | const char *type_check_kinds[] = { | ||
| 24 | "load of", | ||
| 25 | "store to", | ||
| 26 | "reference binding to", | ||
| 27 | "member access within", | ||
| 28 | "member call on", | ||
| 29 | "constructor call on", | ||
| 30 | "downcast of", | ||
| 31 | "downcast of" | ||
| 32 | }; | ||
| 33 | |||
| 34 | #define REPORTED_BIT 31 | ||
| 35 | |||
| 36 | #if (BITS_PER_LONG == 64) && defined(__BIG_ENDIAN) | ||
| 37 | #define COLUMN_MASK (~(1U << REPORTED_BIT)) | ||
| 38 | #define LINE_MASK (~0U) | ||
| 39 | #else | ||
| 40 | #define COLUMN_MASK (~0U) | ||
| 41 | #define LINE_MASK (~(1U << REPORTED_BIT)) | ||
| 42 | #endif | ||
| 43 | |||
| 44 | #define VALUE_LENGTH 40 | ||
| 45 | |||
| 46 | static bool was_reported(struct source_location *location) | ||
| 47 | { | ||
| 48 | return test_and_set_bit(REPORTED_BIT, &location->reported); | ||
| 49 | } | ||
| 50 | |||
| 51 | static void print_source_location(const char *prefix, | ||
| 52 | struct source_location *loc) | ||
| 53 | { | ||
| 54 | pr_err("%s %s:%d:%d\n", prefix, loc->file_name, | ||
| 55 | loc->line & LINE_MASK, loc->column & COLUMN_MASK); | ||
| 56 | } | ||
| 57 | |||
| 58 | static bool suppress_report(struct source_location *loc) | ||
| 59 | { | ||
| 60 | return current->in_ubsan || was_reported(loc); | ||
| 61 | } | ||
| 62 | |||
| 63 | static bool type_is_int(struct type_descriptor *type) | ||
| 64 | { | ||
| 65 | return type->type_kind == type_kind_int; | ||
| 66 | } | ||
| 67 | |||
| 68 | static bool type_is_signed(struct type_descriptor *type) | ||
| 69 | { | ||
| 70 | WARN_ON(!type_is_int(type)); | ||
| 71 | return type->type_info & 1; | ||
| 72 | } | ||
| 73 | |||
| 74 | static unsigned type_bit_width(struct type_descriptor *type) | ||
| 75 | { | ||
| 76 | return 1 << (type->type_info >> 1); | ||
| 77 | } | ||
| 78 | |||
| 79 | static bool is_inline_int(struct type_descriptor *type) | ||
| 80 | { | ||
| 81 | unsigned inline_bits = sizeof(unsigned long)*8; | ||
| 82 | unsigned bits = type_bit_width(type); | ||
| 83 | |||
| 84 | WARN_ON(!type_is_int(type)); | ||
| 85 | |||
| 86 | return bits <= inline_bits; | ||
| 87 | } | ||
| 88 | |||
| 89 | static s_max get_signed_val(struct type_descriptor *type, unsigned long val) | ||
| 90 | { | ||
| 91 | if (is_inline_int(type)) { | ||
| 92 | unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type); | ||
| 93 | return ((s_max)val) << extra_bits >> extra_bits; | ||
| 94 | } | ||
| 95 | |||
| 96 | if (type_bit_width(type) == 64) | ||
| 97 | return *(s64 *)val; | ||
| 98 | |||
| 99 | return *(s_max *)val; | ||
| 100 | } | ||
| 101 | |||
| 102 | static bool val_is_negative(struct type_descriptor *type, unsigned long val) | ||
| 103 | { | ||
| 104 | return type_is_signed(type) && get_signed_val(type, val) < 0; | ||
| 105 | } | ||
| 106 | |||
| 107 | static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val) | ||
| 108 | { | ||
| 109 | if (is_inline_int(type)) | ||
| 110 | return val; | ||
| 111 | |||
| 112 | if (type_bit_width(type) == 64) | ||
| 113 | return *(u64 *)val; | ||
| 114 | |||
| 115 | return *(u_max *)val; | ||
| 116 | } | ||
| 117 | |||
| 118 | static void val_to_string(char *str, size_t size, struct type_descriptor *type, | ||
| 119 | unsigned long value) | ||
| 120 | { | ||
| 121 | if (type_is_int(type)) { | ||
| 122 | if (type_bit_width(type) == 128) { | ||
| 123 | #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) | ||
| 124 | u_max val = get_unsigned_val(type, value); | ||
| 125 | |||
| 126 | scnprintf(str, size, "0x%08x%08x%08x%08x", | ||
| 127 | (u32)(val >> 96), | ||
| 128 | (u32)(val >> 64), | ||
| 129 | (u32)(val >> 32), | ||
| 130 | (u32)(val)); | ||
| 131 | #else | ||
| 132 | WARN_ON(1); | ||
| 133 | #endif | ||
| 134 | } else if (type_is_signed(type)) { | ||
| 135 | scnprintf(str, size, "%lld", | ||
| 136 | (s64)get_signed_val(type, value)); | ||
| 137 | } else { | ||
| 138 | scnprintf(str, size, "%llu", | ||
| 139 | (u64)get_unsigned_val(type, value)); | ||
| 140 | } | ||
| 141 | } | ||
| 142 | } | ||
| 143 | |||
| 144 | static bool location_is_valid(struct source_location *loc) | ||
| 145 | { | ||
| 146 | return loc->file_name != NULL; | ||
| 147 | } | ||
| 148 | |||
| 149 | static DEFINE_SPINLOCK(report_lock); | ||
| 150 | |||
| 151 | static void ubsan_prologue(struct source_location *location, | ||
| 152 | unsigned long *flags) | ||
| 153 | { | ||
| 154 | current->in_ubsan++; | ||
| 155 | spin_lock_irqsave(&report_lock, *flags); | ||
| 156 | |||
| 157 | pr_err("========================================" | ||
| 158 | "========================================\n"); | ||
| 159 | print_source_location("UBSAN: Undefined behaviour in", location); | ||
| 160 | } | ||
| 161 | |||
| 162 | static void ubsan_epilogue(unsigned long *flags) | ||
| 163 | { | ||
| 164 | dump_stack(); | ||
| 165 | pr_err("========================================" | ||
| 166 | "========================================\n"); | ||
| 167 | spin_unlock_irqrestore(&report_lock, *flags); | ||
| 168 | current->in_ubsan--; | ||
| 169 | } | ||
| 170 | |||
| 171 | static void handle_overflow(struct overflow_data *data, unsigned long lhs, | ||
| 172 | unsigned long rhs, char op) | ||
| 173 | { | ||
| 174 | |||
| 175 | struct type_descriptor *type = data->type; | ||
| 176 | unsigned long flags; | ||
| 177 | char lhs_val_str[VALUE_LENGTH]; | ||
| 178 | char rhs_val_str[VALUE_LENGTH]; | ||
| 179 | |||
| 180 | if (suppress_report(&data->location)) | ||
| 181 | return; | ||
| 182 | |||
| 183 | ubsan_prologue(&data->location, &flags); | ||
| 184 | |||
| 185 | val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); | ||
| 186 | val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); | ||
| 187 | pr_err("%s integer overflow:\n", | ||
| 188 | type_is_signed(type) ? "signed" : "unsigned"); | ||
| 189 | pr_err("%s %c %s cannot be represented in type %s\n", | ||
| 190 | lhs_val_str, | ||
| 191 | op, | ||
| 192 | rhs_val_str, | ||
| 193 | type->type_name); | ||
| 194 | |||
| 195 | ubsan_epilogue(&flags); | ||
| 196 | } | ||
| 197 | |||
| 198 | void __ubsan_handle_add_overflow(struct overflow_data *data, | ||
| 199 | unsigned long lhs, | ||
| 200 | unsigned long rhs) | ||
| 201 | { | ||
| 202 | |||
| 203 | handle_overflow(data, lhs, rhs, '+'); | ||
| 204 | } | ||
| 205 | EXPORT_SYMBOL(__ubsan_handle_add_overflow); | ||
| 206 | |||
| 207 | void __ubsan_handle_sub_overflow(struct overflow_data *data, | ||
| 208 | unsigned long lhs, | ||
| 209 | unsigned long rhs) | ||
| 210 | { | ||
| 211 | handle_overflow(data, lhs, rhs, '-'); | ||
| 212 | } | ||
| 213 | EXPORT_SYMBOL(__ubsan_handle_sub_overflow); | ||
| 214 | |||
| 215 | void __ubsan_handle_mul_overflow(struct overflow_data *data, | ||
| 216 | unsigned long lhs, | ||
| 217 | unsigned long rhs) | ||
| 218 | { | ||
| 219 | handle_overflow(data, lhs, rhs, '*'); | ||
| 220 | } | ||
| 221 | EXPORT_SYMBOL(__ubsan_handle_mul_overflow); | ||
| 222 | |||
| 223 | void __ubsan_handle_negate_overflow(struct overflow_data *data, | ||
| 224 | unsigned long old_val) | ||
| 225 | { | ||
| 226 | unsigned long flags; | ||
| 227 | char old_val_str[VALUE_LENGTH]; | ||
| 228 | |||
| 229 | if (suppress_report(&data->location)) | ||
| 230 | return; | ||
| 231 | |||
| 232 | ubsan_prologue(&data->location, &flags); | ||
| 233 | |||
| 234 | val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); | ||
| 235 | |||
| 236 | pr_err("negation of %s cannot be represented in type %s:\n", | ||
| 237 | old_val_str, data->type->type_name); | ||
| 238 | |||
| 239 | ubsan_epilogue(&flags); | ||
| 240 | } | ||
| 241 | EXPORT_SYMBOL(__ubsan_handle_negate_overflow); | ||
| 242 | |||
| 243 | |||
| 244 | void __ubsan_handle_divrem_overflow(struct overflow_data *data, | ||
| 245 | unsigned long lhs, | ||
| 246 | unsigned long rhs) | ||
| 247 | { | ||
| 248 | unsigned long flags; | ||
| 249 | char rhs_val_str[VALUE_LENGTH]; | ||
| 250 | |||
| 251 | if (suppress_report(&data->location)) | ||
| 252 | return; | ||
| 253 | |||
| 254 | ubsan_prologue(&data->location, &flags); | ||
| 255 | |||
| 256 | val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); | ||
| 257 | |||
| 258 | if (type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1) | ||
| 259 | pr_err("division of %s by -1 cannot be represented in type %s\n", | ||
| 260 | rhs_val_str, data->type->type_name); | ||
| 261 | else | ||
| 262 | pr_err("division by zero\n"); | ||
| 263 | |||
| 264 | ubsan_epilogue(&flags); | ||
| 265 | } | ||
| 266 | EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); | ||
| 267 | |||
| 268 | static void handle_null_ptr_deref(struct type_mismatch_data *data) | ||
| 269 | { | ||
| 270 | unsigned long flags; | ||
| 271 | |||
| 272 | if (suppress_report(&data->location)) | ||
| 273 | return; | ||
| 274 | |||
| 275 | ubsan_prologue(&data->location, &flags); | ||
| 276 | |||
| 277 | pr_err("%s null pointer of type %s\n", | ||
| 278 | type_check_kinds[data->type_check_kind], | ||
| 279 | data->type->type_name); | ||
| 280 | |||
| 281 | ubsan_epilogue(&flags); | ||
| 282 | } | ||
| 283 | |||
| 284 | static void handle_missaligned_access(struct type_mismatch_data *data, | ||
| 285 | unsigned long ptr) | ||
| 286 | { | ||
| 287 | unsigned long flags; | ||
| 288 | |||
| 289 | if (suppress_report(&data->location)) | ||
| 290 | return; | ||
| 291 | |||
| 292 | ubsan_prologue(&data->location, &flags); | ||
| 293 | |||
| 294 | pr_err("%s misaligned address %p for type %s\n", | ||
| 295 | type_check_kinds[data->type_check_kind], | ||
| 296 | (void *)ptr, data->type->type_name); | ||
| 297 | pr_err("which requires %ld byte alignment\n", data->alignment); | ||
| 298 | |||
| 299 | ubsan_epilogue(&flags); | ||
| 300 | } | ||
| 301 | |||
| 302 | static void handle_object_size_mismatch(struct type_mismatch_data *data, | ||
| 303 | unsigned long ptr) | ||
| 304 | { | ||
| 305 | unsigned long flags; | ||
| 306 | |||
| 307 | if (suppress_report(&data->location)) | ||
| 308 | return; | ||
| 309 | |||
| 310 | ubsan_prologue(&data->location, &flags); | ||
| 311 | pr_err("%s address %pk with insufficient space\n", | ||
| 312 | type_check_kinds[data->type_check_kind], | ||
| 313 | (void *) ptr); | ||
| 314 | pr_err("for an object of type %s\n", data->type->type_name); | ||
| 315 | ubsan_epilogue(&flags); | ||
| 316 | } | ||
| 317 | |||
| 318 | void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, | ||
| 319 | unsigned long ptr) | ||
| 320 | { | ||
| 321 | |||
| 322 | if (!ptr) | ||
| 323 | handle_null_ptr_deref(data); | ||
| 324 | else if (data->alignment && !IS_ALIGNED(ptr, data->alignment)) | ||
| 325 | handle_missaligned_access(data, ptr); | ||
| 326 | else | ||
| 327 | handle_object_size_mismatch(data, ptr); | ||
| 328 | } | ||
| 329 | EXPORT_SYMBOL(__ubsan_handle_type_mismatch); | ||
| 330 | |||
| 331 | void __ubsan_handle_nonnull_return(struct nonnull_return_data *data) | ||
| 332 | { | ||
| 333 | unsigned long flags; | ||
| 334 | |||
| 335 | if (suppress_report(&data->location)) | ||
| 336 | return; | ||
| 337 | |||
| 338 | ubsan_prologue(&data->location, &flags); | ||
| 339 | |||
| 340 | pr_err("null pointer returned from function declared to never return null\n"); | ||
| 341 | |||
| 342 | if (location_is_valid(&data->attr_location)) | ||
| 343 | print_source_location("returns_nonnull attribute specified in", | ||
| 344 | &data->attr_location); | ||
| 345 | |||
| 346 | ubsan_epilogue(&flags); | ||
| 347 | } | ||
| 348 | EXPORT_SYMBOL(__ubsan_handle_nonnull_return); | ||
| 349 | |||
| 350 | void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, | ||
| 351 | unsigned long bound) | ||
| 352 | { | ||
| 353 | unsigned long flags; | ||
| 354 | char bound_str[VALUE_LENGTH]; | ||
| 355 | |||
| 356 | if (suppress_report(&data->location)) | ||
| 357 | return; | ||
| 358 | |||
| 359 | ubsan_prologue(&data->location, &flags); | ||
| 360 | |||
| 361 | val_to_string(bound_str, sizeof(bound_str), data->type, bound); | ||
| 362 | pr_err("variable length array bound value %s <= 0\n", bound_str); | ||
| 363 | |||
| 364 | ubsan_epilogue(&flags); | ||
| 365 | } | ||
| 366 | EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); | ||
| 367 | |||
| 368 | void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, | ||
| 369 | unsigned long index) | ||
| 370 | { | ||
| 371 | unsigned long flags; | ||
| 372 | char index_str[VALUE_LENGTH]; | ||
| 373 | |||
| 374 | if (suppress_report(&data->location)) | ||
| 375 | return; | ||
| 376 | |||
| 377 | ubsan_prologue(&data->location, &flags); | ||
| 378 | |||
| 379 | val_to_string(index_str, sizeof(index_str), data->index_type, index); | ||
| 380 | pr_err("index %s is out of range for type %s\n", index_str, | ||
| 381 | data->array_type->type_name); | ||
| 382 | ubsan_epilogue(&flags); | ||
| 383 | } | ||
| 384 | EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); | ||
| 385 | |||
| 386 | void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, | ||
| 387 | unsigned long lhs, unsigned long rhs) | ||
| 388 | { | ||
| 389 | unsigned long flags; | ||
| 390 | struct type_descriptor *rhs_type = data->rhs_type; | ||
| 391 | struct type_descriptor *lhs_type = data->lhs_type; | ||
| 392 | char rhs_str[VALUE_LENGTH]; | ||
| 393 | char lhs_str[VALUE_LENGTH]; | ||
| 394 | |||
| 395 | if (suppress_report(&data->location)) | ||
| 396 | return; | ||
| 397 | |||
| 398 | ubsan_prologue(&data->location, &flags); | ||
| 399 | |||
| 400 | val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); | ||
| 401 | val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); | ||
| 402 | |||
| 403 | if (val_is_negative(rhs_type, rhs)) | ||
| 404 | pr_err("shift exponent %s is negative\n", rhs_str); | ||
| 405 | |||
| 406 | else if (get_unsigned_val(rhs_type, rhs) >= | ||
| 407 | type_bit_width(lhs_type)) | ||
| 408 | pr_err("shift exponent %s is too large for %u-bit type %s\n", | ||
| 409 | rhs_str, | ||
| 410 | type_bit_width(lhs_type), | ||
| 411 | lhs_type->type_name); | ||
| 412 | else if (val_is_negative(lhs_type, lhs)) | ||
| 413 | pr_err("left shift of negative value %s\n", | ||
| 414 | lhs_str); | ||
| 415 | else | ||
| 416 | pr_err("left shift of %s by %s places cannot be" | ||
| 417 | " represented in type %s\n", | ||
| 418 | lhs_str, rhs_str, | ||
| 419 | lhs_type->type_name); | ||
| 420 | |||
| 421 | ubsan_epilogue(&flags); | ||
| 422 | } | ||
| 423 | EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); | ||
| 424 | |||
| 425 | |||
| 426 | void __noreturn | ||
| 427 | __ubsan_handle_builtin_unreachable(struct unreachable_data *data) | ||
| 428 | { | ||
| 429 | unsigned long flags; | ||
| 430 | |||
| 431 | ubsan_prologue(&data->location, &flags); | ||
| 432 | pr_err("calling __builtin_unreachable()\n"); | ||
| 433 | ubsan_epilogue(&flags); | ||
| 434 | panic("can't return from __builtin_unreachable()"); | ||
| 435 | } | ||
| 436 | EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); | ||
| 437 | |||
| 438 | void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, | ||
| 439 | unsigned long val) | ||
| 440 | { | ||
| 441 | unsigned long flags; | ||
| 442 | char val_str[VALUE_LENGTH]; | ||
| 443 | |||
| 444 | if (suppress_report(&data->location)) | ||
| 445 | return; | ||
| 446 | |||
| 447 | ubsan_prologue(&data->location, &flags); | ||
| 448 | |||
| 449 | val_to_string(val_str, sizeof(val_str), data->type, val); | ||
| 450 | |||
| 451 | pr_err("load of value %s is not a valid value for type %s\n", | ||
| 452 | val_str, data->type->type_name); | ||
| 453 | |||
| 454 | ubsan_epilogue(&flags); | ||
| 455 | } | ||
| 456 | EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); | ||
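Two things keep lib/ubsan.c from flooding the log: suppress_report() bails out while current->in_ubsan is set, so a report cannot recurse into itself, and was_reported() atomically sets REPORTED_BIT inside the compiler-emitted source_location so each offending source line is reported only once. A userspace sketch of that report-once pattern, assuming nothing about the kernel's actual bit layout:

#include <stdatomic.h>
#include <stdio.h>

struct demo_location {
	const char *file;
	int line;
	atomic_flag reported;		/* plays the role of REPORTED_BIT */
};

static void report_once(struct demo_location *loc, const char *msg)
{
	/* first caller wins the flag and prints; later hits stay silent */
	if (atomic_flag_test_and_set(&loc->reported))
		return;
	fprintf(stderr, "UBSAN-like: %s at %s:%d\n", msg, loc->file, loc->line);
}

int main(void)
{
	static struct demo_location loc = { "demo.c", 42, ATOMIC_FLAG_INIT };

	report_once(&loc, "signed integer overflow");
	report_once(&loc, "signed integer overflow");	/* suppressed */
	return 0;
}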
diff --git a/lib/ubsan.h b/lib/ubsan.h new file mode 100644 index 000000000000..b2d18d4a53f5 --- /dev/null +++ b/lib/ubsan.h | |||
| @@ -0,0 +1,84 @@ | |||
| 1 | #ifndef _LIB_UBSAN_H | ||
| 2 | #define _LIB_UBSAN_H | ||
| 3 | |||
| 4 | enum { | ||
| 5 | type_kind_int = 0, | ||
| 6 | type_kind_float = 1, | ||
| 7 | type_unknown = 0xffff | ||
| 8 | }; | ||
| 9 | |||
| 10 | struct type_descriptor { | ||
| 11 | u16 type_kind; | ||
| 12 | u16 type_info; | ||
| 13 | char type_name[1]; | ||
| 14 | }; | ||
| 15 | |||
| 16 | struct source_location { | ||
| 17 | const char *file_name; | ||
| 18 | union { | ||
| 19 | unsigned long reported; | ||
| 20 | struct { | ||
| 21 | u32 line; | ||
| 22 | u32 column; | ||
| 23 | }; | ||
| 24 | }; | ||
| 25 | }; | ||
| 26 | |||
| 27 | struct overflow_data { | ||
| 28 | struct source_location location; | ||
| 29 | struct type_descriptor *type; | ||
| 30 | }; | ||
| 31 | |||
| 32 | struct type_mismatch_data { | ||
| 33 | struct source_location location; | ||
| 34 | struct type_descriptor *type; | ||
| 35 | unsigned long alignment; | ||
| 36 | unsigned char type_check_kind; | ||
| 37 | }; | ||
| 38 | |||
| 39 | struct nonnull_arg_data { | ||
| 40 | struct source_location location; | ||
| 41 | struct source_location attr_location; | ||
| 42 | int arg_index; | ||
| 43 | }; | ||
| 44 | |||
| 45 | struct nonnull_return_data { | ||
| 46 | struct source_location location; | ||
| 47 | struct source_location attr_location; | ||
| 48 | }; | ||
| 49 | |||
| 50 | struct vla_bound_data { | ||
| 51 | struct source_location location; | ||
| 52 | struct type_descriptor *type; | ||
| 53 | }; | ||
| 54 | |||
| 55 | struct out_of_bounds_data { | ||
| 56 | struct source_location location; | ||
| 57 | struct type_descriptor *array_type; | ||
| 58 | struct type_descriptor *index_type; | ||
| 59 | }; | ||
| 60 | |||
| 61 | struct shift_out_of_bounds_data { | ||
| 62 | struct source_location location; | ||
| 63 | struct type_descriptor *lhs_type; | ||
| 64 | struct type_descriptor *rhs_type; | ||
| 65 | }; | ||
| 66 | |||
| 67 | struct unreachable_data { | ||
| 68 | struct source_location location; | ||
| 69 | }; | ||
| 70 | |||
| 71 | struct invalid_value_data { | ||
| 72 | struct source_location location; | ||
| 73 | struct type_descriptor *type; | ||
| 74 | }; | ||
| 75 | |||
| 76 | #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) | ||
| 77 | typedef __int128 s_max; | ||
| 78 | typedef unsigned __int128 u_max; | ||
| 79 | #else | ||
| 80 | typedef s64 s_max; | ||
| 81 | typedef u64 u_max; | ||
| 82 | #endif | ||
| 83 | |||
| 84 | #endif | ||
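The handlers declared against these structs are not called directly by kernel code; the compiler inserts the calls when files are built with the UBSAN instrumentation flags, passing a compiler-generated descriptor (source location plus type information) and the offending values. As a userspace illustration of the same mechanism, the ordinary C program below, built with -fsanitize=signed-integer-overflow, is instrumented so that the addition calls the add-overflow handler at run time (libubsan in userspace, lib/ubsan.c in a CONFIG_UBSAN kernel):

/* gcc -fsanitize=signed-integer-overflow demo.c && ./a.out */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	int x = INT_MAX;
	int y = x + 1;	/* instrumented: "... cannot be represented in type 'int'" */

	printf("%d\n", y);
	return 0;
}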
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index f9cee8e1233c..48ff9c36644d 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -31,6 +31,9 @@ | |||
| 31 | #include <linux/dcache.h> | 31 | #include <linux/dcache.h> |
| 32 | #include <linux/cred.h> | 32 | #include <linux/cred.h> |
| 33 | #include <net/addrconf.h> | 33 | #include <net/addrconf.h> |
| 34 | #ifdef CONFIG_BLOCK | ||
| 35 | #include <linux/blkdev.h> | ||
| 36 | #endif | ||
| 34 | 37 | ||
| 35 | #include <asm/page.h> /* for PAGE_SIZE */ | 38 | #include <asm/page.h> /* for PAGE_SIZE */ |
| 36 | #include <asm/sections.h> /* for dereference_function_descriptor() */ | 39 | #include <asm/sections.h> /* for dereference_function_descriptor() */ |
| @@ -380,13 +383,14 @@ enum format_type { | |||
| 380 | }; | 383 | }; |
| 381 | 384 | ||
| 382 | struct printf_spec { | 385 | struct printf_spec { |
| 383 | u8 type; /* format_type enum */ | 386 | unsigned int type:8; /* format_type enum */ |
| 384 | u8 flags; /* flags to number() */ | 387 | signed int field_width:24; /* width of output field */ |
| 385 | u8 base; /* number base, 8, 10 or 16 only */ | 388 | unsigned int flags:8; /* flags to number() */ |
| 386 | u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */ | 389 | unsigned int base:8; /* number base, 8, 10 or 16 only */ |
| 387 | s16 field_width; /* width of output field */ | 390 | signed int precision:16; /* # of digits/chars */ |
| 388 | s16 precision; /* # of digits/chars */ | 391 | } __packed; |
| 389 | }; | 392 | #define FIELD_WIDTH_MAX ((1 << 23) - 1) |
| 393 | #define PRECISION_MAX ((1 << 15) - 1) | ||
| 390 | 394 | ||
| 391 | static noinline_for_stack | 395 | static noinline_for_stack |
| 392 | char *number(char *buf, char *end, unsigned long long num, | 396 | char *number(char *buf, char *end, unsigned long long num, |
| @@ -399,6 +403,10 @@ char *number(char *buf, char *end, unsigned long long num, | |||
| 399 | int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10); | 403 | int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10); |
| 400 | int i; | 404 | int i; |
| 401 | bool is_zero = num == 0LL; | 405 | bool is_zero = num == 0LL; |
| 406 | int field_width = spec.field_width; | ||
| 407 | int precision = spec.precision; | ||
| 408 | |||
| 409 | BUILD_BUG_ON(sizeof(struct printf_spec) != 8); | ||
| 402 | 410 | ||
| 403 | /* locase = 0 or 0x20. ORing digits or letters with 'locase' | 411 | /* locase = 0 or 0x20. ORing digits or letters with 'locase' |
| 404 | * produces same digits or (maybe lowercased) letters */ | 412 | * produces same digits or (maybe lowercased) letters */ |
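The old printf_spec (four u8 fields plus two s16 fields, including the qualifier that now lives as a local in format_decode() further down) and the new bitfield layout are both 8 bytes, but the bitfields widen field_width to 24 bits while keeping precision at 16, so the spec can still be passed by value in one machine word; the BUILD_BUG_ON() in the hunk above is the guard, since bitfield packing is ultimately up to the compiler and ABI. A standalone sketch of the same size check (demo struct, not the kernel's):

#include <assert.h>
#include <stdio.h>

struct demo_spec {
	unsigned int type:8;
	signed int field_width:24;
	unsigned int flags:8;
	unsigned int base:8;
	signed int precision:16;
} __attribute__((packed));

/* 8 + 24 + 8 + 8 + 16 bits = 64 bits; verify the compiler agrees. */
static_assert(sizeof(struct demo_spec) == 8, "spec must stay one 64-bit word");

int main(void)
{
	printf("sizeof(struct demo_spec) = %zu\n", sizeof(struct demo_spec));
	return 0;
}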
| @@ -410,20 +418,20 @@ char *number(char *buf, char *end, unsigned long long num, | |||
| 410 | if ((signed long long)num < 0) { | 418 | if ((signed long long)num < 0) { |
| 411 | sign = '-'; | 419 | sign = '-'; |
| 412 | num = -(signed long long)num; | 420 | num = -(signed long long)num; |
| 413 | spec.field_width--; | 421 | field_width--; |
| 414 | } else if (spec.flags & PLUS) { | 422 | } else if (spec.flags & PLUS) { |
| 415 | sign = '+'; | 423 | sign = '+'; |
| 416 | spec.field_width--; | 424 | field_width--; |
| 417 | } else if (spec.flags & SPACE) { | 425 | } else if (spec.flags & SPACE) { |
| 418 | sign = ' '; | 426 | sign = ' '; |
| 419 | spec.field_width--; | 427 | field_width--; |
| 420 | } | 428 | } |
| 421 | } | 429 | } |
| 422 | if (need_pfx) { | 430 | if (need_pfx) { |
| 423 | if (spec.base == 16) | 431 | if (spec.base == 16) |
| 424 | spec.field_width -= 2; | 432 | field_width -= 2; |
| 425 | else if (!is_zero) | 433 | else if (!is_zero) |
| 426 | spec.field_width--; | 434 | field_width--; |
| 427 | } | 435 | } |
| 428 | 436 | ||
| 429 | /* generate full string in tmp[], in reverse order */ | 437 | /* generate full string in tmp[], in reverse order */ |
| @@ -445,12 +453,12 @@ char *number(char *buf, char *end, unsigned long long num, | |||
| 445 | } | 453 | } |
| 446 | 454 | ||
| 447 | /* printing 100 using %2d gives "100", not "00" */ | 455 | /* printing 100 using %2d gives "100", not "00" */ |
| 448 | if (i > spec.precision) | 456 | if (i > precision) |
| 449 | spec.precision = i; | 457 | precision = i; |
| 450 | /* leading space padding */ | 458 | /* leading space padding */ |
| 451 | spec.field_width -= spec.precision; | 459 | field_width -= precision; |
| 452 | if (!(spec.flags & (ZEROPAD | LEFT))) { | 460 | if (!(spec.flags & (ZEROPAD | LEFT))) { |
| 453 | while (--spec.field_width >= 0) { | 461 | while (--field_width >= 0) { |
| 454 | if (buf < end) | 462 | if (buf < end) |
| 455 | *buf = ' '; | 463 | *buf = ' '; |
| 456 | ++buf; | 464 | ++buf; |
| @@ -479,14 +487,14 @@ char *number(char *buf, char *end, unsigned long long num, | |||
| 479 | if (!(spec.flags & LEFT)) { | 487 | if (!(spec.flags & LEFT)) { |
| 480 | char c = ' ' + (spec.flags & ZEROPAD); | 488 | char c = ' ' + (spec.flags & ZEROPAD); |
| 481 | BUILD_BUG_ON(' ' + ZEROPAD != '0'); | 489 | BUILD_BUG_ON(' ' + ZEROPAD != '0'); |
| 482 | while (--spec.field_width >= 0) { | 490 | while (--field_width >= 0) { |
| 483 | if (buf < end) | 491 | if (buf < end) |
| 484 | *buf = c; | 492 | *buf = c; |
| 485 | ++buf; | 493 | ++buf; |
| 486 | } | 494 | } |
| 487 | } | 495 | } |
| 488 | /* hmm even more zero padding? */ | 496 | /* hmm even more zero padding? */ |
| 489 | while (i <= --spec.precision) { | 497 | while (i <= --precision) { |
| 490 | if (buf < end) | 498 | if (buf < end) |
| 491 | *buf = '0'; | 499 | *buf = '0'; |
| 492 | ++buf; | 500 | ++buf; |
| @@ -498,7 +506,7 @@ char *number(char *buf, char *end, unsigned long long num, | |||
| 498 | ++buf; | 506 | ++buf; |
| 499 | } | 507 | } |
| 500 | /* trailing space padding */ | 508 | /* trailing space padding */ |
| 501 | while (--spec.field_width >= 0) { | 509 | while (--field_width >= 0) { |
| 502 | if (buf < end) | 510 | if (buf < end) |
| 503 | *buf = ' '; | 511 | *buf = ' '; |
| 504 | ++buf; | 512 | ++buf; |
| @@ -508,37 +516,20 @@ char *number(char *buf, char *end, unsigned long long num, | |||
| 508 | } | 516 | } |
| 509 | 517 | ||
| 510 | static noinline_for_stack | 518 | static noinline_for_stack |
| 511 | char *string(char *buf, char *end, const char *s, struct printf_spec spec) | 519 | char *special_hex_number(char *buf, char *end, unsigned long long num, int size) |
| 512 | { | 520 | { |
| 513 | int len, i; | 521 | struct printf_spec spec; |
| 514 | |||
| 515 | if ((unsigned long)s < PAGE_SIZE) | ||
| 516 | s = "(null)"; | ||
| 517 | |||
| 518 | len = strnlen(s, spec.precision); | ||
| 519 | 522 | ||
| 520 | if (!(spec.flags & LEFT)) { | 523 | spec.type = FORMAT_TYPE_PTR; |
| 521 | while (len < spec.field_width--) { | 524 | spec.field_width = 2 + 2 * size; /* 0x + hex */ |
| 522 | if (buf < end) | 525 | spec.flags = SPECIAL | SMALL | ZEROPAD; |
| 523 | *buf = ' '; | 526 | spec.base = 16; |
| 524 | ++buf; | 527 | spec.precision = -1; |
| 525 | } | ||
| 526 | } | ||
| 527 | for (i = 0; i < len; ++i) { | ||
| 528 | if (buf < end) | ||
| 529 | *buf = *s; | ||
| 530 | ++buf; ++s; | ||
| 531 | } | ||
| 532 | while (len < spec.field_width--) { | ||
| 533 | if (buf < end) | ||
| 534 | *buf = ' '; | ||
| 535 | ++buf; | ||
| 536 | } | ||
| 537 | 528 | ||
| 538 | return buf; | 529 | return number(buf, end, num, spec); |
| 539 | } | 530 | } |
| 540 | 531 | ||
| 541 | static void widen(char *buf, char *end, unsigned len, unsigned spaces) | 532 | static void move_right(char *buf, char *end, unsigned len, unsigned spaces) |
| 542 | { | 533 | { |
| 543 | size_t size; | 534 | size_t size; |
| 544 | if (buf >= end) /* nowhere to put anything */ | 535 | if (buf >= end) /* nowhere to put anything */ |
| @@ -556,6 +547,56 @@ static void widen(char *buf, char *end, unsigned len, unsigned spaces) | |||
| 556 | memset(buf, ' ', spaces); | 547 | memset(buf, ' ', spaces); |
| 557 | } | 548 | } |
| 558 | 549 | ||
| 550 | /* | ||
| 551 | * Handle field width padding for a string. | ||
| 552 | * @buf: current buffer position | ||
| 553 | * @n: length of string | ||
| 554 | * @end: end of output buffer | ||
| 555 | * @spec: for field width and flags | ||
| 556 | * Returns: new buffer position after padding. | ||
| 557 | */ | ||
| 558 | static noinline_for_stack | ||
| 559 | char *widen_string(char *buf, int n, char *end, struct printf_spec spec) | ||
| 560 | { | ||
| 561 | unsigned spaces; | ||
| 562 | |||
| 563 | if (likely(n >= spec.field_width)) | ||
| 564 | return buf; | ||
| 565 | /* we want to pad the sucker */ | ||
| 566 | spaces = spec.field_width - n; | ||
| 567 | if (!(spec.flags & LEFT)) { | ||
| 568 | move_right(buf - n, end, n, spaces); | ||
| 569 | return buf + spaces; | ||
| 570 | } | ||
| 571 | while (spaces--) { | ||
| 572 | if (buf < end) | ||
| 573 | *buf = ' '; | ||
| 574 | ++buf; | ||
| 575 | } | ||
| 576 | return buf; | ||
| 577 | } | ||
| 578 | |||
| 579 | static noinline_for_stack | ||
| 580 | char *string(char *buf, char *end, const char *s, struct printf_spec spec) | ||
| 581 | { | ||
| 582 | int len = 0; | ||
| 583 | size_t lim = spec.precision; | ||
| 584 | |||
| 585 | if ((unsigned long)s < PAGE_SIZE) | ||
| 586 | s = "(null)"; | ||
| 587 | |||
| 588 | while (lim--) { | ||
| 589 | char c = *s++; | ||
| 590 | if (!c) | ||
| 591 | break; | ||
| 592 | if (buf < end) | ||
| 593 | *buf = c; | ||
| 594 | ++buf; | ||
| 595 | ++len; | ||
| 596 | } | ||
| 597 | return widen_string(buf, len, end, spec); | ||
| 598 | } | ||
| 599 | |||
| 559 | static noinline_for_stack | 600 | static noinline_for_stack |
| 560 | char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec, | 601 | char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec, |
| 561 | const char *fmt) | 602 | const char *fmt) |
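The rewritten string() above now copies at most spec.precision characters and leaves all field-width handling to widen_string(), which either appends trailing spaces (LEFT flag) or shifts the already-emitted text right with move_right() and fills the gap; dentry_name() below is switched to the same helper. Note the small trick in string(): assigning the signed precision (-1 meaning "unlimited") to a size_t turns it into a huge limit, so the copy simply stops at the terminating NUL. A userspace sketch of the pad-after-copy idea (helper name and buffer sizes are illustrative):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for widen_string(): pad text of length n out to
 * field_width, right-aligned unless "left" is set. */
static void demo_widen(char *buf, int n, int field_width, int left)
{
	int spaces = field_width > n ? field_width - n : 0;

	if (left) {
		memset(buf + n, ' ', spaces);		/* trailing spaces */
		buf[n + spaces] = '\0';
	} else {
		memmove(buf + spaces, buf, n + 1);	/* like move_right() */
		memset(buf, ' ', spaces);
	}
}

int main(void)
{
	char a[16] = "sda", b[16] = "sda";

	demo_widen(a, 3, 8, 0);
	demo_widen(b, 3, 8, 1);
	printf("[%s] [%s]\n", a, b);	/* [     sda] [sda     ] */
	return 0;
}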
| @@ -597,21 +638,28 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp | |||
| 597 | *buf = c; | 638 | *buf = c; |
| 598 | } | 639 | } |
| 599 | rcu_read_unlock(); | 640 | rcu_read_unlock(); |
| 600 | if (n < spec.field_width) { | 641 | return widen_string(buf, n, end, spec); |
| 601 | /* we want to pad the sucker */ | 642 | } |
| 602 | unsigned spaces = spec.field_width - n; | 643 | |
| 603 | if (!(spec.flags & LEFT)) { | 644 | #ifdef CONFIG_BLOCK |
| 604 | widen(buf - n, end, n, spaces); | 645 | static noinline_for_stack |
| 605 | return buf + spaces; | 646 | char *bdev_name(char *buf, char *end, struct block_device *bdev, |
| 606 | } | 647 | struct printf_spec spec, const char *fmt) |
| 607 | while (spaces--) { | 648 | { |
| 649 | struct gendisk *hd = bdev->bd_disk; | ||
| 650 | |||
| 651 | buf = string(buf, end, hd->disk_name, spec); | ||
| 652 | if (bdev->bd_part->partno) { | ||
| 653 | if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) { | ||
| 608 | if (buf < end) | 654 | if (buf < end) |
| 609 | *buf = ' '; | 655 | *buf = 'p'; |
| 610 | ++buf; | 656 | buf++; |
| 611 | } | 657 | } |
| 658 | buf = number(buf, end, bdev->bd_part->partno, spec); | ||
| 612 | } | 659 | } |
| 613 | return buf; | 660 | return buf; |
| 614 | } | 661 | } |
| 662 | #endif | ||
| 615 | 663 | ||
| 616 | static noinline_for_stack | 664 | static noinline_for_stack |
| 617 | char *symbol_string(char *buf, char *end, void *ptr, | 665 | char *symbol_string(char *buf, char *end, void *ptr, |
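bdev_name() backs the new %pg specifier documented further down: it prints the gendisk name and, for a partition, appends the partition number, inserting a 'p' when the disk name already ends in a digit, so "sda" partition 1 comes out as "sda1" while "mmcblk0" partition 1 comes out as "mmcblk0p1". A userspace sketch of just that naming rule (not the kernel function):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void demo_part_name(char *out, size_t len, const char *disk, int partno)
{
	if (!partno)
		snprintf(out, len, "%s", disk);
	else if (isdigit((unsigned char)disk[strlen(disk) - 1]))
		snprintf(out, len, "%sp%d", disk, partno);	/* mmcblk0p1 */
	else
		snprintf(out, len, "%s%d", disk, partno);	/* sda1 */
}

int main(void)
{
	char name[32];

	demo_part_name(name, sizeof(name), "sda", 1);
	printf("%s\n", name);
	demo_part_name(name, sizeof(name), "mmcblk0", 1);
	printf("%s\n", name);
	return 0;
}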
| @@ -636,11 +684,7 @@ char *symbol_string(char *buf, char *end, void *ptr, | |||
| 636 | 684 | ||
| 637 | return string(buf, end, sym, spec); | 685 | return string(buf, end, sym, spec); |
| 638 | #else | 686 | #else |
| 639 | spec.field_width = 2 * sizeof(void *); | 687 | return special_hex_number(buf, end, value, sizeof(void *)); |
| 640 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | ||
| 641 | spec.base = 16; | ||
| 642 | |||
| 643 | return number(buf, end, value, spec); | ||
| 644 | #endif | 688 | #endif |
| 645 | } | 689 | } |
| 646 | 690 | ||
| @@ -1301,40 +1345,45 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
| 1301 | return string(buf, end, uuid, spec); | 1345 | return string(buf, end, uuid, spec); |
| 1302 | } | 1346 | } |
| 1303 | 1347 | ||
| 1304 | static | 1348 | static noinline_for_stack |
| 1305 | char *netdev_feature_string(char *buf, char *end, const u8 *addr, | 1349 | char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt) |
| 1306 | struct printf_spec spec) | ||
| 1307 | { | 1350 | { |
| 1308 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | 1351 | unsigned long long num; |
| 1309 | if (spec.field_width == -1) | 1352 | int size; |
| 1310 | spec.field_width = 2 + 2 * sizeof(netdev_features_t); | ||
| 1311 | spec.base = 16; | ||
| 1312 | 1353 | ||
| 1313 | return number(buf, end, *(const netdev_features_t *)addr, spec); | 1354 | switch (fmt[1]) { |
| 1355 | case 'F': | ||
| 1356 | num = *(const netdev_features_t *)addr; | ||
| 1357 | size = sizeof(netdev_features_t); | ||
| 1358 | break; | ||
| 1359 | default: | ||
| 1360 | num = (unsigned long)addr; | ||
| 1361 | size = sizeof(unsigned long); | ||
| 1362 | break; | ||
| 1363 | } | ||
| 1364 | |||
| 1365 | return special_hex_number(buf, end, num, size); | ||
| 1314 | } | 1366 | } |
| 1315 | 1367 | ||
| 1316 | static noinline_for_stack | 1368 | static noinline_for_stack |
| 1317 | char *address_val(char *buf, char *end, const void *addr, | 1369 | char *address_val(char *buf, char *end, const void *addr, const char *fmt) |
| 1318 | struct printf_spec spec, const char *fmt) | ||
| 1319 | { | 1370 | { |
| 1320 | unsigned long long num; | 1371 | unsigned long long num; |
| 1321 | 1372 | int size; | |
| 1322 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | ||
| 1323 | spec.base = 16; | ||
| 1324 | 1373 | ||
| 1325 | switch (fmt[1]) { | 1374 | switch (fmt[1]) { |
| 1326 | case 'd': | 1375 | case 'd': |
| 1327 | num = *(const dma_addr_t *)addr; | 1376 | num = *(const dma_addr_t *)addr; |
| 1328 | spec.field_width = sizeof(dma_addr_t) * 2 + 2; | 1377 | size = sizeof(dma_addr_t); |
| 1329 | break; | 1378 | break; |
| 1330 | case 'p': | 1379 | case 'p': |
| 1331 | default: | 1380 | default: |
| 1332 | num = *(const phys_addr_t *)addr; | 1381 | num = *(const phys_addr_t *)addr; |
| 1333 | spec.field_width = sizeof(phys_addr_t) * 2 + 2; | 1382 | size = sizeof(phys_addr_t); |
| 1334 | break; | 1383 | break; |
| 1335 | } | 1384 | } |
| 1336 | 1385 | ||
| 1337 | return number(buf, end, num, spec); | 1386 | return special_hex_number(buf, end, num, size); |
| 1338 | } | 1387 | } |
| 1339 | 1388 | ||
| 1340 | static noinline_for_stack | 1389 | static noinline_for_stack |
| @@ -1353,10 +1402,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec, | |||
| 1353 | #ifdef CONFIG_COMMON_CLK | 1402 | #ifdef CONFIG_COMMON_CLK |
| 1354 | return string(buf, end, __clk_get_name(clk), spec); | 1403 | return string(buf, end, __clk_get_name(clk), spec); |
| 1355 | #else | 1404 | #else |
| 1356 | spec.base = 16; | 1405 | return special_hex_number(buf, end, (unsigned long)clk, sizeof(unsigned long)); |
| 1357 | spec.field_width = sizeof(unsigned long) * 2 + 2; | ||
| 1358 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | ||
| 1359 | return number(buf, end, (unsigned long)clk, spec); | ||
| 1360 | #endif | 1406 | #endif |
| 1361 | } | 1407 | } |
| 1362 | } | 1408 | } |
| @@ -1443,6 +1489,7 @@ int kptr_restrict __read_mostly; | |||
| 1443 | * (default assumed to be phys_addr_t, passed by reference) | 1489 | * (default assumed to be phys_addr_t, passed by reference) |
| 1444 | * - 'd[234]' For a dentry name (optionally 2-4 last components) | 1490 | * - 'd[234]' For a dentry name (optionally 2-4 last components) |
| 1445 | * - 'D[234]' Same as 'd' but for a struct file | 1491 | * - 'D[234]' Same as 'd' but for a struct file |
| 1492 | * - 'g' For block_device name (gendisk + partition number) | ||
| 1446 | * - 'C' For a clock, it prints the name (Common Clock Framework) or address | 1493 | * - 'C' For a clock, it prints the name (Common Clock Framework) or address |
| 1447 | * (legacy clock framework) of the clock | 1494 | * (legacy clock framework) of the clock |
| 1448 | * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address | 1495 | * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address |
| @@ -1585,13 +1632,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 1585 | break; | 1632 | break; |
| 1586 | 1633 | ||
| 1587 | case 'N': | 1634 | case 'N': |
| 1588 | switch (fmt[1]) { | 1635 | return netdev_bits(buf, end, ptr, fmt); |
| 1589 | case 'F': | ||
| 1590 | return netdev_feature_string(buf, end, ptr, spec); | ||
| 1591 | } | ||
| 1592 | break; | ||
| 1593 | case 'a': | 1636 | case 'a': |
| 1594 | return address_val(buf, end, ptr, spec, fmt); | 1637 | return address_val(buf, end, ptr, fmt); |
| 1595 | case 'd': | 1638 | case 'd': |
| 1596 | return dentry_name(buf, end, ptr, spec, fmt); | 1639 | return dentry_name(buf, end, ptr, spec, fmt); |
| 1597 | case 'C': | 1640 | case 'C': |
| @@ -1600,6 +1643,11 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 1600 | return dentry_name(buf, end, | 1643 | return dentry_name(buf, end, |
| 1601 | ((const struct file *)ptr)->f_path.dentry, | 1644 | ((const struct file *)ptr)->f_path.dentry, |
| 1602 | spec, fmt); | 1645 | spec, fmt); |
| 1646 | #ifdef CONFIG_BLOCK | ||
| 1647 | case 'g': | ||
| 1648 | return bdev_name(buf, end, ptr, spec, fmt); | ||
| 1649 | #endif | ||
| 1650 | |||
| 1603 | } | 1651 | } |
| 1604 | spec.flags |= SMALL; | 1652 | spec.flags |= SMALL; |
| 1605 | if (spec.field_width == -1) { | 1653 | if (spec.field_width == -1) { |
| @@ -1635,6 +1683,7 @@ static noinline_for_stack | |||
| 1635 | int format_decode(const char *fmt, struct printf_spec *spec) | 1683 | int format_decode(const char *fmt, struct printf_spec *spec) |
| 1636 | { | 1684 | { |
| 1637 | const char *start = fmt; | 1685 | const char *start = fmt; |
| 1686 | char qualifier; | ||
| 1638 | 1687 | ||
| 1639 | /* we finished early by reading the field width */ | 1688 | /* we finished early by reading the field width */ |
| 1640 | if (spec->type == FORMAT_TYPE_WIDTH) { | 1689 | if (spec->type == FORMAT_TYPE_WIDTH) { |
| @@ -1717,16 +1766,16 @@ precision: | |||
| 1717 | 1766 | ||
| 1718 | qualifier: | 1767 | qualifier: |
| 1719 | /* get the conversion qualifier */ | 1768 | /* get the conversion qualifier */ |
| 1720 | spec->qualifier = -1; | 1769 | qualifier = 0; |
| 1721 | if (*fmt == 'h' || _tolower(*fmt) == 'l' || | 1770 | if (*fmt == 'h' || _tolower(*fmt) == 'l' || |
| 1722 | _tolower(*fmt) == 'z' || *fmt == 't') { | 1771 | _tolower(*fmt) == 'z' || *fmt == 't') { |
| 1723 | spec->qualifier = *fmt++; | 1772 | qualifier = *fmt++; |
| 1724 | if (unlikely(spec->qualifier == *fmt)) { | 1773 | if (unlikely(qualifier == *fmt)) { |
| 1725 | if (spec->qualifier == 'l') { | 1774 | if (qualifier == 'l') { |
| 1726 | spec->qualifier = 'L'; | 1775 | qualifier = 'L'; |
| 1727 | ++fmt; | 1776 | ++fmt; |
| 1728 | } else if (spec->qualifier == 'h') { | 1777 | } else if (qualifier == 'h') { |
| 1729 | spec->qualifier = 'H'; | 1778 | qualifier = 'H'; |
| 1730 | ++fmt; | 1779 | ++fmt; |
| 1731 | } | 1780 | } |
| 1732 | } | 1781 | } |
| @@ -1783,19 +1832,19 @@ qualifier: | |||
| 1783 | return fmt - start; | 1832 | return fmt - start; |
| 1784 | } | 1833 | } |
| 1785 | 1834 | ||
| 1786 | if (spec->qualifier == 'L') | 1835 | if (qualifier == 'L') |
| 1787 | spec->type = FORMAT_TYPE_LONG_LONG; | 1836 | spec->type = FORMAT_TYPE_LONG_LONG; |
| 1788 | else if (spec->qualifier == 'l') { | 1837 | else if (qualifier == 'l') { |
| 1789 | BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG); | 1838 | BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG); |
| 1790 | spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN); | 1839 | spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN); |
| 1791 | } else if (_tolower(spec->qualifier) == 'z') { | 1840 | } else if (_tolower(qualifier) == 'z') { |
| 1792 | spec->type = FORMAT_TYPE_SIZE_T; | 1841 | spec->type = FORMAT_TYPE_SIZE_T; |
| 1793 | } else if (spec->qualifier == 't') { | 1842 | } else if (qualifier == 't') { |
| 1794 | spec->type = FORMAT_TYPE_PTRDIFF; | 1843 | spec->type = FORMAT_TYPE_PTRDIFF; |
| 1795 | } else if (spec->qualifier == 'H') { | 1844 | } else if (qualifier == 'H') { |
| 1796 | BUILD_BUG_ON(FORMAT_TYPE_UBYTE + SIGN != FORMAT_TYPE_BYTE); | 1845 | BUILD_BUG_ON(FORMAT_TYPE_UBYTE + SIGN != FORMAT_TYPE_BYTE); |
| 1797 | spec->type = FORMAT_TYPE_UBYTE + (spec->flags & SIGN); | 1846 | spec->type = FORMAT_TYPE_UBYTE + (spec->flags & SIGN); |
| 1798 | } else if (spec->qualifier == 'h') { | 1847 | } else if (qualifier == 'h') { |
| 1799 | BUILD_BUG_ON(FORMAT_TYPE_USHORT + SIGN != FORMAT_TYPE_SHORT); | 1848 | BUILD_BUG_ON(FORMAT_TYPE_USHORT + SIGN != FORMAT_TYPE_SHORT); |
| 1800 | spec->type = FORMAT_TYPE_USHORT + (spec->flags & SIGN); | 1849 | spec->type = FORMAT_TYPE_USHORT + (spec->flags & SIGN); |
| 1801 | } else { | 1850 | } else { |
| @@ -1806,6 +1855,24 @@ qualifier: | |||
| 1806 | return ++fmt - start; | 1855 | return ++fmt - start; |
| 1807 | } | 1856 | } |
| 1808 | 1857 | ||
| 1858 | static void | ||
| 1859 | set_field_width(struct printf_spec *spec, int width) | ||
| 1860 | { | ||
| 1861 | spec->field_width = width; | ||
| 1862 | if (WARN_ONCE(spec->field_width != width, "field width %d too large", width)) { | ||
| 1863 | spec->field_width = clamp(width, -FIELD_WIDTH_MAX, FIELD_WIDTH_MAX); | ||
| 1864 | } | ||
| 1865 | } | ||
| 1866 | |||
| 1867 | static void | ||
| 1868 | set_precision(struct printf_spec *spec, int prec) | ||
| 1869 | { | ||
| 1870 | spec->precision = prec; | ||
| 1871 | if (WARN_ONCE(spec->precision != prec, "precision %d too large", prec)) { | ||
| 1872 | spec->precision = clamp(prec, 0, PRECISION_MAX); | ||
| 1873 | } | ||
| 1874 | } | ||
| 1875 | |||
| 1809 | /** | 1876 | /** |
| 1810 | * vsnprintf - Format a string and place it in a buffer | 1877 | * vsnprintf - Format a string and place it in a buffer |
| 1811 | * @buf: The buffer to place the result into | 1878 | * @buf: The buffer to place the result into |
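Because field_width and precision are now narrow bitfields, a '*' width or precision pulled from the va_list could silently truncate; set_field_width() and set_precision() above store the value, detect truncation by comparing the bitfield back against the original int, then warn once and clamp to FIELD_WIDTH_MAX or PRECISION_MAX. A userspace sketch of that store-then-compare check (the behaviour of oversized stores into bitfields is implementation-defined, which is exactly why the readback is needed):

#include <stdio.h>

struct demo_spec {
	signed int field_width:24;
};

#define DEMO_FIELD_WIDTH_MAX ((1 << 23) - 1)

static void demo_set_field_width(struct demo_spec *spec, int width)
{
	spec->field_width = width;
	if (spec->field_width != width) {	/* readback differs: it didn't fit */
		fprintf(stderr, "field width %d too large, clamping\n", width);
		spec->field_width = width < 0 ? -DEMO_FIELD_WIDTH_MAX
					      : DEMO_FIELD_WIDTH_MAX;
	}
}

int main(void)
{
	struct demo_spec spec;

	demo_set_field_width(&spec, 80);	/* fits, stored as-is */
	demo_set_field_width(&spec, 1 << 24);	/* too wide, warned and clamped */
	printf("%d\n", spec.field_width);	/* 8388607 */
	return 0;
}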
| @@ -1873,11 +1940,11 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1873 | } | 1940 | } |
| 1874 | 1941 | ||
| 1875 | case FORMAT_TYPE_WIDTH: | 1942 | case FORMAT_TYPE_WIDTH: |
| 1876 | spec.field_width = va_arg(args, int); | 1943 | set_field_width(&spec, va_arg(args, int)); |
| 1877 | break; | 1944 | break; |
| 1878 | 1945 | ||
| 1879 | case FORMAT_TYPE_PRECISION: | 1946 | case FORMAT_TYPE_PRECISION: |
| 1880 | spec.precision = va_arg(args, int); | 1947 | set_precision(&spec, va_arg(args, int)); |
| 1881 | break; | 1948 | break; |
| 1882 | 1949 | ||
| 1883 | case FORMAT_TYPE_CHAR: { | 1950 | case FORMAT_TYPE_CHAR: { |
| @@ -2317,11 +2384,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 2317 | } | 2384 | } |
| 2318 | 2385 | ||
| 2319 | case FORMAT_TYPE_WIDTH: | 2386 | case FORMAT_TYPE_WIDTH: |
| 2320 | spec.field_width = get_arg(int); | 2387 | set_field_width(&spec, get_arg(int)); |
| 2321 | break; | 2388 | break; |
| 2322 | 2389 | ||
| 2323 | case FORMAT_TYPE_PRECISION: | 2390 | case FORMAT_TYPE_PRECISION: |
| 2324 | spec.precision = get_arg(int); | 2391 | set_precision(&spec, get_arg(int)); |
| 2325 | break; | 2392 | break; |
| 2326 | 2393 | ||
| 2327 | case FORMAT_TYPE_CHAR: { | 2394 | case FORMAT_TYPE_CHAR: { |
