Diffstat (limited to 'lib')
 40 files changed, 1076 insertions(+), 493 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 1e80cb3c77a9..4771fb3f4da4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -351,9 +351,9 @@ config HAS_IOMEM
 	select GENERIC_IO
 	default y
 
-config HAS_IOPORT
+config HAS_IOPORT_MAP
 	boolean
-	depends on HAS_IOMEM && !NO_IOPORT
+	depends on HAS_IOMEM && !NO_IOPORT_MAP
 	default y
 
 config HAS_DMA
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index db25707aa41b..dd7f8858188a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -119,7 +119,7 @@ menu "Compile-time checks and compiler options"
 
 config DEBUG_INFO
 	bool "Compile the kernel with debug info"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && !COMPILE_TEST
 	help
 	  If you say Y here the resulting kernel image will include
 	  debugging info resulting in a larger kernel image.
@@ -761,6 +761,15 @@ config PANIC_ON_OOPS_VALUE
 	default 0 if !PANIC_ON_OOPS
 	default 1 if PANIC_ON_OOPS
 
+config PANIC_TIMEOUT
+	int "panic timeout"
+	default 0
+	help
+	  Set the timeout value (in seconds) until a reboot occurs when
+	  the kernel panics. If n = 0, then we wait forever. A timeout
+	  value n > 0 will wait n seconds before rebooting, while a timeout
+	  value n < 0 will reboot immediately.
+
 config SCHED_DEBUG
 	bool "Collect scheduler debugging info"
 	depends on DEBUG_KERNEL && PROC_FS
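[Editor's note] The help text above maps directly onto the reboot logic in the panic path; roughly the following (a simplified, hedged sketch of the behavior in kernel/panic.c, not the verbatim source; the value can also be overridden at boot with the panic= parameter):

	/* sketch: how panic() consumes panic_timeout */
	if (panic_timeout > 0) {
		int i;
		for (i = 0; i < panic_timeout * 1000; i += 100)
			mdelay(100);		/* n > 0: wait n seconds */
	}
	if (panic_timeout != 0)
		emergency_restart();		/* n < 0 lands here at once */
	for (;;)
		cpu_relax();			/* n == 0: wait forever */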
@@ -971,6 +980,21 @@ config DEBUG_LOCKING_API_SELFTESTS
 	  The following locking APIs are covered: spinlocks, rwlocks,
 	  mutexes and rwsems.
 
+config LOCK_TORTURE_TEST
+	tristate "torture tests for locking"
+	depends on DEBUG_KERNEL
+	select TORTURE_TEST
+	default n
+	help
+	  This option provides a kernel module that runs torture tests
+	  on kernel locking primitives. The kernel module may be built
+	  after the fact on the running kernel to be tested, if desired.
+
+	  Say Y here if you want kernel locking-primitive torture tests
+	  to be built into the kernel.
+	  Say M if you want these torture tests to build as a module.
+	  Say N if you are unsure.
+
 endmenu # lock debugging
 
 config TRACE_IRQFLAGS
@@ -1132,9 +1156,14 @@ config SPARSE_RCU_POINTER
 
 	  Say N if you are unsure.
 
+config TORTURE_TEST
+	tristate
+	default n
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
+	select TORTURE_TEST
 	default n
 	help
 	  This option provides a kernel module that runs torture tests
@@ -1547,17 +1576,6 @@ config PROVIDE_OHCI1394_DMA_INIT
 
 	  See Documentation/debugging-via-ohci1394.txt for more information.
 
-config FIREWIRE_OHCI_REMOTE_DMA
-	bool "Remote debugging over FireWire with firewire-ohci"
-	depends on FIREWIRE_OHCI
-	help
-	  This option lets you use the FireWire bus for remote debugging
-	  with help of the firewire-ohci driver. It enables unfiltered
-	  remote DMA in firewire-ohci.
-	  See Documentation/debugging-via-ohci1394.txt for more information.
-
-	  If unsure, say N.
-
 config BUILD_DOCSRC
 	bool "Build targets in Documentation/ tree"
 	depends on HEADERS_CHECK
@@ -1575,8 +1593,43 @@ config DMA_API_DEBUG
 	  With this option you will be able to detect common bugs in device
 	  drivers like double-freeing of DMA mappings or freeing mappings that
 	  were never allocated.
-	  This option causes a performance degredation. Use only if you want
-	  to debug device drivers. If unsure, say N.
+
+	  This also attempts to catch cases where a page owned by DMA is
+	  accessed by the cpu in a way that could cause data corruption. For
+	  example, this enables cow_user_page() to check that the source page is
+	  not undergoing DMA.
+
+	  This option causes a performance degradation. Use only if you want to
+	  debug device drivers and dma interactions.
+
+	  If unsure, say N.
+
+config TEST_MODULE
+	tristate "Test module loading with 'hello world' module"
+	default n
+	depends on m
+	help
+	  This builds the "test_module" module that emits "Hello, world"
+	  on printk when loaded. It is designed to be used for basic
+	  evaluation of the module loading subsystem (for example when
+	  validating module verification). It lacks any extra dependencies,
+	  and will not normally be loaded by the system unless explicitly
+	  requested by name.
+
+	  If unsure, say N.
+
+config TEST_USER_COPY
+	tristate "Test user/kernel boundary protections"
+	default n
+	depends on m
+	help
+	  This builds the "test_user_copy" module that runs sanity checks
+	  on the copy_to/from_user infrastructure, making sure basic
+	  user/kernel boundary testing is working. If it fails to load,
+	  a regression has been detected in the user/kernel memory boundary
+	  protections.
+
+	  If unsure, say N.
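[Editor's note] To illustrate the kind of check test_user_copy performs, a hedged sketch (assumed shape, not the verbatim lib/test_user_copy.c): passing a kernel address where a user pointer is expected must make the copy fail.

	#include <linux/module.h>
	#include <linux/uaccess.h>

	static int __init boundary_check(void)	/* hypothetical helper name */
	{
		char buf[8];

		/* a kernel stack address smuggled in as __user must be
		 * rejected; success here would be a boundary regression
		 */
		if (copy_from_user(buf, (const char __user *)buf, 1) == 0) {
			pr_err("illegal copy_from_user succeeded\n");
			return -EINVAL;		/* module refuses to load */
		}
		return 0;
	}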
 
 source "samples/Kconfig"
 
diff --git a/lib/Makefile b/lib/Makefile
index 972552b39cf5..0cd7b68e1382 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,11 +26,13 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o
+	 percpu-refcount.o percpu_ida.o hash.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
+obj-$(CONFIG_TEST_MODULE) += test_module.o
+obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -43,6 +45,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 
+GCOV_PROFILE_hweight.o := n
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 1b6a44f1ec3e..c0b1007011e1 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -157,7 +157,7 @@ enum assoc_array_walk_status {
 	assoc_array_walk_tree_empty,
 	assoc_array_walk_found_terminal_node,
 	assoc_array_walk_found_wrong_shortcut,
-} status;
+};
 
 struct assoc_array_walk_result {
 	struct {
diff --git a/lib/average.c b/lib/average.c
index 99a67e662b3c..114d1beae0c7 100644
--- a/lib/average.c
+++ b/lib/average.c
@@ -53,8 +53,10 @@ EXPORT_SYMBOL(ewma_init);
  */
 struct ewma *ewma_add(struct ewma *avg, unsigned long val)
 {
-	avg->internal = avg->internal ?
-		(((avg->internal << avg->weight) - avg->internal) +
+	unsigned long internal = ACCESS_ONCE(avg->internal);
+
+	ACCESS_ONCE(avg->internal) = internal ?
+		(((internal << avg->weight) - internal) +
 			(val << avg->factor)) >> avg->weight :
 		(val << avg->factor);
 	return avg;
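[Editor's note] The rewrite reads avg->internal once into a local and publishes the result with a single store, so concurrent lockless readers never observe a torn intermediate value; the arithmetic itself is the usual fixed-point exponentially weighted moving average. A hedged usage sketch of this lib/average.c API:

	#include <linux/average.h>
	#include <linux/printk.h>

	static struct ewma avg;

	static void ewma_example(void)		/* hypothetical caller */
	{
		ewma_init(&avg, 1024, 8);	/* factor 1024, weight 8 */
		ewma_add(&avg, 10);		/* each sample contributes 1/8 */
		ewma_add(&avg, 20);
		pr_info("avg = %lu\n", ewma_read(&avg));
	}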
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index a8f8379eb49f..2e11e48446ab 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -6,6 +6,9 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ * The functions in this file aren't called directly, but are required by
+ * GCC builtins such as __builtin_ctz, and therefore they can't be removed
+ * despite appearing unreferenced in kernel source.
  *
  * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions.
  */
@@ -13,18 +16,22 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 
+int __weak __ctzsi2(int val);
 int __weak __ctzsi2(int val)
 {
 	return __ffs(val);
 }
 EXPORT_SYMBOL(__ctzsi2);
 
+int __weak __clzsi2(int val);
 int __weak __clzsi2(int val)
 {
 	return 32 - fls(val);
 }
 EXPORT_SYMBOL(__clzsi2);
 
+int __weak __clzdi2(long val);
+int __weak __ctzdi2(long val);
 #if BITS_PER_LONG == 32
 
 int __weak __clzdi2(long val)
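[Editor's note] These declarations exist because the definitions are never called directly from C code; the compiler materializes the calls itself, and the forward declarations presumably silence missing-prototype warnings. A hedged illustration of when such a call appears:

	/* On an architecture without a native count-trailing-zeros
	 * instruction, GCC may lower this builtin to a libgcc-style
	 * call to __ctzsi2(), which the kernel must therefore provide.
	 */
	static unsigned int first_set_bit(unsigned int mask)
	{
		return __builtin_ctz(mask);	/* undefined for mask == 0 */
	}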
diff --git a/lib/cmdline.c b/lib/cmdline.c
index eb6791188cf5..d4932f745e92 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -49,13 +49,13 @@ static int get_range(char **str, int *pint)
  *	3 - hyphen found to denote a range
  */
 
-int get_option (char **str, int *pint)
+int get_option(char **str, int *pint)
 {
 	char *cur = *str;
 
 	if (!cur || !(*cur))
 		return 0;
-	*pint = simple_strtol (cur, str, 0);
+	*pint = simple_strtol(cur, str, 0);
 	if (cur == *str)
 		return 0;
 	if (**str == ',') {
@@ -67,6 +67,7 @@ int get_option (char **str, int *pint)
 
 	return 1;
 }
+EXPORT_SYMBOL(get_option);
 
 /**
  * get_options - Parse a string into a list of integers
@@ -84,13 +85,13 @@ int get_option (char **str, int *pint)
  * the parse to end (typically a null terminator, if @str is
  * completely parseable).
  */
 
 char *get_options(const char *str, int nints, int *ints)
 {
 	int res, i = 1;
 
 	while (i < nints) {
-		res = get_option ((char **)&str, ints + i);
+		res = get_option((char **)&str, ints + i);
 		if (res == 0)
 			break;
 		if (res == 3) {
@@ -112,6 +113,7 @@ char *get_options(const char *str, int nints, int *ints)
 	ints[0] = i - 1;
 	return (char *)str;
 }
+EXPORT_SYMBOL(get_options);
 
 /**
  * memparse - parse a string with mem suffixes into a number
@@ -152,8 +154,4 @@ unsigned long long memparse(const char *ptr, char **retptr)
 
 	return ret;
 }
-
-
 EXPORT_SYMBOL(memparse);
-EXPORT_SYMBOL(get_option);
-EXPORT_SYMBOL(get_options);
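[Editor's note] get_options() stores the number of parsed values in ints[0] and the values themselves in ints[1..], expanding hyphenated ranges. A hedged usage sketch:

	#include <linux/kernel.h>

	static void parse_example(void)		/* hypothetical caller */
	{
		int ints[8];	/* ints[0] receives the count */
		char *rest;

		rest = get_options("1,3-5,9", ARRAY_SIZE(ints), ints);
		/* now ints[0] == 5 and ints[1..5] == 1, 3, 4, 5, 9;
		 * rest points at the first character that ended the parse
		 */
	}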
diff --git a/lib/cpumask.c b/lib/cpumask.c
index d327b87c99b7..b810b753c607 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -140,7 +140,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
  */
 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
 {
-	*mask = alloc_bootmem(cpumask_size());
+	*mask = memblock_virt_alloc(cpumask_size(), 0);
 }
 
 /**
@@ -161,6 +161,6 @@ EXPORT_SYMBOL(free_cpumask_var);
  */
 void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 {
-	free_bootmem(__pa(mask), cpumask_size());
+	memblock_free_early(__pa(mask), cpumask_size());
 }
 #endif
diff --git a/lib/decompress.c b/lib/decompress.c
index 4d1cd0397aab..86069d74c062 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -16,6 +16,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/printk.h>
 
 #ifndef CONFIG_DECOMPRESS_GZIP
 # define gunzip NULL
@@ -61,6 +62,8 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
 	if (len < 2)
 		return NULL;	/* Need at least this much... */
 
+	pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
+
 	for (cf = compressed_formats; cf->name; cf++) {
 		if (!memcmp(inbuf, cf->magic, 2))
 			break;
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index d619b28c456f..0edfd742a154 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,6 +19,7 @@
 #include "zlib_inflate/inflate.h"
 
 #include "zlib_inflate/infutil.h"
+#include <linux/decompress/inflate.h>
 
 #endif /* STATIC */
 
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 3e67cfad16ad..7d1e83caf8ad 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -141,6 +141,7 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
 		goto exit_2;
 	}
 
+	ret = -1;
 	if (flush && flush(outp, dest_len) != dest_len)
 		goto exit_2;
 	if (output)
diff --git a/lib/devres.c b/lib/devres.c
index 823533138fa0..2f16c133fd36 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -81,11 +81,13 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
 void devm_iounmap(struct device *dev, void __iomem *addr)
 {
 	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
-			       (void *)addr));
+			       (__force void *)addr));
 	iounmap(addr);
 }
 EXPORT_SYMBOL(devm_iounmap);
 
+#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
+
 /**
  * devm_ioremap_resource() - check, request region, and ioremap resource
  * @dev: generic device to handle the resource for
@@ -114,7 +116,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
 
 	if (!res || resource_type(res) != IORESOURCE_MEM) {
 		dev_err(dev, "invalid resource\n");
-		return ERR_PTR(-EINVAL);
+		return IOMEM_ERR_PTR(-EINVAL);
 	}
 
 	size = resource_size(res);
@@ -122,7 +124,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
 
 	if (!devm_request_mem_region(dev, res->start, size, name)) {
 		dev_err(dev, "can't request region for resource %pR\n", res);
-		return ERR_PTR(-EBUSY);
+		return IOMEM_ERR_PTR(-EBUSY);
 	}
 
 	if (res->flags & IORESOURCE_CACHEABLE)
@@ -133,7 +135,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
 	if (!dest_ptr) {
 		dev_err(dev, "ioremap failed for resource %pR\n", res);
 		devm_release_mem_region(dev, res->start, size);
-		dest_ptr = ERR_PTR(-ENOMEM);
+		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
 	}
 
 	return dest_ptr;
@@ -168,7 +170,7 @@ void __iomem *devm_request_and_ioremap(struct device *device,
 }
 EXPORT_SYMBOL(devm_request_and_ioremap);
 
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 /*
  * Generic iomap devres
  */
@@ -224,10 +226,10 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
 {
 	ioport_unmap(addr);
 	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
-			       devm_ioport_map_match, (void *)addr));
+			       devm_ioport_map_match, (__force void *)addr));
 }
 EXPORT_SYMBOL(devm_ioport_unmap);
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
 
 #ifdef CONFIG_PCI
 /*
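[Editor's note] Because IOMEM_ERR_PTR() keeps the __iomem annotation on the error pointer, callers keep the usual idiom and sparse stays quiet. A hedged sketch of a typical probe path (example_probe and its device are hypothetical):

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);	/* -EINVAL, -EBUSY or -ENOMEM */

		/* ... use readl()/writel() against base ... */
		return 0;
	}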
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d87a17a819d0..98f2d7e91a91 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -53,11 +53,26 @@ enum map_err_types {
 
 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
 
+/**
+ * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
+ * @list: node on pre-allocated free_entries list
+ * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
+ * @type: single, page, sg, coherent
+ * @pfn: page frame of the start address
+ * @offset: offset of mapping relative to pfn
+ * @size: length of the mapping
+ * @direction: enum dma_data_direction
+ * @sg_call_ents: 'nents' from dma_map_sg
+ * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
+ * @map_err_type: track whether dma_mapping_error() was checked
+ * @stacktrace: support backtraces when a violation is detected
+ */
 struct dma_debug_entry {
 	struct list_head list;
 	struct device    *dev;
 	int              type;
-	phys_addr_t      paddr;
+	unsigned long	 pfn;
+	size_t		 offset;
 	u64              dev_addr;
 	u64              size;
 	int              direction;
@@ -372,6 +387,11 @@ static void hash_bucket_del(struct dma_debug_entry *entry)
 	list_del(&entry->list);
 }
 
+static unsigned long long phys_addr(struct dma_debug_entry *entry)
+{
+	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
+}
+
 /*
  * Dump mapping entries for debugging purposes
  */
@@ -389,9 +409,9 @@ void debug_dma_dump_mappings(struct device *dev)
 		list_for_each_entry(entry, &bucket->list, list) {
 			if (!dev || dev == entry->dev) {
 				dev_info(entry->dev,
-					 "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n",
+					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
 					 type2name[entry->type], idx,
-					 (unsigned long long)entry->paddr,
+					 phys_addr(entry), entry->pfn,
 					 entry->dev_addr, entry->size,
 					 dir2name[entry->direction],
 					 maperr2str[entry->map_err_type]);
@@ -404,6 +424,176 @@ void debug_dma_dump_mappings(struct device *dev)
 EXPORT_SYMBOL(debug_dma_dump_mappings);
 
 /*
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key. At
+ * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
+ * the entry already exists at insertion time add a tag as a reference
+ * count for the overlapping mappings. For now, the overlap tracking
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
+ *
+ * Memory usage is mostly constrained by the maximum number of available
+ * dma-debug entries in that we need a free dma_debug_entry before
+ * inserting into the tree. In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event. dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
+ *
+ * At any time debug_dma_assert_idle() can be called to trigger a
+ * warning if any cachelines in the given page are in the active set.
+ */
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
+static DEFINE_SPINLOCK(radix_lock);
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
+
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+		(entry->offset >> L1_CACHE_SHIFT);
+}
+
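[Editor's note] A worked example of the key arithmetic (hedged, assuming a common configuration):

	/* Assuming PAGE_SHIFT = 12 and L1_CACHE_SHIFT = 6:
	 * CACHELINE_PER_PAGE_SHIFT = 12 - 6 = 6, i.e. 64 cachelines per page.
	 * A mapping with pfn = 0x1234 and offset = 0x80 then keys to
	 *   (0x1234 << 6) + (0x80 >> 6) = 0x48d00 + 0x2 = 0x48d02.
	 */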
+static int active_cacheline_read_overlap(phys_addr_t cln)
+{
+	int overlap = 0, i;
+
+	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
+		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
+			overlap |= 1 << i;
+	return overlap;
+}
+
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
+{
+	int i;
+
+	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
+		return overlap;
+
+	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
+		if (overlap & 1 << i)
+			radix_tree_tag_set(&dma_active_cacheline, cln, i);
+		else
+			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
+
+	return overlap;
+}
+
+static void active_cacheline_inc_overlap(phys_addr_t cln)
+{
+	int overlap = active_cacheline_read_overlap(cln);
+
+	overlap = active_cacheline_set_overlap(cln, ++overlap);
+
+	/* If we overflowed the overlap counter then we're potentially
+	 * leaking dma-mappings. Otherwise, if maps and unmaps are
+	 * balanced then this overflow may cause false negatives in
+	 * debug_dma_assert_idle() as the cacheline may be marked idle
+	 * prematurely.
+	 */
+	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
+		  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
+}
+
+static int active_cacheline_dec_overlap(phys_addr_t cln)
+{
+	int overlap = active_cacheline_read_overlap(cln);
+
+	return active_cacheline_set_overlap(cln, --overlap);
+}
+
+static int active_cacheline_insert(struct dma_debug_entry *entry)
+{
+	phys_addr_t cln = to_cacheline_number(entry);
+	unsigned long flags;
+	int rc;
+
+	/* If the device is not writing memory then we don't have any
+	 * concerns about the cpu consuming stale data. This mitigates
+	 * legitimate usages of overlapping mappings.
+	 */
+	if (entry->direction == DMA_TO_DEVICE)
+		return 0;
+
+	spin_lock_irqsave(&radix_lock, flags);
+	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
+	if (rc == -EEXIST)
+		active_cacheline_inc_overlap(cln);
+	spin_unlock_irqrestore(&radix_lock, flags);
+
+	return rc;
+}
+
+static void active_cacheline_remove(struct dma_debug_entry *entry)
+{
+	phys_addr_t cln = to_cacheline_number(entry);
+	unsigned long flags;
+
+	/* ...mirror the insert case */
+	if (entry->direction == DMA_TO_DEVICE)
+		return;
+
+	spin_lock_irqsave(&radix_lock, flags);
+	/* since we are counting overlaps the final put of the
+	 * cacheline will occur when the overlap count is 0.
+	 * active_cacheline_dec_overlap() returns -1 in that case
+	 */
+	if (active_cacheline_dec_overlap(cln) < 0)
+		radix_tree_delete(&dma_active_cacheline, cln);
+	spin_unlock_irqrestore(&radix_lock, flags);
+}
+
+/**
+ * debug_dma_assert_idle() - assert that a page is not undergoing dma
+ * @page: page to lookup in the dma_active_cacheline tree
+ *
+ * Place a call to this routine in cases where the cpu touching the page
+ * before the dma completes (page is dma_unmapped) will lead to data
+ * corruption.
+ */
+void debug_dma_assert_idle(struct page *page)
+{
+	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+	struct dma_debug_entry *entry = NULL;
+	void **results = (void **) &ents;
+	unsigned int nents, i;
+	unsigned long flags;
+	phys_addr_t cln;
+
+	if (!page)
+		return;
+
+	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
+	spin_lock_irqsave(&radix_lock, flags);
+	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+				       CACHELINES_PER_PAGE);
+	for (i = 0; i < nents; i++) {
+		phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+
+		if (ent_cln == cln) {
+			entry = ents[i];
+			break;
+		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+			break;
+	}
+	spin_unlock_irqrestore(&radix_lock, flags);
+
+	if (!entry)
+		return;
+
+	cln = to_cacheline_number(entry);
+	err_printk(entry->dev, entry,
+		   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+		   &cln);
+}
+
+/*
  * Wrapper function for adding an entry to the hash.
  * This function takes care of locking itself.
@@ -411,10 +601,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 {
 	struct hash_bucket *bucket;
 	unsigned long flags;
+	int rc;
 
 	bucket = get_hash_bucket(entry, &flags);
 	hash_bucket_add(bucket, entry);
 	put_hash_bucket(bucket, &flags);
+
+	rc = active_cacheline_insert(entry);
+	if (rc == -ENOMEM) {
+		pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
+		global_disable = true;
+	}
+
+	/* TODO: report -EEXIST errors here as overlapping mappings are
+	 * not supported by the DMA API
+	 */
 }
 
 static struct dma_debug_entry *__dma_entry_alloc(void)
@@ -469,6 +670,8 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 {
 	unsigned long flags;
 
+	active_cacheline_remove(entry);
+
 	/*
 	 * add to beginning of the list - this way the entries are
 	 * more likely cache hot when they are reallocated.
@@ -895,15 +1098,15 @@ static void check_unmap(struct dma_debug_entry *ref)
 			   ref->dev_addr, ref->size,
 			   type2name[entry->type], type2name[ref->type]);
 	} else if ((entry->type == dma_debug_coherent) &&
-		   (ref->paddr != entry->paddr)) {
+		   (phys_addr(ref) != phys_addr(entry))) {
 		err_printk(ref->dev, entry, "DMA-API: device driver frees "
 			   "DMA memory with different CPU address "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[cpu alloc address=0x%016llx] "
 			   "[cpu free address=0x%016llx]",
 			   ref->dev_addr, ref->size,
-			   (unsigned long long)entry->paddr,
-			   (unsigned long long)ref->paddr);
+			   phys_addr(entry),
+			   phys_addr(ref));
 	}
 
 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
@@ -1052,7 +1255,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 
 	entry->dev       = dev;
 	entry->type      = dma_debug_page;
-	entry->paddr     = page_to_phys(page) + offset;
+	entry->pfn	 = page_to_pfn(page);
+	entry->offset	 = offset,
 	entry->dev_addr  = dma_addr;
 	entry->size      = size;
 	entry->direction = direction;
@@ -1148,7 +1352,8 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 
 		entry->type           = dma_debug_sg;
 		entry->dev            = dev;
-		entry->paddr          = sg_phys(s);
+		entry->pfn	      = page_to_pfn(sg_page(s));
+		entry->offset	      = s->offset,
 		entry->size           = sg_dma_len(s);
 		entry->dev_addr       = sg_dma_address(s);
 		entry->direction      = direction;
@@ -1198,7 +1403,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		struct dma_debug_entry ref = {
 			.type           = dma_debug_sg,
 			.dev            = dev,
-			.paddr          = sg_phys(s),
+			.pfn		= page_to_pfn(sg_page(s)),
+			.offset		= s->offset,
 			.dev_addr       = sg_dma_address(s),
 			.size           = sg_dma_len(s),
 			.direction      = dir,
@@ -1233,7 +1439,8 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 
 	entry->type      = dma_debug_coherent;
 	entry->dev       = dev;
-	entry->paddr     = virt_to_phys(virt);
+	entry->pfn	 = page_to_pfn(virt_to_page(virt));
+	entry->offset	 = (size_t) virt & PAGE_MASK;
 	entry->size      = size;
 	entry->dev_addr  = dma_addr;
 	entry->direction = DMA_BIDIRECTIONAL;
@@ -1248,7 +1455,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 	struct dma_debug_entry ref = {
 		.type           = dma_debug_coherent,
 		.dev            = dev,
-		.paddr          = virt_to_phys(virt),
+		.pfn		= page_to_pfn(virt_to_page(virt)),
+		.offset		= (size_t) virt & PAGE_MASK,
 		.dev_addr       = addr,
 		.size           = size,
 		.direction      = DMA_BIDIRECTIONAL,
@@ -1356,7 +1564,8 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		struct dma_debug_entry ref = {
 			.type           = dma_debug_sg,
 			.dev            = dev,
-			.paddr          = sg_phys(s),
+			.pfn		= page_to_pfn(sg_page(s)),
+			.offset		= s->offset,
 			.dev_addr       = sg_dma_address(s),
 			.size           = sg_dma_len(s),
 			.direction      = direction,
@@ -1388,7 +1597,8 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		struct dma_debug_entry ref = {
 			.type           = dma_debug_sg,
 			.dev            = dev,
-			.paddr          = sg_phys(s),
+			.pfn		= page_to_pfn(sg_page(s)),
+			.offset		= s->offset,
 			.dev_addr       = sg_dma_address(s),
 			.size           = sg_dma_len(s),
 			.direction      = direction,
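[Editor's note] Per the DMA_API_DEBUG help text above, the intended caller of the new assertion is the copy-on-write path; a hedged sketch of how mm/memory.c's cow_user_page() would use it (paraphrased, not the verbatim source):

	static void cow_user_page(struct page *dst, struct page *src,
				  unsigned long va, struct vm_area_struct *vma)
	{
		/* warn if the source page still has an active DMA mapping;
		 * copying it now could race with the device's writes
		 */
		debug_dma_assert_idle(src);

		/* ... existing copy logic ... */
	}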
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c37aeacd7651..7288e38e1757 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -8,6 +8,7 @@
  * By Greg Banks <gnb@melbourne.sgi.com>
  * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved.
  * Copyright (C) 2011 Bart Van Assche. All Rights Reserved.
+ * Copyright (C) 2013 Du, Changbin <changbin.du@gmail.com>
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
@@ -24,6 +25,7 @@
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
+#include <linux/parser.h>
 #include <linux/string_helpers.h>
 #include <linux/uaccess.h>
 #include <linux/dynamic_debug.h>
@@ -147,7 +149,8 @@ static int ddebug_change(const struct ddebug_query *query,
 	list_for_each_entry(dt, &ddebug_tables, link) {
 
 		/* match against the module name */
-		if (query->module && strcmp(query->module, dt->mod_name))
+		if (query->module &&
+		    !match_wildcard(query->module, dt->mod_name))
 			continue;
 
 		for (i = 0; i < dt->num_ddebugs; i++) {
@@ -155,14 +158,16 @@ static int ddebug_change(const struct ddebug_query *query,
 
 			/* match against the source filename */
 			if (query->filename &&
-			    strcmp(query->filename, dp->filename) &&
-			    strcmp(query->filename, kbasename(dp->filename)) &&
-			    strcmp(query->filename, trim_prefix(dp->filename)))
+			    !match_wildcard(query->filename, dp->filename) &&
+			    !match_wildcard(query->filename,
+					    kbasename(dp->filename)) &&
+			    !match_wildcard(query->filename,
+					    trim_prefix(dp->filename)))
 				continue;
 
 			/* match against the function */
 			if (query->function &&
-			    strcmp(query->function, dp->function))
+			    !match_wildcard(query->function, dp->function))
 				continue;
 
 			/* match against the format */
@@ -263,14 +268,12 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
  */
 static inline int parse_lineno(const char *str, unsigned int *val)
 {
-	char *end = NULL;
 	BUG_ON(str == NULL);
 	if (*str == '\0') {
 		*val = 0;
 		return 0;
 	}
-	*val = simple_strtoul(str, &end, 10);
-	if (end == NULL || end == str || *end != '\0') {
+	if (kstrtouint(str, 10, val) < 0) {
 		pr_err("bad line-number: %s\n", str);
 		return -EINVAL;
 	}
@@ -343,14 +346,14 @@ static int ddebug_parse_query(char *words[], int nwords,
 	}
 	if (last)
 		*last++ = '\0';
-	if (parse_lineno(first, &query->first_lineno) < 0) {
-		pr_err("line-number is <0\n");
+	if (parse_lineno(first, &query->first_lineno) < 0)
 		return -EINVAL;
-	}
 	if (last) {
 		/* range <first>-<last> */
-		if (parse_lineno(last, &query->last_lineno)
-		    < query->first_lineno) {
+		if (parse_lineno(last, &query->last_lineno) < 0)
+			return -EINVAL;
+
+		if (query->last_lineno < query->first_lineno) {
 			pr_err("last-line:%d < 1st-line:%d\n",
 			       query->last_lineno,
 			       query->first_lineno);
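[Editor's note] With match_wildcard() in place, queries written to <debugfs>/dynamic_debug/control may use '*' (any sequence) and '?' (any single character) in the module, file and function fields. A hedged sketch of the matcher itself (signature assumed from linux/parser.h):

	#include <linux/parser.h>

	bool hit  = match_wildcard("usb*", "usbcore");	/* true */
	bool miss = match_wildcard("usb?", "usbcore");	/* false: '?' matches one char */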
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 6948a6692fc4..2eed22fa507c 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -90,8 +90,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
 {
 	struct flex_array *ret;
 	int elems_per_part = 0;
-	int reciprocal_elems = 0;
 	int max_size = 0;
+	struct reciprocal_value reciprocal_elems = { 0 };
 
 	if (element_size) {
 		elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
@@ -119,6 +119,11 @@ EXPORT_SYMBOL(flex_array_alloc);
 static int fa_element_to_part_nr(struct flex_array *fa,
 				 unsigned int element_nr)
 {
+	/*
+	 * if element_size == 0 we don't get here, so we never touch
+	 * the zeroed fa->reciprocal_elems, which would yield invalid
+	 * results
+	 */
 	return reciprocal_divide(element_nr, fa->reciprocal_elems);
 }
 
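[Editor's note] The type change follows the reciprocal-divide rework landing in the same cycle, where the precomputed multiplier and shifts live in a struct rather than a single int. A hedged usage sketch (API names assumed from include/linux/reciprocal_div.h):

	#include <linux/reciprocal_div.h>

	struct reciprocal_value R = reciprocal_value(1000);	/* precompute once */
	u32 q = reciprocal_divide(12345, R);			/* == 12345 / 1000 == 12 */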
diff --git a/lib/genalloc.c b/lib/genalloc.c
index dda31168844f..bdb9a456bcbb 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -316,7 +316,7 @@ EXPORT_SYMBOL(gen_pool_alloc);
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
- * @dma: dma-view physical address
+ * @dma: dma-view physical address return value. Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
@@ -334,7 +334,8 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
 	if (!vaddr)
 		return NULL;
 
-	*dma = gen_pool_virt_to_phys(pool, vaddr);
+	if (dma)
+		*dma = gen_pool_virt_to_phys(pool, vaddr);
 
 	return (void *)vaddr;
 }
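[Editor's note] Callers that only need the CPU address can now skip the handle entirely (hedged sketch; pool is assumed to be an already-initialized struct gen_pool):

	dma_addr_t dma;
	void *with_handle = gen_pool_dma_alloc(pool, 256, &dma);
	void *cpu_only    = gen_pool_dma_alloc(pool, 256, NULL);	/* NULL now legal */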
diff --git a/lib/hash.c b/lib/hash.c
new file mode 100644
index 000000000000..fea973f4bd57
--- /dev/null
+++ b/lib/hash.c
@@ -0,0 +1,39 @@
+/* General purpose hashing library
+ *
+ * That's a start of a kernel hashing library, which can be extended
+ * with further algorithms in future. arch_fast_hash{2,}() will
+ * eventually resolve to an architecture optimized implementation.
+ *
+ * Copyright 2013 Francesco Fusco <ffusco@redhat.com>
+ * Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
+ * Copyright 2013 Thomas Graf <tgraf@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+#include <linux/jhash.h>
+#include <linux/hash.h>
+#include <linux/cache.h>
+
+static struct fast_hash_ops arch_hash_ops __read_mostly = {
+	.hash  = jhash,
+	.hash2 = jhash2,
+};
+
+u32 arch_fast_hash(const void *data, u32 len, u32 seed)
+{
+	return arch_hash_ops.hash(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash);
+
+u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
+{
+	return arch_hash_ops.hash2(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash2);
+
+static int __init hashlib_init(void)
+{
+	setup_arch_fast_hash(&arch_hash_ops);
+	return 0;
+}
+early_initcall(hashlib_init);
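[Editor's note] The two entry points differ only in granularity: arch_fast_hash() takes a byte count, while arch_fast_hash2() takes a count of 32-bit words (matching jhash() versus jhash2(), their default backends above). A hedged usage sketch:

	#include <linux/hash.h>

	u32 key[4] = { 1, 2, 3, 4 };

	u32 h1 = arch_fast_hash(key, sizeof(key), 0);		/* length in bytes */
	u32 h2 = arch_fast_hash2(key, ARRAY_SIZE(key), 0);	/* length in words */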
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -196,7 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -207,7 +207,6 @@ int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
@@ -374,20 +373,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
-{
-	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
-	int rv;
-
-	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
-	if (rv < 0)
-		return rv == -ENOMEM ? -EAGAIN : rv;
-
-	idr_fill_slot(idp, ptr, rv, pa);
-	*id = rv;
-	return 0;
-}
-EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -548,7 +533,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	n = id & IDR_MASK;
 	if (likely(p != NULL && test_bit(n, p->bitmap))) {
 		__clear_bit(n, p->bitmap);
-		rcu_assign_pointer(p->ary[n], NULL);
+		RCU_INIT_POINTER(p->ary[n], NULL);
 		to_free = NULL;
 		while(*paa && ! --((**paa)->count)){
 			if (to_free)
@@ -607,7 +592,7 @@ void idr_remove(struct idr *idp, int id)
 }
 EXPORT_SYMBOL(idr_remove);
 
-void __idr_remove_all(struct idr *idp)
+static void __idr_remove_all(struct idr *idp)
 {
 	int n, id, max;
 	int bt_mask;
@@ -617,7 +602,7 @@ void __idr_remove_all(struct idr *idp)
 
 	n = idp->layers * IDR_BITS;
 	p = idp->top;
-	rcu_assign_pointer(idp->top, NULL);
+	RCU_INIT_POINTER(idp->top, NULL);
 	max = idr_max(idp->layers);
 
 	id = 0;
@@ -640,7 +625,6 @@ void __idr_remove_all(struct idr *idp)
 	}
 	idp->layers = 0;
 }
-EXPORT_SYMBOL(__idr_remove_all);
 
 /**
  * idr_destroy - release all cached layers within an idr tree
@@ -869,6 +853,16 @@ void idr_init(struct idr *idp)
 }
 EXPORT_SYMBOL(idr_init);
 
+static int idr_has_entry(int id, void *p, void *data)
+{
+	return 1;
+}
+
+bool idr_is_empty(struct idr *idp)
+{
+	return !idr_for_each(idp, idr_has_entry, NULL);
+}
+EXPORT_SYMBOL(idr_is_empty);
 
 /**
  * DOC: IDA description
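[Editor's note] idr_is_empty() gives subsystems a direct way, under their own locking, to verify that no IDs remain before tearing a pool down. A hedged sketch (my_idr is hypothetical):

	if (!idr_is_empty(&my_idr))
		pr_warn("IDs still allocated, leak suspected\n");
	idr_destroy(&my_idr);	/* frees cached layers only */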
diff --git a/lib/iomap.c b/lib/iomap.c
index 2c08f36862eb..fc3dcb4b238e 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(iowrite8_rep);
 EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 /* Create a virtual mapping cookie for an IO port range */
 void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
@@ -239,7 +239,7 @@ void ioport_unmap(void __iomem *addr)
 }
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
 
 #ifdef CONFIG_PCI
 /* Hide the details if this is a MMIO or PIO address space and just do what
diff --git a/lib/kobject.c b/lib/kobject.c
index 5b4b8886435e..58751bb80a7c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -13,11 +13,11 @@
  */
 
 #include <linux/kobject.h>
-#include <linux/kobj_completion.h>
 #include <linux/string.h>
 #include <linux/export.h>
 #include <linux/stat.h>
 #include <linux/slab.h>
+#include <linux/random.h>
 
 /**
  * kobject_namespace - return @kobj's namespace tag
@@ -65,13 +65,17 @@ static int populate_dir(struct kobject *kobj)
 
 static int create_dir(struct kobject *kobj)
 {
+	const struct kobj_ns_type_operations *ops;
 	int error;
 
 	error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj));
-	if (!error) {
-		error = populate_dir(kobj);
-		if (error)
-			sysfs_remove_dir(kobj);
+	if (error)
+		return error;
+
+	error = populate_dir(kobj);
+	if (error) {
+		sysfs_remove_dir(kobj);
+		return error;
 	}
 
 	/*
@@ -80,7 +84,20 @@ static int create_dir(struct kobject *kobj)
 	 */
 	sysfs_get(kobj->sd);
 
-	return error;
+	/*
+	 * If @kobj has ns_ops, its children need to be filtered based on
+	 * their namespace tags. Enable namespace support on @kobj->sd.
+	 */
+	ops = kobj_child_ns_ops(kobj);
+	if (ops) {
+		BUG_ON(ops->type <= KOBJ_NS_TYPE_NONE);
+		BUG_ON(ops->type >= KOBJ_NS_TYPES);
+		BUG_ON(!kobj_ns_type_registered(ops->type));
+
+		sysfs_enable_ns(kobj->sd);
+	}
+
+	return 0;
 }
 
 static int get_kobj_path_length(struct kobject *kobj)
@@ -247,8 +264,10 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
 		return 0;
 
 	kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
-	if (!kobj->name)
+	if (!kobj->name) {
+		kobj->name = old_name;
 		return -ENOMEM;
+	}
 
 	/* ewww... some of these buggers have '/' in the name ... */
 	while ((s = strchr(kobj->name, '/')))
@@ -346,7 +365,7 @@ static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
 *
 * If @parent is set, then the parent of the @kobj will be set to it.
 * If @parent is NULL, then the parent of the @kobj will be set to the
- * kobject associted with the kset assigned to this kobject.  If no kset
+ * kobject associated with the kset assigned to this kobject.  If no kset
 * is assigned to the kobject, then the kobject will be located in the
 * root of the sysfs tree.
 *
@@ -536,7 +555,7 @@ out:
 */
 void kobject_del(struct kobject *kobj)
 {
-	struct sysfs_dirent *sd;
+	struct kernfs_node *sd;
 
 	if (!kobj)
 		return;
@@ -625,10 +644,12 @@ static void kobject_release(struct kref *kref)
 {
 	struct kobject *kobj = container_of(kref, struct kobject, kref);
 #ifdef CONFIG_DEBUG_KOBJECT_RELEASE
-	pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
-		 kobject_name(kobj), kobj, __func__, kobj->parent);
+	unsigned long delay = HZ + HZ * (get_random_int() & 0x3);
+	pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n",
+		 kobject_name(kobj), kobj, __func__, kobj->parent, delay);
 	INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
-	schedule_delayed_work(&kobj->release, HZ);
+
+	schedule_delayed_work(&kobj->release, delay);
 #else
 	kobject_cleanup(kobj);
 #endif
@@ -758,55 +779,7 @@ const struct sysfs_ops kobj_sysfs_ops = {
 	.show	= kobj_attr_show,
 	.store	= kobj_attr_store,
 };
-
+EXPORT_SYMBOL_GPL(kobj_sysfs_ops);
-/**
- * kobj_completion_init - initialize a kobj_completion object.
- * @kc: kobj_completion
- * @ktype: type of kobject to initialize
- *
- * kobj_completion structures can be embedded within structures with different
- * lifetime rules.  During the release of the enclosing object, we can
- * wait on the release of the kobject so that we don't free it while it's
- * still busy.
- */
-void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype)
-{
-	init_completion(&kc->kc_unregister);
-	kobject_init(&kc->kc_kobj, ktype);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_init);
-
-/**
- * kobj_completion_release - release a kobj_completion object
- * @kobj: kobject embedded in kobj_completion
- *
- * Used with kobject_release to notify waiters that the kobject has been
- * released.
- */
-void kobj_completion_release(struct kobject *kobj)
-{
-	struct kobj_completion *kc = kobj_to_kobj_completion(kobj);
-	complete(&kc->kc_unregister);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_release);
-
-/**
- * kobj_completion_del_and_wait - release the kobject and wait for it
795 | * @kc: kobj_completion object to release | ||
796 | * | ||
797 | * Delete the kobject from sysfs and drop the reference count. Then wait | ||
798 | * until any other outstanding references are also dropped. This routine | ||
799 | * is only necessary once other references may have been taken on the | ||
800 | * kobject. Typically this happens when the kobject has been published | ||
801 | * to sysfs via kobject_add. | ||
802 | */ | ||
803 | void kobj_completion_del_and_wait(struct kobj_completion *kc) | ||
804 | { | ||
805 | kobject_del(&kc->kc_kobj); | ||
806 | kobject_put(&kc->kc_kobj); | ||
807 | wait_for_completion(&kc->kc_unregister); | ||
808 | } | ||
809 | EXPORT_SYMBOL_GPL(kobj_completion_del_and_wait); | ||
810 | 783 | ||
811 | /** | 784 | /** |
812 | * kset_register - initialize and add a kset. | 785 | * kset_register - initialize and add a kset. |
@@ -835,6 +808,7 @@ void kset_unregister(struct kset *k) | |||
835 | { | 808 | { |
836 | if (!k) | 809 | if (!k) |
837 | return; | 810 | return; |
811 | kobject_del(&k->kobj); | ||
838 | kobject_put(&k->kobj); | 812 | kobject_put(&k->kobj); |
839 | } | 813 | } |
840 | 814 | ||
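
The last hunk makes kset_unregister() remove the kset's sysfs directory immediately (kobject_del()) before dropping the final reference (kobject_put()), instead of leaving the directory visible until the last reference happens to go away. A hedged sketch of the resulting lifetime, where example_kset and the init/exit names are hypothetical (kset_create_and_add() and kernel_kobj are existing kernel symbols):

    static struct kset *example_kset;

    static int __init example_init(void)
    {
        /* creates /sys/kernel/example */
        example_kset = kset_create_and_add("example", NULL, kernel_kobj);
        return example_kset ? 0 : -ENOMEM;
    }

    static void __exit example_exit(void)
    {
        /* now deletes the sysfs directory right away, then drops the ref */
        kset_unregister(example_kset);
    }
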
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 52e5abbc41db..4e3bd71bd949 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
@@ -88,11 +88,17 @@ out: | |||
88 | #ifdef CONFIG_NET | 88 | #ifdef CONFIG_NET |
89 | static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) | 89 | static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) |
90 | { | 90 | { |
91 | struct kobject *kobj = data; | 91 | struct kobject *kobj = data, *ksobj; |
92 | const struct kobj_ns_type_operations *ops; | 92 | const struct kobj_ns_type_operations *ops; |
93 | 93 | ||
94 | ops = kobj_ns_ops(kobj); | 94 | ops = kobj_ns_ops(kobj); |
95 | if (ops) { | 95 | if (!ops && kobj->kset) { |
96 | ksobj = &kobj->kset->kobj; | ||
97 | if (ksobj->parent != NULL) | ||
98 | ops = kobj_ns_ops(ksobj->parent); | ||
99 | } | ||
100 | |||
101 | if (ops && ops->netlink_ns && kobj->ktype->namespace) { | ||
96 | const void *sock_ns, *ns; | 102 | const void *sock_ns, *ns; |
97 | ns = kobj->ktype->namespace(kobj); | 103 | ns = kobj->ktype->namespace(kobj); |
98 | sock_ns = ops->netlink_ns(dsk); | 104 | sock_ns = ops->netlink_ns(dsk); |
@@ -118,6 +124,30 @@ static int kobj_usermode_filter(struct kobject *kobj) | |||
118 | return 0; | 124 | return 0; |
119 | } | 125 | } |
120 | 126 | ||
127 | static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem) | ||
128 | { | ||
129 | int len; | ||
130 | |||
131 | len = strlcpy(&env->buf[env->buflen], subsystem, | ||
132 | sizeof(env->buf) - env->buflen); | ||
133 | if (len >= (sizeof(env->buf) - env->buflen)) { | ||
134 | WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n"); | ||
135 | return -ENOMEM; | ||
136 | } | ||
137 | |||
138 | env->argv[0] = uevent_helper; | ||
139 | env->argv[1] = &env->buf[env->buflen]; | ||
140 | env->argv[2] = NULL; | ||
141 | |||
142 | env->buflen += len + 1; | ||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static void cleanup_uevent_env(struct subprocess_info *info) | ||
147 | { | ||
148 | kfree(info->data); | ||
149 | } | ||
150 | |||
121 | /** | 151 | /** |
122 | * kobject_uevent_env - send an uevent with environmental data | 152 | * kobject_uevent_env - send an uevent with environmental data |
123 | * | 153 | * |
@@ -295,11 +325,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
295 | 325 | ||
296 | /* call uevent_helper, usually only enabled during early boot */ | 326 | /* call uevent_helper, usually only enabled during early boot */ |
297 | if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { | 327 | if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { |
298 | char *argv [3]; | 328 | struct subprocess_info *info; |
299 | 329 | ||
300 | argv [0] = uevent_helper; | ||
301 | argv [1] = (char *)subsystem; | ||
302 | argv [2] = NULL; | ||
303 | retval = add_uevent_var(env, "HOME=/"); | 330 | retval = add_uevent_var(env, "HOME=/"); |
304 | if (retval) | 331 | if (retval) |
305 | goto exit; | 332 | goto exit; |
@@ -307,9 +334,18 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
307 | "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); | 334 | "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); |
308 | if (retval) | 335 | if (retval) |
309 | goto exit; | 336 | goto exit; |
337 | retval = init_uevent_argv(env, subsystem); | ||
338 | if (retval) | ||
339 | goto exit; | ||
310 | 340 | ||
311 | retval = call_usermodehelper(argv[0], argv, | 341 | retval = -ENOMEM; |
312 | env->envp, UMH_WAIT_EXEC); | 342 | info = call_usermodehelper_setup(env->argv[0], env->argv, |
343 | env->envp, GFP_KERNEL, | ||
344 | NULL, cleanup_uevent_env, env); | ||
345 | if (info) { | ||
346 | retval = call_usermodehelper_exec(info, UMH_NO_WAIT); | ||
347 | env = NULL; /* freed by cleanup_uevent_env */ | ||
348 | } | ||
313 | } | 349 | } |
314 | 350 | ||
315 | exit: | 351 | exit: |
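
With the switch from a blocking call_usermodehelper() to the setup/exec pair, the uevent path no longer waits for the helper to exec, and ownership of the env buffer passes to the helper machinery, which frees it through cleanup_uevent_env() on every exec path. The handoff pattern in isolation, as a sketch (spawn_helper(), my_cleanup(), and the path/argv/envp/data parameters are placeholders; the call_usermodehelper_* functions are the ones used in the hunk):

    static void my_cleanup(struct subprocess_info *info)
    {
        kfree(info->data);          /* 'data' handed over in setup below */
    }

    static int spawn_helper(char *path, char **argv, char **envp, void *data)
    {
        struct subprocess_info *info;

        info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
                                         NULL /* init */, my_cleanup, data);
        if (!info)
            return -ENOMEM;         /* caller still owns 'data' here */

        /* From this point my_cleanup() owns 'data', success or failure,
         * which is exactly why the hunk above NULLs 'env' after a
         * successful setup. */
        return call_usermodehelper_exec(info, UMH_NO_WAIT);
    }
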
diff --git a/lib/kstrtox.c b/lib/kstrtox.c index f78ae0c0c4e2..ec8da78df9be 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c | |||
@@ -92,7 +92,6 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | |||
92 | rv = _parse_integer(s, base, &_res); | 92 | rv = _parse_integer(s, base, &_res); |
93 | if (rv & KSTRTOX_OVERFLOW) | 93 | if (rv & KSTRTOX_OVERFLOW) |
94 | return -ERANGE; | 94 | return -ERANGE; |
95 | rv &= ~KSTRTOX_OVERFLOW; | ||
96 | if (rv == 0) | 95 | if (rv == 0) |
97 | return -EINVAL; | 96 | return -EINVAL; |
98 | s += rv; | 97 | s += rv; |
diff --git a/lib/nlattr.c b/lib/nlattr.c index 18eca7809b08..fc6754720ced 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
@@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data, | |||
303 | */ | 303 | */ |
304 | int nla_strcmp(const struct nlattr *nla, const char *str) | 304 | int nla_strcmp(const struct nlattr *nla, const char *str) |
305 | { | 305 | { |
306 | int len = strlen(str) + 1; | 306 | int len = strlen(str); |
307 | int d = nla_len(nla) - len; | 307 | char *buf = nla_data(nla); |
308 | int attrlen = nla_len(nla); | ||
309 | int d; | ||
308 | 310 | ||
311 | if (attrlen > 0 && buf[attrlen - 1] == '\0') | ||
312 | attrlen--; | ||
313 | |||
314 | d = attrlen - len; | ||
309 | if (d == 0) | 315 | if (d == 0) |
310 | d = memcmp(nla_data(nla), str, len); | 316 | d = memcmp(nla_data(nla), str, len); |
311 | 317 | ||
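
The rewritten nla_strcmp() accepts both NUL-terminated and unterminated attribute payloads by trimming at most one trailing '\0' before comparing lengths. The same comparison as a standalone, runnable sketch (buf_strcmp() is a hypothetical name):

    #include <assert.h>
    #include <string.h>

    /* Compare a length-delimited buffer with a C string, ignoring one
     * trailing NUL in the buffer if present; returns 0 on a match. */
    static int buf_strcmp(const char *buf, int buflen, const char *str)
    {
        int len = strlen(str);
        int d;

        if (buflen > 0 && buf[buflen - 1] == '\0')
            buflen--;

        d = buflen - len;
        if (d == 0)
            d = memcmp(buf, str, len);
        return d;
    }

    int main(void)
    {
        assert(buf_strcmp("eth0", 4, "eth0") == 0);   /* unterminated */
        assert(buf_strcmp("eth0\0", 5, "eth0") == 0); /* NUL-terminated */
        assert(buf_strcmp("eth1", 4, "eth0") != 0);
        return 0;
    }
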
diff --git a/lib/parser.c b/lib/parser.c index 807b2aaa33fa..b6d11631231b 100644 --- a/lib/parser.c +++ b/lib/parser.c | |||
@@ -113,6 +113,7 @@ int match_token(char *s, const match_table_t table, substring_t args[]) | |||
113 | 113 | ||
114 | return p->token; | 114 | return p->token; |
115 | } | 115 | } |
116 | EXPORT_SYMBOL(match_token); | ||
116 | 117 | ||
117 | /** | 118 | /** |
118 | * match_number: scan a number in the given base from a substring_t | 119 | * match_number: scan a number in the given base from a substring_t |
@@ -163,6 +164,7 @@ int match_int(substring_t *s, int *result) | |||
163 | { | 164 | { |
164 | return match_number(s, result, 0); | 165 | return match_number(s, result, 0); |
165 | } | 166 | } |
167 | EXPORT_SYMBOL(match_int); | ||
166 | 168 | ||
167 | /** | 169 | /** |
168 | * match_octal: - scan an octal representation of an integer from a substring_t | 170 | * match_octal: - scan an octal representation of an integer from a substring_t |
@@ -177,6 +179,7 @@ int match_octal(substring_t *s, int *result) | |||
177 | { | 179 | { |
178 | return match_number(s, result, 8); | 180 | return match_number(s, result, 8); |
179 | } | 181 | } |
182 | EXPORT_SYMBOL(match_octal); | ||
180 | 183 | ||
181 | /** | 184 | /** |
182 | * match_hex: - scan a hex representation of an integer from a substring_t | 185 | * match_hex: - scan a hex representation of an integer from a substring_t |
@@ -191,6 +194,58 @@ int match_hex(substring_t *s, int *result) | |||
191 | { | 194 | { |
192 | return match_number(s, result, 16); | 195 | return match_number(s, result, 16); |
193 | } | 196 | } |
197 | EXPORT_SYMBOL(match_hex); | ||
198 | |||
199 | /** | ||
200 | * match_wildcard: - parse if a string matches given wildcard pattern | ||
201 | * @pattern: wildcard pattern | ||
202 | * @str: the string to be parsed | ||
203 | * | ||
204 | * Description: Parse the string @str to check whether it matches the | ||
205 | * wildcard pattern @pattern. The pattern may contain two types of wildcards: | ||
206 | * '*' - matches zero or more characters | ||
207 | * '?' - matches one character | ||
208 | * Returns true if @str matches @pattern, false otherwise. | ||
209 | */ | ||
210 | bool match_wildcard(const char *pattern, const char *str) | ||
211 | { | ||
212 | const char *s = str; | ||
213 | const char *p = pattern; | ||
214 | bool star = false; | ||
215 | |||
216 | while (*s) { | ||
217 | switch (*p) { | ||
218 | case '?': | ||
219 | s++; | ||
220 | p++; | ||
221 | break; | ||
222 | case '*': | ||
223 | star = true; | ||
224 | str = s; | ||
225 | if (!*++p) | ||
226 | return true; | ||
227 | pattern = p; | ||
228 | break; | ||
229 | default: | ||
230 | if (*s == *p) { | ||
231 | s++; | ||
232 | p++; | ||
233 | } else { | ||
234 | if (!star) | ||
235 | return false; | ||
236 | str++; | ||
237 | s = str; | ||
238 | p = pattern; | ||
239 | } | ||
240 | break; | ||
241 | } | ||
242 | } | ||
243 | |||
244 | if (*p == '*') | ||
245 | ++p; | ||
246 | return !*p; | ||
247 | } | ||
248 | EXPORT_SYMBOL(match_wildcard); | ||
194 | 249 | ||
195 | /** | 250 | /** |
196 | * match_strlcpy: - Copy the characters from a substring_t to a sized buffer | 251 | * match_strlcpy: - Copy the characters from a substring_t to a sized buffer |
@@ -213,6 +268,7 @@ size_t match_strlcpy(char *dest, const substring_t *src, size_t size) | |||
213 | } | 268 | } |
214 | return ret; | 269 | return ret; |
215 | } | 270 | } |
271 | EXPORT_SYMBOL(match_strlcpy); | ||
216 | 272 | ||
217 | /** | 273 | /** |
218 | * match_strdup: - allocate a new string with the contents of a substring_t | 274 | * match_strdup: - allocate a new string with the contents of a substring_t |
@@ -230,10 +286,4 @@ char *match_strdup(const substring_t *s) | |||
230 | match_strlcpy(p, s, sz); | 286 | match_strlcpy(p, s, sz); |
231 | return p; | 287 | return p; |
232 | } | 288 | } |
233 | |||
234 | EXPORT_SYMBOL(match_token); | ||
235 | EXPORT_SYMBOL(match_int); | ||
236 | EXPORT_SYMBOL(match_octal); | ||
237 | EXPORT_SYMBOL(match_hex); | ||
238 | EXPORT_SYMBOL(match_strlcpy); | ||
239 | EXPORT_SYMBOL(match_strdup); | 289 | EXPORT_SYMBOL(match_strdup); |
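
match_wildcard() is an iterative glob matcher: '?' consumes exactly one character, '*' records a backtrack point (current pattern and string positions), and a later mismatch rewinds the string by one character and resumes from the recorded pattern. A runnable userspace harness (the function body is the one added above; main() and its test cases are illustrative only):

    #include <assert.h>
    #include <stdbool.h>

    static bool match_wildcard(const char *pattern, const char *str)
    {
        const char *s = str;
        const char *p = pattern;
        bool star = false;

        while (*s) {
            switch (*p) {
            case '?':
                s++;
                p++;
                break;
            case '*':
                star = true;
                str = s;                /* backtrack point in the string */
                if (!*++p)
                    return true;        /* trailing '*' matches the rest */
                pattern = p;            /* backtrack point in the pattern */
                break;
            default:
                if (*s == *p) {
                    s++;
                    p++;
                } else {
                    if (!star)
                        return false;
                    str++;              /* let the last '*' eat one more */
                    s = str;
                    p = pattern;
                }
                break;
            }
        }

        if (*p == '*')
            ++p;
        return !*p;
    }

    int main(void)
    {
        assert(match_wildcard("eth*", "eth0"));
        assert(match_wildcard("eth?", "eth0"));
        assert(!match_wildcard("eth?", "eth10"));
        assert(match_wildcard("*", ""));
        assert(!match_wildcard("a*b", "acd"));
        return 0;
    }
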
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index 1a53d497a8c5..963b7034a51b 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c | |||
@@ -120,6 +120,9 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu) | |||
120 | 120 | ||
121 | atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count); | 121 | atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count); |
122 | 122 | ||
123 | WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)", | ||
124 | atomic_read(&ref->count)); | ||
125 | |||
123 | /* @ref is viewed as dead on all CPUs, send out kill confirmation */ | 126 | /* @ref is viewed as dead on all CPUs, send out kill confirmation */ |
124 | if (ref->confirm_kill) | 127 | if (ref->confirm_kill) |
125 | ref->confirm_kill(ref); | 128 | ref->confirm_kill(ref); |
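
The new WARN_ONCE fires at the point where the per-cpu deltas are folded into the shared atomic: the counter starts out biased so it stays positive while gets and puts are scattered across CPUs, and after adding (sum of per-cpu counts) - PCPU_COUNT_BIAS the result must remain positive unless puts outnumbered gets. A toy arithmetic demonstration (the 1U << 31 bias value and all variable names here are assumptions, not taken from this diff):

    #include <assert.h>

    #define BIAS (1U << 31)

    int main(void)
    {
        unsigned int count = BIAS + 1;  /* one live ref, biased */
        int percpu_sum = 5 - 5;         /* five gets, five puts: net zero */

        count += percpu_sum - BIAS;     /* the fold done at kill time */
        assert((int)count > 0);         /* the WARN_ONCE condition, inverted */
        return 0;
    }
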
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 8280a5dd1727..7dd33577b905 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
@@ -169,7 +169,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb, | |||
169 | struct percpu_counter *fbc; | 169 | struct percpu_counter *fbc; |
170 | 170 | ||
171 | compute_batch_value(); | 171 | compute_batch_value(); |
172 | if (action != CPU_DEAD) | 172 | if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) |
173 | return NOTIFY_OK; | 173 | return NOTIFY_OK; |
174 | 174 | ||
175 | cpu = (unsigned long)hcpu; | 175 | cpu = (unsigned long)hcpu; |
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 9d054bf91d0f..93d145e5539c 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c | |||
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr, | |||
54 | /* | 54 | /* |
55 | * Try to steal tags from a remote cpu's percpu freelist. | 55 | * Try to steal tags from a remote cpu's percpu freelist. |
56 | * | 56 | * |
57 | * We first check how many percpu freelists have tags - we don't steal tags | 57 | * We first check how many percpu freelists have tags |
58 | * unless enough percpu freelists have tags on them that it's possible more than | ||
59 | * half the total tags could be stuck on remote percpu freelists. | ||
60 | * | 58 | * |
61 | * Then we iterate through the cpus until we find some tags - we don't attempt | 59 | * Then we iterate through the cpus until we find some tags - we don't attempt |
62 | * to find the "best" cpu to steal from, to keep cacheline bouncing to a | 60 | * to find the "best" cpu to steal from, to keep cacheline bouncing to a |
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool, | |||
69 | struct percpu_ida_cpu *remote; | 67 | struct percpu_ida_cpu *remote; |
70 | 68 | ||
71 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); | 69 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); |
72 | cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2; | 70 | cpus_have_tags; cpus_have_tags--) { |
73 | cpus_have_tags--) { | ||
74 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); | 71 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); |
75 | 72 | ||
76 | if (cpu >= nr_cpu_ids) { | 73 | if (cpu >= nr_cpu_ids) { |
@@ -132,22 +129,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags) | |||
132 | /** | 129 | /** |
133 | * percpu_ida_alloc - allocate a tag | 130 | * percpu_ida_alloc - allocate a tag |
134 | * @pool: pool to allocate from | 131 | * @pool: pool to allocate from |
135 | * @gfp: gfp flags | 132 | * @state: task state for prepare_to_wait |
136 | * | 133 | * |
137 | * Returns a tag - an integer in the range [0..nr_tags) (passed to | 134 | * Returns a tag - an integer in the range [0..nr_tags) (passed to |
138 | * tag_pool_init()), or otherwise -ENOSPC on allocation failure. | 135 | * tag_pool_init()), or otherwise -ENOSPC on allocation failure. |
139 | * | 136 | * |
140 | * Safe to be called from interrupt context (assuming it isn't passed | 137 | * Safe to be called from interrupt context (assuming it isn't passed |
141 | * __GFP_WAIT, of course). | 138 | * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course). |
142 | * | 139 | * |
143 | * @gfp indicates whether or not to wait until a free id is available (it's not | 140 | * @gfp indicates whether or not to wait until a free id is available (it's not |
144 | * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep | 141 | * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep |
145 | * however long it takes until another thread frees an id (same semantics as a | 142 | * however long it takes until another thread frees an id (same semantics as a |
146 | * mempool). | 143 | * mempool). |
147 | * | 144 | * |
148 | * Will not fail if passed __GFP_WAIT. | 145 | * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE. |
149 | */ | 146 | */ |
150 | int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) | 147 | int percpu_ida_alloc(struct percpu_ida *pool, int state) |
151 | { | 148 | { |
152 | DEFINE_WAIT(wait); | 149 | DEFINE_WAIT(wait); |
153 | struct percpu_ida_cpu *tags; | 150 | struct percpu_ida_cpu *tags; |
@@ -174,7 +171,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) | |||
174 | * | 171 | * |
175 | * global lock held and irqs disabled, don't need percpu lock | 172 | * global lock held and irqs disabled, don't need percpu lock |
176 | */ | 173 | */ |
177 | prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); | 174 | if (state != TASK_RUNNING) |
175 | prepare_to_wait(&pool->wait, &wait, state); | ||
178 | 176 | ||
179 | if (!tags->nr_free) | 177 | if (!tags->nr_free) |
180 | alloc_global_tags(pool, tags); | 178 | alloc_global_tags(pool, tags); |
@@ -191,16 +189,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) | |||
191 | spin_unlock(&pool->lock); | 189 | spin_unlock(&pool->lock); |
192 | local_irq_restore(flags); | 190 | local_irq_restore(flags); |
193 | 191 | ||
194 | if (tag >= 0 || !(gfp & __GFP_WAIT)) | 192 | if (tag >= 0 || state == TASK_RUNNING) |
195 | break; | 193 | break; |
196 | 194 | ||
195 | if (signal_pending_state(state, current)) { | ||
196 | tag = -ERESTARTSYS; | ||
197 | break; | ||
198 | } | ||
199 | |||
197 | schedule(); | 200 | schedule(); |
198 | 201 | ||
199 | local_irq_save(flags); | 202 | local_irq_save(flags); |
200 | tags = this_cpu_ptr(pool->tag_cpu); | 203 | tags = this_cpu_ptr(pool->tag_cpu); |
201 | } | 204 | } |
205 | if (state != TASK_RUNNING) | ||
206 | finish_wait(&pool->wait, &wait); | ||
202 | 207 | ||
203 | finish_wait(&pool->wait, &wait); | ||
204 | return tag; | 208 | return tag; |
205 | } | 209 | } |
206 | EXPORT_SYMBOL_GPL(percpu_ida_alloc); | 210 | EXPORT_SYMBOL_GPL(percpu_ida_alloc); |
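
percpu_ida_alloc() now takes a task state rather than gfp flags: TASK_RUNNING means "never sleep" (the old !__GFP_WAIT case), TASK_UNINTERRUPTIBLE sleeps until a tag is freed, and TASK_INTERRUPTIBLE additionally returns -ERESTARTSYS when a signal arrives. A hedged caller-side sketch (get_tag() is hypothetical; pool is assumed to be an initialized struct percpu_ida):

    static int get_tag(struct percpu_ida *pool, bool can_sleep)
    {
        if (!can_sleep)
            /* Never sleeps; returns -ENOSPC if no tag is free right now. */
            return percpu_ida_alloc(pool, TASK_RUNNING);

        /* Sleeps until a tag frees up; -ERESTARTSYS on a signal. */
        return percpu_ida_alloc(pool, TASK_INTERRUPTIBLE);
    }
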
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 7811ed3b4e70..9599aa72d7a0 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -35,33 +35,6 @@ | |||
35 | #include <linux/hardirq.h> /* in_interrupt() */ | 35 | #include <linux/hardirq.h> /* in_interrupt() */ |
36 | 36 | ||
37 | 37 | ||
38 | #ifdef __KERNEL__ | ||
39 | #define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) | ||
40 | #else | ||
41 | #define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ | ||
42 | #endif | ||
43 | |||
44 | #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) | ||
45 | #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) | ||
46 | |||
47 | #define RADIX_TREE_TAG_LONGS \ | ||
48 | ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) | ||
49 | |||
50 | struct radix_tree_node { | ||
51 | unsigned int height; /* Height from the bottom */ | ||
52 | unsigned int count; | ||
53 | union { | ||
54 | struct radix_tree_node *parent; /* Used when ascending tree */ | ||
55 | struct rcu_head rcu_head; /* Used when freeing node */ | ||
56 | }; | ||
57 | void __rcu *slots[RADIX_TREE_MAP_SIZE]; | ||
58 | unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; | ||
59 | }; | ||
60 | |||
61 | #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) | ||
62 | #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ | ||
63 | RADIX_TREE_MAP_SHIFT)) | ||
64 | |||
65 | /* | 38 | /* |
66 | * The height_to_maxindex array needs to be one deeper than the maximum | 39 | * The height_to_maxindex array needs to be one deeper than the maximum |
67 | * path as height 0 holds only 1 entry. | 40 | * path as height 0 holds only 1 entry. |
@@ -369,7 +342,8 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) | |||
369 | 342 | ||
370 | /* Increase the height. */ | 343 | /* Increase the height. */ |
371 | newheight = root->height+1; | 344 | newheight = root->height+1; |
372 | node->height = newheight; | 345 | BUG_ON(newheight & ~RADIX_TREE_HEIGHT_MASK); |
346 | node->path = newheight; | ||
373 | node->count = 1; | 347 | node->count = 1; |
374 | node->parent = NULL; | 348 | node->parent = NULL; |
375 | slot = root->rnode; | 349 | slot = root->rnode; |
@@ -387,23 +361,28 @@ out: | |||
387 | } | 361 | } |
388 | 362 | ||
389 | /** | 363 | /** |
390 | * radix_tree_insert - insert into a radix tree | 364 | * __radix_tree_create - create a slot in a radix tree |
391 | * @root: radix tree root | 365 | * @root: radix tree root |
392 | * @index: index key | 366 | * @index: index key |
393 | * @item: item to insert | 367 | * @nodep: returns node |
368 | * @slotp: returns slot | ||
394 | * | 369 | * |
395 | * Insert an item into the radix tree at position @index. | 370 | * Create, if necessary, and return the node and slot for an item |
371 | * at position @index in the radix tree @root. | ||
372 | * | ||
373 | * Until there is more than one item in the tree, no nodes are | ||
374 | * allocated and @root->rnode is used as a direct slot instead of | ||
375 | * pointing to a node, in which case *@nodep will be NULL. | ||
376 | * | ||
377 | * Returns -ENOMEM, or 0 for success. | ||
396 | */ | 378 | */ |
397 | int radix_tree_insert(struct radix_tree_root *root, | 379 | int __radix_tree_create(struct radix_tree_root *root, unsigned long index, |
398 | unsigned long index, void *item) | 380 | struct radix_tree_node **nodep, void ***slotp) |
399 | { | 381 | { |
400 | struct radix_tree_node *node = NULL, *slot; | 382 | struct radix_tree_node *node = NULL, *slot; |
401 | unsigned int height, shift; | 383 | unsigned int height, shift, offset; |
402 | int offset; | ||
403 | int error; | 384 | int error; |
404 | 385 | ||
405 | BUG_ON(radix_tree_is_indirect_ptr(item)); | ||
406 | |||
407 | /* Make sure the tree is high enough. */ | 386 | /* Make sure the tree is high enough. */ |
408 | if (index > radix_tree_maxindex(root->height)) { | 387 | if (index > radix_tree_maxindex(root->height)) { |
409 | error = radix_tree_extend(root, index); | 388 | error = radix_tree_extend(root, index); |
@@ -422,11 +401,12 @@ int radix_tree_insert(struct radix_tree_root *root, | |||
422 | /* Have to add a child node. */ | 401 | /* Have to add a child node. */ |
423 | if (!(slot = radix_tree_node_alloc(root))) | 402 | if (!(slot = radix_tree_node_alloc(root))) |
424 | return -ENOMEM; | 403 | return -ENOMEM; |
425 | slot->height = height; | 404 | slot->path = height; |
426 | slot->parent = node; | 405 | slot->parent = node; |
427 | if (node) { | 406 | if (node) { |
428 | rcu_assign_pointer(node->slots[offset], slot); | 407 | rcu_assign_pointer(node->slots[offset], slot); |
429 | node->count++; | 408 | node->count++; |
409 | slot->path |= offset << RADIX_TREE_HEIGHT_SHIFT; | ||
430 | } else | 410 | } else |
431 | rcu_assign_pointer(root->rnode, ptr_to_indirect(slot)); | 411 | rcu_assign_pointer(root->rnode, ptr_to_indirect(slot)); |
432 | } | 412 | } |
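
The former node->height field is replaced by node->path, which packs two values into one word: the height in the low bits and, for non-root nodes, the node's offset within its parent above RADIX_TREE_HEIGHT_SHIFT. The unpacking used throughout the later hunks is simply:

    /* Both macros come from the matching radix-tree.h change (not shown). */
    height = node->path & RADIX_TREE_HEIGHT_MASK;
    offset = node->path >> RADIX_TREE_HEIGHT_SHIFT;

This is what later lets __radix_tree_delete_node() walk toward the root and clear parent slots without re-deriving each offset from the index.
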
@@ -439,16 +419,42 @@ int radix_tree_insert(struct radix_tree_root *root, | |||
439 | height--; | 419 | height--; |
440 | } | 420 | } |
441 | 421 | ||
442 | if (slot != NULL) | 422 | if (nodep) |
423 | *nodep = node; | ||
424 | if (slotp) | ||
425 | *slotp = node ? node->slots + offset : (void **)&root->rnode; | ||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | /** | ||
430 | * radix_tree_insert - insert into a radix tree | ||
431 | * @root: radix tree root | ||
432 | * @index: index key | ||
433 | * @item: item to insert | ||
434 | * | ||
435 | * Insert an item into the radix tree at position @index. | ||
436 | */ | ||
437 | int radix_tree_insert(struct radix_tree_root *root, | ||
438 | unsigned long index, void *item) | ||
439 | { | ||
440 | struct radix_tree_node *node; | ||
441 | void **slot; | ||
442 | int error; | ||
443 | |||
444 | BUG_ON(radix_tree_is_indirect_ptr(item)); | ||
445 | |||
446 | error = __radix_tree_create(root, index, &node, &slot); | ||
447 | if (error) | ||
448 | return error; | ||
449 | if (*slot != NULL) | ||
443 | return -EEXIST; | 450 | return -EEXIST; |
451 | rcu_assign_pointer(*slot, item); | ||
444 | 452 | ||
445 | if (node) { | 453 | if (node) { |
446 | node->count++; | 454 | node->count++; |
447 | rcu_assign_pointer(node->slots[offset], item); | 455 | BUG_ON(tag_get(node, 0, index & RADIX_TREE_MAP_MASK)); |
448 | BUG_ON(tag_get(node, 0, offset)); | 456 | BUG_ON(tag_get(node, 1, index & RADIX_TREE_MAP_MASK)); |
449 | BUG_ON(tag_get(node, 1, offset)); | ||
450 | } else { | 457 | } else { |
451 | rcu_assign_pointer(root->rnode, item); | ||
452 | BUG_ON(root_tag_get(root, 0)); | 458 | BUG_ON(root_tag_get(root, 0)); |
453 | BUG_ON(root_tag_get(root, 1)); | 459 | BUG_ON(root_tag_get(root, 1)); |
454 | } | 460 | } |
@@ -457,15 +463,26 @@ int radix_tree_insert(struct radix_tree_root *root, | |||
457 | } | 463 | } |
458 | EXPORT_SYMBOL(radix_tree_insert); | 464 | EXPORT_SYMBOL(radix_tree_insert); |
459 | 465 | ||
460 | /* | 466 | /** |
461 | * is_slot == 1 : search for the slot. | 467 | * __radix_tree_lookup - lookup an item in a radix tree |
462 | * is_slot == 0 : search for the node. | 468 | * @root: radix tree root |
469 | * @index: index key | ||
470 | * @nodep: returns node | ||
471 | * @slotp: returns slot | ||
472 | * | ||
473 | * Lookup and return the item at position @index in the radix | ||
474 | * tree @root. | ||
475 | * | ||
476 | * Until there is more than one item in the tree, no nodes are | ||
477 | * allocated and @root->rnode is used as a direct slot instead of | ||
478 | * pointing to a node, in which case *@nodep will be NULL. | ||
463 | */ | 479 | */ |
464 | static void *radix_tree_lookup_element(struct radix_tree_root *root, | 480 | void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, |
465 | unsigned long index, int is_slot) | 481 | struct radix_tree_node **nodep, void ***slotp) |
466 | { | 482 | { |
483 | struct radix_tree_node *node, *parent; | ||
467 | unsigned int height, shift; | 484 | unsigned int height, shift; |
468 | struct radix_tree_node *node, **slot; | 485 | void **slot; |
469 | 486 | ||
470 | node = rcu_dereference_raw(root->rnode); | 487 | node = rcu_dereference_raw(root->rnode); |
471 | if (node == NULL) | 488 | if (node == NULL) |
@@ -474,19 +491,24 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
474 | if (!radix_tree_is_indirect_ptr(node)) { | 491 | if (!radix_tree_is_indirect_ptr(node)) { |
475 | if (index > 0) | 492 | if (index > 0) |
476 | return NULL; | 493 | return NULL; |
477 | return is_slot ? (void *)&root->rnode : node; | 494 | |
495 | if (nodep) | ||
496 | *nodep = NULL; | ||
497 | if (slotp) | ||
498 | *slotp = (void **)&root->rnode; | ||
499 | return node; | ||
478 | } | 500 | } |
479 | node = indirect_to_ptr(node); | 501 | node = indirect_to_ptr(node); |
480 | 502 | ||
481 | height = node->height; | 503 | height = node->path & RADIX_TREE_HEIGHT_MASK; |
482 | if (index > radix_tree_maxindex(height)) | 504 | if (index > radix_tree_maxindex(height)) |
483 | return NULL; | 505 | return NULL; |
484 | 506 | ||
485 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | 507 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; |
486 | 508 | ||
487 | do { | 509 | do { |
488 | slot = (struct radix_tree_node **) | 510 | parent = node; |
489 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); | 511 | slot = node->slots + ((index >> shift) & RADIX_TREE_MAP_MASK); |
490 | node = rcu_dereference_raw(*slot); | 512 | node = rcu_dereference_raw(*slot); |
491 | if (node == NULL) | 513 | if (node == NULL) |
492 | return NULL; | 514 | return NULL; |
@@ -495,7 +517,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
495 | height--; | 517 | height--; |
496 | } while (height > 0); | 518 | } while (height > 0); |
497 | 519 | ||
498 | return is_slot ? (void *)slot : indirect_to_ptr(node); | 520 | if (nodep) |
521 | *nodep = parent; | ||
522 | if (slotp) | ||
523 | *slotp = slot; | ||
524 | return node; | ||
499 | } | 525 | } |
500 | 526 | ||
501 | /** | 527 | /** |
@@ -513,7 +539,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
513 | */ | 539 | */ |
514 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | 540 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) |
515 | { | 541 | { |
516 | return (void **)radix_tree_lookup_element(root, index, 1); | 542 | void **slot; |
543 | |||
544 | if (!__radix_tree_lookup(root, index, NULL, &slot)) | ||
545 | return NULL; | ||
546 | return slot; | ||
517 | } | 547 | } |
518 | EXPORT_SYMBOL(radix_tree_lookup_slot); | 548 | EXPORT_SYMBOL(radix_tree_lookup_slot); |
519 | 549 | ||
@@ -531,7 +561,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot); | |||
531 | */ | 561 | */ |
532 | void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) | 562 | void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) |
533 | { | 563 | { |
534 | return radix_tree_lookup_element(root, index, 0); | 564 | return __radix_tree_lookup(root, index, NULL, NULL); |
535 | } | 565 | } |
536 | EXPORT_SYMBOL(radix_tree_lookup); | 566 | EXPORT_SYMBOL(radix_tree_lookup); |
537 | 567 | ||
@@ -676,7 +706,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
676 | return (index == 0); | 706 | return (index == 0); |
677 | node = indirect_to_ptr(node); | 707 | node = indirect_to_ptr(node); |
678 | 708 | ||
679 | height = node->height; | 709 | height = node->path & RADIX_TREE_HEIGHT_MASK; |
680 | if (index > radix_tree_maxindex(height)) | 710 | if (index > radix_tree_maxindex(height)) |
681 | return 0; | 711 | return 0; |
682 | 712 | ||
@@ -713,7 +743,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, | |||
713 | { | 743 | { |
714 | unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK; | 744 | unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK; |
715 | struct radix_tree_node *rnode, *node; | 745 | struct radix_tree_node *rnode, *node; |
716 | unsigned long index, offset; | 746 | unsigned long index, offset, height; |
717 | 747 | ||
718 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) | 748 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) |
719 | return NULL; | 749 | return NULL; |
@@ -744,7 +774,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, | |||
744 | return NULL; | 774 | return NULL; |
745 | 775 | ||
746 | restart: | 776 | restart: |
747 | shift = (rnode->height - 1) * RADIX_TREE_MAP_SHIFT; | 777 | height = rnode->path & RADIX_TREE_HEIGHT_MASK; |
778 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | ||
748 | offset = index >> shift; | 779 | offset = index >> shift; |
749 | 780 | ||
750 | /* Index outside of the tree */ | 781 | /* Index outside of the tree */ |
@@ -946,81 +977,6 @@ next: | |||
946 | } | 977 | } |
947 | EXPORT_SYMBOL(radix_tree_range_tag_if_tagged); | 978 | EXPORT_SYMBOL(radix_tree_range_tag_if_tagged); |
948 | 979 | ||
949 | |||
950 | /** | ||
951 | * radix_tree_next_hole - find the next hole (not-present entry) | ||
952 | * @root: tree root | ||
953 | * @index: index key | ||
954 | * @max_scan: maximum range to search | ||
955 | * | ||
956 | * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest | ||
957 | * indexed hole. | ||
958 | * | ||
959 | * Returns: the index of the hole if found, otherwise returns an index | ||
960 | * outside of the set specified (in which case 'return - index >= max_scan' | ||
961 | * will be true). In rare cases of index wrap-around, 0 will be returned. | ||
962 | * | ||
963 | * radix_tree_next_hole may be called under rcu_read_lock. However, like | ||
964 | * radix_tree_gang_lookup, this will not atomically search a snapshot of | ||
965 | * the tree at a single point in time. For example, if a hole is created | ||
966 | * at index 5, then subsequently a hole is created at index 10, | ||
967 | * radix_tree_next_hole covering both indexes may return 10 if called | ||
968 | * under rcu_read_lock. | ||
969 | */ | ||
970 | unsigned long radix_tree_next_hole(struct radix_tree_root *root, | ||
971 | unsigned long index, unsigned long max_scan) | ||
972 | { | ||
973 | unsigned long i; | ||
974 | |||
975 | for (i = 0; i < max_scan; i++) { | ||
976 | if (!radix_tree_lookup(root, index)) | ||
977 | break; | ||
978 | index++; | ||
979 | if (index == 0) | ||
980 | break; | ||
981 | } | ||
982 | |||
983 | return index; | ||
984 | } | ||
985 | EXPORT_SYMBOL(radix_tree_next_hole); | ||
986 | |||
987 | /** | ||
988 | * radix_tree_prev_hole - find the prev hole (not-present entry) | ||
989 | * @root: tree root | ||
990 | * @index: index key | ||
991 | * @max_scan: maximum range to search | ||
992 | * | ||
993 | * Search backwards in the range [max(index-max_scan+1, 0), index] | ||
994 | * for the first hole. | ||
995 | * | ||
996 | * Returns: the index of the hole if found, otherwise returns an index | ||
997 | * outside of the set specified (in which case 'index - return >= max_scan' | ||
998 | * will be true). In rare cases of wrap-around, ULONG_MAX will be returned. | ||
999 | * | ||
1000 | * radix_tree_next_hole may be called under rcu_read_lock. However, like | ||
1001 | * radix_tree_gang_lookup, this will not atomically search a snapshot of | ||
1002 | * the tree at a single point in time. For example, if a hole is created | ||
1003 | * at index 10, then subsequently a hole is created at index 5, | ||
1004 | * radix_tree_prev_hole covering both indexes may return 5 if called under | ||
1005 | * rcu_read_lock. | ||
1006 | */ | ||
1007 | unsigned long radix_tree_prev_hole(struct radix_tree_root *root, | ||
1008 | unsigned long index, unsigned long max_scan) | ||
1009 | { | ||
1010 | unsigned long i; | ||
1011 | |||
1012 | for (i = 0; i < max_scan; i++) { | ||
1013 | if (!radix_tree_lookup(root, index)) | ||
1014 | break; | ||
1015 | index--; | ||
1016 | if (index == ULONG_MAX) | ||
1017 | break; | ||
1018 | } | ||
1019 | |||
1020 | return index; | ||
1021 | } | ||
1022 | EXPORT_SYMBOL(radix_tree_prev_hole); | ||
1023 | |||
1024 | /** | 980 | /** |
1025 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree | 981 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree |
1026 | * @root: radix tree root | 982 | * @root: radix tree root |
@@ -1189,7 +1145,7 @@ static unsigned long __locate(struct radix_tree_node *slot, void *item, | |||
1189 | unsigned int shift, height; | 1145 | unsigned int shift, height; |
1190 | unsigned long i; | 1146 | unsigned long i; |
1191 | 1147 | ||
1192 | height = slot->height; | 1148 | height = slot->path & RADIX_TREE_HEIGHT_MASK; |
1193 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | 1149 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; |
1194 | 1150 | ||
1195 | for ( ; height > 1; height--) { | 1151 | for ( ; height > 1; height--) { |
@@ -1252,9 +1208,12 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | |||
1252 | } | 1208 | } |
1253 | 1209 | ||
1254 | node = indirect_to_ptr(node); | 1210 | node = indirect_to_ptr(node); |
1255 | max_index = radix_tree_maxindex(node->height); | 1211 | max_index = radix_tree_maxindex(node->path & |
1256 | if (cur_index > max_index) | 1212 | RADIX_TREE_HEIGHT_MASK); |
1213 | if (cur_index > max_index) { | ||
1214 | rcu_read_unlock(); | ||
1257 | break; | 1215 | break; |
1216 | } | ||
1258 | 1217 | ||
1259 | cur_index = __locate(node, item, cur_index, &found_index); | 1218 | cur_index = __locate(node, item, cur_index, &found_index); |
1260 | rcu_read_unlock(); | 1219 | rcu_read_unlock(); |
@@ -1335,48 +1294,90 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) | |||
1335 | } | 1294 | } |
1336 | 1295 | ||
1337 | /** | 1296 | /** |
1338 | * radix_tree_delete - delete an item from a radix tree | 1297 | * __radix_tree_delete_node - try to free node after clearing a slot |
1339 | * @root: radix tree root | 1298 | * @root: radix tree root |
1340 | * @index: index key | 1299 | * @index: index key |
1300 | * @node: node containing @index | ||
1341 | * | 1301 | * |
1342 | * Remove the item at @index from the radix tree rooted at @root. | 1302 | * After clearing the slot at @index in @node from radix tree |
1303 | * rooted at @root, call this function to attempt freeing the | ||
1304 | * node and shrinking the tree. | ||
1343 | * | 1305 | * |
1344 | * Returns the address of the deleted item, or NULL if it was not present. | 1306 | * Returns %true if @node was freed, %false otherwise. |
1345 | */ | 1307 | */ |
1346 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | 1308 | bool __radix_tree_delete_node(struct radix_tree_root *root, |
1309 | struct radix_tree_node *node) | ||
1347 | { | 1310 | { |
1348 | struct radix_tree_node *node = NULL; | 1311 | bool deleted = false; |
1349 | struct radix_tree_node *slot = NULL; | 1312 | |
1350 | struct radix_tree_node *to_free; | 1313 | do { |
1351 | unsigned int height, shift; | 1314 | struct radix_tree_node *parent; |
1315 | |||
1316 | if (node->count) { | ||
1317 | if (node == indirect_to_ptr(root->rnode)) { | ||
1318 | radix_tree_shrink(root); | ||
1319 | if (root->height == 0) | ||
1320 | deleted = true; | ||
1321 | } | ||
1322 | return deleted; | ||
1323 | } | ||
1324 | |||
1325 | parent = node->parent; | ||
1326 | if (parent) { | ||
1327 | unsigned int offset; | ||
1328 | |||
1329 | offset = node->path >> RADIX_TREE_HEIGHT_SHIFT; | ||
1330 | parent->slots[offset] = NULL; | ||
1331 | parent->count--; | ||
1332 | } else { | ||
1333 | root_tag_clear_all(root); | ||
1334 | root->height = 0; | ||
1335 | root->rnode = NULL; | ||
1336 | } | ||
1337 | |||
1338 | radix_tree_node_free(node); | ||
1339 | deleted = true; | ||
1340 | |||
1341 | node = parent; | ||
1342 | } while (node); | ||
1343 | |||
1344 | return deleted; | ||
1345 | } | ||
1346 | |||
1347 | /** | ||
1348 | * radix_tree_delete_item - delete an item from a radix tree | ||
1349 | * @root: radix tree root | ||
1350 | * @index: index key | ||
1351 | * @item: expected item | ||
1352 | * | ||
1353 | * Remove @item at @index from the radix tree rooted at @root. | ||
1354 | * | ||
1355 | * Returns the address of the deleted item, or NULL if it was not present | ||
1356 | * or the entry at the given @index was not @item. | ||
1357 | */ | ||
1358 | void *radix_tree_delete_item(struct radix_tree_root *root, | ||
1359 | unsigned long index, void *item) | ||
1360 | { | ||
1361 | struct radix_tree_node *node; | ||
1362 | unsigned int offset; | ||
1363 | void **slot; | ||
1364 | void *entry; | ||
1352 | int tag; | 1365 | int tag; |
1353 | int uninitialized_var(offset); | ||
1354 | 1366 | ||
1355 | height = root->height; | 1367 | entry = __radix_tree_lookup(root, index, &node, &slot); |
1356 | if (index > radix_tree_maxindex(height)) | 1368 | if (!entry) |
1357 | goto out; | 1369 | return NULL; |
1358 | 1370 | ||
1359 | slot = root->rnode; | 1371 | if (item && entry != item) |
1360 | if (height == 0) { | 1372 | return NULL; |
1373 | |||
1374 | if (!node) { | ||
1361 | root_tag_clear_all(root); | 1375 | root_tag_clear_all(root); |
1362 | root->rnode = NULL; | 1376 | root->rnode = NULL; |
1363 | goto out; | 1377 | return entry; |
1364 | } | 1378 | } |
1365 | slot = indirect_to_ptr(slot); | ||
1366 | shift = height * RADIX_TREE_MAP_SHIFT; | ||
1367 | 1379 | ||
1368 | do { | 1380 | offset = index & RADIX_TREE_MAP_MASK; |
1369 | if (slot == NULL) | ||
1370 | goto out; | ||
1371 | |||
1372 | shift -= RADIX_TREE_MAP_SHIFT; | ||
1373 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
1374 | node = slot; | ||
1375 | slot = slot->slots[offset]; | ||
1376 | } while (shift); | ||
1377 | |||
1378 | if (slot == NULL) | ||
1379 | goto out; | ||
1380 | 1381 | ||
1381 | /* | 1382 | /* |
1382 | * Clear all tags associated with the item to be deleted. | 1383 | * Clear all tags associated with the item to be deleted. |
@@ -1387,40 +1388,27 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |||
1387 | radix_tree_tag_clear(root, index, tag); | 1388 | radix_tree_tag_clear(root, index, tag); |
1388 | } | 1389 | } |
1389 | 1390 | ||
1390 | to_free = NULL; | 1391 | node->slots[offset] = NULL; |
1391 | /* Now free the nodes we do not need anymore */ | 1392 | node->count--; |
1392 | while (node) { | ||
1393 | node->slots[offset] = NULL; | ||
1394 | node->count--; | ||
1395 | /* | ||
1396 | * Queue the node for deferred freeing after the | ||
1397 | * last reference to it disappears (set NULL, above). | ||
1398 | */ | ||
1399 | if (to_free) | ||
1400 | radix_tree_node_free(to_free); | ||
1401 | |||
1402 | if (node->count) { | ||
1403 | if (node == indirect_to_ptr(root->rnode)) | ||
1404 | radix_tree_shrink(root); | ||
1405 | goto out; | ||
1406 | } | ||
1407 | |||
1408 | /* Node with zero slots in use so free it */ | ||
1409 | to_free = node; | ||
1410 | 1393 | ||
1411 | index >>= RADIX_TREE_MAP_SHIFT; | 1394 | __radix_tree_delete_node(root, node); |
1412 | offset = index & RADIX_TREE_MAP_MASK; | ||
1413 | node = node->parent; | ||
1414 | } | ||
1415 | 1395 | ||
1416 | root_tag_clear_all(root); | 1396 | return entry; |
1417 | root->height = 0; | 1397 | } |
1418 | root->rnode = NULL; | 1398 | EXPORT_SYMBOL(radix_tree_delete_item); |
1419 | if (to_free) | ||
1420 | radix_tree_node_free(to_free); | ||
1421 | 1399 | ||
1422 | out: | 1400 | /** |
1423 | return slot; | 1401 | * radix_tree_delete - delete an item from a radix tree |
1402 | * @root: radix tree root | ||
1403 | * @index: index key | ||
1404 | * | ||
1405 | * Remove the item at @index from the radix tree rooted at @root. | ||
1406 | * | ||
1407 | * Returns the address of the deleted item, or NULL if it was not present. | ||
1408 | */ | ||
1409 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | ||
1410 | { | ||
1411 | return radix_tree_delete_item(root, index, NULL); | ||
1424 | } | 1412 | } |
1425 | EXPORT_SYMBOL(radix_tree_delete); | 1413 | EXPORT_SYMBOL(radix_tree_delete); |
1426 | 1414 | ||
@@ -1436,9 +1424,12 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) | |||
1436 | EXPORT_SYMBOL(radix_tree_tagged); | 1424 | EXPORT_SYMBOL(radix_tree_tagged); |
1437 | 1425 | ||
1438 | static void | 1426 | static void |
1439 | radix_tree_node_ctor(void *node) | 1427 | radix_tree_node_ctor(void *arg) |
1440 | { | 1428 | { |
1441 | memset(node, 0, sizeof(struct radix_tree_node)); | 1429 | struct radix_tree_node *node = arg; |
1430 | |||
1431 | memset(node, 0, sizeof(*node)); | ||
1432 | INIT_LIST_HEAD(&node->private_list); | ||
1442 | } | 1433 | } |
1443 | 1434 | ||
1444 | static __init unsigned long __maxindex(unsigned int height) | 1435 | static __init unsigned long __maxindex(unsigned int height) |
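
The refactoring above splits the old monolithic delete into __radix_tree_lookup() plus __radix_tree_delete_node(), and adds radix_tree_delete_item(), a compare-and-delete: the entry at @index is removed only if it equals @item, with radix_tree_delete() now being the unconditional (@item == NULL) case. A hedged usage sketch (tree_lock and release_entry() are hypothetical; callers must provide their own exclusion, as for any radix tree update):

    spin_lock_irq(&tree_lock);
    old = radix_tree_delete_item(&tree, index, entry);
    spin_unlock_irq(&tree_lock);

    if (old == entry)
        release_entry(entry);   /* we won the race and removed it */
    /* old == NULL: already gone or replaced; leave the new entry alone */
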
diff --git a/lib/random32.c b/lib/random32.c index 1e5b2df44291..fa5da61ce7ad 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
@@ -1,37 +1,35 @@ | |||
1 | /* | 1 | /* |
2 | This is a maximally equidistributed combined Tausworthe generator | 2 | * This is a maximally equidistributed combined Tausworthe generator |
3 | based on code from GNU Scientific Library 1.5 (30 Jun 2004) | 3 | * based on code from GNU Scientific Library 1.5 (30 Jun 2004) |
4 | 4 | * | |
5 | lfsr113 version: | 5 | * lfsr113 version: |
6 | 6 | * | |
7 | x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n) | 7 | * x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n) |
8 | 8 | * | |
9 | s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13)) | 9 | * s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13)) |
10 | s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27)) | 10 | * s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27)) |
11 | s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21)) | 11 | * s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21)) |
12 | s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12)) | 12 | * s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12)) |
13 | 13 | * | |
14 | The period of this generator is about 2^113 (see erratum paper). | 14 | * The period of this generator is about 2^113 (see erratum paper). |
15 | 15 | * | |
16 | From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe | 16 | * From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe |
17 | Generators", Mathematics of Computation, 65, 213 (1996), 203--213: | 17 | * Generators", Mathematics of Computation, 65, 213 (1996), 203--213: |
18 | http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps | 18 | * http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps |
19 | ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps | 19 | * ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps |
20 | 20 | * | |
21 | There is an erratum in the paper "Tables of Maximally | 21 | * There is an erratum in the paper "Tables of Maximally Equidistributed |
22 | Equidistributed Combined LFSR Generators", Mathematics of | 22 | * Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999), |
23 | Computation, 68, 225 (1999), 261--269: | 23 | * 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps |
24 | http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps | 24 | * |
25 | 25 | * ... the k_j most significant bits of z_j must be non-zero, | |
26 | ... the k_j most significant bits of z_j must be non- | 26 | * for each j. (Note: this restriction also applies to the |
27 | zero, for each j. (Note: this restriction also applies to the | 27 | * computer code given in [4], but was mistakenly not mentioned |
28 | computer code given in [4], but was mistakenly not mentioned in | 28 | * in that paper.) |
29 | that paper.) | 29 | * |
30 | 30 | * This affects the seeding procedure by imposing the requirement | |
31 | This affects the seeding procedure by imposing the requirement | 31 | * s1 > 1, s2 > 7, s3 > 15, s4 > 127. |
32 | s1 > 1, s2 > 7, s3 > 15, s4 > 127. | 32 | */ |
33 | |||
34 | */ | ||
35 | 33 | ||
36 | #include <linux/types.h> | 34 | #include <linux/types.h> |
37 | #include <linux/percpu.h> | 35 | #include <linux/percpu.h> |
@@ -75,15 +73,17 @@ EXPORT_SYMBOL(prandom_u32_state); | |||
75 | */ | 73 | */ |
76 | u32 prandom_u32(void) | 74 | u32 prandom_u32(void) |
77 | { | 75 | { |
78 | unsigned long r; | ||
79 | struct rnd_state *state = &get_cpu_var(net_rand_state); | 76 | struct rnd_state *state = &get_cpu_var(net_rand_state); |
80 | r = prandom_u32_state(state); | 77 | u32 res; |
78 | |||
79 | res = prandom_u32_state(state); | ||
81 | put_cpu_var(state); | 80 | put_cpu_var(state); |
82 | return r; | 81 | |
82 | return res; | ||
83 | } | 83 | } |
84 | EXPORT_SYMBOL(prandom_u32); | 84 | EXPORT_SYMBOL(prandom_u32); |
85 | 85 | ||
86 | /* | 86 | /** |
87 | * prandom_bytes_state - get the requested number of pseudo-random bytes | 87 | * prandom_bytes_state - get the requested number of pseudo-random bytes |
88 | * | 88 | * |
89 | * @state: pointer to state structure holding seeded state. | 89 | * @state: pointer to state structure holding seeded state. |
@@ -204,6 +204,7 @@ static int __init prandom_init(void) | |||
204 | prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy()); | 204 | prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy()); |
205 | prandom_warmup(state); | 205 | prandom_warmup(state); |
206 | } | 206 | } |
207 | |||
207 | return 0; | 208 | return 0; |
208 | } | 209 | } |
209 | core_initcall(prandom_init); | 210 | core_initcall(prandom_init); |
@@ -244,10 +245,22 @@ static void __prandom_reseed(bool late) | |||
244 | static bool latch = false; | 245 | static bool latch = false; |
245 | static DEFINE_SPINLOCK(lock); | 246 | static DEFINE_SPINLOCK(lock); |
246 | 247 | ||
248 | /* Asking for random bytes might result in bytes getting | ||
249 | * moved into the nonblocking pool and thus marking it | ||
250 | * as initialized. In this case we would double back into | ||
251 | * this function and attempt to do a late reseed. | ||
252 | * Ignore the pointless attempt to reseed again if we're | ||
253 | * already waiting for bytes when the nonblocking pool | ||
254 | * got initialized. | ||
255 | */ | ||
256 | |||
247 | /* only allow initial seeding (late == false) once */ | 257 | /* only allow initial seeding (late == false) once */ |
248 | spin_lock_irqsave(&lock, flags); | 258 | if (!spin_trylock_irqsave(&lock, flags)) |
259 | return; | ||
260 | |||
249 | if (latch && !late) | 261 | if (latch && !late) |
250 | goto out; | 262 | goto out; |
263 | |||
251 | latch = true; | 264 | latch = true; |
252 | 265 | ||
253 | for_each_possible_cpu(i) { | 266 | for_each_possible_cpu(i) { |
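
The comment block and spin_trylock_irqsave() above defend against recursion rather than contention: asking the entropy pool for seed bytes can itself mark the nonblocking pool initialized and re-enter the late-reseed path while the lock is already held, so the nested attempt is simply dropped. The guard pattern in isolation (reseed() is a hypothetical reduction of __prandom_reseed()):

    static DEFINE_SPINLOCK(lock);

    static void reseed(bool late)
    {
        unsigned long flags;

        if (!spin_trylock_irqsave(&lock, flags))
            return;     /* re-entered from the seeding path; skip */

        /* ... perform the actual reseed ... */

        spin_unlock_irqrestore(&lock, flags);
    }
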
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index 31dd4ccd3baa..8b3c9dc88262 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c | |||
@@ -8,8 +8,8 @@ | |||
8 | #define CHECK_LOOPS 100 | 8 | #define CHECK_LOOPS 100 |
9 | 9 | ||
10 | struct test_node { | 10 | struct test_node { |
11 | struct rb_node rb; | ||
12 | u32 key; | 11 | u32 key; |
12 | struct rb_node rb; | ||
13 | 13 | ||
14 | /* following fields used for testing augmented rbtree functionality */ | 14 | /* following fields used for testing augmented rbtree functionality */ |
15 | u32 val; | 15 | u32 val; |
@@ -114,6 +114,16 @@ static int black_path_count(struct rb_node *rb) | |||
114 | return count; | 114 | return count; |
115 | } | 115 | } |
116 | 116 | ||
117 | static void check_postorder_foreach(int nr_nodes) | ||
118 | { | ||
119 | struct test_node *cur, *n; | ||
120 | int count = 0; | ||
121 | rbtree_postorder_for_each_entry_safe(cur, n, &root, rb) | ||
122 | count++; | ||
123 | |||
124 | WARN_ON_ONCE(count != nr_nodes); | ||
125 | } | ||
126 | |||
117 | static void check_postorder(int nr_nodes) | 127 | static void check_postorder(int nr_nodes) |
118 | { | 128 | { |
119 | struct rb_node *rb; | 129 | struct rb_node *rb; |
@@ -148,6 +158,7 @@ static void check(int nr_nodes) | |||
148 | WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); | 158 | WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); |
149 | 159 | ||
150 | check_postorder(nr_nodes); | 160 | check_postorder(nr_nodes); |
161 | check_postorder_foreach(nr_nodes); | ||
151 | } | 162 | } |
152 | 163 | ||
153 | static void check_augmented(int nr_nodes) | 164 | static void check_augmented(int nr_nodes) |
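
check_postorder_foreach() exercises rbtree_postorder_for_each_entry_safe(), which visits children before their parents and fetches the next node before the loop body runs. Beyond counting, the canonical use is tearing down a whole tree without rebalancing after each erase, e.g. (sketch; test_node, root, and rb are the names from the test above, and kfree stands in for whatever teardown applies):

    struct test_node *cur, *n;

    /* Safe: 'n' is read before 'cur' is freed, and post-order means
     * every node is visited after its children. */
    rbtree_postorder_for_each_entry_safe(cur, n, &root, rb)
        kfree(cur);
    root = RB_ROOT;
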
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c index 75510e94f7d0..464152410c51 100644 --- a/lib/reciprocal_div.c +++ b/lib/reciprocal_div.c | |||
@@ -1,11 +1,27 @@ | |||
1 | #include <linux/kernel.h> | ||
1 | #include <asm/div64.h> | 2 | #include <asm/div64.h> |
2 | #include <linux/reciprocal_div.h> | 3 | #include <linux/reciprocal_div.h> |
3 | #include <linux/export.h> | 4 | #include <linux/export.h> |
4 | 5 | ||
5 | u32 reciprocal_value(u32 k) | 6 | /* |
7 | * For a description of the algorithm please have a look at | ||
8 | * include/linux/reciprocal_div.h | ||
9 | */ | ||
10 | |||
11 | struct reciprocal_value reciprocal_value(u32 d) | ||
6 | { | 12 | { |
7 | u64 val = (1LL << 32) + (k - 1); | 13 | struct reciprocal_value R; |
8 | do_div(val, k); | 14 | u64 m; |
9 | return (u32)val; | 15 | int l; |
16 | |||
17 | l = fls(d - 1); | ||
18 | m = ((1ULL << 32) * ((1ULL << l) - d)); | ||
19 | do_div(m, d); | ||
20 | ++m; | ||
21 | R.m = (u32)m; | ||
22 | R.sh1 = min(l, 1); | ||
23 | R.sh2 = max(l - 1, 0); | ||
24 | |||
25 | return R; | ||
10 | } | 26 | } |
11 | EXPORT_SYMBOL(reciprocal_value); | 27 | EXPORT_SYMBOL(reciprocal_value); |
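
reciprocal_value() now precomputes a Granlund/Montgomery-style magic multiplier: for divisor d it derives m, sh1, and sh2 such that an unsigned 32-bit division becomes a multiply-high plus two shifts, exact for every u32 numerator. The matching divide helper lives in include/linux/reciprocal_div.h; as a sketch consistent with the fields computed above (x is assumed to hold the numerator):

    static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
    {
        u32 t = (u32)(((u64)a * R.m) >> 32);

        return (t + ((a - t) >> R.sh1)) >> R.sh2;
    }

    /* Usage: precompute once, divide many times. */
    struct reciprocal_value R = reciprocal_value(1000);
    u32 q = reciprocal_divide(x, R);    /* q == x / 1000 for any u32 x */
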
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index d16fa295ae1d..3a8e8e8fb2a5 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -495,7 +495,7 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) | |||
495 | * true if @miter contains the valid mapping. false if end of sg | 495 | * true if @miter contains the valid mapping. false if end of sg |
496 | * list is reached. | 496 | * list is reached. |
497 | */ | 497 | */ |
498 | static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) | 498 | bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) |
499 | { | 499 | { |
500 | sg_miter_stop(miter); | 500 | sg_miter_stop(miter); |
501 | 501 | ||
@@ -513,6 +513,7 @@ static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) | |||
513 | 513 | ||
514 | return true; | 514 | return true; |
515 | } | 515 | } |
516 | EXPORT_SYMBOL(sg_miter_skip); | ||
516 | 517 | ||
517 | /** | 518 | /** |
518 | * sg_miter_next - proceed mapping iterator to the next mapping | 519 | * sg_miter_next - proceed mapping iterator to the next mapping |
diff --git a/lib/show_mem.c b/lib/show_mem.c index 5847a4921b8e..09225796991a 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
@@ -17,9 +17,6 @@ void show_mem(unsigned int filter) | |||
17 | printk("Mem-Info:\n"); | 17 | printk("Mem-Info:\n"); |
18 | show_free_areas(filter); | 18 | show_free_areas(filter); |
19 | 19 | ||
20 | if (filter & SHOW_MEM_FILTER_PAGE_COUNT) | ||
21 | return; | ||
22 | |||
23 | for_each_online_pgdat(pgdat) { | 20 | for_each_online_pgdat(pgdat) { |
24 | unsigned long flags; | 21 | unsigned long flags; |
25 | int zoneid; | 22 | int zoneid; |
@@ -46,4 +43,7 @@ void show_mem(unsigned int filter) | |||
46 | printk("%lu pages in pagetable cache\n", | 43 | printk("%lu pages in pagetable cache\n", |
47 | quicklist_total_size()); | 44 | quicklist_total_size()); |
48 | #endif | 45 | #endif |
46 | #ifdef CONFIG_MEMORY_FAILURE | ||
47 | printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); | ||
48 | #endif | ||
49 | } | 49 | } |
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 04abe53f12a1..1afec32de6f2 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c | |||
@@ -7,7 +7,8 @@ | |||
7 | #include <linux/kallsyms.h> | 7 | #include <linux/kallsyms.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | 9 | ||
10 | notrace unsigned int debug_smp_processor_id(void) | 10 | notrace static unsigned int check_preemption_disabled(const char *what1, |
11 | const char *what2) | ||
11 | { | 12 | { |
12 | int this_cpu = raw_smp_processor_id(); | 13 | int this_cpu = raw_smp_processor_id(); |
13 | 14 | ||
@@ -38,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void) | |||
38 | if (!printk_ratelimit()) | 39 | if (!printk_ratelimit()) |
39 | goto out_enable; | 40 | goto out_enable; |
40 | 41 | ||
41 | printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] " | 42 | printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n", |
42 | "code: %s/%d\n", | 43 | what1, what2, preempt_count() - 1, current->comm, current->pid); |
43 | preempt_count() - 1, current->comm, current->pid); | 44 | |
44 | print_symbol("caller is %s\n", (long)__builtin_return_address(0)); | 45 | print_symbol("caller is %s\n", (long)__builtin_return_address(0)); |
45 | dump_stack(); | 46 | dump_stack(); |
46 | 47 | ||
@@ -50,5 +51,14 @@ out: | |||
50 | return this_cpu; | 51 | return this_cpu; |
51 | } | 52 | } |
52 | 53 | ||
54 | notrace unsigned int debug_smp_processor_id(void) | ||
55 | { | ||
56 | return check_preemption_disabled("smp_processor_id", ""); | ||
57 | } | ||
53 | EXPORT_SYMBOL(debug_smp_processor_id); | 58 | EXPORT_SYMBOL(debug_smp_processor_id); |
54 | 59 | ||
60 | notrace void __this_cpu_preempt_check(const char *op) | ||
61 | { | ||
62 | check_preemption_disabled("__this_cpu_", op); | ||
63 | } | ||
64 | EXPORT_SYMBOL(__this_cpu_preempt_check); | ||
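Factoring the diagnostic into check_preemption_disabled() lets one code path serve both smp_processor_id() and the __this_cpu_*() accessors; what1 and what2 are simply concatenated into the "BUG: using ...()" message, so __this_cpu_preempt_check("add") reports a misuse of __this_cpu_add(). A sketch of how a per-cpu operation might hook the new entry point (the wrapper macro is an illustration, not part of this diff):

	/* preemption-checked wrapper around a raw per-cpu add */
	#define __this_cpu_add(pcp, val)		\
	do {						\
		__this_cpu_preempt_check("add");	\
		raw_cpu_add(pcp, val);			\
	} while (0)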
diff --git a/lib/string.c b/lib/string.c index e5878de4f101..9b1f9062a202 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -648,7 +648,7 @@ EXPORT_SYMBOL(memmove); | |||
648 | * @count: The size of the area. | 648 | * @count: The size of the area. |
649 | */ | 649 | */ |
650 | #undef memcmp | 650 | #undef memcmp |
651 | int memcmp(const void *cs, const void *ct, size_t count) | 651 | __visible int memcmp(const void *cs, const void *ct, size_t count) |
652 | { | 652 | { |
653 | const unsigned char *su1, *su2; | 653 | const unsigned char *su1, *su2; |
654 | int res = 0; | 654 | int res = 0; |
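The __visible annotation keeps this out-of-line memcmp() from being discarded or localized under link-time optimization, since it can be referenced from assembly and from code the optimizer cannot see. On gcc it is believed to expand to the externally_visible attribute (the definition below is an assumption about the compiler headers, not part of this diff):

	#define __visible __attribute__((externally_visible))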
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index e4399fa65ad6..b604b831f4d1 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -172,8 +172,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
172 | /* | 172 | /* |
173 | * Get the overflow emergency buffer | 173 | * Get the overflow emergency buffer |
174 | */ | 174 | */ |
175 | v_overflow_buffer = alloc_bootmem_low_pages_nopanic( | 175 | v_overflow_buffer = memblock_virt_alloc_low_nopanic( |
176 | PAGE_ALIGN(io_tlb_overflow)); | 176 | PAGE_ALIGN(io_tlb_overflow), |
177 | PAGE_SIZE); | ||
177 | if (!v_overflow_buffer) | 178 | if (!v_overflow_buffer) |
178 | return -ENOMEM; | 179 | return -ENOMEM; |
179 | 180 | ||
@@ -184,11 +185,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
184 | * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE | 185 | * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE |
185 | * between io_tlb_start and io_tlb_end. | 186 | * between io_tlb_start and io_tlb_end. |
186 | */ | 187 | */ |
187 | io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); | 188 | io_tlb_list = memblock_virt_alloc( |
189 | PAGE_ALIGN(io_tlb_nslabs * sizeof(int)), | ||
190 | PAGE_SIZE); | ||
188 | for (i = 0; i < io_tlb_nslabs; i++) | 191 | for (i = 0; i < io_tlb_nslabs; i++) |
189 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); | 192 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); |
190 | io_tlb_index = 0; | 193 | io_tlb_index = 0; |
191 | io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); | 194 | io_tlb_orig_addr = memblock_virt_alloc( |
195 | PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)), | ||
196 | PAGE_SIZE); | ||
192 | 197 | ||
193 | if (verbose) | 198 | if (verbose) |
194 | swiotlb_print_info(); | 199 | swiotlb_print_info(); |
@@ -215,13 +220,13 @@ swiotlb_init(int verbose) | |||
215 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; | 220 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; |
216 | 221 | ||
217 | /* Get IO TLB memory from the low pages */ | 222 | /* Get IO TLB memory from the low pages */ |
218 | vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes)); | 223 | vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE); |
219 | if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) | 224 | if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) |
220 | return; | 225 | return; |
221 | 226 | ||
222 | if (io_tlb_start) | 227 | if (io_tlb_start) |
223 | free_bootmem(io_tlb_start, | 228 | memblock_free_early(io_tlb_start, |
224 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | 229 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
225 | pr_warn("Cannot allocate SWIOTLB buffer"); | 230 | pr_warn("Cannot allocate SWIOTLB buffer"); |
226 | no_iotlb_memory = true; | 231 | no_iotlb_memory = true; |
227 | } | 232 | } |
@@ -357,14 +362,14 @@ void __init swiotlb_free(void) | |||
357 | free_pages((unsigned long)phys_to_virt(io_tlb_start), | 362 | free_pages((unsigned long)phys_to_virt(io_tlb_start), |
358 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); | 363 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); |
359 | } else { | 364 | } else { |
360 | free_bootmem_late(io_tlb_overflow_buffer, | 365 | memblock_free_late(io_tlb_overflow_buffer, |
361 | PAGE_ALIGN(io_tlb_overflow)); | 366 | PAGE_ALIGN(io_tlb_overflow)); |
362 | free_bootmem_late(__pa(io_tlb_orig_addr), | 367 | memblock_free_late(__pa(io_tlb_orig_addr), |
363 | PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); | 368 | PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); |
364 | free_bootmem_late(__pa(io_tlb_list), | 369 | memblock_free_late(__pa(io_tlb_list), |
365 | PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); | 370 | PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); |
366 | free_bootmem_late(io_tlb_start, | 371 | memblock_free_late(io_tlb_start, |
367 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | 372 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
368 | } | 373 | } |
369 | io_tlb_nslabs = 0; | 374 | io_tlb_nslabs = 0; |
370 | } | 375 | } |
@@ -505,7 +510,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, | |||
505 | 510 | ||
506 | not_found: | 511 | not_found: |
507 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 512 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
508 | dev_warn(hwdev, "swiotlb buffer is full\n"); | 513 | if (printk_ratelimit()) |
514 | dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size); | ||
509 | return SWIOTLB_MAP_ERROR; | 515 | return SWIOTLB_MAP_ERROR; |
510 | found: | 516 | found: |
511 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 517 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
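Every boot-time buffer in swiotlb moves from the bootmem allocator to the memblock API, and the pattern is uniform: allocations pass an explicit size and alignment, and the free side takes a physical address, hence the __pa() conversions above. A condensed sketch of the idiom (nbytes is a placeholder):

	/* boot-time allocation; the _nopanic variants return NULL
	 * on failure instead of panicking */
	void *buf = memblock_virt_alloc(PAGE_ALIGN(nbytes), PAGE_SIZE);

	/* release before (early) or after (late) the page allocator
	 * is up; both take a physical address */
	memblock_free_early(__pa(buf), PAGE_ALIGN(nbytes));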
diff --git a/lib/syscall.c b/lib/syscall.c index 58710eefeac8..e30e03932480 100644 --- a/lib/syscall.c +++ b/lib/syscall.c | |||
@@ -72,4 +72,3 @@ int task_current_syscall(struct task_struct *target, long *callno, | |||
72 | 72 | ||
73 | return 0; | 73 | return 0; |
74 | } | 74 | } |
75 | EXPORT_SYMBOL_GPL(task_current_syscall); | ||
diff --git a/lib/test_module.c b/lib/test_module.c new file mode 100644 index 000000000000..319b66f1ff61 --- /dev/null +++ b/lib/test_module.c | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * This module emits "Hello, world" on printk when loaded. | ||
3 | * | ||
4 | * It is designed to be used for basic evaluation of the module loading | ||
5 | * subsystem (for example when validating module signing/verification). It | ||
6 | * lacks any extra dependencies, and will not normally be loaded by the | ||
7 | * system unless explicitly requested by name. | ||
8 | */ | ||
9 | |||
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/printk.h> | ||
15 | |||
16 | static int __init test_module_init(void) | ||
17 | { | ||
18 | pr_warn("Hello, world\n"); | ||
19 | |||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | module_init(test_module_init); | ||
24 | |||
25 | static void __exit test_module_exit(void) | ||
26 | { | ||
27 | pr_warn("Goodbye\n"); | ||
28 | } | ||
29 | |||
30 | module_exit(test_module_exit); | ||
31 | |||
32 | MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); | ||
33 | MODULE_LICENSE("GPL"); | ||
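Once built, the module is exercised simply by loading and unloading it by name; given the pr_fmt() prefix above, the expected console output is "test_module: Hello, world" on load and "test_module: Goodbye" on unload (the exact log framing will vary by kernel configuration).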
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c new file mode 100644 index 000000000000..0ecef3e4690e --- /dev/null +++ b/lib/test_user_copy.c | |||
@@ -0,0 +1,110 @@ | |||
1 | /* | ||
2 | * Kernel module for testing copy_to/from_user infrastructure. | ||
3 | * | ||
4 | * Copyright 2013 Google Inc. All Rights Reserved | ||
5 | * | ||
6 | * Authors: | ||
7 | * Kees Cook <keescook@chromium.org> | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
20 | |||
21 | #include <linux/mman.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <linux/vmalloc.h> | ||
27 | |||
28 | #define test(condition, msg) \ | ||
29 | ({ \ | ||
30 | int cond = (condition); \ | ||
31 | if (cond) \ | ||
32 | pr_warn("%s\n", msg); \ | ||
33 | cond; \ | ||
34 | }) | ||
35 | |||
36 | static int __init test_user_copy_init(void) | ||
37 | { | ||
38 | int ret = 0; | ||
39 | char *kmem; | ||
40 | char __user *usermem; | ||
41 | char *bad_usermem; | ||
42 | unsigned long user_addr; | ||
43 | unsigned long value = 0x5A; | ||
44 | |||
45 | kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); | ||
46 | if (!kmem) | ||
47 | return -ENOMEM; | ||
48 | |||
49 | user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2, | ||
50 | PROT_READ | PROT_WRITE | PROT_EXEC, | ||
51 | MAP_ANONYMOUS | MAP_PRIVATE, 0); | ||
52 | if (user_addr >= (unsigned long)(TASK_SIZE)) { | ||
53 | pr_warn("Failed to allocate user memory\n"); | ||
54 | kfree(kmem); | ||
55 | return -ENOMEM; | ||
56 | } | ||
57 | |||
58 | usermem = (char __user *)user_addr; | ||
59 | bad_usermem = (char *)user_addr; | ||
60 | |||
61 | /* Legitimate usage: none of these should fail. */ | ||
62 | ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE), | ||
63 | "legitimate copy_from_user failed"); | ||
64 | ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE), | ||
65 | "legitimate copy_to_user failed"); | ||
66 | ret |= test(get_user(value, (unsigned long __user *)usermem), | ||
67 | "legitimate get_user failed"); | ||
68 | ret |= test(put_user(value, (unsigned long __user *)usermem), | ||
69 | "legitimate put_user failed"); | ||
70 | |||
71 | /* Invalid usage: none of these should succeed. */ | ||
72 | ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), | ||
73 | PAGE_SIZE), | ||
74 | "illegal all-kernel copy_from_user passed"); | ||
75 | ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem, | ||
76 | PAGE_SIZE), | ||
77 | "illegal reversed copy_from_user passed"); | ||
78 | ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE, | ||
79 | PAGE_SIZE), | ||
80 | "illegal all-kernel copy_to_user passed"); | ||
81 | ret |= test(!copy_to_user((char __user *)kmem, bad_usermem, | ||
82 | PAGE_SIZE), | ||
83 | "illegal reversed copy_to_user passed"); | ||
84 | ret |= test(!get_user(value, (unsigned long __user *)kmem), | ||
85 | "illegal get_user passed"); | ||
86 | ret |= test(!put_user(value, (unsigned long __user *)kmem), | ||
87 | "illegal put_user passed"); | ||
88 | |||
89 | vm_munmap(user_addr, PAGE_SIZE * 2); | ||
90 | kfree(kmem); | ||
91 | |||
92 | if (ret == 0) { | ||
93 | pr_info("tests passed.\n"); | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | return -EINVAL; | ||
98 | } | ||
99 | |||
100 | module_init(test_user_copy_init); | ||
101 | |||
102 | static void __exit test_user_copy_exit(void) | ||
103 | { | ||
104 | pr_info("unloaded.\n"); | ||
105 | } | ||
106 | |||
107 | module_exit(test_user_copy_exit); | ||
108 | |||
109 | MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); | ||
110 | MODULE_LICENSE("GPL"); | ||
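Note the polarity the test() macro relies on: copy_to_user() and copy_from_user() return the number of bytes left uncopied, while get_user()/put_user() return -EFAULT on a fault, so any nonzero result in the "legitimate" block, and any zero (hence negated) result in the "invalid" block, is reported and OR-ed into ret. One of the checks, expanded by hand for illustration:

	ret |= ({
		int cond = (copy_from_user(kmem, usermem, PAGE_SIZE));
		if (cond)
			pr_warn("%s\n", "legitimate copy_from_user failed");
		cond;
	});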
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 10909c571494..0648291cdafe 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -364,7 +364,6 @@ enum format_type { | |||
364 | FORMAT_TYPE_SHORT, | 364 | FORMAT_TYPE_SHORT, |
365 | FORMAT_TYPE_UINT, | 365 | FORMAT_TYPE_UINT, |
366 | FORMAT_TYPE_INT, | 366 | FORMAT_TYPE_INT, |
367 | FORMAT_TYPE_NRCHARS, | ||
368 | FORMAT_TYPE_SIZE_T, | 367 | FORMAT_TYPE_SIZE_T, |
369 | FORMAT_TYPE_PTRDIFF | 368 | FORMAT_TYPE_PTRDIFF |
370 | }; | 369 | }; |
@@ -719,10 +718,15 @@ char *resource_string(char *buf, char *end, struct resource *res, | |||
719 | specp = &mem_spec; | 718 | specp = &mem_spec; |
720 | decode = 0; | 719 | decode = 0; |
721 | } | 720 | } |
722 | p = number(p, pend, res->start, *specp); | 721 | if (decode && res->flags & IORESOURCE_UNSET) { |
723 | if (res->start != res->end) { | 722 | p = string(p, pend, "size ", str_spec); |
724 | *p++ = '-'; | 723 | p = number(p, pend, resource_size(res), *specp); |
725 | p = number(p, pend, res->end, *specp); | 724 | } else { |
725 | p = number(p, pend, res->start, *specp); | ||
726 | if (res->start != res->end) { | ||
727 | *p++ = '-'; | ||
728 | p = number(p, pend, res->end, *specp); | ||
729 | } | ||
726 | } | 730 | } |
727 | if (decode) { | 731 | if (decode) { |
728 | if (res->flags & IORESOURCE_MEM_64) | 732 | if (res->flags & IORESOURCE_MEM_64) |
@@ -1155,6 +1159,30 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr, | |||
1155 | return number(buf, end, *(const netdev_features_t *)addr, spec); | 1159 | return number(buf, end, *(const netdev_features_t *)addr, spec); |
1156 | } | 1160 | } |
1157 | 1161 | ||
1162 | static noinline_for_stack | ||
1163 | char *address_val(char *buf, char *end, const void *addr, | ||
1164 | struct printf_spec spec, const char *fmt) | ||
1165 | { | ||
1166 | unsigned long long num; | ||
1167 | |||
1168 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | ||
1169 | spec.base = 16; | ||
1170 | |||
1171 | switch (fmt[1]) { | ||
1172 | case 'd': | ||
1173 | num = *(const dma_addr_t *)addr; | ||
1174 | spec.field_width = sizeof(dma_addr_t) * 2 + 2; | ||
1175 | break; | ||
1176 | case 'p': | ||
1177 | default: | ||
1178 | num = *(const phys_addr_t *)addr; | ||
1179 | spec.field_width = sizeof(phys_addr_t) * 2 + 2; | ||
1180 | break; | ||
1181 | } | ||
1182 | |||
1183 | return number(buf, end, num, spec); | ||
1184 | } | ||
1185 | |||
1158 | int kptr_restrict __read_mostly; | 1186 | int kptr_restrict __read_mostly; |
1159 | 1187 | ||
1160 | /* | 1188 | /* |
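address_val() generalizes the old inline %pa handling: %pa (or %pap) still prints a phys_addr_t, while the new %pad prints a dma_addr_t, each zero-padded to its own type's width. Both take the value by reference:

	phys_addr_t paddr = 0x40000000;
	dma_addr_t daddr = 0x80000000;

	/* note the '&': %pa[pd] dereferences its argument */
	pr_info("phys %pa, dma %pad\n", &paddr, &daddr);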
@@ -1218,7 +1246,8 @@ int kptr_restrict __read_mostly; | |||
1218 | * N no separator | 1246 | * N no separator |
1219 | * The maximum supported length is 64 bytes of the input. Consider | 1247 | * The maximum supported length is 64 bytes of the input. Consider |
1220 | * to use print_hex_dump() for the larger input. | 1248 | * to use print_hex_dump() for the larger input. |
1221 | * - 'a' For a phys_addr_t type and its derivative types (passed by reference) | 1249 | * - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives |
1250 | * (default assumed to be phys_addr_t, passed by reference) | ||
1222 | * - 'd[234]' For a dentry name (optionally 2-4 last components) | 1251 | * - 'd[234]' For a dentry name (optionally 2-4 last components) |
1223 | * - 'D[234]' Same as 'd' but for a struct file | 1252 | * - 'D[234]' Same as 'd' but for a struct file |
1224 | * | 1253 | * |
@@ -1353,11 +1382,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
1353 | } | 1382 | } |
1354 | break; | 1383 | break; |
1355 | case 'a': | 1384 | case 'a': |
1356 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | 1385 | return address_val(buf, end, ptr, spec, fmt); |
1357 | spec.field_width = sizeof(phys_addr_t) * 2 + 2; | ||
1358 | spec.base = 16; | ||
1359 | return number(buf, end, | ||
1360 | (unsigned long long) *((phys_addr_t *)ptr), spec); | ||
1361 | case 'd': | 1386 | case 'd': |
1362 | return dentry_name(buf, end, ptr, spec, fmt); | 1387 | return dentry_name(buf, end, ptr, spec, fmt); |
1363 | case 'D': | 1388 | case 'D': |
@@ -1512,10 +1537,6 @@ qualifier: | |||
1512 | return fmt - start; | 1537 | return fmt - start; |
1513 | /* skip alnum */ | 1538 | /* skip alnum */ |
1514 | 1539 | ||
1515 | case 'n': | ||
1516 | spec->type = FORMAT_TYPE_NRCHARS; | ||
1517 | return ++fmt - start; | ||
1518 | |||
1519 | case '%': | 1540 | case '%': |
1520 | spec->type = FORMAT_TYPE_PERCENT_CHAR; | 1541 | spec->type = FORMAT_TYPE_PERCENT_CHAR; |
1521 | return ++fmt - start; | 1542 | return ++fmt - start; |
@@ -1538,6 +1559,15 @@ qualifier: | |||
1538 | case 'u': | 1559 | case 'u': |
1539 | break; | 1560 | break; |
1540 | 1561 | ||
1562 | case 'n': | ||
1563 | /* | ||
1564 | * Since %n poses a greater security risk than utility, treat | ||
1565 | * it as an invalid format specifier. Warn about its use so | ||
1566 | * that new instances don't get added. | ||
1567 | */ | ||
1568 | WARN_ONCE(1, "Please remove ignored %%n in '%s'\n", fmt); | ||
1569 | /* Fall-through */ | ||
1570 | |||
1541 | default: | 1571 | default: |
1542 | spec->type = FORMAT_TYPE_INVALID; | 1572 | spec->type = FORMAT_TYPE_INVALID; |
1543 | return fmt - start; | 1573 | return fmt - start; |
@@ -1711,20 +1741,6 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
1711 | ++str; | 1741 | ++str; |
1712 | break; | 1742 | break; |
1713 | 1743 | ||
1714 | case FORMAT_TYPE_NRCHARS: { | ||
1715 | /* | ||
1716 | * Since %n poses a greater security risk than | ||
1717 | * utility, ignore %n and skip its argument. | ||
1718 | */ | ||
1719 | void *skip_arg; | ||
1720 | |||
1721 | WARN_ONCE(1, "Please remove ignored %%n in '%s'\n", | ||
1722 | old_fmt); | ||
1723 | |||
1724 | skip_arg = va_arg(args, void *); | ||
1725 | break; | ||
1726 | } | ||
1727 | |||
1728 | default: | 1744 | default: |
1729 | switch (spec.type) { | 1745 | switch (spec.type) { |
1730 | case FORMAT_TYPE_LONG_LONG: | 1746 | case FORMAT_TYPE_LONG_LONG: |
@@ -1999,19 +2015,6 @@ do { \ | |||
1999 | fmt++; | 2015 | fmt++; |
2000 | break; | 2016 | break; |
2001 | 2017 | ||
2002 | case FORMAT_TYPE_NRCHARS: { | ||
2003 | /* skip %n 's argument */ | ||
2004 | u8 qualifier = spec.qualifier; | ||
2005 | void *skip_arg; | ||
2006 | if (qualifier == 'l') | ||
2007 | skip_arg = va_arg(args, long *); | ||
2008 | else if (_tolower(qualifier) == 'z') | ||
2009 | skip_arg = va_arg(args, size_t *); | ||
2010 | else | ||
2011 | skip_arg = va_arg(args, int *); | ||
2012 | break; | ||
2013 | } | ||
2014 | |||
2015 | default: | 2018 | default: |
2016 | switch (spec.type) { | 2019 | switch (spec.type) { |
2017 | 2020 | ||
@@ -2170,10 +2173,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
2170 | ++str; | 2173 | ++str; |
2171 | break; | 2174 | break; |
2172 | 2175 | ||
2173 | case FORMAT_TYPE_NRCHARS: | ||
2174 | /* skip */ | ||
2175 | break; | ||
2176 | |||
2177 | default: { | 2176 | default: { |
2178 | unsigned long long num; | 2177 | unsigned long long num; |
2179 | 2178 | ||
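The net effect of dropping FORMAT_TYPE_NRCHARS is that %n is no longer recognized anywhere in the vsnprintf() family: format_decode() now classifies it as invalid after a one-time warning, and, unlike the old NRCHARS handling, its pointer argument is no longer consumed from the va_list. A hedged illustration of a caller that should now be fixed:

	int written;

	/* After this change nothing is ever stored through &written;
	 * the WARN_ONCE fires on first use and the specifier is treated
	 * as invalid, so such callers should drop the %n outright. */
	snprintf(buf, sizeof(buf), "%d files%n", nfiles, &written);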