Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig.debug | 61
-rw-r--r-- | lib/Makefile | 5
-rw-r--r-- | lib/assoc_array.c | 2
-rw-r--r-- | lib/average.c | 6
-rw-r--r-- | lib/cmdline.c | 14
-rw-r--r-- | lib/cpumask.c | 4
-rw-r--r-- | lib/decompress_unlz4.c | 1
-rw-r--r-- | lib/dma-debug.c | 197
-rw-r--r-- | lib/dynamic_debug.c | 29
-rw-r--r-- | lib/flex_array.c | 7
-rw-r--r-- | lib/fonts/Kconfig | 6
-rw-r--r-- | lib/genalloc.c | 5
-rw-r--r-- | lib/hash.c | 39
-rw-r--r-- | lib/kobject.c | 96
-rw-r--r-- | lib/kobject_uevent.c | 10
-rw-r--r-- | lib/kstrtox.c | 1
-rw-r--r-- | lib/parser.c | 62
-rw-r--r-- | lib/percpu-refcount.c | 3
-rw-r--r-- | lib/percpu_ida.c | 28
-rw-r--r-- | lib/rbtree_test.c | 13
-rw-r--r-- | lib/reciprocal_div.c | 24
-rw-r--r-- | lib/scatterlist.c | 3
-rw-r--r-- | lib/show_mem.c | 6
-rw-r--r-- | lib/swiotlb.c | 38
-rw-r--r-- | lib/test_module.c | 33
-rw-r--r-- | lib/test_user_copy.c | 110
-rw-r--r-- | lib/vsprintf.c | 33
27 files changed, 663 insertions, 173 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index db25707aa41b..a48abeac753f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -119,7 +119,7 @@ menu "Compile-time checks and compiler options" | |||
| 119 | 119 | ||
| 120 | config DEBUG_INFO | 120 | config DEBUG_INFO |
| 121 | bool "Compile the kernel with debug info" | 121 | bool "Compile the kernel with debug info" |
| 122 | depends on DEBUG_KERNEL | 122 | depends on DEBUG_KERNEL && !COMPILE_TEST |
| 123 | help | 123 | help |
| 124 | If you say Y here the resulting kernel image will include | 124 | If you say Y here the resulting kernel image will include |
| 125 | debugging info resulting in a larger kernel image. | 125 | debugging info resulting in a larger kernel image. |
| @@ -761,6 +761,15 @@ config PANIC_ON_OOPS_VALUE | |||
| 761 | default 0 if !PANIC_ON_OOPS | 761 | default 0 if !PANIC_ON_OOPS |
| 762 | default 1 if PANIC_ON_OOPS | 762 | default 1 if PANIC_ON_OOPS |
| 763 | 763 | ||
| 764 | config PANIC_TIMEOUT | ||
| 765 | int "panic timeout" | ||
| 766 | default 0 | ||
| 767 | help | ||
| 768 | Set the timeout value (in seconds) until a reboot occurs when | ||
| 769 | the kernel panics. If n = 0, then we wait forever. A timeout | ||
| 770 | value n > 0 will wait n seconds before rebooting, while a timeout | ||
| 771 | value n < 0 will reboot immediately. | ||
| 772 | |||
| 764 | config SCHED_DEBUG | 773 | config SCHED_DEBUG |
| 765 | bool "Collect scheduler debugging info" | 774 | bool "Collect scheduler debugging info" |
| 766 | depends on DEBUG_KERNEL && PROC_FS | 775 | depends on DEBUG_KERNEL && PROC_FS |
| @@ -1547,17 +1556,6 @@ config PROVIDE_OHCI1394_DMA_INIT | |||
| 1547 | 1556 | ||
| 1548 | See Documentation/debugging-via-ohci1394.txt for more information. | 1557 | See Documentation/debugging-via-ohci1394.txt for more information. |
| 1549 | 1558 | ||
| 1550 | config FIREWIRE_OHCI_REMOTE_DMA | ||
| 1551 | bool "Remote debugging over FireWire with firewire-ohci" | ||
| 1552 | depends on FIREWIRE_OHCI | ||
| 1553 | help | ||
| 1554 | This option lets you use the FireWire bus for remote debugging | ||
| 1555 | with help of the firewire-ohci driver. It enables unfiltered | ||
| 1556 | remote DMA in firewire-ohci. | ||
| 1557 | See Documentation/debugging-via-ohci1394.txt for more information. | ||
| 1558 | |||
| 1559 | If unsure, say N. | ||
| 1560 | |||
| 1561 | config BUILD_DOCSRC | 1559 | config BUILD_DOCSRC |
| 1562 | bool "Build targets in Documentation/ tree" | 1560 | bool "Build targets in Documentation/ tree" |
| 1563 | depends on HEADERS_CHECK | 1561 | depends on HEADERS_CHECK |
| @@ -1575,8 +1573,43 @@ config DMA_API_DEBUG | |||
| 1575 | With this option you will be able to detect common bugs in device | 1573 | With this option you will be able to detect common bugs in device |
| 1576 | drivers like double-freeing of DMA mappings or freeing mappings that | 1574 | drivers like double-freeing of DMA mappings or freeing mappings that |
| 1577 | were never allocated. | 1575 | were never allocated. |
| 1578 | This option causes a performance degredation. Use only if you want | 1576 | |
| 1579 | to debug device drivers. If unsure, say N. | 1577 | This also attempts to catch cases where a page owned by DMA is |
| 1578 | accessed by the cpu in a way that could cause data corruption. For | ||
| 1579 | example, this enables cow_user_page() to check that the source page is | ||
| 1580 | not undergoing DMA. | ||
| 1581 | |||
| 1582 | This option causes a performance degradation. Use only if you want to | ||
| 1583 | debug device drivers and dma interactions. | ||
| 1584 | |||
| 1585 | If unsure, say N. | ||
| 1586 | |||
| 1587 | config TEST_MODULE | ||
| 1588 | tristate "Test module loading with 'hello world' module" | ||
| 1589 | default n | ||
| 1590 | depends on m | ||
| 1591 | help | ||
| 1592 | This builds the "test_module" module that emits "Hello, world" | ||
| 1593 | on printk when loaded. It is designed to be used for basic | ||
| 1594 | evaluation of the module loading subsystem (for example when | ||
| 1595 | validating module verification). It lacks any extra dependencies, | ||
| 1596 | and will not normally be loaded by the system unless explicitly | ||
| 1597 | requested by name. | ||
| 1598 | |||
| 1599 | If unsure, say N. | ||
| 1600 | |||
| 1601 | config TEST_USER_COPY | ||
| 1602 | tristate "Test user/kernel boundary protections" | ||
| 1603 | default n | ||
| 1604 | depends on m | ||
| 1605 | help | ||
| 1606 | This builds the "test_user_copy" module that runs sanity checks | ||
| 1607 | on the copy_to/from_user infrastructure, making sure basic | ||
| 1608 | user/kernel boundary testing is working. If it fails to load, | ||
| 1609 | a regression has been detected in the user/kernel memory boundary | ||
| 1610 | protections. | ||
| 1611 | |||
| 1612 | If unsure, say N. | ||
| 1580 | 1613 | ||
| 1581 | source "samples/Kconfig" | 1614 | source "samples/Kconfig" |
| 1582 | 1615 | ||
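For reference, the kind of module the TEST_MODULE option above describes looks roughly like the following. This is a hedged sketch of a minimal "hello world" module, not necessarily the exact contents of lib/test_module.c:

/*
 * Hedged sketch of a minimal "hello world" module of the kind the
 * TEST_MODULE option describes; not necessarily the exact lib/test_module.c.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init test_hello_init(void)
{
        pr_warn("Hello, world\n");
        return 0;
}
module_init(test_hello_init);

static void __exit test_hello_exit(void)
{
        pr_warn("Goodbye\n");
}
module_exit(test_hello_exit);

MODULE_LICENSE("GPL");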
diff --git a/lib/Makefile b/lib/Makefile
index a459c31e8c6b..48140e3ba73f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
| @@ -26,11 +26,13 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | |||
| 26 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 26 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
| 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ | 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ |
| 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ | 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ |
| 29 | percpu-refcount.o percpu_ida.o | 29 | percpu-refcount.o percpu_ida.o hash.o |
| 30 | obj-y += string_helpers.o | 30 | obj-y += string_helpers.o |
| 31 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | 31 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o |
| 32 | obj-y += kstrtox.o | 32 | obj-y += kstrtox.o |
| 33 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 33 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
| 34 | obj-$(CONFIG_TEST_MODULE) += test_module.o | ||
| 35 | obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o | ||
| 34 | 36 | ||
| 35 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 37 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
| 36 | CFLAGS_kobject.o += -DDEBUG | 38 | CFLAGS_kobject.o += -DDEBUG |
| @@ -43,6 +45,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o | |||
| 43 | obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o | 45 | obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o |
| 44 | obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o | 46 | obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o |
| 45 | 47 | ||
| 48 | GCOV_PROFILE_hweight.o := n | ||
| 46 | CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) | 49 | CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) |
| 47 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o | 50 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o |
| 48 | 51 | ||
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 1b6a44f1ec3e..c0b1007011e1 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
| @@ -157,7 +157,7 @@ enum assoc_array_walk_status { | |||
| 157 | assoc_array_walk_tree_empty, | 157 | assoc_array_walk_tree_empty, |
| 158 | assoc_array_walk_found_terminal_node, | 158 | assoc_array_walk_found_terminal_node, |
| 159 | assoc_array_walk_found_wrong_shortcut, | 159 | assoc_array_walk_found_wrong_shortcut, |
| 160 | } status; | 160 | }; |
| 161 | 161 | ||
| 162 | struct assoc_array_walk_result { | 162 | struct assoc_array_walk_result { |
| 163 | struct { | 163 | struct { |
diff --git a/lib/average.c b/lib/average.c
index 99a67e662b3c..114d1beae0c7 100644
--- a/lib/average.c
+++ b/lib/average.c
| @@ -53,8 +53,10 @@ EXPORT_SYMBOL(ewma_init); | |||
| 53 | */ | 53 | */ |
| 54 | struct ewma *ewma_add(struct ewma *avg, unsigned long val) | 54 | struct ewma *ewma_add(struct ewma *avg, unsigned long val) |
| 55 | { | 55 | { |
| 56 | avg->internal = avg->internal ? | 56 | unsigned long internal = ACCESS_ONCE(avg->internal); |
| 57 | (((avg->internal << avg->weight) - avg->internal) + | 57 | |
| 58 | ACCESS_ONCE(avg->internal) = internal ? | ||
| 59 | (((internal << avg->weight) - internal) + | ||
| 58 | (val << avg->factor)) >> avg->weight : | 60 | (val << avg->factor)) >> avg->weight : |
| 59 | (val << avg->factor); | 61 | (val << avg->factor); |
| 60 | return avg; | 62 | return avg; |
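The ewma_add() change above only alters how avg->internal is read and written (one ACCESS_ONCE load and one ACCESS_ONCE store) so lockless readers see a consistent value; the calling convention is unchanged. A minimal usage sketch, with made-up factor/weight values (both must be powers of two):

/*
 * Minimal EWMA usage sketch around the ewma_add() change above; the
 * factor/weight values are arbitrary examples.
 */
#include <linux/average.h>

static struct ewma avg_signal;

static void signal_init(void)
{
        ewma_init(&avg_signal, 1024, 8);        /* factor, weight */
}

static void signal_sample(unsigned long sig)
{
        ewma_add(&avg_signal, sig);             /* writer side */
}

static unsigned long signal_read(void)
{
        return ewma_read(&avg_signal);          /* lockless reader */
}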
diff --git a/lib/cmdline.c b/lib/cmdline.c
index eb6791188cf5..d4932f745e92 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
| @@ -49,13 +49,13 @@ static int get_range(char **str, int *pint) | |||
| 49 | * 3 - hyphen found to denote a range | 49 | * 3 - hyphen found to denote a range |
| 50 | */ | 50 | */ |
| 51 | 51 | ||
| 52 | int get_option (char **str, int *pint) | 52 | int get_option(char **str, int *pint) |
| 53 | { | 53 | { |
| 54 | char *cur = *str; | 54 | char *cur = *str; |
| 55 | 55 | ||
| 56 | if (!cur || !(*cur)) | 56 | if (!cur || !(*cur)) |
| 57 | return 0; | 57 | return 0; |
| 58 | *pint = simple_strtol (cur, str, 0); | 58 | *pint = simple_strtol(cur, str, 0); |
| 59 | if (cur == *str) | 59 | if (cur == *str) |
| 60 | return 0; | 60 | return 0; |
| 61 | if (**str == ',') { | 61 | if (**str == ',') { |
| @@ -67,6 +67,7 @@ int get_option (char **str, int *pint) | |||
| 67 | 67 | ||
| 68 | return 1; | 68 | return 1; |
| 69 | } | 69 | } |
| 70 | EXPORT_SYMBOL(get_option); | ||
| 70 | 71 | ||
| 71 | /** | 72 | /** |
| 72 | * get_options - Parse a string into a list of integers | 73 | * get_options - Parse a string into a list of integers |
| @@ -84,13 +85,13 @@ int get_option (char **str, int *pint) | |||
| 84 | * the parse to end (typically a null terminator, if @str is | 85 | * the parse to end (typically a null terminator, if @str is |
| 85 | * completely parseable). | 86 | * completely parseable). |
| 86 | */ | 87 | */ |
| 87 | 88 | ||
| 88 | char *get_options(const char *str, int nints, int *ints) | 89 | char *get_options(const char *str, int nints, int *ints) |
| 89 | { | 90 | { |
| 90 | int res, i = 1; | 91 | int res, i = 1; |
| 91 | 92 | ||
| 92 | while (i < nints) { | 93 | while (i < nints) { |
| 93 | res = get_option ((char **)&str, ints + i); | 94 | res = get_option((char **)&str, ints + i); |
| 94 | if (res == 0) | 95 | if (res == 0) |
| 95 | break; | 96 | break; |
| 96 | if (res == 3) { | 97 | if (res == 3) { |
| @@ -112,6 +113,7 @@ char *get_options(const char *str, int nints, int *ints) | |||
| 112 | ints[0] = i - 1; | 113 | ints[0] = i - 1; |
| 113 | return (char *)str; | 114 | return (char *)str; |
| 114 | } | 115 | } |
| 116 | EXPORT_SYMBOL(get_options); | ||
| 115 | 117 | ||
| 116 | /** | 118 | /** |
| 117 | * memparse - parse a string with mem suffixes into a number | 119 | * memparse - parse a string with mem suffixes into a number |
| @@ -152,8 +154,4 @@ unsigned long long memparse(const char *ptr, char **retptr) | |||
| 152 | 154 | ||
| 153 | return ret; | 155 | return ret; |
| 154 | } | 156 | } |
| 155 | |||
| 156 | |||
| 157 | EXPORT_SYMBOL(memparse); | 157 | EXPORT_SYMBOL(memparse); |
| 158 | EXPORT_SYMBOL(get_option); | ||
| 159 | EXPORT_SYMBOL(get_options); | ||
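With get_option()/get_options() now exported directly below their definitions rather than collected at the bottom of the file, the usual calling pattern is the sketch below; the parameter name and array size are invented for illustration:

/*
 * Sketch of get_options() parsing a comma/range separated boot parameter
 * such as "mychans=1,4-6,9"; the parameter name is made up.
 */
#include <linux/init.h>
#include <linux/kernel.h>

static int chans[8];

static int __init parse_chans(char *str)
{
        /* chans[0] is filled with the number of integers collected */
        get_options(str, ARRAY_SIZE(chans), chans);
        return 1;
}
__setup("mychans=", parse_chans);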
diff --git a/lib/cpumask.c b/lib/cpumask.c
index d327b87c99b7..b810b753c607 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
| @@ -140,7 +140,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var); | |||
| 140 | */ | 140 | */ |
| 141 | void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) | 141 | void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) |
| 142 | { | 142 | { |
| 143 | *mask = alloc_bootmem(cpumask_size()); | 143 | *mask = memblock_virt_alloc(cpumask_size(), 0); |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | /** | 146 | /** |
| @@ -161,6 +161,6 @@ EXPORT_SYMBOL(free_cpumask_var); | |||
| 161 | */ | 161 | */ |
| 162 | void __init free_bootmem_cpumask_var(cpumask_var_t mask) | 162 | void __init free_bootmem_cpumask_var(cpumask_var_t mask) |
| 163 | { | 163 | { |
| 164 | free_bootmem(__pa(mask), cpumask_size()); | 164 | memblock_free_early(__pa(mask), cpumask_size()); |
| 165 | } | 165 | } |
| 166 | #endif | 166 | #endif |
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 3e67cfad16ad..7d1e83caf8ad 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
| @@ -141,6 +141,7 @@ STATIC inline int INIT unlz4(u8 *input, int in_len, | |||
| 141 | goto exit_2; | 141 | goto exit_2; |
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | ret = -1; | ||
| 144 | if (flush && flush(outp, dest_len) != dest_len) | 145 | if (flush && flush(outp, dest_len) != dest_len) |
| 145 | goto exit_2; | 146 | goto exit_2; |
| 146 | if (output) | 147 | if (output) |
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d87a17a819d0..2defd1308b04 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
| @@ -53,11 +53,26 @@ enum map_err_types { | |||
| 53 | 53 | ||
| 54 | #define DMA_DEBUG_STACKTRACE_ENTRIES 5 | 54 | #define DMA_DEBUG_STACKTRACE_ENTRIES 5 |
| 55 | 55 | ||
| 56 | /** | ||
| 57 | * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping | ||
| 58 | * @list: node on pre-allocated free_entries list | ||
| 59 | * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent | ||
| 60 | * @type: single, page, sg, coherent | ||
| 61 | * @pfn: page frame of the start address | ||
| 62 | * @offset: offset of mapping relative to pfn | ||
| 63 | * @size: length of the mapping | ||
| 64 | * @direction: enum dma_data_direction | ||
| 65 | * @sg_call_ents: 'nents' from dma_map_sg | ||
| 66 | * @sg_mapped_ents: 'mapped_ents' from dma_map_sg | ||
| 67 | * @map_err_type: track whether dma_mapping_error() was checked | ||
| 68 | * @stacktrace: support backtraces when a violation is detected | ||
| 69 | */ | ||
| 56 | struct dma_debug_entry { | 70 | struct dma_debug_entry { |
| 57 | struct list_head list; | 71 | struct list_head list; |
| 58 | struct device *dev; | 72 | struct device *dev; |
| 59 | int type; | 73 | int type; |
| 60 | phys_addr_t paddr; | 74 | unsigned long pfn; |
| 75 | size_t offset; | ||
| 61 | u64 dev_addr; | 76 | u64 dev_addr; |
| 62 | u64 size; | 77 | u64 size; |
| 63 | int direction; | 78 | int direction; |
| @@ -372,6 +387,11 @@ static void hash_bucket_del(struct dma_debug_entry *entry) | |||
| 372 | list_del(&entry->list); | 387 | list_del(&entry->list); |
| 373 | } | 388 | } |
| 374 | 389 | ||
| 390 | static unsigned long long phys_addr(struct dma_debug_entry *entry) | ||
| 391 | { | ||
| 392 | return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; | ||
| 393 | } | ||
| 394 | |||
| 375 | /* | 395 | /* |
| 376 | * Dump mapping entries for debugging purposes | 396 | * Dump mapping entries for debugging purposes |
| 377 | */ | 397 | */ |
| @@ -389,9 +409,9 @@ void debug_dma_dump_mappings(struct device *dev) | |||
| 389 | list_for_each_entry(entry, &bucket->list, list) { | 409 | list_for_each_entry(entry, &bucket->list, list) { |
| 390 | if (!dev || dev == entry->dev) { | 410 | if (!dev || dev == entry->dev) { |
| 391 | dev_info(entry->dev, | 411 | dev_info(entry->dev, |
| 392 | "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n", | 412 | "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n", |
| 393 | type2name[entry->type], idx, | 413 | type2name[entry->type], idx, |
| 394 | (unsigned long long)entry->paddr, | 414 | phys_addr(entry), entry->pfn, |
| 395 | entry->dev_addr, entry->size, | 415 | entry->dev_addr, entry->size, |
| 396 | dir2name[entry->direction], | 416 | dir2name[entry->direction], |
| 397 | maperr2str[entry->map_err_type]); | 417 | maperr2str[entry->map_err_type]); |
| @@ -404,6 +424,137 @@ void debug_dma_dump_mappings(struct device *dev) | |||
| 404 | EXPORT_SYMBOL(debug_dma_dump_mappings); | 424 | EXPORT_SYMBOL(debug_dma_dump_mappings); |
| 405 | 425 | ||
| 406 | /* | 426 | /* |
| 427 | * For each page mapped (initial page in the case of | ||
| 428 | * dma_alloc_coherent/dma_map_{single|page}, or each page in a | ||
| 429 | * scatterlist) insert into this tree using the pfn as the key. At | ||
| 430 | * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If | ||
| 431 | * the pfn already exists at insertion time add a tag as a reference | ||
| 432 | * count for the overlapping mappings. For now, the overlap tracking | ||
| 433 | * just ensures that 'unmaps' balance 'maps' before marking the pfn | ||
| 434 | * idle, but we should also be flagging overlaps as an API violation. | ||
| 435 | * | ||
| 436 | * Memory usage is mostly constrained by the maximum number of available | ||
| 437 | * dma-debug entries in that we need a free dma_debug_entry before | ||
| 438 | * inserting into the tree. In the case of dma_map_{single|page} and | ||
| 439 | * dma_alloc_coherent there is only one dma_debug_entry and one pfn to | ||
| 440 | * track per event. dma_map_sg(), on the other hand, | ||
| 441 | * consumes a single dma_debug_entry, but inserts 'nents' entries into | ||
| 442 | * the tree. | ||
| 443 | * | ||
| 444 | * At any time debug_dma_assert_idle() can be called to trigger a | ||
| 445 | * warning if the given page is in the active set. | ||
| 446 | */ | ||
| 447 | static RADIX_TREE(dma_active_pfn, GFP_NOWAIT); | ||
| 448 | static DEFINE_SPINLOCK(radix_lock); | ||
| 449 | #define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) | ||
| 450 | |||
| 451 | static int active_pfn_read_overlap(unsigned long pfn) | ||
| 452 | { | ||
| 453 | int overlap = 0, i; | ||
| 454 | |||
| 455 | for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) | ||
| 456 | if (radix_tree_tag_get(&dma_active_pfn, pfn, i)) | ||
| 457 | overlap |= 1 << i; | ||
| 458 | return overlap; | ||
| 459 | } | ||
| 460 | |||
| 461 | static int active_pfn_set_overlap(unsigned long pfn, int overlap) | ||
| 462 | { | ||
| 463 | int i; | ||
| 464 | |||
| 465 | if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0) | ||
| 466 | return overlap; | ||
| 467 | |||
| 468 | for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) | ||
| 469 | if (overlap & 1 << i) | ||
| 470 | radix_tree_tag_set(&dma_active_pfn, pfn, i); | ||
| 471 | else | ||
| 472 | radix_tree_tag_clear(&dma_active_pfn, pfn, i); | ||
| 473 | |||
| 474 | return overlap; | ||
| 475 | } | ||
| 476 | |||
| 477 | static void active_pfn_inc_overlap(unsigned long pfn) | ||
| 478 | { | ||
| 479 | int overlap = active_pfn_read_overlap(pfn); | ||
| 480 | |||
| 481 | overlap = active_pfn_set_overlap(pfn, ++overlap); | ||
| 482 | |||
| 483 | /* If we overflowed the overlap counter then we're potentially | ||
| 484 | * leaking dma-mappings. Otherwise, if maps and unmaps are | ||
| 485 | * balanced then this overflow may cause false negatives in | ||
| 486 | * debug_dma_assert_idle() as the pfn may be marked idle | ||
| 487 | * prematurely. | ||
| 488 | */ | ||
| 489 | WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP, | ||
| 490 | "DMA-API: exceeded %d overlapping mappings of pfn %lx\n", | ||
| 491 | ACTIVE_PFN_MAX_OVERLAP, pfn); | ||
| 492 | } | ||
| 493 | |||
| 494 | static int active_pfn_dec_overlap(unsigned long pfn) | ||
| 495 | { | ||
| 496 | int overlap = active_pfn_read_overlap(pfn); | ||
| 497 | |||
| 498 | return active_pfn_set_overlap(pfn, --overlap); | ||
| 499 | } | ||
| 500 | |||
| 501 | static int active_pfn_insert(struct dma_debug_entry *entry) | ||
| 502 | { | ||
| 503 | unsigned long flags; | ||
| 504 | int rc; | ||
| 505 | |||
| 506 | spin_lock_irqsave(&radix_lock, flags); | ||
| 507 | rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry); | ||
| 508 | if (rc == -EEXIST) | ||
| 509 | active_pfn_inc_overlap(entry->pfn); | ||
| 510 | spin_unlock_irqrestore(&radix_lock, flags); | ||
| 511 | |||
| 512 | return rc; | ||
| 513 | } | ||
| 514 | |||
| 515 | static void active_pfn_remove(struct dma_debug_entry *entry) | ||
| 516 | { | ||
| 517 | unsigned long flags; | ||
| 518 | |||
| 519 | spin_lock_irqsave(&radix_lock, flags); | ||
| 520 | /* since we are counting overlaps the final put of the | ||
| 521 | * entry->pfn will occur when the overlap count is 0. | ||
| 522 | * active_pfn_dec_overlap() returns -1 in that case | ||
| 523 | */ | ||
| 524 | if (active_pfn_dec_overlap(entry->pfn) < 0) | ||
| 525 | radix_tree_delete(&dma_active_pfn, entry->pfn); | ||
| 526 | spin_unlock_irqrestore(&radix_lock, flags); | ||
| 527 | } | ||
| 528 | |||
| 529 | /** | ||
| 530 | * debug_dma_assert_idle() - assert that a page is not undergoing dma | ||
| 531 | * @page: page to lookup in the dma_active_pfn tree | ||
| 532 | * | ||
| 533 | * Place a call to this routine in cases where the cpu touching the page | ||
| 534 | * before the dma completes (page is dma_unmapped) will lead to data | ||
| 535 | * corruption. | ||
| 536 | */ | ||
| 537 | void debug_dma_assert_idle(struct page *page) | ||
| 538 | { | ||
| 539 | unsigned long flags; | ||
| 540 | struct dma_debug_entry *entry; | ||
| 541 | |||
| 542 | if (!page) | ||
| 543 | return; | ||
| 544 | |||
| 545 | spin_lock_irqsave(&radix_lock, flags); | ||
| 546 | entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page)); | ||
| 547 | spin_unlock_irqrestore(&radix_lock, flags); | ||
| 548 | |||
| 549 | if (!entry) | ||
| 550 | return; | ||
| 551 | |||
| 552 | err_printk(entry->dev, entry, | ||
| 553 | "DMA-API: cpu touching an active dma mapped page " | ||
| 554 | "[pfn=0x%lx]\n", entry->pfn); | ||
| 555 | } | ||
| 556 | |||
| 557 | /* | ||
| 407 | * Wrapper function for adding an entry to the hash. | 558 | * Wrapper function for adding an entry to the hash. |
| 408 | * This function takes care of locking itself. | 559 | * This function takes care of locking itself. |
| 409 | */ | 560 | */ |
| @@ -411,10 +562,21 @@ static void add_dma_entry(struct dma_debug_entry *entry) | |||
| 411 | { | 562 | { |
| 412 | struct hash_bucket *bucket; | 563 | struct hash_bucket *bucket; |
| 413 | unsigned long flags; | 564 | unsigned long flags; |
| 565 | int rc; | ||
| 414 | 566 | ||
| 415 | bucket = get_hash_bucket(entry, &flags); | 567 | bucket = get_hash_bucket(entry, &flags); |
| 416 | hash_bucket_add(bucket, entry); | 568 | hash_bucket_add(bucket, entry); |
| 417 | put_hash_bucket(bucket, &flags); | 569 | put_hash_bucket(bucket, &flags); |
| 570 | |||
| 571 | rc = active_pfn_insert(entry); | ||
| 572 | if (rc == -ENOMEM) { | ||
| 573 | pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n"); | ||
| 574 | global_disable = true; | ||
| 575 | } | ||
| 576 | |||
| 577 | /* TODO: report -EEXIST errors here as overlapping mappings are | ||
| 578 | * not supported by the DMA API | ||
| 579 | */ | ||
| 418 | } | 580 | } |
| 419 | 581 | ||
| 420 | static struct dma_debug_entry *__dma_entry_alloc(void) | 582 | static struct dma_debug_entry *__dma_entry_alloc(void) |
| @@ -469,6 +631,8 @@ static void dma_entry_free(struct dma_debug_entry *entry) | |||
| 469 | { | 631 | { |
| 470 | unsigned long flags; | 632 | unsigned long flags; |
| 471 | 633 | ||
| 634 | active_pfn_remove(entry); | ||
| 635 | |||
| 472 | /* | 636 | /* |
| 473 | * add to beginning of the list - this way the entries are | 637 | * add to beginning of the list - this way the entries are |
| 474 | * more likely cache hot when they are reallocated. | 638 | * more likely cache hot when they are reallocated. |
| @@ -895,15 +1059,15 @@ static void check_unmap(struct dma_debug_entry *ref) | |||
| 895 | ref->dev_addr, ref->size, | 1059 | ref->dev_addr, ref->size, |
| 896 | type2name[entry->type], type2name[ref->type]); | 1060 | type2name[entry->type], type2name[ref->type]); |
| 897 | } else if ((entry->type == dma_debug_coherent) && | 1061 | } else if ((entry->type == dma_debug_coherent) && |
| 898 | (ref->paddr != entry->paddr)) { | 1062 | (phys_addr(ref) != phys_addr(entry))) { |
| 899 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | 1063 | err_printk(ref->dev, entry, "DMA-API: device driver frees " |
| 900 | "DMA memory with different CPU address " | 1064 | "DMA memory with different CPU address " |
| 901 | "[device address=0x%016llx] [size=%llu bytes] " | 1065 | "[device address=0x%016llx] [size=%llu bytes] " |
| 902 | "[cpu alloc address=0x%016llx] " | 1066 | "[cpu alloc address=0x%016llx] " |
| 903 | "[cpu free address=0x%016llx]", | 1067 | "[cpu free address=0x%016llx]", |
| 904 | ref->dev_addr, ref->size, | 1068 | ref->dev_addr, ref->size, |
| 905 | (unsigned long long)entry->paddr, | 1069 | phys_addr(entry), |
| 906 | (unsigned long long)ref->paddr); | 1070 | phys_addr(ref)); |
| 907 | } | 1071 | } |
| 908 | 1072 | ||
| 909 | if (ref->sg_call_ents && ref->type == dma_debug_sg && | 1073 | if (ref->sg_call_ents && ref->type == dma_debug_sg && |
| @@ -1052,7 +1216,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, | |||
| 1052 | 1216 | ||
| 1053 | entry->dev = dev; | 1217 | entry->dev = dev; |
| 1054 | entry->type = dma_debug_page; | 1218 | entry->type = dma_debug_page; |
| 1055 | entry->paddr = page_to_phys(page) + offset; | 1219 | entry->pfn = page_to_pfn(page); |
| 1220 | entry->offset = offset, | ||
| 1056 | entry->dev_addr = dma_addr; | 1221 | entry->dev_addr = dma_addr; |
| 1057 | entry->size = size; | 1222 | entry->size = size; |
| 1058 | entry->direction = direction; | 1223 | entry->direction = direction; |
| @@ -1148,7 +1313,8 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
| 1148 | 1313 | ||
| 1149 | entry->type = dma_debug_sg; | 1314 | entry->type = dma_debug_sg; |
| 1150 | entry->dev = dev; | 1315 | entry->dev = dev; |
| 1151 | entry->paddr = sg_phys(s); | 1316 | entry->pfn = page_to_pfn(sg_page(s)); |
| 1317 | entry->offset = s->offset, | ||
| 1152 | entry->size = sg_dma_len(s); | 1318 | entry->size = sg_dma_len(s); |
| 1153 | entry->dev_addr = sg_dma_address(s); | 1319 | entry->dev_addr = sg_dma_address(s); |
| 1154 | entry->direction = direction; | 1320 | entry->direction = direction; |
| @@ -1198,7 +1364,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
| 1198 | struct dma_debug_entry ref = { | 1364 | struct dma_debug_entry ref = { |
| 1199 | .type = dma_debug_sg, | 1365 | .type = dma_debug_sg, |
| 1200 | .dev = dev, | 1366 | .dev = dev, |
| 1201 | .paddr = sg_phys(s), | 1367 | .pfn = page_to_pfn(sg_page(s)), |
| 1368 | .offset = s->offset, | ||
| 1202 | .dev_addr = sg_dma_address(s), | 1369 | .dev_addr = sg_dma_address(s), |
| 1203 | .size = sg_dma_len(s), | 1370 | .size = sg_dma_len(s), |
| 1204 | .direction = dir, | 1371 | .direction = dir, |
| @@ -1233,7 +1400,8 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, | |||
| 1233 | 1400 | ||
| 1234 | entry->type = dma_debug_coherent; | 1401 | entry->type = dma_debug_coherent; |
| 1235 | entry->dev = dev; | 1402 | entry->dev = dev; |
| 1236 | entry->paddr = virt_to_phys(virt); | 1403 | entry->pfn = page_to_pfn(virt_to_page(virt)); |
| 1404 | entry->offset = (size_t) virt & PAGE_MASK; | ||
| 1237 | entry->size = size; | 1405 | entry->size = size; |
| 1238 | entry->dev_addr = dma_addr; | 1406 | entry->dev_addr = dma_addr; |
| 1239 | entry->direction = DMA_BIDIRECTIONAL; | 1407 | entry->direction = DMA_BIDIRECTIONAL; |
| @@ -1248,7 +1416,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size, | |||
| 1248 | struct dma_debug_entry ref = { | 1416 | struct dma_debug_entry ref = { |
| 1249 | .type = dma_debug_coherent, | 1417 | .type = dma_debug_coherent, |
| 1250 | .dev = dev, | 1418 | .dev = dev, |
| 1251 | .paddr = virt_to_phys(virt), | 1419 | .pfn = page_to_pfn(virt_to_page(virt)), |
| 1420 | .offset = (size_t) virt & PAGE_MASK, | ||
| 1252 | .dev_addr = addr, | 1421 | .dev_addr = addr, |
| 1253 | .size = size, | 1422 | .size = size, |
| 1254 | .direction = DMA_BIDIRECTIONAL, | 1423 | .direction = DMA_BIDIRECTIONAL, |
| @@ -1356,7 +1525,8 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |||
| 1356 | struct dma_debug_entry ref = { | 1525 | struct dma_debug_entry ref = { |
| 1357 | .type = dma_debug_sg, | 1526 | .type = dma_debug_sg, |
| 1358 | .dev = dev, | 1527 | .dev = dev, |
| 1359 | .paddr = sg_phys(s), | 1528 | .pfn = page_to_pfn(sg_page(s)), |
| 1529 | .offset = s->offset, | ||
| 1360 | .dev_addr = sg_dma_address(s), | 1530 | .dev_addr = sg_dma_address(s), |
| 1361 | .size = sg_dma_len(s), | 1531 | .size = sg_dma_len(s), |
| 1362 | .direction = direction, | 1532 | .direction = direction, |
| @@ -1388,7 +1558,8 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
| 1388 | struct dma_debug_entry ref = { | 1558 | struct dma_debug_entry ref = { |
| 1389 | .type = dma_debug_sg, | 1559 | .type = dma_debug_sg, |
| 1390 | .dev = dev, | 1560 | .dev = dev, |
| 1391 | .paddr = sg_phys(s), | 1561 | .pfn = page_to_pfn(sg_page(s)), |
| 1562 | .offset = s->offset, | ||
| 1392 | .dev_addr = sg_dma_address(s), | 1563 | .dev_addr = sg_dma_address(s), |
| 1393 | .size = sg_dma_len(s), | 1564 | .size = sg_dma_len(s), |
| 1394 | .direction = direction, | 1565 | .direction = direction, |
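The new debug_dma_assert_idle() hook is meant to be called from code paths where the CPU is about to write to a page that may still be mapped for DMA (the Kconfig help above cites cow_user_page()). A hedged sketch of the calling pattern; the helper name is invented, and with CONFIG_DMA_API_DEBUG disabled the call is expected to compile away to a stub:

/*
 * Hedged sketch of a debug_dma_assert_idle() call site; the helper is
 * invented for illustration.
 */
#include <linux/dma-debug.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void cpu_scribble_on_page(struct page *page)
{
        void *addr;

        /* warn if 'page' is still in the dma-debug active-pfn tree */
        debug_dma_assert_idle(page);

        addr = kmap_atomic(page);
        memset(addr, 0, PAGE_SIZE);
        kunmap_atomic(addr);
}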
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c37aeacd7651..7288e38e1757 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
| @@ -8,6 +8,7 @@ | |||
| 8 | * By Greg Banks <gnb@melbourne.sgi.com> | 8 | * By Greg Banks <gnb@melbourne.sgi.com> |
| 9 | * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved. | 9 | * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved. |
| 10 | * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. | 10 | * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. |
| 11 | * Copyright (C) 2013 Du, Changbin <changbin.du@gmail.com> | ||
| 11 | */ | 12 | */ |
| 12 | 13 | ||
| 13 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ |
| @@ -24,6 +25,7 @@ | |||
| 24 | #include <linux/sysctl.h> | 25 | #include <linux/sysctl.h> |
| 25 | #include <linux/ctype.h> | 26 | #include <linux/ctype.h> |
| 26 | #include <linux/string.h> | 27 | #include <linux/string.h> |
| 28 | #include <linux/parser.h> | ||
| 27 | #include <linux/string_helpers.h> | 29 | #include <linux/string_helpers.h> |
| 28 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
| 29 | #include <linux/dynamic_debug.h> | 31 | #include <linux/dynamic_debug.h> |
| @@ -147,7 +149,8 @@ static int ddebug_change(const struct ddebug_query *query, | |||
| 147 | list_for_each_entry(dt, &ddebug_tables, link) { | 149 | list_for_each_entry(dt, &ddebug_tables, link) { |
| 148 | 150 | ||
| 149 | /* match against the module name */ | 151 | /* match against the module name */ |
| 150 | if (query->module && strcmp(query->module, dt->mod_name)) | 152 | if (query->module && |
| 153 | !match_wildcard(query->module, dt->mod_name)) | ||
| 151 | continue; | 154 | continue; |
| 152 | 155 | ||
| 153 | for (i = 0; i < dt->num_ddebugs; i++) { | 156 | for (i = 0; i < dt->num_ddebugs; i++) { |
| @@ -155,14 +158,16 @@ static int ddebug_change(const struct ddebug_query *query, | |||
| 155 | 158 | ||
| 156 | /* match against the source filename */ | 159 | /* match against the source filename */ |
| 157 | if (query->filename && | 160 | if (query->filename && |
| 158 | strcmp(query->filename, dp->filename) && | 161 | !match_wildcard(query->filename, dp->filename) && |
| 159 | strcmp(query->filename, kbasename(dp->filename)) && | 162 | !match_wildcard(query->filename, |
| 160 | strcmp(query->filename, trim_prefix(dp->filename))) | 163 | kbasename(dp->filename)) && |
| 164 | !match_wildcard(query->filename, | ||
| 165 | trim_prefix(dp->filename))) | ||
| 161 | continue; | 166 | continue; |
| 162 | 167 | ||
| 163 | /* match against the function */ | 168 | /* match against the function */ |
| 164 | if (query->function && | 169 | if (query->function && |
| 165 | strcmp(query->function, dp->function)) | 170 | !match_wildcard(query->function, dp->function)) |
| 166 | continue; | 171 | continue; |
| 167 | 172 | ||
| 168 | /* match against the format */ | 173 | /* match against the format */ |
| @@ -263,14 +268,12 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) | |||
| 263 | */ | 268 | */ |
| 264 | static inline int parse_lineno(const char *str, unsigned int *val) | 269 | static inline int parse_lineno(const char *str, unsigned int *val) |
| 265 | { | 270 | { |
| 266 | char *end = NULL; | ||
| 267 | BUG_ON(str == NULL); | 271 | BUG_ON(str == NULL); |
| 268 | if (*str == '\0') { | 272 | if (*str == '\0') { |
| 269 | *val = 0; | 273 | *val = 0; |
| 270 | return 0; | 274 | return 0; |
| 271 | } | 275 | } |
| 272 | *val = simple_strtoul(str, &end, 10); | 276 | if (kstrtouint(str, 10, val) < 0) { |
| 273 | if (end == NULL || end == str || *end != '\0') { | ||
| 274 | pr_err("bad line-number: %s\n", str); | 277 | pr_err("bad line-number: %s\n", str); |
| 275 | return -EINVAL; | 278 | return -EINVAL; |
| 276 | } | 279 | } |
| @@ -343,14 +346,14 @@ static int ddebug_parse_query(char *words[], int nwords, | |||
| 343 | } | 346 | } |
| 344 | if (last) | 347 | if (last) |
| 345 | *last++ = '\0'; | 348 | *last++ = '\0'; |
| 346 | if (parse_lineno(first, &query->first_lineno) < 0) { | 349 | if (parse_lineno(first, &query->first_lineno) < 0) |
| 347 | pr_err("line-number is <0\n"); | ||
| 348 | return -EINVAL; | 350 | return -EINVAL; |
| 349 | } | ||
| 350 | if (last) { | 351 | if (last) { |
| 351 | /* range <first>-<last> */ | 352 | /* range <first>-<last> */ |
| 352 | if (parse_lineno(last, &query->last_lineno) | 353 | if (parse_lineno(last, &query->last_lineno) < 0) |
| 353 | < query->first_lineno) { | 354 | return -EINVAL; |
| 355 | |||
| 356 | if (query->last_lineno < query->first_lineno) { | ||
| 354 | pr_err("last-line:%d < 1st-line:%d\n", | 357 | pr_err("last-line:%d < 1st-line:%d\n", |
| 355 | query->last_lineno, | 358 | query->last_lineno, |
| 356 | query->first_lineno); | 359 | query->first_lineno); |
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 6948a6692fc4..2eed22fa507c 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
| @@ -90,8 +90,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, | |||
| 90 | { | 90 | { |
| 91 | struct flex_array *ret; | 91 | struct flex_array *ret; |
| 92 | int elems_per_part = 0; | 92 | int elems_per_part = 0; |
| 93 | int reciprocal_elems = 0; | ||
| 94 | int max_size = 0; | 93 | int max_size = 0; |
| 94 | struct reciprocal_value reciprocal_elems = { 0 }; | ||
| 95 | 95 | ||
| 96 | if (element_size) { | 96 | if (element_size) { |
| 97 | elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size); | 97 | elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size); |
| @@ -119,6 +119,11 @@ EXPORT_SYMBOL(flex_array_alloc); | |||
| 119 | static int fa_element_to_part_nr(struct flex_array *fa, | 119 | static int fa_element_to_part_nr(struct flex_array *fa, |
| 120 | unsigned int element_nr) | 120 | unsigned int element_nr) |
| 121 | { | 121 | { |
| 122 | /* | ||
| 123 | * if element_size == 0 we don't get here, so we never touch | ||
| 124 | * the zeroed fa->reciprocal_elems, which would yield invalid | ||
| 125 | * results | ||
| 126 | */ | ||
| 122 | return reciprocal_divide(element_nr, fa->reciprocal_elems); | 127 | return reciprocal_divide(element_nr, fa->reciprocal_elems); |
| 123 | } | 128 | } |
| 124 | 129 | ||
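The flex_array change tracks the reworked reciprocal-divide API (also in this series; see lib/reciprocal_div.c in the diffstat): the divisor is converted once into a struct reciprocal_value and later divisions become a multiply and shift. A sketch under that assumption, with invented names; the divisor must be non-zero, which is why the zeroed fa->reciprocal_elems case is documented above as unreachable:

/*
 * Sketch of the reworked reciprocal-divide API assumed above: convert the
 * divisor once, then divide cheaply at runtime.
 */
#include <linux/reciprocal_div.h>
#include <linux/types.h>

static struct reciprocal_value elems_per_part_R;

static void part_divider_init(u32 elems_per_part)
{
        elems_per_part_R = reciprocal_value(elems_per_part);
}

static u32 element_to_part(u32 element_nr)
{
        return reciprocal_divide(element_nr, elems_per_part_R);
}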
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 34fd931b54b5..4dc1b990aa23 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
| @@ -9,7 +9,7 @@ if FONT_SUPPORT | |||
| 9 | 9 | ||
| 10 | config FONTS | 10 | config FONTS |
| 11 | bool "Select compiled-in fonts" | 11 | bool "Select compiled-in fonts" |
| 12 | depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE | 12 | depends on FRAMEBUFFER_CONSOLE |
| 13 | help | 13 | help |
| 14 | Say Y here if you would like to use fonts other than the default | 14 | Say Y here if you would like to use fonts other than the default |
| 15 | your frame buffer console usually use. | 15 | your frame buffer console usually use. |
| @@ -22,7 +22,7 @@ config FONTS | |||
| 22 | 22 | ||
| 23 | config FONT_8x8 | 23 | config FONT_8x8 |
| 24 | bool "VGA 8x8 font" if FONTS | 24 | bool "VGA 8x8 font" if FONTS |
| 25 | depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE | 25 | depends on FRAMEBUFFER_CONSOLE |
| 26 | default y if !SPARC && !FONTS | 26 | default y if !SPARC && !FONTS |
| 27 | help | 27 | help |
| 28 | This is the "high resolution" font for the VGA frame buffer (the one | 28 | This is the "high resolution" font for the VGA frame buffer (the one |
| @@ -45,7 +45,7 @@ config FONT_8x16 | |||
| 45 | 45 | ||
| 46 | config FONT_6x11 | 46 | config FONT_6x11 |
| 47 | bool "Mac console 6x11 font (not supported by all drivers)" if FONTS | 47 | bool "Mac console 6x11 font (not supported by all drivers)" if FONTS |
| 48 | depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE | 48 | depends on FRAMEBUFFER_CONSOLE |
| 49 | default y if !SPARC && !FONTS && MAC | 49 | default y if !SPARC && !FONTS && MAC |
| 50 | help | 50 | help |
| 51 | Small console font with Macintosh-style high-half glyphs. Some Mac | 51 | Small console font with Macintosh-style high-half glyphs. Some Mac |
diff --git a/lib/genalloc.c b/lib/genalloc.c
index dda31168844f..bdb9a456bcbb 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
| @@ -316,7 +316,7 @@ EXPORT_SYMBOL(gen_pool_alloc); | |||
| 316 | * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage | 316 | * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage |
| 317 | * @pool: pool to allocate from | 317 | * @pool: pool to allocate from |
| 318 | * @size: number of bytes to allocate from the pool | 318 | * @size: number of bytes to allocate from the pool |
| 319 | * @dma: dma-view physical address | 319 | * @dma: dma-view physical address return value. Use NULL if unneeded. |
| 320 | * | 320 | * |
| 321 | * Allocate the requested number of bytes from the specified pool. | 321 | * Allocate the requested number of bytes from the specified pool. |
| 322 | * Uses the pool allocation function (with first-fit algorithm by default). | 322 | * Uses the pool allocation function (with first-fit algorithm by default). |
| @@ -334,7 +334,8 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma) | |||
| 334 | if (!vaddr) | 334 | if (!vaddr) |
| 335 | return NULL; | 335 | return NULL; |
| 336 | 336 | ||
| 337 | *dma = gen_pool_virt_to_phys(pool, vaddr); | 337 | if (dma) |
| 338 | *dma = gen_pool_virt_to_phys(pool, vaddr); | ||
| 338 | 339 | ||
| 339 | return (void *)vaddr; | 340 | return (void *)vaddr; |
| 340 | } | 341 | } |
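With the NULL check added above, callers of gen_pool_dma_alloc() that only need the CPU address no longer have to supply a dma_addr_t. A short hedged sketch of both calling styles; 'pool' is assumed to be an already-created and populated gen_pool:

/*
 * Sketch of the two calling styles allowed after the change above.
 */
#include <linux/genalloc.h>
#include <linux/types.h>

static void dma_alloc_examples(struct gen_pool *pool)
{
        dma_addr_t dma;
        void *vaddr;

        /* caller only needs the CPU address: NULL is now accepted */
        vaddr = gen_pool_dma_alloc(pool, 64, NULL);
        if (vaddr)
                gen_pool_free(pool, (unsigned long)vaddr, 64);

        /* caller also wants the dma-view physical address */
        vaddr = gen_pool_dma_alloc(pool, 64, &dma);
        if (vaddr)
                gen_pool_free(pool, (unsigned long)vaddr, 64);
}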
diff --git a/lib/hash.c b/lib/hash.c
new file mode 100644
index 000000000000..fea973f4bd57
--- /dev/null
+++ b/lib/hash.c
| @@ -0,0 +1,39 @@ | |||
| 1 | /* General purpose hashing library | ||
| 2 | * | ||
| 3 | * This is the start of a kernel hashing library, which can be extended | ||
| 4 | * with further algorithms in the future. arch_fast_hash{2,}() will | ||
| 5 | * eventually resolve to an architecture optimized implementation. | ||
| 6 | * | ||
| 7 | * Copyright 2013 Francesco Fusco <ffusco@redhat.com> | ||
| 8 | * Copyright 2013 Daniel Borkmann <dborkman@redhat.com> | ||
| 9 | * Copyright 2013 Thomas Graf <tgraf@redhat.com> | ||
| 10 | * Licensed under the GNU General Public License, version 2.0 (GPLv2) | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/jhash.h> | ||
| 14 | #include <linux/hash.h> | ||
| 15 | #include <linux/cache.h> | ||
| 16 | |||
| 17 | static struct fast_hash_ops arch_hash_ops __read_mostly = { | ||
| 18 | .hash = jhash, | ||
| 19 | .hash2 = jhash2, | ||
| 20 | }; | ||
| 21 | |||
| 22 | u32 arch_fast_hash(const void *data, u32 len, u32 seed) | ||
| 23 | { | ||
| 24 | return arch_hash_ops.hash(data, len, seed); | ||
| 25 | } | ||
| 26 | EXPORT_SYMBOL_GPL(arch_fast_hash); | ||
| 27 | |||
| 28 | u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed) | ||
| 29 | { | ||
| 30 | return arch_hash_ops.hash2(data, len, seed); | ||
| 31 | } | ||
| 32 | EXPORT_SYMBOL_GPL(arch_fast_hash2); | ||
| 33 | |||
| 34 | static int __init hashlib_init(void) | ||
| 35 | { | ||
| 36 | setup_arch_fast_hash(&arch_hash_ops); | ||
| 37 | return 0; | ||
| 38 | } | ||
| 39 | early_initcall(hashlib_init); | ||
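arch_fast_hash()/arch_fast_hash2() default to jhash()/jhash2() and can be overridden per architecture via setup_arch_fast_hash(). A hedged usage sketch, assuming the prototypes are exposed through <linux/hash.h> as part of this series; the flow_key structure and seed handling are invented (as with jhash2(), the length is in u32 words):

/*
 * Hedged sketch of arch_fast_hash2() on a word-aligned, u32-sized key;
 * struct flow_key is made up for illustration.
 */
#include <linux/hash.h>
#include <linux/types.h>

struct flow_key {
        u32 src;
        u32 dst;
        u32 proto;
};

static u32 flow_hash(const struct flow_key *key, u32 seed)
{
        return arch_fast_hash2((const u32 *)key,
                               sizeof(*key) / sizeof(u32), seed);
}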
diff --git a/lib/kobject.c b/lib/kobject.c
index 5b4b8886435e..cb14aeac4cca 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
| @@ -13,11 +13,11 @@ | |||
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <linux/kobject.h> | 15 | #include <linux/kobject.h> |
| 16 | #include <linux/kobj_completion.h> | ||
| 17 | #include <linux/string.h> | 16 | #include <linux/string.h> |
| 18 | #include <linux/export.h> | 17 | #include <linux/export.h> |
| 19 | #include <linux/stat.h> | 18 | #include <linux/stat.h> |
| 20 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | #include <linux/random.h> | ||
| 21 | 21 | ||
| 22 | /** | 22 | /** |
| 23 | * kobject_namespace - return @kobj's namespace tag | 23 | * kobject_namespace - return @kobj's namespace tag |
| @@ -65,13 +65,17 @@ static int populate_dir(struct kobject *kobj) | |||
| 65 | 65 | ||
| 66 | static int create_dir(struct kobject *kobj) | 66 | static int create_dir(struct kobject *kobj) |
| 67 | { | 67 | { |
| 68 | const struct kobj_ns_type_operations *ops; | ||
| 68 | int error; | 69 | int error; |
| 69 | 70 | ||
| 70 | error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj)); | 71 | error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj)); |
| 71 | if (!error) { | 72 | if (error) |
| 72 | error = populate_dir(kobj); | 73 | return error; |
| 73 | if (error) | 74 | |
| 74 | sysfs_remove_dir(kobj); | 75 | error = populate_dir(kobj); |
| 76 | if (error) { | ||
| 77 | sysfs_remove_dir(kobj); | ||
| 78 | return error; | ||
| 75 | } | 79 | } |
| 76 | 80 | ||
| 77 | /* | 81 | /* |
| @@ -80,7 +84,20 @@ static int create_dir(struct kobject *kobj) | |||
| 80 | */ | 84 | */ |
| 81 | sysfs_get(kobj->sd); | 85 | sysfs_get(kobj->sd); |
| 82 | 86 | ||
| 83 | return error; | 87 | /* |
| 88 | * If @kobj has ns_ops, its children need to be filtered based on | ||
| 89 | * their namespace tags. Enable namespace support on @kobj->sd. | ||
| 90 | */ | ||
| 91 | ops = kobj_child_ns_ops(kobj); | ||
| 92 | if (ops) { | ||
| 93 | BUG_ON(ops->type <= KOBJ_NS_TYPE_NONE); | ||
| 94 | BUG_ON(ops->type >= KOBJ_NS_TYPES); | ||
| 95 | BUG_ON(!kobj_ns_type_registered(ops->type)); | ||
| 96 | |||
| 97 | kernfs_enable_ns(kobj->sd); | ||
| 98 | } | ||
| 99 | |||
| 100 | return 0; | ||
| 84 | } | 101 | } |
| 85 | 102 | ||
| 86 | static int get_kobj_path_length(struct kobject *kobj) | 103 | static int get_kobj_path_length(struct kobject *kobj) |
| @@ -247,8 +264,10 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, | |||
| 247 | return 0; | 264 | return 0; |
| 248 | 265 | ||
| 249 | kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); | 266 | kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); |
| 250 | if (!kobj->name) | 267 | if (!kobj->name) { |
| 268 | kobj->name = old_name; | ||
| 251 | return -ENOMEM; | 269 | return -ENOMEM; |
| 270 | } | ||
| 252 | 271 | ||
| 253 | /* ewww... some of these buggers have '/' in the name ... */ | 272 | /* ewww... some of these buggers have '/' in the name ... */ |
| 254 | while ((s = strchr(kobj->name, '/'))) | 273 | while ((s = strchr(kobj->name, '/'))) |
| @@ -346,7 +365,7 @@ static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, | |||
| 346 | * | 365 | * |
| 347 | * If @parent is set, then the parent of the @kobj will be set to it. | 366 | * If @parent is set, then the parent of the @kobj will be set to it. |
| 348 | * If @parent is NULL, then the parent of the @kobj will be set to the | 367 | * If @parent is NULL, then the parent of the @kobj will be set to the |
| 349 | * kobject associted with the kset assigned to this kobject. If no kset | 368 | * kobject associated with the kset assigned to this kobject. If no kset |
| 350 | * is assigned to the kobject, then the kobject will be located in the | 369 | * is assigned to the kobject, then the kobject will be located in the |
| 351 | * root of the sysfs tree. | 370 | * root of the sysfs tree. |
| 352 | * | 371 | * |
| @@ -536,7 +555,7 @@ out: | |||
| 536 | */ | 555 | */ |
| 537 | void kobject_del(struct kobject *kobj) | 556 | void kobject_del(struct kobject *kobj) |
| 538 | { | 557 | { |
| 539 | struct sysfs_dirent *sd; | 558 | struct kernfs_node *sd; |
| 540 | 559 | ||
| 541 | if (!kobj) | 560 | if (!kobj) |
| 542 | return; | 561 | return; |
| @@ -625,10 +644,12 @@ static void kobject_release(struct kref *kref) | |||
| 625 | { | 644 | { |
| 626 | struct kobject *kobj = container_of(kref, struct kobject, kref); | 645 | struct kobject *kobj = container_of(kref, struct kobject, kref); |
| 627 | #ifdef CONFIG_DEBUG_KOBJECT_RELEASE | 646 | #ifdef CONFIG_DEBUG_KOBJECT_RELEASE |
| 628 | pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n", | 647 | unsigned long delay = HZ + HZ * (get_random_int() & 0x3); |
| 629 | kobject_name(kobj), kobj, __func__, kobj->parent); | 648 | pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n", |
| 649 | kobject_name(kobj), kobj, __func__, kobj->parent, delay); | ||
| 630 | INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); | 650 | INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); |
| 631 | schedule_delayed_work(&kobj->release, HZ); | 651 | |
| 652 | schedule_delayed_work(&kobj->release, delay); | ||
| 632 | #else | 653 | #else |
| 633 | kobject_cleanup(kobj); | 654 | kobject_cleanup(kobj); |
| 634 | #endif | 655 | #endif |
| @@ -758,55 +779,7 @@ const struct sysfs_ops kobj_sysfs_ops = { | |||
| 758 | .show = kobj_attr_show, | 779 | .show = kobj_attr_show, |
| 759 | .store = kobj_attr_store, | 780 | .store = kobj_attr_store, |
| 760 | }; | 781 | }; |
| 761 | 782 | EXPORT_SYMBOL_GPL(kobj_sysfs_ops); | |
| 762 | /** | ||
| 763 | * kobj_completion_init - initialize a kobj_completion object. | ||
| 764 | * @kc: kobj_completion | ||
| 765 | * @ktype: type of kobject to initialize | ||
| 766 | * | ||
| 767 | * kobj_completion structures can be embedded within structures with different | ||
| 768 | * lifetime rules. During the release of the enclosing object, we can | ||
| 769 | * wait on the release of the kobject so that we don't free it while it's | ||
| 770 | * still busy. | ||
| 771 | */ | ||
| 772 | void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype) | ||
| 773 | { | ||
| 774 | init_completion(&kc->kc_unregister); | ||
| 775 | kobject_init(&kc->kc_kobj, ktype); | ||
| 776 | } | ||
| 777 | EXPORT_SYMBOL_GPL(kobj_completion_init); | ||
| 778 | |||
| 779 | /** | ||
| 780 | * kobj_completion_release - release a kobj_completion object | ||
| 781 | * @kobj: kobject embedded in kobj_completion | ||
| 782 | * | ||
| 783 | * Used with kobject_release to notify waiters that the kobject has been | ||
| 784 | * released. | ||
| 785 | */ | ||
| 786 | void kobj_completion_release(struct kobject *kobj) | ||
| 787 | { | ||
| 788 | struct kobj_completion *kc = kobj_to_kobj_completion(kobj); | ||
| 789 | complete(&kc->kc_unregister); | ||
| 790 | } | ||
| 791 | EXPORT_SYMBOL_GPL(kobj_completion_release); | ||
| 792 | |||
| 793 | /** | ||
| 794 | * kobj_completion_del_and_wait - release the kobject and wait for it | ||
| 795 | * @kc: kobj_completion object to release | ||
| 796 | * | ||
| 797 | * Delete the kobject from sysfs and drop the reference count. Then wait | ||
| 798 | * until any other outstanding references are also dropped. This routine | ||
| 799 | * is only necessary once other references may have been taken on the | ||
| 800 | * kobject. Typically this happens when the kobject has been published | ||
| 801 | * to sysfs via kobject_add. | ||
| 802 | */ | ||
| 803 | void kobj_completion_del_and_wait(struct kobj_completion *kc) | ||
| 804 | { | ||
| 805 | kobject_del(&kc->kc_kobj); | ||
| 806 | kobject_put(&kc->kc_kobj); | ||
| 807 | wait_for_completion(&kc->kc_unregister); | ||
| 808 | } | ||
| 809 | EXPORT_SYMBOL_GPL(kobj_completion_del_and_wait); | ||
| 810 | 783 | ||
| 811 | /** | 784 | /** |
| 812 | * kset_register - initialize and add a kset. | 785 | * kset_register - initialize and add a kset. |
| @@ -835,6 +808,7 @@ void kset_unregister(struct kset *k) | |||
| 835 | { | 808 | { |
| 836 | if (!k) | 809 | if (!k) |
| 837 | return; | 810 | return; |
| 811 | kobject_del(&k->kobj); | ||
| 838 | kobject_put(&k->kobj); | 812 | kobject_put(&k->kobj); |
| 839 | } | 813 | } |
| 840 | 814 | ||
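Exporting kobj_sysfs_ops lets modules define their own ktype that reuses the standard kobj_attribute show/store dispatch instead of open-coding a sysfs_ops. A hedged sketch; every name here is illustrative and not part of the patch:

/*
 * Hedged sketch of a module-defined ktype reusing the now-exported
 * kobj_sysfs_ops together with kobj_attribute callbacks.
 */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

static void demo_release(struct kobject *kobj)
{
        kfree(kobj);
}

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", 42);
}

static struct kobj_attribute value_attr = __ATTR_RO(value);

static struct attribute *demo_attrs[] = {
        &value_attr.attr,
        NULL,
};

static struct kobj_type demo_ktype = {
        .release        = demo_release,
        .sysfs_ops      = &kobj_sysfs_ops,      /* exported above */
        .default_attrs  = demo_attrs,
};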
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 52e5abbc41db..5f72767ddd9b 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
| @@ -88,11 +88,17 @@ out: | |||
| 88 | #ifdef CONFIG_NET | 88 | #ifdef CONFIG_NET |
| 89 | static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) | 89 | static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) |
| 90 | { | 90 | { |
| 91 | struct kobject *kobj = data; | 91 | struct kobject *kobj = data, *ksobj; |
| 92 | const struct kobj_ns_type_operations *ops; | 92 | const struct kobj_ns_type_operations *ops; |
| 93 | 93 | ||
| 94 | ops = kobj_ns_ops(kobj); | 94 | ops = kobj_ns_ops(kobj); |
| 95 | if (ops) { | 95 | if (!ops && kobj->kset) { |
| 96 | ksobj = &kobj->kset->kobj; | ||
| 97 | if (ksobj->parent != NULL) | ||
| 98 | ops = kobj_ns_ops(ksobj->parent); | ||
| 99 | } | ||
| 100 | |||
| 101 | if (ops && ops->netlink_ns && kobj->ktype->namespace) { | ||
| 96 | const void *sock_ns, *ns; | 102 | const void *sock_ns, *ns; |
| 97 | ns = kobj->ktype->namespace(kobj); | 103 | ns = kobj->ktype->namespace(kobj); |
| 98 | sock_ns = ops->netlink_ns(dsk); | 104 | sock_ns = ops->netlink_ns(dsk); |
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index f78ae0c0c4e2..ec8da78df9be 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
| @@ -92,7 +92,6 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | |||
| 92 | rv = _parse_integer(s, base, &_res); | 92 | rv = _parse_integer(s, base, &_res); |
| 93 | if (rv & KSTRTOX_OVERFLOW) | 93 | if (rv & KSTRTOX_OVERFLOW) |
| 94 | return -ERANGE; | 94 | return -ERANGE; |
| 95 | rv &= ~KSTRTOX_OVERFLOW; | ||
| 96 | if (rv == 0) | 95 | if (rv == 0) |
| 97 | return -EINVAL; | 96 | return -EINVAL; |
| 98 | s += rv; | 97 | s += rv; |
diff --git a/lib/parser.c b/lib/parser.c
index 807b2aaa33fa..b6d11631231b 100644
--- a/lib/parser.c
+++ b/lib/parser.c
| @@ -113,6 +113,7 @@ int match_token(char *s, const match_table_t table, substring_t args[]) | |||
| 113 | 113 | ||
| 114 | return p->token; | 114 | return p->token; |
| 115 | } | 115 | } |
| 116 | EXPORT_SYMBOL(match_token); | ||
| 116 | 117 | ||
| 117 | /** | 118 | /** |
| 118 | * match_number: scan a number in the given base from a substring_t | 119 | * match_number: scan a number in the given base from a substring_t |
| @@ -163,6 +164,7 @@ int match_int(substring_t *s, int *result) | |||
| 163 | { | 164 | { |
| 164 | return match_number(s, result, 0); | 165 | return match_number(s, result, 0); |
| 165 | } | 166 | } |
| 167 | EXPORT_SYMBOL(match_int); | ||
| 166 | 168 | ||
| 167 | /** | 169 | /** |
| 168 | * match_octal: - scan an octal representation of an integer from a substring_t | 170 | * match_octal: - scan an octal representation of an integer from a substring_t |
| @@ -177,6 +179,7 @@ int match_octal(substring_t *s, int *result) | |||
| 177 | { | 179 | { |
| 178 | return match_number(s, result, 8); | 180 | return match_number(s, result, 8); |
| 179 | } | 181 | } |
| 182 | EXPORT_SYMBOL(match_octal); | ||
| 180 | 183 | ||
| 181 | /** | 184 | /** |
| 182 | * match_hex: - scan a hex representation of an integer from a substring_t | 185 | * match_hex: - scan a hex representation of an integer from a substring_t |
| @@ -191,6 +194,58 @@ int match_hex(substring_t *s, int *result) | |||
| 191 | { | 194 | { |
| 192 | return match_number(s, result, 16); | 195 | return match_number(s, result, 16); |
| 193 | } | 196 | } |
| 197 | EXPORT_SYMBOL(match_hex); | ||
| 198 | |||
| 199 | /** | ||
| 200 | * match_wildcard: - parse if a string matches given wildcard pattern | ||
| 201 | * @pattern: wildcard pattern | ||
| 202 | * @str: the string to be parsed | ||
| 203 | * | ||
| 204 | * Description: Parse the string @str to check if it matches the wildcard | ||
| 205 | * pattern @pattern. The pattern may contain two types of wildcards: | ||
| 206 | * '*' - matches zero or more characters | ||
| 207 | * '?' - matches one character | ||
| 208 | * If it's matched, return true, else return false. | ||
| 209 | */ | ||
| 210 | bool match_wildcard(const char *pattern, const char *str) | ||
| 211 | { | ||
| 212 | const char *s = str; | ||
| 213 | const char *p = pattern; | ||
| 214 | bool star = false; | ||
| 215 | |||
| 216 | while (*s) { | ||
| 217 | switch (*p) { | ||
| 218 | case '?': | ||
| 219 | s++; | ||
| 220 | p++; | ||
| 221 | break; | ||
| 222 | case '*': | ||
| 223 | star = true; | ||
| 224 | str = s; | ||
| 225 | if (!*++p) | ||
| 226 | return true; | ||
| 227 | pattern = p; | ||
| 228 | break; | ||
| 229 | default: | ||
| 230 | if (*s == *p) { | ||
| 231 | s++; | ||
| 232 | p++; | ||
| 233 | } else { | ||
| 234 | if (!star) | ||
| 235 | return false; | ||
| 236 | str++; | ||
| 237 | s = str; | ||
| 238 | p = pattern; | ||
| 239 | } | ||
| 240 | break; | ||
| 241 | } | ||
| 242 | } | ||
| 243 | |||
| 244 | if (*p == '*') | ||
| 245 | ++p; | ||
| 246 | return !*p; | ||
| 247 | } | ||
| 248 | EXPORT_SYMBOL(match_wildcard); | ||
| 194 | 249 | ||
| 195 | /** | 250 | /** |
| 196 | * match_strlcpy: - Copy the characters from a substring_t to a sized buffer | 251 | * match_strlcpy: - Copy the characters from a substring_t to a sized buffer |
| @@ -213,6 +268,7 @@ size_t match_strlcpy(char *dest, const substring_t *src, size_t size) | |||
| 213 | } | 268 | } |
| 214 | return ret; | 269 | return ret; |
| 215 | } | 270 | } |
| 271 | EXPORT_SYMBOL(match_strlcpy); | ||
| 216 | 272 | ||
| 217 | /** | 273 | /** |
| 218 | * match_strdup: - allocate a new string with the contents of a substring_t | 274 | * match_strdup: - allocate a new string with the contents of a substring_t |
| @@ -230,10 +286,4 @@ char *match_strdup(const substring_t *s) | |||
| 230 | match_strlcpy(p, s, sz); | 286 | match_strlcpy(p, s, sz); |
| 231 | return p; | 287 | return p; |
| 232 | } | 288 | } |
| 233 | |||
| 234 | EXPORT_SYMBOL(match_token); | ||
| 235 | EXPORT_SYMBOL(match_int); | ||
| 236 | EXPORT_SYMBOL(match_octal); | ||
| 237 | EXPORT_SYMBOL(match_hex); | ||
| 238 | EXPORT_SYMBOL(match_strlcpy); | ||
| 239 | EXPORT_SYMBOL(match_strdup); | 289 | EXPORT_SYMBOL(match_strdup); |
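match_wildcard() is also what the dynamic_debug change earlier in this series switches to for module, file and function matching. A few hedged example calls illustrating the two wildcard types; the wrapper function is invented:

/*
 * Illustrative match_wildcard() calls; expected results follow the
 * semantics documented above.
 */
#include <linux/parser.h>
#include <linux/types.h>

static void wildcard_examples(void)
{
        bool a = match_wildcard("usb*", "usbcore");     /* true: '*' spans "core" */
        bool b = match_wildcard("usb?", "usb1");        /* true: '?' matches one char */
        bool c = match_wildcard("usb?", "usb");         /* false: '?' needs a char */

        (void)a; (void)b; (void)c;
}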
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 1a53d497a8c5..963b7034a51b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
| @@ -120,6 +120,9 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu) | |||
| 120 | 120 | ||
| 121 | atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count); | 121 | atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count); |
| 122 | 122 | ||
| 123 | WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)", | ||
| 124 | atomic_read(&ref->count)); | ||
| 125 | |||
| 123 | /* @ref is viewed as dead on all CPUs, send out kill confirmation */ | 126 | /* @ref is viewed as dead on all CPUs, send out kill confirmation */ |
| 124 | if (ref->confirm_kill) | 127 | if (ref->confirm_kill) |
| 125 | ref->confirm_kill(ref); | 128 | ref->confirm_kill(ref); |
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 9d054bf91d0f..93d145e5539c 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
| @@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr, | |||
| 54 | /* | 54 | /* |
| 55 | * Try to steal tags from a remote cpu's percpu freelist. | 55 | * Try to steal tags from a remote cpu's percpu freelist. |
| 56 | * | 56 | * |
| 57 | * We first check how many percpu freelists have tags - we don't steal tags | 57 | * We first check how many percpu freelists have tags |
| 58 | * unless enough percpu freelists have tags on them that it's possible more than | ||
| 59 | * half the total tags could be stuck on remote percpu freelists. | ||
| 60 | * | 58 | * |
| 61 | * Then we iterate through the cpus until we find some tags - we don't attempt | 59 | * Then we iterate through the cpus until we find some tags - we don't attempt |
| 62 | * to find the "best" cpu to steal from, to keep cacheline bouncing to a | 60 | * to find the "best" cpu to steal from, to keep cacheline bouncing to a |
| @@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool, | |||
| 69 | struct percpu_ida_cpu *remote; | 67 | struct percpu_ida_cpu *remote; |
| 70 | 68 | ||
| 71 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); | 69 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); |
| 72 | cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2; | 70 | cpus_have_tags; cpus_have_tags--) { |
| 73 | cpus_have_tags--) { | ||
| 74 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); | 71 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); |
| 75 | 72 | ||
| 76 | if (cpu >= nr_cpu_ids) { | 73 | if (cpu >= nr_cpu_ids) { |
| @@ -132,22 +129,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags) | |||
| 132 | /** | 129 | /** |
| 133 | * percpu_ida_alloc - allocate a tag | 130 | * percpu_ida_alloc - allocate a tag |
| 134 | * @pool: pool to allocate from | 131 | * @pool: pool to allocate from |
| 135 | * @gfp: gfp flags | 132 | * @state: task state for prepare_to_wait |
| 136 | * | 133 | * |
| 137 | * Returns a tag - an integer in the range [0..nr_tags) (passed to | 134 | * Returns a tag - an integer in the range [0..nr_tags) (passed to |
| 138 | * tag_pool_init()), or otherwise -ENOSPC on allocation failure. | 135 | * tag_pool_init()), or otherwise -ENOSPC on allocation failure. |
| 139 | * | 136 | * |
| 140 | * Safe to be called from interrupt context (assuming it isn't passed | 137 | * Safe to be called from interrupt context (assuming it isn't passed |
| 141 | * __GFP_WAIT, of course). | 138 | * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course). |
| 142 | * | 139 | * |
| 143 | * @gfp indicates whether or not to wait until a free id is available (it's not | 140 | * @state indicates whether or not to wait until a free id is available (it's not |
| 144 | * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep | 141 | * used for internal memory allocations); thus if passed a sleeping state we may sleep |
| 145 | * however long it takes until another thread frees an id (same semantics as a | 142 | * however long it takes until another thread frees an id (same semantics as a |
| 146 | * mempool). | 143 | * mempool). |
| 147 | * | 144 | * |
| 148 | * Will not fail if passed __GFP_WAIT. | 145 | * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE. |
| 149 | */ | 146 | */ |
| 150 | int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) | 147 | int percpu_ida_alloc(struct percpu_ida *pool, int state) |
| 151 | { | 148 | { |
| 152 | DEFINE_WAIT(wait); | 149 | DEFINE_WAIT(wait); |
| 153 | struct percpu_ida_cpu *tags; | 150 | struct percpu_ida_cpu *tags; |
| @@ -174,7 +171,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) | |||
| 174 | * | 171 | * |
| 175 | * global lock held and irqs disabled, don't need percpu lock | 172 | * global lock held and irqs disabled, don't need percpu lock |
| 176 | */ | 173 | */ |
| 177 | prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); | 174 | if (state != TASK_RUNNING) |
| 175 | prepare_to_wait(&pool->wait, &wait, state); | ||
| 178 | 176 | ||
| 179 | if (!tags->nr_free) | 177 | if (!tags->nr_free) |
| 180 | alloc_global_tags(pool, tags); | 178 | alloc_global_tags(pool, tags); |
| @@ -191,16 +189,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) | |||
| 191 | spin_unlock(&pool->lock); | 189 | spin_unlock(&pool->lock); |
| 192 | local_irq_restore(flags); | 190 | local_irq_restore(flags); |
| 193 | 191 | ||
| 194 | if (tag >= 0 || !(gfp & __GFP_WAIT)) | 192 | if (tag >= 0 || state == TASK_RUNNING) |
| 195 | break; | 193 | break; |
| 196 | 194 | ||
| 195 | if (signal_pending_state(state, current)) { | ||
| 196 | tag = -ERESTARTSYS; | ||
| 197 | break; | ||
| 198 | } | ||
| 199 | |||
| 197 | schedule(); | 200 | schedule(); |
| 198 | 201 | ||
| 199 | local_irq_save(flags); | 202 | local_irq_save(flags); |
| 200 | tags = this_cpu_ptr(pool->tag_cpu); | 203 | tags = this_cpu_ptr(pool->tag_cpu); |
| 201 | } | 204 | } |
| 205 | if (state != TASK_RUNNING) | ||
| 206 | finish_wait(&pool->wait, &wait); | ||
| 202 | 207 | ||
| 203 | finish_wait(&pool->wait, &wait); | ||
| 204 | return tag; | 208 | return tag; |
| 205 | } | 209 | } |
| 206 | EXPORT_SYMBOL_GPL(percpu_ida_alloc); | 210 | EXPORT_SYMBOL_GPL(percpu_ida_alloc); |
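
With the gfp_t parameter replaced by a task state, callers pick the blocking behaviour explicitly. A rough usage sketch (the pool variable is hypothetical; semantics follow the kernel-doc above):

    int tag;

    /* Sleeps until a tag is freed; never fails. */
    tag = percpu_ida_alloc(&pool, TASK_UNINTERRUPTIBLE);

    /* Sleeps, but a signal aborts the wait with -ERESTARTSYS. */
    tag = percpu_ida_alloc(&pool, TASK_INTERRUPTIBLE);
    if (tag == -ERESTARTSYS)
            return tag;

    /* Does not sleep; returns -ENOSPC right away if no tag is free. */
    tag = percpu_ida_alloc(&pool, TASK_RUNNING);
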
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index 31dd4ccd3baa..8b3c9dc88262 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c | |||
| @@ -8,8 +8,8 @@ | |||
| 8 | #define CHECK_LOOPS 100 | 8 | #define CHECK_LOOPS 100 |
| 9 | 9 | ||
| 10 | struct test_node { | 10 | struct test_node { |
| 11 | struct rb_node rb; | ||
| 12 | u32 key; | 11 | u32 key; |
| 12 | struct rb_node rb; | ||
| 13 | 13 | ||
| 14 | /* following fields used for testing augmented rbtree functionality */ | 14 | /* following fields used for testing augmented rbtree functionality */ |
| 15 | u32 val; | 15 | u32 val; |
| @@ -114,6 +114,16 @@ static int black_path_count(struct rb_node *rb) | |||
| 114 | return count; | 114 | return count; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | static void check_postorder_foreach(int nr_nodes) | ||
| 118 | { | ||
| 119 | struct test_node *cur, *n; | ||
| 120 | int count = 0; | ||
| 121 | rbtree_postorder_for_each_entry_safe(cur, n, &root, rb) | ||
| 122 | count++; | ||
| 123 | |||
| 124 | WARN_ON_ONCE(count != nr_nodes); | ||
| 125 | } | ||
| 126 | |||
| 117 | static void check_postorder(int nr_nodes) | 127 | static void check_postorder(int nr_nodes) |
| 118 | { | 128 | { |
| 119 | struct rb_node *rb; | 129 | struct rb_node *rb; |
| @@ -148,6 +158,7 @@ static void check(int nr_nodes) | |||
| 148 | WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); | 158 | WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); |
| 149 | 159 | ||
| 150 | check_postorder(nr_nodes); | 160 | check_postorder(nr_nodes); |
| 161 | check_postorder_foreach(nr_nodes); | ||
| 151 | } | 162 | } |
| 152 | 163 | ||
| 153 | static void check_augmented(int nr_nodes) | 164 | static void check_augmented(int nr_nodes) |
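
The new check exercises rbtree_postorder_for_each_entry_safe(); its typical use outside the test suite is tearing a tree down without per-node rebalancing. A sketch of that pattern for a tree of kmalloc()ed nodes with the same struct layout as the test above:

    struct test_node *cur, *n;

    /* Postorder visits children before parents and caches the next entry,
     * so freeing 'cur' inside the loop body is safe. */
    rbtree_postorder_for_each_entry_safe(cur, n, &root, rb)
            kfree(cur);
    root = RB_ROOT;
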
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c index 75510e94f7d0..464152410c51 100644 --- a/lib/reciprocal_div.c +++ b/lib/reciprocal_div.c | |||
| @@ -1,11 +1,27 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 1 | #include <asm/div64.h> | 2 | #include <asm/div64.h> |
| 2 | #include <linux/reciprocal_div.h> | 3 | #include <linux/reciprocal_div.h> |
| 3 | #include <linux/export.h> | 4 | #include <linux/export.h> |
| 4 | 5 | ||
| 5 | u32 reciprocal_value(u32 k) | 6 | /* |
| 7 | * For a description of the algorithm please have a look at | ||
| 8 | * include/linux/reciprocal_div.h | ||
| 9 | */ | ||
| 10 | |||
| 11 | struct reciprocal_value reciprocal_value(u32 d) | ||
| 6 | { | 12 | { |
| 7 | u64 val = (1LL << 32) + (k - 1); | 13 | struct reciprocal_value R; |
| 8 | do_div(val, k); | 14 | u64 m; |
| 9 | return (u32)val; | 15 | int l; |
| 16 | |||
| 17 | l = fls(d - 1); | ||
| 18 | m = ((1ULL << 32) * ((1ULL << l) - d)); | ||
| 19 | do_div(m, d); | ||
| 20 | ++m; | ||
| 21 | R.m = (u32)m; | ||
| 22 | R.sh1 = min(l, 1); | ||
| 23 | R.sh2 = max(l - 1, 0); | ||
| 24 | |||
| 25 | return R; | ||
| 10 | } | 26 | } |
| 11 | EXPORT_SYMBOL(reciprocal_value); | 27 | EXPORT_SYMBOL(reciprocal_value); |
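
The struct returned above feeds the divide step defined in include/linux/reciprocal_div.h; roughly as follows (a sketch from memory, not the verbatim header):

    static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
    {
            u32 t = (u32)(((u64)a * R.m) >> 32);

            return (t + ((a - t) >> R.sh1)) >> R.sh2;
    }

    /* Precompute once, divide many times: */
    struct reciprocal_value R = reciprocal_value(7);
    u32 q = reciprocal_divide(100, R);      /* q == 14 */
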
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index d16fa295ae1d..3a8e8e8fb2a5 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
| @@ -495,7 +495,7 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) | |||
| 495 | * true if @miter contains the valid mapping. false if end of sg | 495 | * true if @miter contains the valid mapping. false if end of sg |
| 496 | * list is reached. | 496 | * list is reached. |
| 497 | */ | 497 | */ |
| 498 | static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) | 498 | bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) |
| 499 | { | 499 | { |
| 500 | sg_miter_stop(miter); | 500 | sg_miter_stop(miter); |
| 501 | 501 | ||
| @@ -513,6 +513,7 @@ static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) | |||
| 513 | 513 | ||
| 514 | return true; | 514 | return true; |
| 515 | } | 515 | } |
| 516 | EXPORT_SYMBOL(sg_miter_skip); | ||
| 516 | 517 | ||
| 517 | /** | 518 | /** |
| 518 | * sg_miter_next - proceed mapping iterator to the next mapping | 519 | * sg_miter_next - proceed mapping iterator to the next mapping |
diff --git a/lib/show_mem.c b/lib/show_mem.c index 5847a4921b8e..09225796991a 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
| @@ -17,9 +17,6 @@ void show_mem(unsigned int filter) | |||
| 17 | printk("Mem-Info:\n"); | 17 | printk("Mem-Info:\n"); |
| 18 | show_free_areas(filter); | 18 | show_free_areas(filter); |
| 19 | 19 | ||
| 20 | if (filter & SHOW_MEM_FILTER_PAGE_COUNT) | ||
| 21 | return; | ||
| 22 | |||
| 23 | for_each_online_pgdat(pgdat) { | 20 | for_each_online_pgdat(pgdat) { |
| 24 | unsigned long flags; | 21 | unsigned long flags; |
| 25 | int zoneid; | 22 | int zoneid; |
| @@ -46,4 +43,7 @@ void show_mem(unsigned int filter) | |||
| 46 | printk("%lu pages in pagetable cache\n", | 43 | printk("%lu pages in pagetable cache\n", |
| 47 | quicklist_total_size()); | 44 | quicklist_total_size()); |
| 48 | #endif | 45 | #endif |
| 46 | #ifdef CONFIG_MEMORY_FAILURE | ||
| 47 | printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); | ||
| 48 | #endif | ||
| 49 | } | 49 | } |
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index e4399fa65ad6..b604b831f4d1 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -172,8 +172,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
| 172 | /* | 172 | /* |
| 173 | * Get the overflow emergency buffer | 173 | * Get the overflow emergency buffer |
| 174 | */ | 174 | */ |
| 175 | v_overflow_buffer = alloc_bootmem_low_pages_nopanic( | 175 | v_overflow_buffer = memblock_virt_alloc_low_nopanic( |
| 176 | PAGE_ALIGN(io_tlb_overflow)); | 176 | PAGE_ALIGN(io_tlb_overflow), |
| 177 | PAGE_SIZE); | ||
| 177 | if (!v_overflow_buffer) | 178 | if (!v_overflow_buffer) |
| 178 | return -ENOMEM; | 179 | return -ENOMEM; |
| 179 | 180 | ||
| @@ -184,11 +185,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
| 184 | * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE | 185 | * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE |
| 185 | * between io_tlb_start and io_tlb_end. | 186 | * between io_tlb_start and io_tlb_end. |
| 186 | */ | 187 | */ |
| 187 | io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); | 188 | io_tlb_list = memblock_virt_alloc( |
| 189 | PAGE_ALIGN(io_tlb_nslabs * sizeof(int)), | ||
| 190 | PAGE_SIZE); | ||
| 188 | for (i = 0; i < io_tlb_nslabs; i++) | 191 | for (i = 0; i < io_tlb_nslabs; i++) |
| 189 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); | 192 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); |
| 190 | io_tlb_index = 0; | 193 | io_tlb_index = 0; |
| 191 | io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); | 194 | io_tlb_orig_addr = memblock_virt_alloc( |
| 195 | PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)), | ||
| 196 | PAGE_SIZE); | ||
| 192 | 197 | ||
| 193 | if (verbose) | 198 | if (verbose) |
| 194 | swiotlb_print_info(); | 199 | swiotlb_print_info(); |
| @@ -215,13 +220,13 @@ swiotlb_init(int verbose) | |||
| 215 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; | 220 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; |
| 216 | 221 | ||
| 217 | /* Get IO TLB memory from the low pages */ | 222 | /* Get IO TLB memory from the low pages */ |
| 218 | vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes)); | 223 | vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE); |
| 219 | if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) | 224 | if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) |
| 220 | return; | 225 | return; |
| 221 | 226 | ||
| 222 | if (io_tlb_start) | 227 | if (io_tlb_start) |
| 223 | free_bootmem(io_tlb_start, | 228 | memblock_free_early(io_tlb_start, |
| 224 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | 229 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
| 225 | pr_warn("Cannot allocate SWIOTLB buffer"); | 230 | pr_warn("Cannot allocate SWIOTLB buffer"); |
| 226 | no_iotlb_memory = true; | 231 | no_iotlb_memory = true; |
| 227 | } | 232 | } |
| @@ -357,14 +362,14 @@ void __init swiotlb_free(void) | |||
| 357 | free_pages((unsigned long)phys_to_virt(io_tlb_start), | 362 | free_pages((unsigned long)phys_to_virt(io_tlb_start), |
| 358 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); | 363 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); |
| 359 | } else { | 364 | } else { |
| 360 | free_bootmem_late(io_tlb_overflow_buffer, | 365 | memblock_free_late(io_tlb_overflow_buffer, |
| 361 | PAGE_ALIGN(io_tlb_overflow)); | 366 | PAGE_ALIGN(io_tlb_overflow)); |
| 362 | free_bootmem_late(__pa(io_tlb_orig_addr), | 367 | memblock_free_late(__pa(io_tlb_orig_addr), |
| 363 | PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); | 368 | PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); |
| 364 | free_bootmem_late(__pa(io_tlb_list), | 369 | memblock_free_late(__pa(io_tlb_list), |
| 365 | PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); | 370 | PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); |
| 366 | free_bootmem_late(io_tlb_start, | 371 | memblock_free_late(io_tlb_start, |
| 367 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | 372 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
| 368 | } | 373 | } |
| 369 | io_tlb_nslabs = 0; | 374 | io_tlb_nslabs = 0; |
| 370 | } | 375 | } |
| @@ -505,7 +510,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, | |||
| 505 | 510 | ||
| 506 | not_found: | 511 | not_found: |
| 507 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 512 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
| 508 | dev_warn(hwdev, "swiotlb buffer is full\n"); | 513 | if (printk_ratelimit()) |
| 514 | dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size); | ||
| 509 | return SWIOTLB_MAP_ERROR; | 515 | return SWIOTLB_MAP_ERROR; |
| 510 | found: | 516 | found: |
| 511 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 517 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
diff --git a/lib/test_module.c b/lib/test_module.c new file mode 100644 index 000000000000..319b66f1ff61 --- /dev/null +++ b/lib/test_module.c | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | /* | ||
| 2 | * This module emits "Hello, world" on printk when loaded. | ||
| 3 | * | ||
| 4 | * It is designed to be used for basic evaluation of the module loading | ||
| 5 | * subsystem (for example when validating module signing/verification). It | ||
| 6 | * lacks any extra dependencies, and will not normally be loaded by the | ||
| 7 | * system unless explicitly requested by name. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 11 | |||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/module.h> | ||
| 14 | #include <linux/printk.h> | ||
| 15 | |||
| 16 | static int __init test_module_init(void) | ||
| 17 | { | ||
| 18 | pr_warn("Hello, world\n"); | ||
| 19 | |||
| 20 | return 0; | ||
| 21 | } | ||
| 22 | |||
| 23 | module_init(test_module_init); | ||
| 24 | |||
| 25 | static void __exit test_module_exit(void) | ||
| 26 | { | ||
| 27 | pr_warn("Goodbye\n"); | ||
| 28 | } | ||
| 29 | |||
| 30 | module_exit(test_module_exit); | ||
| 31 | |||
| 32 | MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); | ||
| 33 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c new file mode 100644 index 000000000000..0ecef3e4690e --- /dev/null +++ b/lib/test_user_copy.c | |||
| @@ -0,0 +1,110 @@ | |||
| 1 | /* | ||
| 2 | * Kernel module for testing copy_to/from_user infrastructure. | ||
| 3 | * | ||
| 4 | * Copyright 2013 Google Inc. All Rights Reserved | ||
| 5 | * | ||
| 6 | * Authors: | ||
| 7 | * Kees Cook <keescook@chromium.org> | ||
| 8 | * | ||
| 9 | * This software is licensed under the terms of the GNU General Public | ||
| 10 | * License version 2, as published by the Free Software Foundation, and | ||
| 11 | * may be copied, distributed, and modified under those terms. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 20 | |||
| 21 | #include <linux/mman.h> | ||
| 22 | #include <linux/module.h> | ||
| 23 | #include <linux/sched.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/uaccess.h> | ||
| 26 | #include <linux/vmalloc.h> | ||
| 27 | |||
| 28 | #define test(condition, msg) \ | ||
| 29 | ({ \ | ||
| 30 | int cond = (condition); \ | ||
| 31 | if (cond) \ | ||
| 32 | pr_warn("%s\n", msg); \ | ||
| 33 | cond; \ | ||
| 34 | }) | ||
| 35 | |||
| 36 | static int __init test_user_copy_init(void) | ||
| 37 | { | ||
| 38 | int ret = 0; | ||
| 39 | char *kmem; | ||
| 40 | char __user *usermem; | ||
| 41 | char *bad_usermem; | ||
| 42 | unsigned long user_addr; | ||
| 43 | unsigned long value = 0x5A; | ||
| 44 | |||
| 45 | kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); | ||
| 46 | if (!kmem) | ||
| 47 | return -ENOMEM; | ||
| 48 | |||
| 49 | user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2, | ||
| 50 | PROT_READ | PROT_WRITE | PROT_EXEC, | ||
| 51 | MAP_ANONYMOUS | MAP_PRIVATE, 0); | ||
| 52 | if (user_addr >= (unsigned long)(TASK_SIZE)) { | ||
| 53 | pr_warn("Failed to allocate user memory\n"); | ||
| 54 | kfree(kmem); | ||
| 55 | return -ENOMEM; | ||
| 56 | } | ||
| 57 | |||
| 58 | usermem = (char __user *)user_addr; | ||
| 59 | bad_usermem = (char *)user_addr; | ||
| 60 | |||
| 61 | /* Legitimate usage: none of these should fail. */ | ||
| 62 | ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE), | ||
| 63 | "legitimate copy_from_user failed"); | ||
| 64 | ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE), | ||
| 65 | "legitimate copy_to_user failed"); | ||
| 66 | ret |= test(get_user(value, (unsigned long __user *)usermem), | ||
| 67 | "legitimate get_user failed"); | ||
| 68 | ret |= test(put_user(value, (unsigned long __user *)usermem), | ||
| 69 | "legitimate put_user failed"); | ||
| 70 | |||
| 71 | /* Invalid usage: none of these should succeed. */ | ||
| 72 | ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), | ||
| 73 | PAGE_SIZE), | ||
| 74 | "illegal all-kernel copy_from_user passed"); | ||
| 75 | ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem, | ||
| 76 | PAGE_SIZE), | ||
| 77 | "illegal reversed copy_from_user passed"); | ||
| 78 | ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE, | ||
| 79 | PAGE_SIZE), | ||
| 80 | "illegal all-kernel copy_to_user passed"); | ||
| 81 | ret |= test(!copy_to_user((char __user *)kmem, bad_usermem, | ||
| 82 | PAGE_SIZE), | ||
| 83 | "illegal reversed copy_to_user passed"); | ||
| 84 | ret |= test(!get_user(value, (unsigned long __user *)kmem), | ||
| 85 | "illegal get_user passed"); | ||
| 86 | ret |= test(!put_user(value, (unsigned long __user *)kmem), | ||
| 87 | "illegal put_user passed"); | ||
| 88 | |||
| 89 | vm_munmap(user_addr, PAGE_SIZE * 2); | ||
| 90 | kfree(kmem); | ||
| 91 | |||
| 92 | if (ret == 0) { | ||
| 93 | pr_info("tests passed.\n"); | ||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | |||
| 97 | return -EINVAL; | ||
| 98 | } | ||
| 99 | |||
| 100 | module_init(test_user_copy_init); | ||
| 101 | |||
| 102 | static void __exit test_user_copy_exit(void) | ||
| 103 | { | ||
| 104 | pr_info("unloaded.\n"); | ||
| 105 | } | ||
| 106 | |||
| 107 | module_exit(test_user_copy_exit); | ||
| 108 | |||
| 109 | MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); | ||
| 110 | MODULE_LICENSE("GPL"); | ||
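
These checks rely on the uaccess return conventions: copy_to_user()/copy_from_user() return the number of bytes left uncopied (0 on success), while get_user()/put_user() return 0 or -EFAULT. A condensed illustration of what the test() macro above is asserting, reusing the module's own kmem/usermem variables:

    /* Legitimate direction: must copy everything, i.e. return 0. */
    if (copy_from_user(kmem, usermem, PAGE_SIZE) != 0)
            pr_warn("unexpected fault on a valid user pointer\n");

    /* Kernel address passed as __user: must fault, i.e. return non-zero. */
    if (copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), PAGE_SIZE) == 0)
            pr_warn("kernel address wrongly accepted as a user pointer\n");
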
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 10909c571494..185b6d300ebc 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -1155,6 +1155,30 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr, | |||
| 1155 | return number(buf, end, *(const netdev_features_t *)addr, spec); | 1155 | return number(buf, end, *(const netdev_features_t *)addr, spec); |
| 1156 | } | 1156 | } |
| 1157 | 1157 | ||
| 1158 | static noinline_for_stack | ||
| 1159 | char *address_val(char *buf, char *end, const void *addr, | ||
| 1160 | struct printf_spec spec, const char *fmt) | ||
| 1161 | { | ||
| 1162 | unsigned long long num; | ||
| 1163 | |||
| 1164 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | ||
| 1165 | spec.base = 16; | ||
| 1166 | |||
| 1167 | switch (fmt[1]) { | ||
| 1168 | case 'd': | ||
| 1169 | num = *(const dma_addr_t *)addr; | ||
| 1170 | spec.field_width = sizeof(dma_addr_t) * 2 + 2; | ||
| 1171 | break; | ||
| 1172 | case 'p': | ||
| 1173 | default: | ||
| 1174 | num = *(const phys_addr_t *)addr; | ||
| 1175 | spec.field_width = sizeof(phys_addr_t) * 2 + 2; | ||
| 1176 | break; | ||
| 1177 | } | ||
| 1178 | |||
| 1179 | return number(buf, end, num, spec); | ||
| 1180 | } | ||
| 1181 | |||
| 1158 | int kptr_restrict __read_mostly; | 1182 | int kptr_restrict __read_mostly; |
| 1159 | 1183 | ||
| 1160 | /* | 1184 | /* |
| @@ -1218,7 +1242,8 @@ int kptr_restrict __read_mostly; | |||
| 1218 | * N no separator | 1242 | * N no separator |
| 1219 | * The maximum supported length is 64 bytes of the input. Consider | 1243 | * The maximum supported length is 64 bytes of the input. Consider |
| 1220 | * to use print_hex_dump() for the larger input. | 1244 | * to use print_hex_dump() for the larger input. |
| 1221 | * - 'a' For a phys_addr_t type and its derivative types (passed by reference) | 1245 | * - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives |
| 1246 | * (default assumed to be phys_addr_t, passed by reference) | ||
| 1222 | * - 'd[234]' For a dentry name (optionally 2-4 last components) | 1247 | * - 'd[234]' For a dentry name (optionally 2-4 last components) |
| 1223 | * - 'D[234]' Same as 'd' but for a struct file | 1248 | * - 'D[234]' Same as 'd' but for a struct file |
| 1224 | * | 1249 | * |
| @@ -1353,11 +1378,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 1353 | } | 1378 | } |
| 1354 | break; | 1379 | break; |
| 1355 | case 'a': | 1380 | case 'a': |
| 1356 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | 1381 | return address_val(buf, end, ptr, spec, fmt); |
| 1357 | spec.field_width = sizeof(phys_addr_t) * 2 + 2; | ||
| 1358 | spec.base = 16; | ||
| 1359 | return number(buf, end, | ||
| 1360 | (unsigned long long) *((phys_addr_t *)ptr), spec); | ||
| 1361 | case 'd': | 1382 | case 'd': |
| 1362 | return dentry_name(buf, end, ptr, spec, fmt); | 1383 | return dentry_name(buf, end, ptr, spec, fmt); |
| 1363 | case 'D': | 1384 | case 'D': |
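
With the new %pa[pd] handling, physical and DMA addresses are both printed by reference and zero-padded to the width of their type; usage is along these lines (variable names are illustrative):

    phys_addr_t phys = 0x1000;
    dma_addr_t dma = 0x2000;

    pr_info("phys %pa, dma %pad\n", &phys, &dma);
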
