Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Kconfig.debug      |  48 |
| -rw-r--r-- | lib/Makefile           |   4 |
| -rw-r--r-- | lib/assoc_array.c      |   6 |
| -rw-r--r-- | lib/average.c          |   6 |
| -rw-r--r-- | lib/cmdline.c          |  14 |
| -rw-r--r-- | lib/cpumask.c          |   4 |
| -rw-r--r-- | lib/decompress_unlz4.c |   1 |
| -rw-r--r-- | lib/dma-debug.c        | 193 |
| -rw-r--r-- | lib/dynamic_debug.c    |  15 |
| -rw-r--r-- | lib/flex_array.c       |   7 |
| -rw-r--r-- | lib/hash.c             |  39 |
| -rw-r--r-- | lib/kobject.c          |  95 |
| -rw-r--r-- | lib/kobject_uevent.c   |  10 |
| -rw-r--r-- | lib/kstrtox.c          |   1 |
| -rw-r--r-- | lib/lockref.c          |   9 |
| -rw-r--r-- | lib/parser.c           |  62 |
| -rw-r--r-- | lib/percpu-refcount.c  |   3 |
| -rw-r--r-- | lib/percpu_counter.c   |   4 |
| -rw-r--r-- | lib/rbtree_test.c      |  13 |
| -rw-r--r-- | lib/reciprocal_div.c   |  24 |
| -rw-r--r-- | lib/scatterlist.c      |   3 |
| -rw-r--r-- | lib/show_mem.c         |   6 |
| -rw-r--r-- | lib/swiotlb.c          |  35 |
| -rw-r--r-- | lib/test_module.c      |  33 |
| -rw-r--r-- | lib/test_user_copy.c   | 110 |
| -rw-r--r-- | lib/vsprintf.c         |  33 |
26 files changed, 631 insertions, 147 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index db25707aa41b..e0e2eebf7ab3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -761,6 +761,15 @@ config PANIC_ON_OOPS_VALUE | |||
| 761 | default 0 if !PANIC_ON_OOPS | 761 | default 0 if !PANIC_ON_OOPS |
| 762 | default 1 if PANIC_ON_OOPS | 762 | default 1 if PANIC_ON_OOPS |
| 763 | 763 | ||
| 764 | config PANIC_TIMEOUT | ||
| 765 | int "panic timeout" | ||
| 766 | default 0 | ||
| 767 | help | ||
| 768 | Set the timeout value (in seconds) until a reboot occurs when | ||
| 769 | the kernel panics. If n = 0, then we wait forever. A timeout | ||
| 770 | value n > 0 will wait n seconds before rebooting, while a timeout | ||
| 771 | value n < 0 will reboot immediately. | ||
| 772 | |||
| 764 | config SCHED_DEBUG | 773 | config SCHED_DEBUG |
| 765 | bool "Collect scheduler debugging info" | 774 | bool "Collect scheduler debugging info" |
| 766 | depends on DEBUG_KERNEL && PROC_FS | 775 | depends on DEBUG_KERNEL && PROC_FS |
| @@ -1575,8 +1584,43 @@ config DMA_API_DEBUG | |||
| 1575 | With this option you will be able to detect common bugs in device | 1584 | With this option you will be able to detect common bugs in device |
| 1576 | drivers like double-freeing of DMA mappings or freeing mappings that | 1585 | drivers like double-freeing of DMA mappings or freeing mappings that |
| 1577 | were never allocated. | 1586 | were never allocated. |
| 1578 | This option causes a performance degredation. Use only if you want | 1587 | |
| 1579 | to debug device drivers. If unsure, say N. | 1588 | This also attempts to catch cases where a page owned by DMA is |
| 1589 | accessed by the cpu in a way that could cause data corruption. For | ||
| 1590 | example, this enables cow_user_page() to check that the source page is | ||
| 1591 | not undergoing DMA. | ||
| 1592 | |||
| 1593 | This option causes a performance degradation. Use only if you want to | ||
| 1594 | debug device drivers and dma interactions. | ||
| 1595 | |||
| 1596 | If unsure, say N. | ||
| 1597 | |||
| 1598 | config TEST_MODULE | ||
| 1599 | tristate "Test module loading with 'hello world' module" | ||
| 1600 | default n | ||
| 1601 | depends on m | ||
| 1602 | help | ||
| 1603 | This builds the "test_module" module that emits "Hello, world" | ||
| 1604 | on printk when loaded. It is designed to be used for basic | ||
| 1605 | evaluation of the module loading subsystem (for example when | ||
| 1606 | validating module verification). It lacks any extra dependencies, | ||
| 1607 | and will not normally be loaded by the system unless explicitly | ||
| 1608 | requested by name. | ||
| 1609 | |||
| 1610 | If unsure, say N. | ||
| 1611 | |||
| 1612 | config TEST_USER_COPY | ||
| 1613 | tristate "Test user/kernel boundary protections" | ||
| 1614 | default n | ||
| 1615 | depends on m | ||
| 1616 | help | ||
| 1617 | This builds the "test_user_copy" module that runs sanity checks | ||
| 1618 | on the copy_to/from_user infrastructure, making sure basic | ||
| 1619 | user/kernel boundary testing is working. If it fails to load, | ||
| 1620 | a regression has been detected in the user/kernel memory boundary | ||
| 1621 | protections. | ||
| 1622 | |||
| 1623 | If unsure, say N. | ||
| 1580 | 1624 | ||
| 1581 | source "samples/Kconfig" | 1625 | source "samples/Kconfig" |
| 1582 | 1626 | ||
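A note on the TEST_MODULE entry above: the module it builds (lib/test_module.c, listed in the diffstat but not shown in this hunk) is a minimal "hello world" module. As an illustration only, not the actual file contents, such a module conventionally looks like this:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Illustrative sketch of the kind of minimal module the TEST_MODULE
 * help text describes; the real lib/test_module.c is not shown in
 * this section. */
static int __init test_module_init(void)
{
	pr_warn("Hello, world\n");
	return 0;
}
module_init(test_module_init);

static void __exit test_module_exit(void)
{
	pr_warn("Goodbye\n");
}
module_exit(test_module_exit);

MODULE_LICENSE("GPL");

Loading it by name and checking the kernel log for the greeting is the basic module-loading smoke test the help text refers to.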
diff --git a/lib/Makefile b/lib/Makefile
index a459c31e8c6b..126b34f2eb16 100644
--- a/lib/Makefile
+++ b/lib/Makefile
| @@ -26,11 +26,13 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | |||
| 26 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 26 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
| 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ | 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ |
| 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ | 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ |
| 29 | percpu-refcount.o percpu_ida.o | 29 | percpu-refcount.o percpu_ida.o hash.o |
| 30 | obj-y += string_helpers.o | 30 | obj-y += string_helpers.o |
| 31 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | 31 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o |
| 32 | obj-y += kstrtox.o | 32 | obj-y += kstrtox.o |
| 33 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 33 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
| 34 | obj-$(CONFIG_TEST_MODULE) += test_module.o | ||
| 35 | obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o | ||
| 34 | 36 | ||
| 35 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 37 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
| 36 | CFLAGS_kobject.o += -DDEBUG | 38 | CFLAGS_kobject.o += -DDEBUG |
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 17edeaf19180..c0b1007011e1 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
| @@ -157,7 +157,7 @@ enum assoc_array_walk_status { | |||
| 157 | assoc_array_walk_tree_empty, | 157 | assoc_array_walk_tree_empty, |
| 158 | assoc_array_walk_found_terminal_node, | 158 | assoc_array_walk_found_terminal_node, |
| 159 | assoc_array_walk_found_wrong_shortcut, | 159 | assoc_array_walk_found_wrong_shortcut, |
| 160 | } status; | 160 | }; |
| 161 | 161 | ||
| 162 | struct assoc_array_walk_result { | 162 | struct assoc_array_walk_result { |
| 163 | struct { | 163 | struct { |
| @@ -759,8 +759,8 @@ all_leaves_cluster_together: | |||
| 759 | pr_devel("all leaves cluster together\n"); | 759 | pr_devel("all leaves cluster together\n"); |
| 760 | diff = INT_MAX; | 760 | diff = INT_MAX; |
| 761 | for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { | 761 | for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) { |
| 762 | int x = ops->diff_objects(assoc_array_ptr_to_leaf(edit->leaf), | 762 | int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]), |
| 763 | assoc_array_ptr_to_leaf(node->slots[i])); | 763 | index_key); |
| 764 | if (x < diff) { | 764 | if (x < diff) { |
| 765 | BUG_ON(x < 0); | 765 | BUG_ON(x < 0); |
| 766 | diff = x; | 766 | diff = x; |
diff --git a/lib/average.c b/lib/average.c
index 99a67e662b3c..114d1beae0c7 100644
--- a/lib/average.c
+++ b/lib/average.c
| @@ -53,8 +53,10 @@ EXPORT_SYMBOL(ewma_init); | |||
| 53 | */ | 53 | */ |
| 54 | struct ewma *ewma_add(struct ewma *avg, unsigned long val) | 54 | struct ewma *ewma_add(struct ewma *avg, unsigned long val) |
| 55 | { | 55 | { |
| 56 | avg->internal = avg->internal ? | 56 | unsigned long internal = ACCESS_ONCE(avg->internal); |
| 57 | (((avg->internal << avg->weight) - avg->internal) + | 57 | |
| 58 | ACCESS_ONCE(avg->internal) = internal ? | ||
| 59 | (((internal << avg->weight) - internal) + | ||
| 58 | (val << avg->factor)) >> avg->weight : | 60 | (val << avg->factor)) >> avg->weight : |
| 59 | (val << avg->factor); | 61 | (val << avg->factor); |
| 60 | return avg; | 62 | return avg; |
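On the ewma_add() change above: the fix takes one snapshot of avg->internal via ACCESS_ONCE() and writes the result back the same way, so the value is not re-read mid-expression. A small user-space model of the fixed-point update, assuming factor and weight are log2 values as in the kernel's struct ewma, illustration only:

#include <stdio.h>

/* User-space model of ewma_add(): 'internal' is the average scaled by
 * 2^factor, and each sample is blended in with weight 1/2^weight.
 * The single local 'internal' mirrors the ACCESS_ONCE() snapshot. */
struct ewma_model {
	unsigned long internal;
	unsigned long factor;	/* log2 of the fixed-point scaling */
	unsigned long weight;	/* log2 of the averaging weight */
};

static void ewma_model_add(struct ewma_model *avg, unsigned long val)
{
	unsigned long internal = avg->internal;	/* one snapshot */

	avg->internal = internal ?
		(((internal << avg->weight) - internal) +
			(val << avg->factor)) >> avg->weight :
		(val << avg->factor);
}

int main(void)
{
	struct ewma_model avg = { .internal = 0, .factor = 3, .weight = 2 };
	unsigned long samples[] = { 8, 8, 16, 16 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		ewma_model_add(&avg, samples[i]);
	printf("avg = %lu\n", avg.internal >> avg.factor);	/* 11 for these samples */
	return 0;
}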
diff --git a/lib/cmdline.c b/lib/cmdline.c
index eb6791188cf5..d4932f745e92 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
| @@ -49,13 +49,13 @@ static int get_range(char **str, int *pint) | |||
| 49 | * 3 - hyphen found to denote a range | 49 | * 3 - hyphen found to denote a range |
| 50 | */ | 50 | */ |
| 51 | 51 | ||
| 52 | int get_option (char **str, int *pint) | 52 | int get_option(char **str, int *pint) |
| 53 | { | 53 | { |
| 54 | char *cur = *str; | 54 | char *cur = *str; |
| 55 | 55 | ||
| 56 | if (!cur || !(*cur)) | 56 | if (!cur || !(*cur)) |
| 57 | return 0; | 57 | return 0; |
| 58 | *pint = simple_strtol (cur, str, 0); | 58 | *pint = simple_strtol(cur, str, 0); |
| 59 | if (cur == *str) | 59 | if (cur == *str) |
| 60 | return 0; | 60 | return 0; |
| 61 | if (**str == ',') { | 61 | if (**str == ',') { |
| @@ -67,6 +67,7 @@ int get_option (char **str, int *pint) | |||
| 67 | 67 | ||
| 68 | return 1; | 68 | return 1; |
| 69 | } | 69 | } |
| 70 | EXPORT_SYMBOL(get_option); | ||
| 70 | 71 | ||
| 71 | /** | 72 | /** |
| 72 | * get_options - Parse a string into a list of integers | 73 | * get_options - Parse a string into a list of integers |
| @@ -84,13 +85,13 @@ int get_option (char **str, int *pint) | |||
| 84 | * the parse to end (typically a null terminator, if @str is | 85 | * the parse to end (typically a null terminator, if @str is |
| 85 | * completely parseable). | 86 | * completely parseable). |
| 86 | */ | 87 | */ |
| 87 | 88 | ||
| 88 | char *get_options(const char *str, int nints, int *ints) | 89 | char *get_options(const char *str, int nints, int *ints) |
| 89 | { | 90 | { |
| 90 | int res, i = 1; | 91 | int res, i = 1; |
| 91 | 92 | ||
| 92 | while (i < nints) { | 93 | while (i < nints) { |
| 93 | res = get_option ((char **)&str, ints + i); | 94 | res = get_option((char **)&str, ints + i); |
| 94 | if (res == 0) | 95 | if (res == 0) |
| 95 | break; | 96 | break; |
| 96 | if (res == 3) { | 97 | if (res == 3) { |
| @@ -112,6 +113,7 @@ char *get_options(const char *str, int nints, int *ints) | |||
| 112 | ints[0] = i - 1; | 113 | ints[0] = i - 1; |
| 113 | return (char *)str; | 114 | return (char *)str; |
| 114 | } | 115 | } |
| 116 | EXPORT_SYMBOL(get_options); | ||
| 115 | 117 | ||
| 116 | /** | 118 | /** |
| 117 | * memparse - parse a string with mem suffixes into a number | 119 | * memparse - parse a string with mem suffixes into a number |
| @@ -152,8 +154,4 @@ unsigned long long memparse(const char *ptr, char **retptr) | |||
| 152 | 154 | ||
| 153 | return ret; | 155 | return ret; |
| 154 | } | 156 | } |
| 155 | |||
| 156 | |||
| 157 | EXPORT_SYMBOL(memparse); | 157 | EXPORT_SYMBOL(memparse); |
| 158 | EXPORT_SYMBOL(get_option); | ||
| 159 | EXPORT_SYMBOL(get_options); | ||
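The cmdline.c hunks above are mostly style fixes plus moving the EXPORT_SYMBOL() lines next to their functions, but the get_option()/get_options() conventions (0 = no number, 1 = number, 2 = number followed by comma, 3 = start of a range) are easy to get wrong. A hedged sketch of a hypothetical in-kernel caller, illustration only:

#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical helper: parse a list such as "1-3,8" into an array.
 * Per the get_options() kerneldoc, ints[0] receives the number of
 * values actually parsed and ints[1..] hold the values; ranges like
 * "1-3" are expanded by the function itself. */
static int parse_id_list(const char *arg)
{
	int ints[9];	/* up to 8 values plus the leading count */
	char *rest;
	int i;

	rest = get_options(arg, ARRAY_SIZE(ints), ints);
	if (*rest)
		pr_warn("unparsed tail after id list: \"%s\"\n", rest);

	for (i = 1; i <= ints[0]; i++)
		pr_info("id[%d] = %d\n", i, ints[i]);

	return ints[0];
}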
diff --git a/lib/cpumask.c b/lib/cpumask.c
index d327b87c99b7..b810b753c607 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
| @@ -140,7 +140,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var); | |||
| 140 | */ | 140 | */ |
| 141 | void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) | 141 | void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) |
| 142 | { | 142 | { |
| 143 | *mask = alloc_bootmem(cpumask_size()); | 143 | *mask = memblock_virt_alloc(cpumask_size(), 0); |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | /** | 146 | /** |
| @@ -161,6 +161,6 @@ EXPORT_SYMBOL(free_cpumask_var); | |||
| 161 | */ | 161 | */ |
| 162 | void __init free_bootmem_cpumask_var(cpumask_var_t mask) | 162 | void __init free_bootmem_cpumask_var(cpumask_var_t mask) |
| 163 | { | 163 | { |
| 164 | free_bootmem(__pa(mask), cpumask_size()); | 164 | memblock_free_early(__pa(mask), cpumask_size()); |
| 165 | } | 165 | } |
| 166 | #endif | 166 | #endif |
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 3e67cfad16ad..7d1e83caf8ad 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
| @@ -141,6 +141,7 @@ STATIC inline int INIT unlz4(u8 *input, int in_len, | |||
| 141 | goto exit_2; | 141 | goto exit_2; |
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | ret = -1; | ||
| 144 | if (flush && flush(outp, dest_len) != dest_len) | 145 | if (flush && flush(outp, dest_len) != dest_len) |
| 145 | goto exit_2; | 146 | goto exit_2; |
| 146 | if (output) | 147 | if (output) |
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d87a17a819d0..c38083871f11 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
| @@ -53,11 +53,26 @@ enum map_err_types { | |||
| 53 | 53 | ||
| 54 | #define DMA_DEBUG_STACKTRACE_ENTRIES 5 | 54 | #define DMA_DEBUG_STACKTRACE_ENTRIES 5 |
| 55 | 55 | ||
| 56 | /** | ||
| 57 | * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping | ||
| 58 | * @list: node on pre-allocated free_entries list | ||
| 59 | * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent | ||
| 60 | * @type: single, page, sg, coherent | ||
| 61 | * @pfn: page frame of the start address | ||
| 62 | * @offset: offset of mapping relative to pfn | ||
| 63 | * @size: length of the mapping | ||
| 64 | * @direction: enum dma_data_direction | ||
| 65 | * @sg_call_ents: 'nents' from dma_map_sg | ||
| 66 | * @sg_mapped_ents: 'mapped_ents' from dma_map_sg | ||
| 67 | * @map_err_type: track whether dma_mapping_error() was checked | ||
| 68 | * @stacktrace: support backtraces when a violation is detected | ||
| 69 | */ | ||
| 56 | struct dma_debug_entry { | 70 | struct dma_debug_entry { |
| 57 | struct list_head list; | 71 | struct list_head list; |
| 58 | struct device *dev; | 72 | struct device *dev; |
| 59 | int type; | 73 | int type; |
| 60 | phys_addr_t paddr; | 74 | unsigned long pfn; |
| 75 | size_t offset; | ||
| 61 | u64 dev_addr; | 76 | u64 dev_addr; |
| 62 | u64 size; | 77 | u64 size; |
| 63 | int direction; | 78 | int direction; |
| @@ -372,6 +387,11 @@ static void hash_bucket_del(struct dma_debug_entry *entry) | |||
| 372 | list_del(&entry->list); | 387 | list_del(&entry->list); |
| 373 | } | 388 | } |
| 374 | 389 | ||
| 390 | static unsigned long long phys_addr(struct dma_debug_entry *entry) | ||
| 391 | { | ||
| 392 | return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; | ||
| 393 | } | ||
| 394 | |||
| 375 | /* | 395 | /* |
| 376 | * Dump mapping entries for debugging purposes | 396 | * Dump mapping entries for debugging purposes |
| 377 | */ | 397 | */ |
| @@ -389,9 +409,9 @@ void debug_dma_dump_mappings(struct device *dev) | |||
| 389 | list_for_each_entry(entry, &bucket->list, list) { | 409 | list_for_each_entry(entry, &bucket->list, list) { |
| 390 | if (!dev || dev == entry->dev) { | 410 | if (!dev || dev == entry->dev) { |
| 391 | dev_info(entry->dev, | 411 | dev_info(entry->dev, |
| 392 | "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n", | 412 | "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n", |
| 393 | type2name[entry->type], idx, | 413 | type2name[entry->type], idx, |
| 394 | (unsigned long long)entry->paddr, | 414 | phys_addr(entry), entry->pfn, |
| 395 | entry->dev_addr, entry->size, | 415 | entry->dev_addr, entry->size, |
| 396 | dir2name[entry->direction], | 416 | dir2name[entry->direction], |
| 397 | maperr2str[entry->map_err_type]); | 417 | maperr2str[entry->map_err_type]); |
| @@ -404,6 +424,133 @@ void debug_dma_dump_mappings(struct device *dev) | |||
| 404 | EXPORT_SYMBOL(debug_dma_dump_mappings); | 424 | EXPORT_SYMBOL(debug_dma_dump_mappings); |
| 405 | 425 | ||
| 406 | /* | 426 | /* |
| 427 | * For each page mapped (initial page in the case of | ||
| 428 | * dma_alloc_coherent/dma_map_{single|page}, or each page in a | ||
| 429 | * scatterlist) insert into this tree using the pfn as the key. At | ||
| 430 | * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If | ||
| 431 | * the pfn already exists at insertion time add a tag as a reference | ||
| 432 | * count for the overlapping mappings. For now, the overlap tracking | ||
| 433 | * just ensures that 'unmaps' balance 'maps' before marking the pfn | ||
| 434 | * idle, but we should also be flagging overlaps as an API violation. | ||
| 435 | * | ||
| 436 | * Memory usage is mostly constrained by the maximum number of available | ||
| 437 | * dma-debug entries in that we need a free dma_debug_entry before | ||
| 438 | * inserting into the tree. In the case of dma_map_{single|page} and | ||
| 439 | * dma_alloc_coherent there is only one dma_debug_entry and one pfn to | ||
| 440 | * track per event. dma_map_sg(), on the other hand, | ||
| 441 | * consumes a single dma_debug_entry, but inserts 'nents' entries into | ||
| 442 | * the tree. | ||
| 443 | * | ||
| 444 | * At any time debug_dma_assert_idle() can be called to trigger a | ||
| 445 | * warning if the given page is in the active set. | ||
| 446 | */ | ||
| 447 | static RADIX_TREE(dma_active_pfn, GFP_NOWAIT); | ||
| 448 | static DEFINE_SPINLOCK(radix_lock); | ||
| 449 | #define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) | ||
| 450 | |||
| 451 | static int active_pfn_read_overlap(unsigned long pfn) | ||
| 452 | { | ||
| 453 | int overlap = 0, i; | ||
| 454 | |||
| 455 | for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) | ||
| 456 | if (radix_tree_tag_get(&dma_active_pfn, pfn, i)) | ||
| 457 | overlap |= 1 << i; | ||
| 458 | return overlap; | ||
| 459 | } | ||
| 460 | |||
| 461 | static int active_pfn_set_overlap(unsigned long pfn, int overlap) | ||
| 462 | { | ||
| 463 | int i; | ||
| 464 | |||
| 465 | if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0) | ||
| 466 | return 0; | ||
| 467 | |||
| 468 | for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) | ||
| 469 | if (overlap & 1 << i) | ||
| 470 | radix_tree_tag_set(&dma_active_pfn, pfn, i); | ||
| 471 | else | ||
| 472 | radix_tree_tag_clear(&dma_active_pfn, pfn, i); | ||
| 473 | |||
| 474 | return overlap; | ||
| 475 | } | ||
| 476 | |||
| 477 | static void active_pfn_inc_overlap(unsigned long pfn) | ||
| 478 | { | ||
| 479 | int overlap = active_pfn_read_overlap(pfn); | ||
| 480 | |||
| 481 | overlap = active_pfn_set_overlap(pfn, ++overlap); | ||
| 482 | |||
| 483 | /* If we overflowed the overlap counter then we're potentially | ||
| 484 | * leaking dma-mappings. Otherwise, if maps and unmaps are | ||
| 485 | * balanced then this overflow may cause false negatives in | ||
| 486 | * debug_dma_assert_idle() as the pfn may be marked idle | ||
| 487 | * prematurely. | ||
| 488 | */ | ||
| 489 | WARN_ONCE(overlap == 0, | ||
| 490 | "DMA-API: exceeded %d overlapping mappings of pfn %lx\n", | ||
| 491 | ACTIVE_PFN_MAX_OVERLAP, pfn); | ||
| 492 | } | ||
| 493 | |||
| 494 | static int active_pfn_dec_overlap(unsigned long pfn) | ||
| 495 | { | ||
| 496 | int overlap = active_pfn_read_overlap(pfn); | ||
| 497 | |||
| 498 | return active_pfn_set_overlap(pfn, --overlap); | ||
| 499 | } | ||
| 500 | |||
| 501 | static int active_pfn_insert(struct dma_debug_entry *entry) | ||
| 502 | { | ||
| 503 | unsigned long flags; | ||
| 504 | int rc; | ||
| 505 | |||
| 506 | spin_lock_irqsave(&radix_lock, flags); | ||
| 507 | rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry); | ||
| 508 | if (rc == -EEXIST) | ||
| 509 | active_pfn_inc_overlap(entry->pfn); | ||
| 510 | spin_unlock_irqrestore(&radix_lock, flags); | ||
| 511 | |||
| 512 | return rc; | ||
| 513 | } | ||
| 514 | |||
| 515 | static void active_pfn_remove(struct dma_debug_entry *entry) | ||
| 516 | { | ||
| 517 | unsigned long flags; | ||
| 518 | |||
| 519 | spin_lock_irqsave(&radix_lock, flags); | ||
| 520 | if (active_pfn_dec_overlap(entry->pfn) == 0) | ||
| 521 | radix_tree_delete(&dma_active_pfn, entry->pfn); | ||
| 522 | spin_unlock_irqrestore(&radix_lock, flags); | ||
| 523 | } | ||
| 524 | |||
| 525 | /** | ||
| 526 | * debug_dma_assert_idle() - assert that a page is not undergoing dma | ||
| 527 | * @page: page to lookup in the dma_active_pfn tree | ||
| 528 | * | ||
| 529 | * Place a call to this routine in cases where the cpu touching the page | ||
| 530 | * before the dma completes (page is dma_unmapped) will lead to data | ||
| 531 | * corruption. | ||
| 532 | */ | ||
| 533 | void debug_dma_assert_idle(struct page *page) | ||
| 534 | { | ||
| 535 | unsigned long flags; | ||
| 536 | struct dma_debug_entry *entry; | ||
| 537 | |||
| 538 | if (!page) | ||
| 539 | return; | ||
| 540 | |||
| 541 | spin_lock_irqsave(&radix_lock, flags); | ||
| 542 | entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page)); | ||
| 543 | spin_unlock_irqrestore(&radix_lock, flags); | ||
| 544 | |||
| 545 | if (!entry) | ||
| 546 | return; | ||
| 547 | |||
| 548 | err_printk(entry->dev, entry, | ||
| 549 | "DMA-API: cpu touching an active dma mapped page " | ||
| 550 | "[pfn=0x%lx]\n", entry->pfn); | ||
| 551 | } | ||
| 552 | |||
| 553 | /* | ||
| 407 | * Wrapper function for adding an entry to the hash. | 554 | * Wrapper function for adding an entry to the hash. |
| 408 | * This function takes care of locking itself. | 555 | * This function takes care of locking itself. |
| 409 | */ | 556 | */ |
| @@ -411,10 +558,21 @@ static void add_dma_entry(struct dma_debug_entry *entry) | |||
| 411 | { | 558 | { |
| 412 | struct hash_bucket *bucket; | 559 | struct hash_bucket *bucket; |
| 413 | unsigned long flags; | 560 | unsigned long flags; |
| 561 | int rc; | ||
| 414 | 562 | ||
| 415 | bucket = get_hash_bucket(entry, &flags); | 563 | bucket = get_hash_bucket(entry, &flags); |
| 416 | hash_bucket_add(bucket, entry); | 564 | hash_bucket_add(bucket, entry); |
| 417 | put_hash_bucket(bucket, &flags); | 565 | put_hash_bucket(bucket, &flags); |
| 566 | |||
| 567 | rc = active_pfn_insert(entry); | ||
| 568 | if (rc == -ENOMEM) { | ||
| 569 | pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n"); | ||
| 570 | global_disable = true; | ||
| 571 | } | ||
| 572 | |||
| 573 | /* TODO: report -EEXIST errors here as overlapping mappings are | ||
| 574 | * not supported by the DMA API | ||
| 575 | */ | ||
| 418 | } | 576 | } |
| 419 | 577 | ||
| 420 | static struct dma_debug_entry *__dma_entry_alloc(void) | 578 | static struct dma_debug_entry *__dma_entry_alloc(void) |
| @@ -469,6 +627,8 @@ static void dma_entry_free(struct dma_debug_entry *entry) | |||
| 469 | { | 627 | { |
| 470 | unsigned long flags; | 628 | unsigned long flags; |
| 471 | 629 | ||
| 630 | active_pfn_remove(entry); | ||
| 631 | |||
| 472 | /* | 632 | /* |
| 473 | * add to beginning of the list - this way the entries are | 633 | * add to beginning of the list - this way the entries are |
| 474 | * more likely cache hot when they are reallocated. | 634 | * more likely cache hot when they are reallocated. |
| @@ -895,15 +1055,15 @@ static void check_unmap(struct dma_debug_entry *ref) | |||
| 895 | ref->dev_addr, ref->size, | 1055 | ref->dev_addr, ref->size, |
| 896 | type2name[entry->type], type2name[ref->type]); | 1056 | type2name[entry->type], type2name[ref->type]); |
| 897 | } else if ((entry->type == dma_debug_coherent) && | 1057 | } else if ((entry->type == dma_debug_coherent) && |
| 898 | (ref->paddr != entry->paddr)) { | 1058 | (phys_addr(ref) != phys_addr(entry))) { |
| 899 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | 1059 | err_printk(ref->dev, entry, "DMA-API: device driver frees " |
| 900 | "DMA memory with different CPU address " | 1060 | "DMA memory with different CPU address " |
| 901 | "[device address=0x%016llx] [size=%llu bytes] " | 1061 | "[device address=0x%016llx] [size=%llu bytes] " |
| 902 | "[cpu alloc address=0x%016llx] " | 1062 | "[cpu alloc address=0x%016llx] " |
| 903 | "[cpu free address=0x%016llx]", | 1063 | "[cpu free address=0x%016llx]", |
| 904 | ref->dev_addr, ref->size, | 1064 | ref->dev_addr, ref->size, |
| 905 | (unsigned long long)entry->paddr, | 1065 | phys_addr(entry), |
| 906 | (unsigned long long)ref->paddr); | 1066 | phys_addr(ref)); |
| 907 | } | 1067 | } |
| 908 | 1068 | ||
| 909 | if (ref->sg_call_ents && ref->type == dma_debug_sg && | 1069 | if (ref->sg_call_ents && ref->type == dma_debug_sg && |
| @@ -1052,7 +1212,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, | |||
| 1052 | 1212 | ||
| 1053 | entry->dev = dev; | 1213 | entry->dev = dev; |
| 1054 | entry->type = dma_debug_page; | 1214 | entry->type = dma_debug_page; |
| 1055 | entry->paddr = page_to_phys(page) + offset; | 1215 | entry->pfn = page_to_pfn(page); |
| 1216 | entry->offset = offset, | ||
| 1056 | entry->dev_addr = dma_addr; | 1217 | entry->dev_addr = dma_addr; |
| 1057 | entry->size = size; | 1218 | entry->size = size; |
| 1058 | entry->direction = direction; | 1219 | entry->direction = direction; |
| @@ -1148,7 +1309,8 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
| 1148 | 1309 | ||
| 1149 | entry->type = dma_debug_sg; | 1310 | entry->type = dma_debug_sg; |
| 1150 | entry->dev = dev; | 1311 | entry->dev = dev; |
| 1151 | entry->paddr = sg_phys(s); | 1312 | entry->pfn = page_to_pfn(sg_page(s)); |
| 1313 | entry->offset = s->offset, | ||
| 1152 | entry->size = sg_dma_len(s); | 1314 | entry->size = sg_dma_len(s); |
| 1153 | entry->dev_addr = sg_dma_address(s); | 1315 | entry->dev_addr = sg_dma_address(s); |
| 1154 | entry->direction = direction; | 1316 | entry->direction = direction; |
| @@ -1198,7 +1360,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
| 1198 | struct dma_debug_entry ref = { | 1360 | struct dma_debug_entry ref = { |
| 1199 | .type = dma_debug_sg, | 1361 | .type = dma_debug_sg, |
| 1200 | .dev = dev, | 1362 | .dev = dev, |
| 1201 | .paddr = sg_phys(s), | 1363 | .pfn = page_to_pfn(sg_page(s)), |
| 1364 | .offset = s->offset, | ||
| 1202 | .dev_addr = sg_dma_address(s), | 1365 | .dev_addr = sg_dma_address(s), |
| 1203 | .size = sg_dma_len(s), | 1366 | .size = sg_dma_len(s), |
| 1204 | .direction = dir, | 1367 | .direction = dir, |
| @@ -1233,7 +1396,8 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, | |||
| 1233 | 1396 | ||
| 1234 | entry->type = dma_debug_coherent; | 1397 | entry->type = dma_debug_coherent; |
| 1235 | entry->dev = dev; | 1398 | entry->dev = dev; |
| 1236 | entry->paddr = virt_to_phys(virt); | 1399 | entry->pfn = page_to_pfn(virt_to_page(virt)); |
| 1400 | entry->offset = (size_t) virt & PAGE_MASK; | ||
| 1237 | entry->size = size; | 1401 | entry->size = size; |
| 1238 | entry->dev_addr = dma_addr; | 1402 | entry->dev_addr = dma_addr; |
| 1239 | entry->direction = DMA_BIDIRECTIONAL; | 1403 | entry->direction = DMA_BIDIRECTIONAL; |
| @@ -1248,7 +1412,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size, | |||
| 1248 | struct dma_debug_entry ref = { | 1412 | struct dma_debug_entry ref = { |
| 1249 | .type = dma_debug_coherent, | 1413 | .type = dma_debug_coherent, |
| 1250 | .dev = dev, | 1414 | .dev = dev, |
| 1251 | .paddr = virt_to_phys(virt), | 1415 | .pfn = page_to_pfn(virt_to_page(virt)), |
| 1416 | .offset = (size_t) virt & PAGE_MASK, | ||
| 1252 | .dev_addr = addr, | 1417 | .dev_addr = addr, |
| 1253 | .size = size, | 1418 | .size = size, |
| 1254 | .direction = DMA_BIDIRECTIONAL, | 1419 | .direction = DMA_BIDIRECTIONAL, |
| @@ -1356,7 +1521,8 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |||
| 1356 | struct dma_debug_entry ref = { | 1521 | struct dma_debug_entry ref = { |
| 1357 | .type = dma_debug_sg, | 1522 | .type = dma_debug_sg, |
| 1358 | .dev = dev, | 1523 | .dev = dev, |
| 1359 | .paddr = sg_phys(s), | 1524 | .pfn = page_to_pfn(sg_page(s)), |
| 1525 | .offset = s->offset, | ||
| 1360 | .dev_addr = sg_dma_address(s), | 1526 | .dev_addr = sg_dma_address(s), |
| 1361 | .size = sg_dma_len(s), | 1527 | .size = sg_dma_len(s), |
| 1362 | .direction = direction, | 1528 | .direction = direction, |
| @@ -1388,7 +1554,8 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
| 1388 | struct dma_debug_entry ref = { | 1554 | struct dma_debug_entry ref = { |
| 1389 | .type = dma_debug_sg, | 1555 | .type = dma_debug_sg, |
| 1390 | .dev = dev, | 1556 | .dev = dev, |
| 1391 | .paddr = sg_phys(s), | 1557 | .pfn = page_to_pfn(sg_page(s)), |
| 1558 | .offset = s->offset, | ||
| 1392 | .dev_addr = sg_dma_address(s), | 1559 | .dev_addr = sg_dma_address(s), |
| 1393 | .size = sg_dma_len(s), | 1560 | .size = sg_dma_len(s), |
| 1394 | .direction = direction, | 1561 | .direction = direction, |
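On the dma-debug changes above: debug_dma_assert_idle() is meant to be called from code paths where the CPU is about to touch a page that must not be under active DMA; the Kconfig help earlier in this diff names cow_user_page() as the motivating case. The mm-side hookup is not part of this lib/ diff, so the following is only a hedged sketch of what a caller could look like:

#include <linux/dma-debug.h>
#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical copy-on-write helper: before reading the source page on
 * the CPU, assert it is not currently mapped for DMA.  With
 * CONFIG_DMA_API_DEBUG this can fire the "cpu touching an active dma
 * mapped page" report added above; without it the call is expected to
 * compile to a no-op stub. */
static void cow_copy_page(struct page *dst, struct page *src,
			  unsigned long vaddr, struct vm_area_struct *vma)
{
	debug_dma_assert_idle(src);
	copy_user_highpage(dst, src, vaddr, vma);
}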
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c37aeacd7651..600ac57e2777 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
| @@ -8,6 +8,7 @@ | |||
| 8 | * By Greg Banks <gnb@melbourne.sgi.com> | 8 | * By Greg Banks <gnb@melbourne.sgi.com> |
| 9 | * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved. | 9 | * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved. |
| 10 | * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. | 10 | * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. |
| 11 | * Copyright (C) 2013 Du, Changbin <changbin.du@gmail.com> | ||
| 11 | */ | 12 | */ |
| 12 | 13 | ||
| 13 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ |
| @@ -24,6 +25,7 @@ | |||
| 24 | #include <linux/sysctl.h> | 25 | #include <linux/sysctl.h> |
| 25 | #include <linux/ctype.h> | 26 | #include <linux/ctype.h> |
| 26 | #include <linux/string.h> | 27 | #include <linux/string.h> |
| 28 | #include <linux/parser.h> | ||
| 27 | #include <linux/string_helpers.h> | 29 | #include <linux/string_helpers.h> |
| 28 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
| 29 | #include <linux/dynamic_debug.h> | 31 | #include <linux/dynamic_debug.h> |
| @@ -147,7 +149,8 @@ static int ddebug_change(const struct ddebug_query *query, | |||
| 147 | list_for_each_entry(dt, &ddebug_tables, link) { | 149 | list_for_each_entry(dt, &ddebug_tables, link) { |
| 148 | 150 | ||
| 149 | /* match against the module name */ | 151 | /* match against the module name */ |
| 150 | if (query->module && strcmp(query->module, dt->mod_name)) | 152 | if (query->module && |
| 153 | !match_wildcard(query->module, dt->mod_name)) | ||
| 151 | continue; | 154 | continue; |
| 152 | 155 | ||
| 153 | for (i = 0; i < dt->num_ddebugs; i++) { | 156 | for (i = 0; i < dt->num_ddebugs; i++) { |
| @@ -155,14 +158,16 @@ static int ddebug_change(const struct ddebug_query *query, | |||
| 155 | 158 | ||
| 156 | /* match against the source filename */ | 159 | /* match against the source filename */ |
| 157 | if (query->filename && | 160 | if (query->filename && |
| 158 | strcmp(query->filename, dp->filename) && | 161 | !match_wildcard(query->filename, dp->filename) && |
| 159 | strcmp(query->filename, kbasename(dp->filename)) && | 162 | !match_wildcard(query->filename, |
| 160 | strcmp(query->filename, trim_prefix(dp->filename))) | 163 | kbasename(dp->filename)) && |
| 164 | !match_wildcard(query->filename, | ||
| 165 | trim_prefix(dp->filename))) | ||
| 161 | continue; | 166 | continue; |
| 162 | 167 | ||
| 163 | /* match against the function */ | 168 | /* match against the function */ |
| 164 | if (query->function && | 169 | if (query->function && |
| 165 | strcmp(query->function, dp->function)) | 170 | !match_wildcard(query->function, dp->function)) |
| 166 | continue; | 171 | continue; |
| 167 | 172 | ||
| 168 | /* match against the format */ | 173 | /* match against the format */ |
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 6948a6692fc4..2eed22fa507c 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
| @@ -90,8 +90,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, | |||
| 90 | { | 90 | { |
| 91 | struct flex_array *ret; | 91 | struct flex_array *ret; |
| 92 | int elems_per_part = 0; | 92 | int elems_per_part = 0; |
| 93 | int reciprocal_elems = 0; | ||
| 94 | int max_size = 0; | 93 | int max_size = 0; |
| 94 | struct reciprocal_value reciprocal_elems = { 0 }; | ||
| 95 | 95 | ||
| 96 | if (element_size) { | 96 | if (element_size) { |
| 97 | elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size); | 97 | elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size); |
| @@ -119,6 +119,11 @@ EXPORT_SYMBOL(flex_array_alloc); | |||
| 119 | static int fa_element_to_part_nr(struct flex_array *fa, | 119 | static int fa_element_to_part_nr(struct flex_array *fa, |
| 120 | unsigned int element_nr) | 120 | unsigned int element_nr) |
| 121 | { | 121 | { |
| 122 | /* | ||
| 123 | * if element_size == 0 we don't get here, so we never touch | ||
| 124 | * the zeroed fa->reciprocal_elems, which would yield invalid | ||
| 125 | * results | ||
| 126 | */ | ||
| 122 | return reciprocal_divide(element_nr, fa->reciprocal_elems); | 127 | return reciprocal_divide(element_nr, fa->reciprocal_elems); |
| 123 | } | 128 | } |
| 124 | 129 | ||
diff --git a/lib/hash.c b/lib/hash.c
new file mode 100644
index 000000000000..fea973f4bd57
--- /dev/null
+++ b/lib/hash.c
| @@ -0,0 +1,39 @@ | |||
| 1 | /* General purpose hashing library | ||
| 2 | * | ||
| 3 | * That's a start of a kernel hashing library, which can be extended | ||
| 4 | * with further algorithms in future. arch_fast_hash{2,}() will | ||
| 5 | * eventually resolve to an architecture optimized implementation. | ||
| 6 | * | ||
| 7 | * Copyright 2013 Francesco Fusco <ffusco@redhat.com> | ||
| 8 | * Copyright 2013 Daniel Borkmann <dborkman@redhat.com> | ||
| 9 | * Copyright 2013 Thomas Graf <tgraf@redhat.com> | ||
| 10 | * Licensed under the GNU General Public License, version 2.0 (GPLv2) | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/jhash.h> | ||
| 14 | #include <linux/hash.h> | ||
| 15 | #include <linux/cache.h> | ||
| 16 | |||
| 17 | static struct fast_hash_ops arch_hash_ops __read_mostly = { | ||
| 18 | .hash = jhash, | ||
| 19 | .hash2 = jhash2, | ||
| 20 | }; | ||
| 21 | |||
| 22 | u32 arch_fast_hash(const void *data, u32 len, u32 seed) | ||
| 23 | { | ||
| 24 | return arch_hash_ops.hash(data, len, seed); | ||
| 25 | } | ||
| 26 | EXPORT_SYMBOL_GPL(arch_fast_hash); | ||
| 27 | |||
| 28 | u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed) | ||
| 29 | { | ||
| 30 | return arch_hash_ops.hash2(data, len, seed); | ||
| 31 | } | ||
| 32 | EXPORT_SYMBOL_GPL(arch_fast_hash2); | ||
| 33 | |||
| 34 | static int __init hashlib_init(void) | ||
| 35 | { | ||
| 36 | setup_arch_fast_hash(&arch_hash_ops); | ||
| 37 | return 0; | ||
| 38 | } | ||
| 39 | early_initcall(hashlib_init); | ||
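On the new lib/hash.c above: arch_fast_hash()/arch_fast_hash2() default to jhash/jhash2 and can be rerouted to an architecture-optimized implementation via setup_arch_fast_hash() at early init. A hedged sketch of a caller hashing a fixed-size key; the struct and function names here are made up for illustration:

#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical flow key.  arch_fast_hash2() takes a u32 buffer and a
 * length counted in u32 words, so the key size must be a multiple of
 * sizeof(u32). */
struct flow_key {
	__be32 saddr;
	__be32 daddr;
	__be16 sport;
	__be16 dport;
};

static u32 flow_key_hash(const struct flow_key *key, u32 seed)
{
	BUILD_BUG_ON(sizeof(*key) % sizeof(u32));
	return arch_fast_hash2((const u32 *)key,
			       sizeof(*key) / sizeof(u32), seed);
}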
diff --git a/lib/kobject.c b/lib/kobject.c
index 5b4b8886435e..b0b26665c611 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
| @@ -13,11 +13,11 @@ | |||
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <linux/kobject.h> | 15 | #include <linux/kobject.h> |
| 16 | #include <linux/kobj_completion.h> | ||
| 17 | #include <linux/string.h> | 16 | #include <linux/string.h> |
| 18 | #include <linux/export.h> | 17 | #include <linux/export.h> |
| 19 | #include <linux/stat.h> | 18 | #include <linux/stat.h> |
| 20 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | #include <linux/random.h> | ||
| 21 | 21 | ||
| 22 | /** | 22 | /** |
| 23 | * kobject_namespace - return @kobj's namespace tag | 23 | * kobject_namespace - return @kobj's namespace tag |
| @@ -65,13 +65,17 @@ static int populate_dir(struct kobject *kobj) | |||
| 65 | 65 | ||
| 66 | static int create_dir(struct kobject *kobj) | 66 | static int create_dir(struct kobject *kobj) |
| 67 | { | 67 | { |
| 68 | const struct kobj_ns_type_operations *ops; | ||
| 68 | int error; | 69 | int error; |
| 69 | 70 | ||
| 70 | error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj)); | 71 | error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj)); |
| 71 | if (!error) { | 72 | if (error) |
| 72 | error = populate_dir(kobj); | 73 | return error; |
| 73 | if (error) | 74 | |
| 74 | sysfs_remove_dir(kobj); | 75 | error = populate_dir(kobj); |
| 76 | if (error) { | ||
| 77 | sysfs_remove_dir(kobj); | ||
| 78 | return error; | ||
| 75 | } | 79 | } |
| 76 | 80 | ||
| 77 | /* | 81 | /* |
| @@ -80,7 +84,20 @@ static int create_dir(struct kobject *kobj) | |||
| 80 | */ | 84 | */ |
| 81 | sysfs_get(kobj->sd); | 85 | sysfs_get(kobj->sd); |
| 82 | 86 | ||
| 83 | return error; | 87 | /* |
| 88 | * If @kobj has ns_ops, its children need to be filtered based on | ||
| 89 | * their namespace tags. Enable namespace support on @kobj->sd. | ||
| 90 | */ | ||
| 91 | ops = kobj_child_ns_ops(kobj); | ||
| 92 | if (ops) { | ||
| 93 | BUG_ON(ops->type <= KOBJ_NS_TYPE_NONE); | ||
| 94 | BUG_ON(ops->type >= KOBJ_NS_TYPES); | ||
| 95 | BUG_ON(!kobj_ns_type_registered(ops->type)); | ||
| 96 | |||
| 97 | kernfs_enable_ns(kobj->sd); | ||
| 98 | } | ||
| 99 | |||
| 100 | return 0; | ||
| 84 | } | 101 | } |
| 85 | 102 | ||
| 86 | static int get_kobj_path_length(struct kobject *kobj) | 103 | static int get_kobj_path_length(struct kobject *kobj) |
| @@ -247,8 +264,10 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, | |||
| 247 | return 0; | 264 | return 0; |
| 248 | 265 | ||
| 249 | kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); | 266 | kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); |
| 250 | if (!kobj->name) | 267 | if (!kobj->name) { |
| 268 | kobj->name = old_name; | ||
| 251 | return -ENOMEM; | 269 | return -ENOMEM; |
| 270 | } | ||
| 252 | 271 | ||
| 253 | /* ewww... some of these buggers have '/' in the name ... */ | 272 | /* ewww... some of these buggers have '/' in the name ... */ |
| 254 | while ((s = strchr(kobj->name, '/'))) | 273 | while ((s = strchr(kobj->name, '/'))) |
| @@ -346,7 +365,7 @@ static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, | |||
| 346 | * | 365 | * |
| 347 | * If @parent is set, then the parent of the @kobj will be set to it. | 366 | * If @parent is set, then the parent of the @kobj will be set to it. |
| 348 | * If @parent is NULL, then the parent of the @kobj will be set to the | 367 | * If @parent is NULL, then the parent of the @kobj will be set to the |
| 349 | * kobject associted with the kset assigned to this kobject. If no kset | 368 | * kobject associated with the kset assigned to this kobject. If no kset |
| 350 | * is assigned to the kobject, then the kobject will be located in the | 369 | * is assigned to the kobject, then the kobject will be located in the |
| 351 | * root of the sysfs tree. | 370 | * root of the sysfs tree. |
| 352 | * | 371 | * |
| @@ -536,7 +555,7 @@ out: | |||
| 536 | */ | 555 | */ |
| 537 | void kobject_del(struct kobject *kobj) | 556 | void kobject_del(struct kobject *kobj) |
| 538 | { | 557 | { |
| 539 | struct sysfs_dirent *sd; | 558 | struct kernfs_node *sd; |
| 540 | 559 | ||
| 541 | if (!kobj) | 560 | if (!kobj) |
| 542 | return; | 561 | return; |
| @@ -625,10 +644,12 @@ static void kobject_release(struct kref *kref) | |||
| 625 | { | 644 | { |
| 626 | struct kobject *kobj = container_of(kref, struct kobject, kref); | 645 | struct kobject *kobj = container_of(kref, struct kobject, kref); |
| 627 | #ifdef CONFIG_DEBUG_KOBJECT_RELEASE | 646 | #ifdef CONFIG_DEBUG_KOBJECT_RELEASE |
| 628 | pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n", | 647 | unsigned long delay = HZ + HZ * (get_random_int() & 0x3); |
| 629 | kobject_name(kobj), kobj, __func__, kobj->parent); | 648 | pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n", |
| 649 | kobject_name(kobj), kobj, __func__, kobj->parent, delay); | ||
| 630 | INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); | 650 | INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); |
| 631 | schedule_delayed_work(&kobj->release, HZ); | 651 | |
| 652 | schedule_delayed_work(&kobj->release, delay); | ||
| 632 | #else | 653 | #else |
| 633 | kobject_cleanup(kobj); | 654 | kobject_cleanup(kobj); |
| 634 | #endif | 655 | #endif |
| @@ -760,55 +781,6 @@ const struct sysfs_ops kobj_sysfs_ops = { | |||
| 760 | }; | 781 | }; |
| 761 | 782 | ||
| 762 | /** | 783 | /** |
| 763 | * kobj_completion_init - initialize a kobj_completion object. | ||
| 764 | * @kc: kobj_completion | ||
| 765 | * @ktype: type of kobject to initialize | ||
| 766 | * | ||
| 767 | * kobj_completion structures can be embedded within structures with different | ||
| 768 | * lifetime rules. During the release of the enclosing object, we can | ||
| 769 | * wait on the release of the kobject so that we don't free it while it's | ||
| 770 | * still busy. | ||
| 771 | */ | ||
| 772 | void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype) | ||
| 773 | { | ||
| 774 | init_completion(&kc->kc_unregister); | ||
| 775 | kobject_init(&kc->kc_kobj, ktype); | ||
| 776 | } | ||
| 777 | EXPORT_SYMBOL_GPL(kobj_completion_init); | ||
| 778 | |||
| 779 | /** | ||
| 780 | * kobj_completion_release - release a kobj_completion object | ||
| 781 | * @kobj: kobject embedded in kobj_completion | ||
| 782 | * | ||
| 783 | * Used with kobject_release to notify waiters that the kobject has been | ||
| 784 | * released. | ||
| 785 | */ | ||
| 786 | void kobj_completion_release(struct kobject *kobj) | ||
| 787 | { | ||
| 788 | struct kobj_completion *kc = kobj_to_kobj_completion(kobj); | ||
| 789 | complete(&kc->kc_unregister); | ||
| 790 | } | ||
| 791 | EXPORT_SYMBOL_GPL(kobj_completion_release); | ||
| 792 | |||
| 793 | /** | ||
| 794 | * kobj_completion_del_and_wait - release the kobject and wait for it | ||
| 795 | * @kc: kobj_completion object to release | ||
| 796 | * | ||
| 797 | * Delete the kobject from sysfs and drop the reference count. Then wait | ||
| 798 | * until any other outstanding references are also dropped. This routine | ||
| 799 | * is only necessary once other references may have been taken on the | ||
| 800 | * kobject. Typically this happens when the kobject has been published | ||
| 801 | * to sysfs via kobject_add. | ||
| 802 | */ | ||
| 803 | void kobj_completion_del_and_wait(struct kobj_completion *kc) | ||
| 804 | { | ||
| 805 | kobject_del(&kc->kc_kobj); | ||
| 806 | kobject_put(&kc->kc_kobj); | ||
| 807 | wait_for_completion(&kc->kc_unregister); | ||
| 808 | } | ||
| 809 | EXPORT_SYMBOL_GPL(kobj_completion_del_and_wait); | ||
| 810 | |||
| 811 | /** | ||
| 812 | * kset_register - initialize and add a kset. | 784 | * kset_register - initialize and add a kset. |
| 813 | * @k: kset. | 785 | * @k: kset. |
| 814 | */ | 786 | */ |
| @@ -835,6 +807,7 @@ void kset_unregister(struct kset *k) | |||
| 835 | { | 807 | { |
| 836 | if (!k) | 808 | if (!k) |
| 837 | return; | 809 | return; |
| 810 | kobject_del(&k->kobj); | ||
| 838 | kobject_put(&k->kobj); | 811 | kobject_put(&k->kobj); |
| 839 | } | 812 | } |
| 840 | 813 | ||
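On the kset_unregister() change above: adding kobject_del() means unregistering a kset now removes its sysfs directory as well as dropping the reference, so the usual create/unregister pairing cleans up fully. A hedged sketch of that pairing; the names are illustrative:

#include <linux/kobject.h>
#include <linux/module.h>

static struct kset *example_kset;

/* Publish an (illustrative) kset under /sys/kernel/example. */
static int __init example_init(void)
{
	example_kset = kset_create_and_add("example", NULL, kernel_kobj);
	if (!example_kset)
		return -ENOMEM;
	return 0;
}
module_init(example_init);

/* With the change above, this both deletes the sysfs directory
 * (kobject_del) and drops the reference (kobject_put). */
static void __exit example_exit(void)
{
	kset_unregister(example_kset);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");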
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 52e5abbc41db..5f72767ddd9b 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
| @@ -88,11 +88,17 @@ out: | |||
| 88 | #ifdef CONFIG_NET | 88 | #ifdef CONFIG_NET |
| 89 | static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) | 89 | static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) |
| 90 | { | 90 | { |
| 91 | struct kobject *kobj = data; | 91 | struct kobject *kobj = data, *ksobj; |
| 92 | const struct kobj_ns_type_operations *ops; | 92 | const struct kobj_ns_type_operations *ops; |
| 93 | 93 | ||
| 94 | ops = kobj_ns_ops(kobj); | 94 | ops = kobj_ns_ops(kobj); |
| 95 | if (ops) { | 95 | if (!ops && kobj->kset) { |
| 96 | ksobj = &kobj->kset->kobj; | ||
| 97 | if (ksobj->parent != NULL) | ||
| 98 | ops = kobj_ns_ops(ksobj->parent); | ||
| 99 | } | ||
| 100 | |||
| 101 | if (ops && ops->netlink_ns && kobj->ktype->namespace) { | ||
| 96 | const void *sock_ns, *ns; | 102 | const void *sock_ns, *ns; |
| 97 | ns = kobj->ktype->namespace(kobj); | 103 | ns = kobj->ktype->namespace(kobj); |
| 98 | sock_ns = ops->netlink_ns(dsk); | 104 | sock_ns = ops->netlink_ns(dsk); |
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index f78ae0c0c4e2..ec8da78df9be 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
| @@ -92,7 +92,6 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | |||
| 92 | rv = _parse_integer(s, base, &_res); | 92 | rv = _parse_integer(s, base, &_res); |
| 93 | if (rv & KSTRTOX_OVERFLOW) | 93 | if (rv & KSTRTOX_OVERFLOW) |
| 94 | return -ERANGE; | 94 | return -ERANGE; |
| 95 | rv &= ~KSTRTOX_OVERFLOW; | ||
| 96 | if (rv == 0) | 95 | if (rv == 0) |
| 97 | return -EINVAL; | 96 | return -EINVAL; |
| 98 | s += rv; | 97 | s += rv; |
diff --git a/lib/lockref.c b/lib/lockref.c
index d2b123f8456b..f07a40d33871 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
| @@ -1,5 +1,6 @@ | |||
| 1 | #include <linux/export.h> | 1 | #include <linux/export.h> |
| 2 | #include <linux/lockref.h> | 2 | #include <linux/lockref.h> |
| 3 | #include <linux/mutex.h> | ||
| 3 | 4 | ||
| 4 | #if USE_CMPXCHG_LOCKREF | 5 | #if USE_CMPXCHG_LOCKREF |
| 5 | 6 | ||
| @@ -12,14 +13,6 @@ | |||
| 12 | #endif | 13 | #endif |
| 13 | 14 | ||
| 14 | /* | 15 | /* |
| 15 | * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP. | ||
| 16 | * This is useful for architectures with an expensive cpu_relax(). | ||
| 17 | */ | ||
| 18 | #ifndef arch_mutex_cpu_relax | ||
| 19 | # define arch_mutex_cpu_relax() cpu_relax() | ||
| 20 | #endif | ||
| 21 | |||
| 22 | /* | ||
| 23 | * Note that the "cmpxchg()" reloads the "old" value for the | 16 | * Note that the "cmpxchg()" reloads the "old" value for the |
| 24 | * failure case. | 17 | * failure case. |
| 25 | */ | 18 | */ |
diff --git a/lib/parser.c b/lib/parser.c
index 807b2aaa33fa..b6d11631231b 100644
--- a/lib/parser.c
+++ b/lib/parser.c
| @@ -113,6 +113,7 @@ int match_token(char *s, const match_table_t table, substring_t args[]) | |||
| 113 | 113 | ||
| 114 | return p->token; | 114 | return p->token; |
| 115 | } | 115 | } |
| 116 | EXPORT_SYMBOL(match_token); | ||
| 116 | 117 | ||
| 117 | /** | 118 | /** |
| 118 | * match_number: scan a number in the given base from a substring_t | 119 | * match_number: scan a number in the given base from a substring_t |
| @@ -163,6 +164,7 @@ int match_int(substring_t *s, int *result) | |||
| 163 | { | 164 | { |
| 164 | return match_number(s, result, 0); | 165 | return match_number(s, result, 0); |
| 165 | } | 166 | } |
| 167 | EXPORT_SYMBOL(match_int); | ||
| 166 | 168 | ||
| 167 | /** | 169 | /** |
| 168 | * match_octal: - scan an octal representation of an integer from a substring_t | 170 | * match_octal: - scan an octal representation of an integer from a substring_t |
| @@ -177,6 +179,7 @@ int match_octal(substring_t *s, int *result) | |||
| 177 | { | 179 | { |
| 178 | return match_number(s, result, 8); | 180 | return match_number(s, result, 8); |
| 179 | } | 181 | } |
| 182 | EXPORT_SYMBOL(match_octal); | ||
| 180 | 183 | ||
| 181 | /** | 184 | /** |
| 182 | * match_hex: - scan a hex representation of an integer from a substring_t | 185 | * match_hex: - scan a hex representation of an integer from a substring_t |
| @@ -191,6 +194,58 @@ int match_hex(substring_t *s, int *result) | |||
| 191 | { | 194 | { |
| 192 | return match_number(s, result, 16); | 195 | return match_number(s, result, 16); |
| 193 | } | 196 | } |
| 197 | EXPORT_SYMBOL(match_hex); | ||
| 198 | |||
| 199 | /** | ||
| 200 | * match_wildcard: - parse if a string matches given wildcard pattern | ||
| 201 | * @pattern: wildcard pattern | ||
| 202 | * @str: the string to be parsed | ||
| 203 | * | ||
| 204 | * Description: Parse the string @str to check if matches wildcard | ||
| 205 | * pattern @pattern. The pattern may contain two types of wildcards: | ||
| 206 | * '*' - matches zero or more characters | ||
| 207 | * '?' - matches one character | ||
| 208 | * If it's matched, return true, else return false. | ||
| 209 | */ | ||
| 210 | bool match_wildcard(const char *pattern, const char *str) | ||
| 211 | { | ||
| 212 | const char *s = str; | ||
| 213 | const char *p = pattern; | ||
| 214 | bool star = false; | ||
| 215 | |||
| 216 | while (*s) { | ||
| 217 | switch (*p) { | ||
| 218 | case '?': | ||
| 219 | s++; | ||
| 220 | p++; | ||
| 221 | break; | ||
| 222 | case '*': | ||
| 223 | star = true; | ||
| 224 | str = s; | ||
| 225 | if (!*++p) | ||
| 226 | return true; | ||
| 227 | pattern = p; | ||
| 228 | break; | ||
| 229 | default: | ||
| 230 | if (*s == *p) { | ||
| 231 | s++; | ||
| 232 | p++; | ||
| 233 | } else { | ||
| 234 | if (!star) | ||
| 235 | return false; | ||
| 236 | str++; | ||
| 237 | s = str; | ||
| 238 | p = pattern; | ||
| 239 | } | ||
| 240 | break; | ||
| 241 | } | ||
| 242 | } | ||
| 243 | |||
| 244 | if (*p == '*') | ||
| 245 | ++p; | ||
| 246 | return !*p; | ||
| 247 | } | ||
| 248 | EXPORT_SYMBOL(match_wildcard); | ||
| 194 | 249 | ||
| 195 | /** | 250 | /** |
| 196 | * match_strlcpy: - Copy the characters from a substring_t to a sized buffer | 251 | * match_strlcpy: - Copy the characters from a substring_t to a sized buffer |
| @@ -213,6 +268,7 @@ size_t match_strlcpy(char *dest, const substring_t *src, size_t size) | |||
| 213 | } | 268 | } |
| 214 | return ret; | 269 | return ret; |
| 215 | } | 270 | } |
| 271 | EXPORT_SYMBOL(match_strlcpy); | ||
| 216 | 272 | ||
| 217 | /** | 273 | /** |
| 218 | * match_strdup: - allocate a new string with the contents of a substring_t | 274 | * match_strdup: - allocate a new string with the contents of a substring_t |
| @@ -230,10 +286,4 @@ char *match_strdup(const substring_t *s) | |||
| 230 | match_strlcpy(p, s, sz); | 286 | match_strlcpy(p, s, sz); |
| 231 | return p; | 287 | return p; |
| 232 | } | 288 | } |
| 233 | |||
| 234 | EXPORT_SYMBOL(match_token); | ||
| 235 | EXPORT_SYMBOL(match_int); | ||
| 236 | EXPORT_SYMBOL(match_octal); | ||
| 237 | EXPORT_SYMBOL(match_hex); | ||
| 238 | EXPORT_SYMBOL(match_strlcpy); | ||
| 239 | EXPORT_SYMBOL(match_strdup); | 289 | EXPORT_SYMBOL(match_strdup); |
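On match_wildcard() above: '*' matches zero or more characters and '?' matches exactly one, and the dynamic_debug hunks earlier in this diff use it so that module/file/function queries may carry wildcards. A few direct calls as a hedged illustration of the expected results:

#include <linux/parser.h>
#include <linux/printk.h>

/* Expected results given the semantics documented above. */
static void wildcard_demo(void)
{
	pr_info("%d\n", match_wildcard("usb*", "usbcore"));		/* 1: '*' matches "core" */
	pr_info("%d\n", match_wildcard("kstrto?", "kstrtox"));		/* 1: '?' matches 'x' */
	pr_info("%d\n", match_wildcard("kstrto?", "kstrtoull"));	/* 0: '?' is one char only */
	pr_info("%d\n", match_wildcard("*debug*", "dynamic_debug"));	/* 1 */
}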
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 1a53d497a8c5..963b7034a51b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
| @@ -120,6 +120,9 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu) | |||
| 120 | 120 | ||
| 121 | atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count); | 121 | atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count); |
| 122 | 122 | ||
| 123 | WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)", | ||
| 124 | atomic_read(&ref->count)); | ||
| 125 | |||
| 123 | /* @ref is viewed as dead on all CPUs, send out kill confirmation */ | 126 | /* @ref is viewed as dead on all CPUs, send out kill confirmation */ |
| 124 | if (ref->confirm_kill) | 127 | if (ref->confirm_kill) |
| 125 | ref->confirm_kill(ref); | 128 | ref->confirm_kill(ref); |
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7473ee3b4ee7..8280a5dd1727 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
| @@ -82,10 +82,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) | |||
| 82 | unsigned long flags; | 82 | unsigned long flags; |
| 83 | raw_spin_lock_irqsave(&fbc->lock, flags); | 83 | raw_spin_lock_irqsave(&fbc->lock, flags); |
| 84 | fbc->count += count; | 84 | fbc->count += count; |
| 85 | __this_cpu_sub(*fbc->counters, count - amount); | ||
| 85 | raw_spin_unlock_irqrestore(&fbc->lock, flags); | 86 | raw_spin_unlock_irqrestore(&fbc->lock, flags); |
| 86 | __this_cpu_write(*fbc->counters, 0); | ||
| 87 | } else { | 87 | } else { |
| 88 | __this_cpu_write(*fbc->counters, count); | 88 | this_cpu_add(*fbc->counters, amount); |
| 89 | } | 89 | } |
| 90 | preempt_enable(); | 90 | preempt_enable(); |
| 91 | } | 91 | } |
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 31dd4ccd3baa..8b3c9dc88262 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
| @@ -8,8 +8,8 @@ | |||
| 8 | #define CHECK_LOOPS 100 | 8 | #define CHECK_LOOPS 100 |
| 9 | 9 | ||
| 10 | struct test_node { | 10 | struct test_node { |
| 11 | struct rb_node rb; | ||
| 12 | u32 key; | 11 | u32 key; |
| 12 | struct rb_node rb; | ||
| 13 | 13 | ||
| 14 | /* following fields used for testing augmented rbtree functionality */ | 14 | /* following fields used for testing augmented rbtree functionality */ |
| 15 | u32 val; | 15 | u32 val; |
| @@ -114,6 +114,16 @@ static int black_path_count(struct rb_node *rb) | |||
| 114 | return count; | 114 | return count; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | static void check_postorder_foreach(int nr_nodes) | ||
| 118 | { | ||
| 119 | struct test_node *cur, *n; | ||
| 120 | int count = 0; | ||
| 121 | rbtree_postorder_for_each_entry_safe(cur, n, &root, rb) | ||
| 122 | count++; | ||
| 123 | |||
| 124 | WARN_ON_ONCE(count != nr_nodes); | ||
| 125 | } | ||
| 126 | |||
| 117 | static void check_postorder(int nr_nodes) | 127 | static void check_postorder(int nr_nodes) |
| 118 | { | 128 | { |
| 119 | struct rb_node *rb; | 129 | struct rb_node *rb; |
| @@ -148,6 +158,7 @@ static void check(int nr_nodes) | |||
| 148 | WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); | 158 | WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); |
| 149 | 159 | ||
| 150 | check_postorder(nr_nodes); | 160 | check_postorder(nr_nodes); |
| 161 | check_postorder_foreach(nr_nodes); | ||
| 151 | } | 162 | } |
| 152 | 163 | ||
| 153 | static void check_augmented(int nr_nodes) | 164 | static void check_augmented(int nr_nodes) |
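On the new check_postorder_foreach() above: rbtree_postorder_for_each_entry_safe() visits each node only after both of its children and caches the next node before handing the current one to the body, so its main use outside the test is freeing an entire tree without an rb_erase()/rebalance per node. A hedged sketch of that pattern, reusing the test's node layout:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct test_node {
	u32 key;
	struct rb_node rb;
};

/* Free every node of a tree that is being torn down wholesale.  The
 * postorder guarantee makes kfree(cur) safe, and the _safe variant
 * means the iterator never dereferences the node just freed. */
static void free_all_nodes(struct rb_root *root)
{
	struct test_node *cur, *n;

	rbtree_postorder_for_each_entry_safe(cur, n, root, rb)
		kfree(cur);

	*root = RB_ROOT;
}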
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
index 75510e94f7d0..464152410c51 100644
--- a/lib/reciprocal_div.c
+++ b/lib/reciprocal_div.c
| @@ -1,11 +1,27 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 1 | #include <asm/div64.h> | 2 | #include <asm/div64.h> |
| 2 | #include <linux/reciprocal_div.h> | 3 | #include <linux/reciprocal_div.h> |
| 3 | #include <linux/export.h> | 4 | #include <linux/export.h> |
| 4 | 5 | ||
| 5 | u32 reciprocal_value(u32 k) | 6 | /* |
| 7 | * For a description of the algorithm please have a look at | ||
| 8 | * include/linux/reciprocal_div.h | ||
| 9 | */ | ||
| 10 | |||
| 11 | struct reciprocal_value reciprocal_value(u32 d) | ||
| 6 | { | 12 | { |
| 7 | u64 val = (1LL << 32) + (k - 1); | 13 | struct reciprocal_value R; |
| 8 | do_div(val, k); | 14 | u64 m; |
| 9 | return (u32)val; | 15 | int l; |
| 16 | |||
| 17 | l = fls(d - 1); | ||
| 18 | m = ((1ULL << 32) * ((1ULL << l) - d)); | ||
| 19 | do_div(m, d); | ||
| 20 | ++m; | ||
| 21 | R.m = (u32)m; | ||
| 22 | R.sh1 = min(l, 1); | ||
| 23 | R.sh2 = max(l - 1, 0); | ||
| 24 | |||
| 25 | return R; | ||
| 10 | } | 26 | } |
| 11 | EXPORT_SYMBOL(reciprocal_value); | 27 | EXPORT_SYMBOL(reciprocal_value); |
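On the new reciprocal_value() above: it now returns a struct reciprocal_value carrying the multiplier m and the two shifts sh1/sh2, and reciprocal_divide() takes that struct as its second argument, which is why the flex_array hunk earlier in this diff switched its field type. A hedged usage sketch, precomputing the constant once and reusing it:

#include <linux/reciprocal_div.h>
#include <linux/printk.h>

/* Precompute the reciprocal of a fixed divisor once, then replace many
 * runtime divisions with the multiply+shift that reciprocal_divide()
 * performs.  Each result is expected to equal x / 10 exactly. */
static void reciprocal_demo(void)
{
	struct reciprocal_value R = reciprocal_value(10);
	u32 i;

	for (i = 0; i < 5; i++) {
		u32 x = i * 7 + 3;

		pr_info("%u / 10 = %u\n", x, reciprocal_divide(x, R));
	}
}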
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d16fa295ae1d..3a8e8e8fb2a5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
| @@ -495,7 +495,7 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) | |||
| 495 | * true if @miter contains the valid mapping. false if end of sg | 495 | * true if @miter contains the valid mapping. false if end of sg |
| 496 | * list is reached. | 496 | * list is reached. |
| 497 | */ | 497 | */ |
| 498 | static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) | 498 | bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) |
| 499 | { | 499 | { |
| 500 | sg_miter_stop(miter); | 500 | sg_miter_stop(miter); |
| 501 | 501 | ||
| @@ -513,6 +513,7 @@ static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) | |||
| 513 | 513 | ||
| 514 | return true; | 514 | return true; |
| 515 | } | 515 | } |
| 516 | EXPORT_SYMBOL(sg_miter_skip); | ||
| 516 | 517 | ||
| 517 | /** | 518 | /** |
| 518 | * sg_miter_next - proceed mapping iterator to the next mapping | 519 | * sg_miter_next - proceed mapping iterator to the next mapping |
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 5847a4921b8e..09225796991a 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
| @@ -17,9 +17,6 @@ void show_mem(unsigned int filter) | |||
| 17 | printk("Mem-Info:\n"); | 17 | printk("Mem-Info:\n"); |
| 18 | show_free_areas(filter); | 18 | show_free_areas(filter); |
| 19 | 19 | ||
| 20 | if (filter & SHOW_MEM_FILTER_PAGE_COUNT) | ||
| 21 | return; | ||
| 22 | |||
| 23 | for_each_online_pgdat(pgdat) { | 20 | for_each_online_pgdat(pgdat) { |
| 24 | unsigned long flags; | 21 | unsigned long flags; |
| 25 | int zoneid; | 22 | int zoneid; |
| @@ -46,4 +43,7 @@ void show_mem(unsigned int filter) | |||
| 46 | printk("%lu pages in pagetable cache\n", | 43 | printk("%lu pages in pagetable cache\n", |
| 47 | quicklist_total_size()); | 44 | quicklist_total_size()); |
| 48 | #endif | 45 | #endif |
| 46 | #ifdef CONFIG_MEMORY_FAILURE | ||
| 47 | printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); | ||
| 48 | #endif | ||
| 49 | } | 49 | } |
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index e4399fa65ad6..615f3de4b5ce 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
| @@ -172,8 +172,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
| 172 | /* | 172 | /* |
| 173 | * Get the overflow emergency buffer | 173 | * Get the overflow emergency buffer |
| 174 | */ | 174 | */ |
| 175 | v_overflow_buffer = alloc_bootmem_low_pages_nopanic( | 175 | v_overflow_buffer = memblock_virt_alloc_nopanic( |
| 176 | PAGE_ALIGN(io_tlb_overflow)); | 176 | PAGE_ALIGN(io_tlb_overflow), |
| 177 | PAGE_SIZE); | ||
| 177 | if (!v_overflow_buffer) | 178 | if (!v_overflow_buffer) |
| 178 | return -ENOMEM; | 179 | return -ENOMEM; |
| 179 | 180 | ||
| @@ -184,11 +185,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
| 184 | * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE | 185 | * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE |
| 185 | * between io_tlb_start and io_tlb_end. | 186 | * between io_tlb_start and io_tlb_end. |
| 186 | */ | 187 | */ |
| 187 | io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); | 188 | io_tlb_list = memblock_virt_alloc( |
| 189 | PAGE_ALIGN(io_tlb_nslabs * sizeof(int)), | ||
| 190 | PAGE_SIZE); | ||
| 188 | for (i = 0; i < io_tlb_nslabs; i++) | 191 | for (i = 0; i < io_tlb_nslabs; i++) |
| 189 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); | 192 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); |
| 190 | io_tlb_index = 0; | 193 | io_tlb_index = 0; |
| 191 | io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); | 194 | io_tlb_orig_addr = memblock_virt_alloc( |
| 195 | PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)), | ||
| 196 | PAGE_SIZE); | ||
| 192 | 197 | ||
| 193 | if (verbose) | 198 | if (verbose) |
| 194 | swiotlb_print_info(); | 199 | swiotlb_print_info(); |
| @@ -215,13 +220,13 @@ swiotlb_init(int verbose) | |||
| 215 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; | 220 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; |
| 216 | 221 | ||
| 217 | /* Get IO TLB memory from the low pages */ | 222 | /* Get IO TLB memory from the low pages */ |
| 218 | vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes)); | 223 | vstart = memblock_virt_alloc_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE); |
| 219 | if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) | 224 | if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) |
| 220 | return; | 225 | return; |
| 221 | 226 | ||
| 222 | if (io_tlb_start) | 227 | if (io_tlb_start) |
| 223 | free_bootmem(io_tlb_start, | 228 | memblock_free_early(io_tlb_start, |
| 224 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | 229 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
| 225 | pr_warn("Cannot allocate SWIOTLB buffer"); | 230 | pr_warn("Cannot allocate SWIOTLB buffer"); |
| 226 | no_iotlb_memory = true; | 231 | no_iotlb_memory = true; |
| 227 | } | 232 | } |
| @@ -357,14 +362,14 @@ void __init swiotlb_free(void) | |||
| 357 | free_pages((unsigned long)phys_to_virt(io_tlb_start), | 362 | free_pages((unsigned long)phys_to_virt(io_tlb_start), |
| 358 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); | 363 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); |
| 359 | } else { | 364 | } else { |
| 360 | free_bootmem_late(io_tlb_overflow_buffer, | 365 | memblock_free_late(io_tlb_overflow_buffer, |
| 361 | PAGE_ALIGN(io_tlb_overflow)); | 366 | PAGE_ALIGN(io_tlb_overflow)); |
| 362 | free_bootmem_late(__pa(io_tlb_orig_addr), | 367 | memblock_free_late(__pa(io_tlb_orig_addr), |
| 363 | PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); | 368 | PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); |
| 364 | free_bootmem_late(__pa(io_tlb_list), | 369 | memblock_free_late(__pa(io_tlb_list), |
| 365 | PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); | 370 | PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); |
| 366 | free_bootmem_late(io_tlb_start, | 371 | memblock_free_late(io_tlb_start, |
| 367 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | 372 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
| 368 | } | 373 | } |
| 369 | io_tlb_nslabs = 0; | 374 | io_tlb_nslabs = 0; |
| 370 | } | 375 | } |
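The swiotlb conversion is mechanical: each bootmem call is replaced by its memblock counterpart, with the page alignment that the old helpers implied now passed explicitly. A rough sketch of the resulting pattern (illustrative only, not additional patch content):

        /* early allocation: returns a virtual address, NULL on failure */
        vstart = memblock_virt_alloc_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);

        /* early free takes a physical address and size */
        memblock_free_early(io_tlb_start, PAGE_ALIGN(bytes));

        /* freeing after boot, once the early allocator is torn down */
        memblock_free_late(__pa(io_tlb_list), PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));

The non-_nopanic variant used for io_tlb_list and io_tlb_orig_addr panics on allocation failure, mirroring the old alloc_bootmem_pages() behaviour.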
diff --git a/lib/test_module.c b/lib/test_module.c new file mode 100644 index 000000000000..319b66f1ff61 --- /dev/null +++ b/lib/test_module.c | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | /* | ||
| 2 | * This module emits "Hello, world" on printk when loaded. | ||
| 3 | * | ||
| 4 | * It is designed to be used for basic evaluation of the module loading | ||
| 5 | * subsystem (for example when validating module signing/verification). It | ||
| 6 | * lacks any extra dependencies, and will not normally be loaded by the | ||
| 7 | * system unless explicitly requested by name. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 11 | |||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/module.h> | ||
| 14 | #include <linux/printk.h> | ||
| 15 | |||
| 16 | static int __init test_module_init(void) | ||
| 17 | { | ||
| 18 | pr_warn("Hello, world\n"); | ||
| 19 | |||
| 20 | return 0; | ||
| 21 | } | ||
| 22 | |||
| 23 | module_init(test_module_init); | ||
| 24 | |||
| 25 | static void __exit test_module_exit(void) | ||
| 26 | { | ||
| 27 | pr_warn("Goodbye\n"); | ||
| 28 | } | ||
| 29 | |||
| 30 | module_exit(test_module_exit); | ||
| 31 | |||
| 32 | MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); | ||
| 33 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c new file mode 100644 index 000000000000..0ecef3e4690e --- /dev/null +++ b/lib/test_user_copy.c | |||
| @@ -0,0 +1,110 @@ | |||
| 1 | /* | ||
| 2 | * Kernel module for testing copy_to/from_user infrastructure. | ||
| 3 | * | ||
| 4 | * Copyright 2013 Google Inc. All Rights Reserved | ||
| 5 | * | ||
| 6 | * Authors: | ||
| 7 | * Kees Cook <keescook@chromium.org> | ||
| 8 | * | ||
| 9 | * This software is licensed under the terms of the GNU General Public | ||
| 10 | * License version 2, as published by the Free Software Foundation, and | ||
| 11 | * may be copied, distributed, and modified under those terms. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 20 | |||
| 21 | #include <linux/mman.h> | ||
| 22 | #include <linux/module.h> | ||
| 23 | #include <linux/sched.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/uaccess.h> | ||
| 26 | #include <linux/vmalloc.h> | ||
| 27 | |||
| 28 | #define test(condition, msg) \ | ||
| 29 | ({ \ | ||
| 30 | int cond = (condition); \ | ||
| 31 | if (cond) \ | ||
| 32 | pr_warn("%s\n", msg); \ | ||
| 33 | cond; \ | ||
| 34 | }) | ||
| 35 | |||
| 36 | static int __init test_user_copy_init(void) | ||
| 37 | { | ||
| 38 | int ret = 0; | ||
| 39 | char *kmem; | ||
| 40 | char __user *usermem; | ||
| 41 | char *bad_usermem; | ||
| 42 | unsigned long user_addr; | ||
| 43 | unsigned long value = 0x5A; | ||
| 44 | |||
| 45 | kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); | ||
| 46 | if (!kmem) | ||
| 47 | return -ENOMEM; | ||
| 48 | |||
| 49 | user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2, | ||
| 50 | PROT_READ | PROT_WRITE | PROT_EXEC, | ||
| 51 | MAP_ANONYMOUS | MAP_PRIVATE, 0); | ||
| 52 | if (user_addr >= (unsigned long)(TASK_SIZE)) { | ||
| 53 | pr_warn("Failed to allocate user memory\n"); | ||
| 54 | kfree(kmem); | ||
| 55 | return -ENOMEM; | ||
| 56 | } | ||
| 57 | |||
| 58 | usermem = (char __user *)user_addr; | ||
| 59 | bad_usermem = (char *)user_addr; | ||
| 60 | |||
| 61 | /* Legitimate usage: none of these should fail. */ | ||
| 62 | ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE), | ||
| 63 | "legitimate copy_from_user failed"); | ||
| 64 | ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE), | ||
| 65 | "legitimate copy_to_user failed"); | ||
| 66 | ret |= test(get_user(value, (unsigned long __user *)usermem), | ||
| 67 | "legitimate get_user failed"); | ||
| 68 | ret |= test(put_user(value, (unsigned long __user *)usermem), | ||
| 69 | "legitimate put_user failed"); | ||
| 70 | |||
| 71 | /* Invalid usage: none of these should succeed. */ | ||
| 72 | ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), | ||
| 73 | PAGE_SIZE), | ||
| 74 | "illegal all-kernel copy_from_user passed"); | ||
| 75 | ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem, | ||
| 76 | PAGE_SIZE), | ||
| 77 | "illegal reversed copy_from_user passed"); | ||
| 78 | ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE, | ||
| 79 | PAGE_SIZE), | ||
| 80 | "illegal all-kernel copy_to_user passed"); | ||
| 81 | ret |= test(!copy_to_user((char __user *)kmem, bad_usermem, | ||
| 82 | PAGE_SIZE), | ||
| 83 | "illegal reversed copy_to_user passed"); | ||
| 84 | ret |= test(!get_user(value, (unsigned long __user *)kmem), | ||
| 85 | "illegal get_user passed"); | ||
| 86 | ret |= test(!put_user(value, (unsigned long __user *)kmem), | ||
| 87 | "illegal put_user passed"); | ||
| 88 | |||
| 89 | vm_munmap(user_addr, PAGE_SIZE * 2); | ||
| 90 | kfree(kmem); | ||
| 91 | |||
| 92 | if (ret == 0) { | ||
| 93 | pr_info("tests passed.\n"); | ||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | |||
| 97 | return -EINVAL; | ||
| 98 | } | ||
| 99 | |||
| 100 | module_init(test_user_copy_init); | ||
| 101 | |||
| 102 | static void __exit test_user_copy_exit(void) | ||
| 103 | { | ||
| 104 | pr_info("unloaded.\n"); | ||
| 105 | } | ||
| 106 | |||
| 107 | module_exit(test_user_copy_exit); | ||
| 108 | |||
| 109 | MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); | ||
| 110 | MODULE_LICENSE("GPL"); | ||
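Note the pass/fail contract here: test_user_copy_init() ORs every check into ret and returns -EINVAL if anything failed, so simply attempting to load the module doubles as the test harness. The load succeeds only if all of the legitimate accesses worked and all of the illegal ones were rejected, with the individual failure messages left in the kernel log.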
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 10909c571494..185b6d300ebc 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -1155,6 +1155,30 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr, | |||
| 1155 | return number(buf, end, *(const netdev_features_t *)addr, spec); | 1155 | return number(buf, end, *(const netdev_features_t *)addr, spec); |
| 1156 | } | 1156 | } |
| 1157 | 1157 | ||
| 1158 | static noinline_for_stack | ||
| 1159 | char *address_val(char *buf, char *end, const void *addr, | ||
| 1160 | struct printf_spec spec, const char *fmt) | ||
| 1161 | { | ||
| 1162 | unsigned long long num; | ||
| 1163 | |||
| 1164 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | ||
| 1165 | spec.base = 16; | ||
| 1166 | |||
| 1167 | switch (fmt[1]) { | ||
| 1168 | case 'd': | ||
| 1169 | num = *(const dma_addr_t *)addr; | ||
| 1170 | spec.field_width = sizeof(dma_addr_t) * 2 + 2; | ||
| 1171 | break; | ||
| 1172 | case 'p': | ||
| 1173 | default: | ||
| 1174 | num = *(const phys_addr_t *)addr; | ||
| 1175 | spec.field_width = sizeof(phys_addr_t) * 2 + 2; | ||
| 1176 | break; | ||
| 1177 | } | ||
| 1178 | |||
| 1179 | return number(buf, end, num, spec); | ||
| 1180 | } | ||
| 1181 | |||
| 1158 | int kptr_restrict __read_mostly; | 1182 | int kptr_restrict __read_mostly; |
| 1159 | 1183 | ||
| 1160 | /* | 1184 | /* |
| @@ -1218,7 +1242,8 @@ int kptr_restrict __read_mostly; | |||
| 1218 | * N no separator | 1242 | * N no separator |
| 1219 | * The maximum supported length is 64 bytes of the input. Consider | 1243 | * The maximum supported length is 64 bytes of the input. Consider |
| 1220 | * to use print_hex_dump() for the larger input. | 1244 | * to use print_hex_dump() for the larger input. |
| 1221 | * - 'a' For a phys_addr_t type and its derivative types (passed by reference) | 1245 | * - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives |
| 1246 | * (default assumed to be phys_addr_t, passed by reference) | ||
| 1222 | * - 'd[234]' For a dentry name (optionally 2-4 last components) | 1247 | * - 'd[234]' For a dentry name (optionally 2-4 last components) |
| 1223 | * - 'D[234]' Same as 'd' but for a struct file | 1248 | * - 'D[234]' Same as 'd' but for a struct file |
| 1224 | * | 1249 | * |
| @@ -1353,11 +1378,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 1353 | } | 1378 | } |
| 1354 | break; | 1379 | break; |
| 1355 | case 'a': | 1380 | case 'a': |
| 1356 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | 1381 | return address_val(buf, end, ptr, spec, fmt); |
| 1357 | spec.field_width = sizeof(phys_addr_t) * 2 + 2; | ||
| 1358 | spec.base = 16; | ||
| 1359 | return number(buf, end, | ||
| 1360 | (unsigned long long) *((phys_addr_t *)ptr), spec); | ||
| 1361 | case 'd': | 1382 | case 'd': |
| 1362 | return dentry_name(buf, end, ptr, spec, fmt); | 1383 | return dentry_name(buf, end, ptr, spec, fmt); |
| 1363 | case 'D': | 1384 | case 'D': |
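With address_val() factored out, %pa keeps its old meaning (a phys_addr_t passed by reference) while the new %pad variant handles dma_addr_t, which may have a different width than phys_addr_t in some configurations. A hypothetical log line exercising both (values and names are illustrative only):

        phys_addr_t phys = 0x40000000;
        dma_addr_t handle = 0x80000000;

        pr_info("buffer at phys %pa, dma %pad\n", &phys, &handle);

Both specifiers take a pointer to the value, so the correct width is chosen inside the %p handler rather than being lost to varargs promotion.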
