author    Paul Moore <pmoore@redhat.com>    2014-03-31 09:49:07 -0400
committer Paul Moore <pmoore@redhat.com>    2014-03-31 09:49:07 -0400
commit    6d32c850621b0be75777b9102b14f6268bbd9f0f (patch)
tree      fec325f5c1ae763f5eccb3ca1254ab9d9d164b05 /lib
parent    eee3094683fbc7fe6bcdaef58c1ef31f8460cdca (diff)
parent    455c6fdbd219161bd09b1165f11699d6d73de11c (diff)
Merge tag 'v3.14' into next
Linux 3.14
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        61
-rw-r--r--  lib/Makefile              5
-rw-r--r--  lib/assoc_array.c         2
-rw-r--r--  lib/average.c             6
-rw-r--r--  lib/cmdline.c            14
-rw-r--r--  lib/cpumask.c             4
-rw-r--r--  lib/decompress_unlz4.c    1
-rw-r--r--  lib/dma-debug.c         236
-rw-r--r--  lib/dynamic_debug.c      29
-rw-r--r--  lib/flex_array.c          7
-rw-r--r--  lib/genalloc.c            5
-rw-r--r--  lib/hash.c               39
-rw-r--r--  lib/kobject.c            96
-rw-r--r--  lib/kobject_uevent.c     10
-rw-r--r--  lib/kstrtox.c             1
-rw-r--r--  lib/parser.c             62
-rw-r--r--  lib/percpu-refcount.c     3
-rw-r--r--  lib/percpu_ida.c         28
-rw-r--r--  lib/radix-tree.c          4
-rw-r--r--  lib/random32.c           13
-rw-r--r--  lib/rbtree_test.c        13
-rw-r--r--  lib/reciprocal_div.c     24
-rw-r--r--  lib/scatterlist.c         3
-rw-r--r--  lib/show_mem.c            6
-rw-r--r--  lib/swiotlb.c            38
-rw-r--r--  lib/test_module.c        33
-rw-r--r--  lib/test_user_copy.c    110
-rw-r--r--  lib/vsprintf.c           33
28 files changed, 714 insertions, 172 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index db25707aa41b..a48abeac753f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -119,7 +119,7 @@ menu "Compile-time checks and compiler options"
 
 config DEBUG_INFO
 	bool "Compile the kernel with debug info"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && !COMPILE_TEST
 	help
 	  If you say Y here the resulting kernel image will include
 	  debugging info resulting in a larger kernel image.
@@ -761,6 +761,15 @@ config PANIC_ON_OOPS_VALUE
 	default 0 if !PANIC_ON_OOPS
 	default 1 if PANIC_ON_OOPS
 
+config PANIC_TIMEOUT
+	int "panic timeout"
+	default 0
+	help
+	  Set the timeout value (in seconds) until a reboot occurs when
+	  the kernel panics. If n = 0, then we wait forever. A timeout
+	  value n > 0 will wait n seconds before rebooting, while a timeout
+	  value n < 0 will reboot immediately.
+
 config SCHED_DEBUG
 	bool "Collect scheduler debugging info"
 	depends on DEBUG_KERNEL && PROC_FS
@@ -1547,17 +1556,6 @@ config PROVIDE_OHCI1394_DMA_INIT
 
 	  See Documentation/debugging-via-ohci1394.txt for more information.
 
-config FIREWIRE_OHCI_REMOTE_DMA
-	bool "Remote debugging over FireWire with firewire-ohci"
-	depends on FIREWIRE_OHCI
-	help
-	  This option lets you use the FireWire bus for remote debugging
-	  with help of the firewire-ohci driver. It enables unfiltered
-	  remote DMA in firewire-ohci.
-	  See Documentation/debugging-via-ohci1394.txt for more information.
-
-	  If unsure, say N.
-
 config BUILD_DOCSRC
 	bool "Build targets in Documentation/ tree"
 	depends on HEADERS_CHECK
@@ -1575,8 +1573,43 @@ config DMA_API_DEBUG
 	  With this option you will be able to detect common bugs in device
 	  drivers like double-freeing of DMA mappings or freeing mappings that
 	  were never allocated.
-	  This option causes a performance degredation. Use only if you want
-	  to debug device drivers. If unsure, say N.
+
+	  This also attempts to catch cases where a page owned by DMA is
+	  accessed by the cpu in a way that could cause data corruption. For
+	  example, this enables cow_user_page() to check that the source page is
+	  not undergoing DMA.
+
+	  This option causes a performance degradation. Use only if you want to
+	  debug device drivers and dma interactions.
+
+	  If unsure, say N.
+
+config TEST_MODULE
+	tristate "Test module loading with 'hello world' module"
+	default n
+	depends on m
+	help
+	  This builds the "test_module" module that emits "Hello, world"
+	  on printk when loaded. It is designed to be used for basic
+	  evaluation of the module loading subsystem (for example when
+	  validating module verification). It lacks any extra dependencies,
+	  and will not normally be loaded by the system unless explicitly
+	  requested by name.
+
+	  If unsure, say N.
+
+config TEST_USER_COPY
+	tristate "Test user/kernel boundary protections"
+	default n
+	depends on m
+	help
+	  This builds the "test_user_copy" module that runs sanity checks
+	  on the copy_to/from_user infrastructure, making sure basic
+	  user/kernel boundary testing is working. If it fails to load,
+	  a regression has been detected in the user/kernel memory boundary
+	  protections.
+
+	  If unsure, say N.
 
 source "samples/Kconfig"
 
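A note on the new TEST_MODULE option: its help text above only promises a printk at load time. The actual lib/test_module.c added by this merge is not shown in this listing, but a minimal sketch of such a "hello world" module, assuming nothing beyond the standard module macros, would be:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/printk.h>

	static int __init test_module_init(void)
	{
		pr_warn("Hello, world\n");	/* the observable side effect at load */
		return 0;
	}
	module_init(test_module_init);

	static void __exit test_module_exit(void)
	{
		pr_warn("Goodbye\n");		/* and at unload */
	}
	module_exit(test_module_exit);

	MODULE_LICENSE("GPL");

With CONFIG_TEST_MODULE=m this kind of module is built as test_module.ko and only ever loaded by explicit request, e.g. "modprobe test_module".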
diff --git a/lib/Makefile b/lib/Makefile
index a459c31e8c6b..48140e3ba73f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,11 +26,13 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o
+	 percpu-refcount.o percpu_ida.o hash.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
+obj-$(CONFIG_TEST_MODULE) += test_module.o
+obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -43,6 +45,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 
+GCOV_PROFILE_hweight.o := n
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 1b6a44f1ec3e..c0b1007011e1 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -157,7 +157,7 @@ enum assoc_array_walk_status {
 	assoc_array_walk_tree_empty,
 	assoc_array_walk_found_terminal_node,
 	assoc_array_walk_found_wrong_shortcut,
-} status;
+};
 
 struct assoc_array_walk_result {
 	struct {
diff --git a/lib/average.c b/lib/average.c
index 99a67e662b3c..114d1beae0c7 100644
--- a/lib/average.c
+++ b/lib/average.c
@@ -53,8 +53,10 @@ EXPORT_SYMBOL(ewma_init);
  */
 struct ewma *ewma_add(struct ewma *avg, unsigned long val)
 {
-	avg->internal = avg->internal ?
-		(((avg->internal << avg->weight) - avg->internal) +
+	unsigned long internal = ACCESS_ONCE(avg->internal);
+
+	ACCESS_ONCE(avg->internal) = internal ?
+		(((internal << avg->weight) - internal) +
 			(val << avg->factor)) >> avg->weight :
 		(val << avg->factor);
 	return avg;
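The rewritten ewma_add() computes the same fixed-point exponentially weighted moving average as before; ACCESS_ONCE() only guarantees the shared state is read and written exactly once. The arithmetic is avg = (avg * (2^weight - 1) + val * 2^factor) / 2^weight, with the state kept scaled by 2^factor. A user-space sketch of the same update step, with illustrative weight/factor values:

	#include <stdio.h>

	/* EWMA step as in ewma_add(): state is scaled by 2^factor and each
	 * step keeps (2^weight - 1)/2^weight of the previous value. */
	static unsigned long ewma_step(unsigned long internal, unsigned long val,
				       unsigned int weight, unsigned int factor)
	{
		return internal ?
			(((internal << weight) - internal) +
				(val << factor)) >> weight :
			(val << factor);
	}

	int main(void)
	{
		unsigned long avg = 0;
		unsigned long samples[] = { 100, 50, 150, 100 };
		unsigned int i;

		for (i = 0; i < 4; i++) {
			/* weight 4: decay 1/16 per sample; factor 10: scale 1024 */
			avg = ewma_step(avg, samples[i], 4, 10);
			printf("avg = %lu\n", avg >> 10);	/* descale by 2^factor */
		}
		return 0;
	}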
diff --git a/lib/cmdline.c b/lib/cmdline.c
index eb6791188cf5..d4932f745e92 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -49,13 +49,13 @@ static int get_range(char **str, int *pint)
 *	3 - hyphen found to denote a range
 */
 
-int get_option (char **str, int *pint)
+int get_option(char **str, int *pint)
 {
 	char *cur = *str;
 
 	if (!cur || !(*cur))
 		return 0;
-	*pint = simple_strtol (cur, str, 0);
+	*pint = simple_strtol(cur, str, 0);
 	if (cur == *str)
 		return 0;
 	if (**str == ',') {
@@ -67,6 +67,7 @@ int get_option (char **str, int *pint)
 
 	return 1;
 }
+EXPORT_SYMBOL(get_option);
 
 /**
 *	get_options - Parse a string into a list of integers
@@ -84,13 +85,13 @@ int get_option (char **str, int *pint)
 *	the parse to end (typically a null terminator, if @str is
 *	completely parseable).
 */
 
char *get_options(const char *str, int nints, int *ints)
 {
 	int res, i = 1;
 
 	while (i < nints) {
-		res = get_option ((char **)&str, ints + i);
+		res = get_option((char **)&str, ints + i);
 		if (res == 0)
 			break;
 		if (res == 3) {
@@ -112,6 +113,7 @@ char *get_options(const char *str, int nints, int *ints)
 	ints[0] = i - 1;
 	return (char *)str;
 }
+EXPORT_SYMBOL(get_options);
 
 /**
 * memparse - parse a string with mem suffixes into a number
@@ -152,8 +154,4 @@ unsigned long long memparse(const char *ptr, char **retptr)
 
 	return ret;
 }
-
-
 EXPORT_SYMBOL(memparse);
-EXPORT_SYMBOL(get_option);
-EXPORT_SYMBOL(get_options);
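The cmdline.c hunks only normalize style and move each EXPORT_SYMBOL() next to its function; the parsing behaviour is unchanged. For reference, get_options() expands comma lists and hyphen ranges into an integer array. A caller-side sketch (kernel context; the input string is illustrative):

	/* Parsing an "isolcpus=1,3-5,8"-style value: ints[0] receives the
	 * number of integers parsed, the values follow it. */
	int ints[9];
	char *rest;

	rest = get_options("1,3-5,8", ARRAY_SIZE(ints), ints);
	/* afterwards: ints[0] == 5, ints[1..5] == {1, 3, 4, 5, 8},
	 * and *rest points at the first unparsed character */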
diff --git a/lib/cpumask.c b/lib/cpumask.c
index d327b87c99b7..b810b753c607 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -140,7 +140,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
 */
 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
 {
-	*mask = alloc_bootmem(cpumask_size());
+	*mask = memblock_virt_alloc(cpumask_size(), 0);
 }
 
 /**
@@ -161,6 +161,6 @@ EXPORT_SYMBOL(free_cpumask_var);
 */
 void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 {
-	free_bootmem(__pa(mask), cpumask_size());
+	memblock_free_early(__pa(mask), cpumask_size());
 }
 #endif
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 3e67cfad16ad..7d1e83caf8ad 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -141,6 +141,7 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
 			goto exit_2;
 		}
 
+		ret = -1;
 		if (flush && flush(outp, dest_len) != dest_len)
 			goto exit_2;
 		if (output)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d87a17a819d0..98f2d7e91a91 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -53,11 +53,26 @@ enum map_err_types {
 
 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
 
+/**
+ * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
+ * @list: node on pre-allocated free_entries list
+ * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
+ * @type: single, page, sg, coherent
+ * @pfn: page frame of the start address
+ * @offset: offset of mapping relative to pfn
+ * @size: length of the mapping
+ * @direction: enum dma_data_direction
+ * @sg_call_ents: 'nents' from dma_map_sg
+ * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
+ * @map_err_type: track whether dma_mapping_error() was checked
+ * @stacktrace: support backtraces when a violation is detected
+ */
 struct dma_debug_entry {
 	struct list_head list;
 	struct device    *dev;
 	int              type;
-	phys_addr_t      paddr;
+	unsigned long    pfn;
+	size_t           offset;
 	u64              dev_addr;
 	u64              size;
 	int              direction;
@@ -372,6 +387,11 @@ static void hash_bucket_del(struct dma_debug_entry *entry)
 	list_del(&entry->list);
 }
 
+static unsigned long long phys_addr(struct dma_debug_entry *entry)
+{
+	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
+}
+
 /*
 * Dump mapping entries for debugging purposes
 */
@@ -389,9 +409,9 @@ void debug_dma_dump_mappings(struct device *dev)
 		list_for_each_entry(entry, &bucket->list, list) {
 			if (!dev || dev == entry->dev) {
 				dev_info(entry->dev,
-					 "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n",
+					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
 					 type2name[entry->type], idx,
-					 (unsigned long long)entry->paddr,
+					 phys_addr(entry), entry->pfn,
 					 entry->dev_addr, entry->size,
 					 dir2name[entry->direction],
 					 maperr2str[entry->map_err_type]);
@@ -404,6 +424,176 @@ void debug_dma_dump_mappings(struct device *dev)
 EXPORT_SYMBOL(debug_dma_dump_mappings);
 
 /*
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key. At
+ * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
+ * the entry already exists at insertion time add a tag as a reference
+ * count for the overlapping mappings. For now, the overlap tracking
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
+ *
+ * Memory usage is mostly constrained by the maximum number of available
+ * dma-debug entries in that we need a free dma_debug_entry before
+ * inserting into the tree. In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event. dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
+ *
+ * At any time debug_dma_assert_idle() can be called to trigger a
+ * warning if any cachelines in the given page are in the active set.
+ */
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
+static DEFINE_SPINLOCK(radix_lock);
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
+
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+		(entry->offset >> L1_CACHE_SHIFT);
+}
+
+static int active_cacheline_read_overlap(phys_addr_t cln)
+{
+	int overlap = 0, i;
+
+	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
+		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
+			overlap |= 1 << i;
+	return overlap;
+}
+
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
+{
+	int i;
+
+	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
+		return overlap;
+
+	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
+		if (overlap & 1 << i)
+			radix_tree_tag_set(&dma_active_cacheline, cln, i);
+		else
+			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
+
+	return overlap;
+}
+
+static void active_cacheline_inc_overlap(phys_addr_t cln)
+{
+	int overlap = active_cacheline_read_overlap(cln);
+
+	overlap = active_cacheline_set_overlap(cln, ++overlap);
+
+	/* If we overflowed the overlap counter then we're potentially
+	 * leaking dma-mappings. Otherwise, if maps and unmaps are
+	 * balanced then this overflow may cause false negatives in
+	 * debug_dma_assert_idle() as the cacheline may be marked idle
+	 * prematurely.
+	 */
+	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
+		  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
+}
+
+static int active_cacheline_dec_overlap(phys_addr_t cln)
+{
+	int overlap = active_cacheline_read_overlap(cln);
+
+	return active_cacheline_set_overlap(cln, --overlap);
+}
+
+static int active_cacheline_insert(struct dma_debug_entry *entry)
+{
+	phys_addr_t cln = to_cacheline_number(entry);
+	unsigned long flags;
+	int rc;
+
+	/* If the device is not writing memory then we don't have any
+	 * concerns about the cpu consuming stale data. This mitigates
+	 * legitimate usages of overlapping mappings.
+	 */
+	if (entry->direction == DMA_TO_DEVICE)
+		return 0;
+
+	spin_lock_irqsave(&radix_lock, flags);
+	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
+	if (rc == -EEXIST)
+		active_cacheline_inc_overlap(cln);
+	spin_unlock_irqrestore(&radix_lock, flags);
+
+	return rc;
+}
+
+static void active_cacheline_remove(struct dma_debug_entry *entry)
+{
+	phys_addr_t cln = to_cacheline_number(entry);
+	unsigned long flags;
+
+	/* ...mirror the insert case */
+	if (entry->direction == DMA_TO_DEVICE)
+		return;
+
+	spin_lock_irqsave(&radix_lock, flags);
+	/* since we are counting overlaps the final put of the
+	 * cacheline will occur when the overlap count is 0.
+	 * active_cacheline_dec_overlap() returns -1 in that case
+	 */
+	if (active_cacheline_dec_overlap(cln) < 0)
+		radix_tree_delete(&dma_active_cacheline, cln);
+	spin_unlock_irqrestore(&radix_lock, flags);
+}
+
+/**
+ * debug_dma_assert_idle() - assert that a page is not undergoing dma
+ * @page: page to lookup in the dma_active_cacheline tree
+ *
+ * Place a call to this routine in cases where the cpu touching the page
+ * before the dma completes (page is dma_unmapped) will lead to data
+ * corruption.
+ */
+void debug_dma_assert_idle(struct page *page)
+{
+	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+	struct dma_debug_entry *entry = NULL;
+	void **results = (void **) &ents;
+	unsigned int nents, i;
+	unsigned long flags;
+	phys_addr_t cln;
+
+	if (!page)
+		return;
+
+	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
+	spin_lock_irqsave(&radix_lock, flags);
+	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+				       CACHELINES_PER_PAGE);
+	for (i = 0; i < nents; i++) {
+		phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+
+		if (ent_cln == cln) {
+			entry = ents[i];
+			break;
+		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+			break;
+	}
+	spin_unlock_irqrestore(&radix_lock, flags);
+
+	if (!entry)
+		return;
+
+	cln = to_cacheline_number(entry);
+	err_printk(entry->dev, entry,
+		   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+		   &cln);
+}
+
+/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
@@ -411,10 +601,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 {
 	struct hash_bucket *bucket;
 	unsigned long flags;
+	int rc;
 
 	bucket = get_hash_bucket(entry, &flags);
 	hash_bucket_add(bucket, entry);
 	put_hash_bucket(bucket, &flags);
+
+	rc = active_cacheline_insert(entry);
+	if (rc == -ENOMEM) {
+		pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
+		global_disable = true;
+	}
+
+	/* TODO: report -EEXIST errors here as overlapping mappings are
+	 * not supported by the DMA API
+	 */
 }
 
 static struct dma_debug_entry *__dma_entry_alloc(void)
@@ -469,6 +670,8 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 {
 	unsigned long flags;
 
+	active_cacheline_remove(entry);
+
 	/*
 	 * add to beginning of the list - this way the entries are
 	 * more likely cache hot when they are reallocated.
@@ -895,15 +1098,15 @@ static void check_unmap(struct dma_debug_entry *ref)
 			   ref->dev_addr, ref->size,
 			   type2name[entry->type], type2name[ref->type]);
 	} else if ((entry->type == dma_debug_coherent) &&
-		   (ref->paddr != entry->paddr)) {
+		   (phys_addr(ref) != phys_addr(entry))) {
 		err_printk(ref->dev, entry, "DMA-API: device driver frees "
 			   "DMA memory with different CPU address "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[cpu alloc address=0x%016llx] "
 			   "[cpu free address=0x%016llx]",
 			   ref->dev_addr, ref->size,
-			   (unsigned long long)entry->paddr,
-			   (unsigned long long)ref->paddr);
+			   phys_addr(entry),
+			   phys_addr(ref));
 	}
 
 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
@@ -1052,7 +1255,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 
 	entry->dev       = dev;
 	entry->type      = dma_debug_page;
-	entry->paddr     = page_to_phys(page) + offset;
+	entry->pfn       = page_to_pfn(page);
+	entry->offset    = offset,
 	entry->dev_addr  = dma_addr;
 	entry->size      = size;
 	entry->direction = direction;
@@ -1148,7 +1352,8 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 
 		entry->type           = dma_debug_sg;
 		entry->dev            = dev;
-		entry->paddr          = sg_phys(s);
+		entry->pfn            = page_to_pfn(sg_page(s));
+		entry->offset         = s->offset,
 		entry->size           = sg_dma_len(s);
 		entry->dev_addr       = sg_dma_address(s);
 		entry->direction      = direction;
@@ -1198,7 +1403,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		struct dma_debug_entry ref = {
 			.type           = dma_debug_sg,
 			.dev            = dev,
-			.paddr          = sg_phys(s),
+			.pfn            = page_to_pfn(sg_page(s)),
+			.offset         = s->offset,
 			.dev_addr       = sg_dma_address(s),
 			.size           = sg_dma_len(s),
 			.direction      = dir,
@@ -1233,7 +1439,8 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 
 	entry->type      = dma_debug_coherent;
 	entry->dev       = dev;
-	entry->paddr     = virt_to_phys(virt);
+	entry->pfn       = page_to_pfn(virt_to_page(virt));
+	entry->offset    = (size_t) virt & PAGE_MASK;
 	entry->size      = size;
 	entry->dev_addr  = dma_addr;
 	entry->direction = DMA_BIDIRECTIONAL;
@@ -1248,7 +1455,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 	struct dma_debug_entry ref = {
 		.type           = dma_debug_coherent,
 		.dev            = dev,
-		.paddr          = virt_to_phys(virt),
+		.pfn            = page_to_pfn(virt_to_page(virt)),
+		.offset         = (size_t) virt & PAGE_MASK,
 		.dev_addr       = addr,
 		.size           = size,
 		.direction      = DMA_BIDIRECTIONAL,
@@ -1356,7 +1564,8 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		struct dma_debug_entry ref = {
 			.type           = dma_debug_sg,
 			.dev            = dev,
-			.paddr          = sg_phys(s),
+			.pfn            = page_to_pfn(sg_page(s)),
+			.offset         = s->offset,
 			.dev_addr       = sg_dma_address(s),
 			.size           = sg_dma_len(s),
 			.direction      = direction,
@@ -1388,7 +1597,8 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		struct dma_debug_entry ref = {
 			.type           = dma_debug_sg,
 			.dev            = dev,
-			.paddr          = sg_phys(s),
+			.pfn            = page_to_pfn(sg_page(s)),
+			.offset         = s->offset,
 			.dev_addr       = sg_dma_address(s),
 			.size           = sg_dma_len(s),
 			.direction      = direction,
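The comment block added above describes how overlapping mappings of the same cacheline are reference-counted by packing a small integer into the radix tree's per-entry tags. A stand-alone user-space sketch of just that encoding (independent of the kernel's radix tree; RADIX_TREE_MAX_TAGS is assumed to be 3 here):

	#include <stdio.h>

	#define MAX_TAGS 3				/* mirrors RADIX_TREE_MAX_TAGS */
	#define MAX_OVERLAP ((1 << MAX_TAGS) - 1)	/* counts 0..7 */

	/* One tracked cacheline: tag[i] stores bit i of the overlap count,
	 * just as active_cacheline_{read,set}_overlap() do with tree tags. */
	struct cacheline { int tag[MAX_TAGS]; };

	static int read_overlap(struct cacheline *c)
	{
		int overlap = 0, i;

		for (i = MAX_TAGS - 1; i >= 0; i--)
			if (c->tag[i])
				overlap |= 1 << i;
		return overlap;
	}

	static void set_overlap(struct cacheline *c, int overlap)
	{
		int i;

		if (overlap > MAX_OVERLAP || overlap < 0)
			return;			/* saturate, as the kernel code does */
		for (i = MAX_TAGS - 1; i >= 0; i--)
			c->tag[i] = !!(overlap & 1 << i);
	}

	int main(void)
	{
		struct cacheline cl = { { 0 } };

		set_overlap(&cl, read_overlap(&cl) + 1);	/* second map of the cacheline */
		set_overlap(&cl, read_overlap(&cl) + 1);	/* third map */
		printf("overlap = %d\n", read_overlap(&cl));	/* prints 2 */
		return 0;
	}

The tree entry itself represents the first mapping, so the tag-encoded count only tracks the additional overlaps; when a decrement takes it below zero, the entry is deleted and the cacheline is idle again.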
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c37aeacd7651..7288e38e1757 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -8,6 +8,7 @@
 * By Greg Banks <gnb@melbourne.sgi.com>
 * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved.
 * Copyright (C) 2011 Bart Van Assche. All Rights Reserved.
+ * Copyright (C) 2013 Du, Changbin <changbin.du@gmail.com>
 */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
@@ -24,6 +25,7 @@
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
+#include <linux/parser.h>
 #include <linux/string_helpers.h>
 #include <linux/uaccess.h>
 #include <linux/dynamic_debug.h>
@@ -147,7 +149,8 @@ static int ddebug_change(const struct ddebug_query *query,
 	list_for_each_entry(dt, &ddebug_tables, link) {
 
 		/* match against the module name */
-		if (query->module && strcmp(query->module, dt->mod_name))
+		if (query->module &&
+		    !match_wildcard(query->module, dt->mod_name))
 			continue;
 
 		for (i = 0; i < dt->num_ddebugs; i++) {
@@ -155,14 +158,16 @@
 
 			/* match against the source filename */
 			if (query->filename &&
-			    strcmp(query->filename, dp->filename) &&
-			    strcmp(query->filename, kbasename(dp->filename)) &&
-			    strcmp(query->filename, trim_prefix(dp->filename)))
+			    !match_wildcard(query->filename, dp->filename) &&
+			    !match_wildcard(query->filename,
+					   kbasename(dp->filename)) &&
+			    !match_wildcard(query->filename,
+					   trim_prefix(dp->filename)))
 				continue;
 
 			/* match against the function */
 			if (query->function &&
-			    strcmp(query->function, dp->function))
+			    !match_wildcard(query->function, dp->function))
 				continue;
 
 			/* match against the format */
@@ -263,14 +268,12 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
 */
 static inline int parse_lineno(const char *str, unsigned int *val)
 {
-	char *end = NULL;
 	BUG_ON(str == NULL);
 	if (*str == '\0') {
 		*val = 0;
 		return 0;
 	}
-	*val = simple_strtoul(str, &end, 10);
-	if (end == NULL || end == str || *end != '\0') {
+	if (kstrtouint(str, 10, val) < 0) {
 		pr_err("bad line-number: %s\n", str);
 		return -EINVAL;
 	}
@@ -343,14 +346,14 @@ static int ddebug_parse_query(char *words[], int nwords,
 	}
 	if (last)
 		*last++ = '\0';
-	if (parse_lineno(first, &query->first_lineno) < 0) {
-		pr_err("line-number is <0\n");
+	if (parse_lineno(first, &query->first_lineno) < 0)
 		return -EINVAL;
-	}
 	if (last) {
 		/* range <first>-<last> */
-		if (parse_lineno(last, &query->last_lineno)
-				< query->first_lineno) {
+		if (parse_lineno(last, &query->last_lineno) < 0)
+			return -EINVAL;
+
+		if (query->last_lineno < query->first_lineno) {
 			pr_err("last-line:%d < 1st-line:%d\n",
 				query->last_lineno,
 				query->first_lineno);
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 6948a6692fc4..2eed22fa507c 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -90,8 +90,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
 {
 	struct flex_array *ret;
 	int elems_per_part = 0;
-	int reciprocal_elems = 0;
 	int max_size = 0;
+	struct reciprocal_value reciprocal_elems = { 0 };
 
 	if (element_size) {
 		elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
@@ -119,6 +119,11 @@ EXPORT_SYMBOL(flex_array_alloc);
 static int fa_element_to_part_nr(struct flex_array *fa,
 					unsigned int element_nr)
 {
+	/*
+	 * if element_size == 0 we don't get here, so we never touch
+	 * the zeroed fa->reciprocal_elems, which would yield invalid
+	 * results
+	 */
 	return reciprocal_divide(element_nr, fa->reciprocal_elems);
 }
 
diff --git a/lib/genalloc.c b/lib/genalloc.c
index dda31168844f..bdb9a456bcbb 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -316,7 +316,7 @@ EXPORT_SYMBOL(gen_pool_alloc);
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
- * @dma: dma-view physical address
+ * @dma: dma-view physical address return value. Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
@@ -334,7 +334,8 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
 	if (!vaddr)
 		return NULL;
 
-	*dma = gen_pool_virt_to_phys(pool, vaddr);
+	if (dma)
+		*dma = gen_pool_virt_to_phys(pool, vaddr);
 
 	return (void *)vaddr;
 }
diff --git a/lib/hash.c b/lib/hash.c
new file mode 100644
index 000000000000..fea973f4bd57
--- /dev/null
+++ b/lib/hash.c
@@ -0,0 +1,39 @@
+/* General purpose hashing library
+ *
+ * That's a start of a kernel hashing library, which can be extended
+ * with further algorithms in future. arch_fast_hash{2,}() will
+ * eventually resolve to an architecture optimized implementation.
+ *
+ * Copyright 2013 Francesco Fusco <ffusco@redhat.com>
+ * Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
+ * Copyright 2013 Thomas Graf <tgraf@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+#include <linux/jhash.h>
+#include <linux/hash.h>
+#include <linux/cache.h>
+
+static struct fast_hash_ops arch_hash_ops __read_mostly = {
+	.hash  = jhash,
+	.hash2 = jhash2,
+};
+
+u32 arch_fast_hash(const void *data, u32 len, u32 seed)
+{
+	return arch_hash_ops.hash(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash);
+
+u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
+{
+	return arch_hash_ops.hash2(data, len, seed);
+}
+EXPORT_SYMBOL_GPL(arch_fast_hash2);
+
+static int __init hashlib_init(void)
+{
+	setup_arch_fast_hash(&arch_hash_ops);
+	return 0;
+}
+early_initcall(hashlib_init);
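arch_fast_hash() and arch_fast_hash2() default to jhash/jhash2 and can be redirected to an architecture-optimized routine via setup_arch_fast_hash(). A caller-side kernel sketch (the struct and seed are illustrative, not taken from this diff); note that arch_fast_hash2() takes its length in u32 words, as jhash2 does:

	/* Hashing a flow key, as a networking user of this API might. */
	struct flow_key {
		u32 src, dst;
		u16 sport, dport;
	};

	static u32 flow_hash(const struct flow_key *key, u32 seed)
	{
		/* sizeof(*key) is 12 bytes, i.e. 3 u32 words */
		return arch_fast_hash2((const u32 *)key, sizeof(*key) / 4, seed);
	}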
diff --git a/lib/kobject.c b/lib/kobject.c
index 5b4b8886435e..cb14aeac4cca 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -13,11 +13,11 @@
 */
 
 #include <linux/kobject.h>
-#include <linux/kobj_completion.h>
 #include <linux/string.h>
 #include <linux/export.h>
 #include <linux/stat.h>
 #include <linux/slab.h>
+#include <linux/random.h>
 
 /**
 * kobject_namespace - return @kobj's namespace tag
@@ -65,13 +65,17 @@ static int populate_dir(struct kobject *kobj)
 
 static int create_dir(struct kobject *kobj)
 {
+	const struct kobj_ns_type_operations *ops;
 	int error;
 
 	error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj));
-	if (!error) {
-		error = populate_dir(kobj);
-		if (error)
-			sysfs_remove_dir(kobj);
+	if (error)
+		return error;
+
+	error = populate_dir(kobj);
+	if (error) {
+		sysfs_remove_dir(kobj);
+		return error;
 	}
 
 	/*
@@ -80,7 +84,20 @@ static int create_dir(struct kobject *kobj)
 	 */
 	sysfs_get(kobj->sd);
 
-	return error;
+	/*
+	 * If @kobj has ns_ops, its children need to be filtered based on
+	 * their namespace tags. Enable namespace support on @kobj->sd.
+	 */
+	ops = kobj_child_ns_ops(kobj);
+	if (ops) {
+		BUG_ON(ops->type <= KOBJ_NS_TYPE_NONE);
+		BUG_ON(ops->type >= KOBJ_NS_TYPES);
+		BUG_ON(!kobj_ns_type_registered(ops->type));
+
+		kernfs_enable_ns(kobj->sd);
+	}
+
+	return 0;
 }
 
 static int get_kobj_path_length(struct kobject *kobj)
@@ -247,8 +264,10 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
 		return 0;
 
 	kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
-	if (!kobj->name)
+	if (!kobj->name) {
+		kobj->name = old_name;
 		return -ENOMEM;
+	}
 
 	/* ewww... some of these buggers have '/' in the name ... */
 	while ((s = strchr(kobj->name, '/')))
@@ -346,7 +365,7 @@ static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
 *
 * If @parent is set, then the parent of the @kobj will be set to it.
 * If @parent is NULL, then the parent of the @kobj will be set to the
- * kobject associted with the kset assigned to this kobject. If no kset
+ * kobject associated with the kset assigned to this kobject. If no kset
 * is assigned to the kobject, then the kobject will be located in the
 * root of the sysfs tree.
 *
@@ -536,7 +555,7 @@ out:
 */
 void kobject_del(struct kobject *kobj)
 {
-	struct sysfs_dirent *sd;
+	struct kernfs_node *sd;
 
 	if (!kobj)
 		return;
@@ -625,10 +644,12 @@ static void kobject_release(struct kref *kref)
 {
 	struct kobject *kobj = container_of(kref, struct kobject, kref);
 #ifdef CONFIG_DEBUG_KOBJECT_RELEASE
-	pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
-		 kobject_name(kobj), kobj, __func__, kobj->parent);
+	unsigned long delay = HZ + HZ * (get_random_int() & 0x3);
+	pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n",
+		 kobject_name(kobj), kobj, __func__, kobj->parent, delay);
 	INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
-	schedule_delayed_work(&kobj->release, HZ);
+
+	schedule_delayed_work(&kobj->release, delay);
 #else
 	kobject_cleanup(kobj);
 #endif
@@ -758,55 +779,7 @@ const struct sysfs_ops kobj_sysfs_ops = {
 	.show	= kobj_attr_show,
 	.store	= kobj_attr_store,
 };
-
-/**
- * kobj_completion_init - initialize a kobj_completion object.
- * @kc: kobj_completion
- * @ktype: type of kobject to initialize
- *
- * kobj_completion structures can be embedded within structures with different
- * lifetime rules. During the release of the enclosing object, we can
- * wait on the release of the kobject so that we don't free it while it's
- * still busy.
- */
-void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype)
-{
-	init_completion(&kc->kc_unregister);
-	kobject_init(&kc->kc_kobj, ktype);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_init);
-
-/**
- * kobj_completion_release - release a kobj_completion object
- * @kobj: kobject embedded in kobj_completion
- *
- * Used with kobject_release to notify waiters that the kobject has been
- * released.
- */
-void kobj_completion_release(struct kobject *kobj)
-{
-	struct kobj_completion *kc = kobj_to_kobj_completion(kobj);
-	complete(&kc->kc_unregister);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_release);
-
-/**
- * kobj_completion_del_and_wait - release the kobject and wait for it
- * @kc: kobj_completion object to release
- *
- * Delete the kobject from sysfs and drop the reference count. Then wait
- * until any other outstanding references are also dropped. This routine
- * is only necessary once other references may have been taken on the
- * kobject. Typically this happens when the kobject has been published
- * to sysfs via kobject_add.
- */
-void kobj_completion_del_and_wait(struct kobj_completion *kc)
-{
-	kobject_del(&kc->kc_kobj);
-	kobject_put(&kc->kc_kobj);
-	wait_for_completion(&kc->kc_unregister);
-}
-EXPORT_SYMBOL_GPL(kobj_completion_del_and_wait);
+EXPORT_SYMBOL_GPL(kobj_sysfs_ops);
 
 /**
 * kset_register - initialize and add a kset.
@@ -835,6 +808,7 @@ void kset_unregister(struct kset *k)
 {
 	if (!k)
 		return;
+	kobject_del(&k->kobj);
 	kobject_put(&k->kobj);
 }
 
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 52e5abbc41db..5f72767ddd9b 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -88,11 +88,17 @@ out:
 #ifdef CONFIG_NET
 static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
 {
-	struct kobject *kobj = data;
+	struct kobject *kobj = data, *ksobj;
 	const struct kobj_ns_type_operations *ops;
 
 	ops = kobj_ns_ops(kobj);
-	if (ops) {
+	if (!ops && kobj->kset) {
+		ksobj = &kobj->kset->kobj;
+		if (ksobj->parent != NULL)
+			ops = kobj_ns_ops(ksobj->parent);
+	}
+
+	if (ops && ops->netlink_ns && kobj->ktype->namespace) {
 		const void *sock_ns, *ns;
 		ns = kobj->ktype->namespace(kobj);
 		sock_ns = ops->netlink_ns(dsk);
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index f78ae0c0c4e2..ec8da78df9be 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -92,7 +92,6 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
 	rv = _parse_integer(s, base, &_res);
 	if (rv & KSTRTOX_OVERFLOW)
 		return -ERANGE;
-	rv &= ~KSTRTOX_OVERFLOW;
 	if (rv == 0)
 		return -EINVAL;
 	s += rv;
diff --git a/lib/parser.c b/lib/parser.c
index 807b2aaa33fa..b6d11631231b 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -113,6 +113,7 @@ int match_token(char *s, const match_table_t table, substring_t args[])
 
 	return p->token;
 }
+EXPORT_SYMBOL(match_token);
 
 /**
 * match_number: scan a number in the given base from a substring_t
@@ -163,6 +164,7 @@ int match_int(substring_t *s, int *result)
 {
 	return match_number(s, result, 0);
 }
+EXPORT_SYMBOL(match_int);
 
 /**
 * match_octal: - scan an octal representation of an integer from a substring_t
@@ -177,6 +179,7 @@ int match_octal(substring_t *s, int *result)
 {
 	return match_number(s, result, 8);
 }
+EXPORT_SYMBOL(match_octal);
 
 /**
 * match_hex: - scan a hex representation of an integer from a substring_t
@@ -191,6 +194,58 @@ int match_hex(substring_t *s, int *result)
 {
 	return match_number(s, result, 16);
 }
+EXPORT_SYMBOL(match_hex);
+
+/**
+ * match_wildcard: - parse if a string matches given wildcard pattern
+ * @pattern: wildcard pattern
+ * @str: the string to be parsed
+ *
+ * Description: Parse the string @str to check if it matches the wildcard
+ * pattern @pattern. The pattern may contain two types of wildcards:
+ *   '*' - matches zero or more characters
+ *   '?' - matches one character
+ * If it's matched, return true, else return false.
+ */
+bool match_wildcard(const char *pattern, const char *str)
+{
+	const char *s = str;
+	const char *p = pattern;
+	bool star = false;
+
+	while (*s) {
+		switch (*p) {
+		case '?':
+			s++;
+			p++;
+			break;
+		case '*':
+			star = true;
+			str = s;
+			if (!*++p)
+				return true;
+			pattern = p;
+			break;
+		default:
+			if (*s == *p) {
+				s++;
+				p++;
+			} else {
+				if (!star)
+					return false;
+				str++;
+				s = str;
+				p = pattern;
+			}
+			break;
+		}
+	}
+
+	if (*p == '*')
+		++p;
+	return !*p;
+}
+EXPORT_SYMBOL(match_wildcard);
 
 /**
 * match_strlcpy: - Copy the characters from a substring_t to a sized buffer
@@ -213,6 +268,7 @@ size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
 	}
 	return ret;
 }
+EXPORT_SYMBOL(match_strlcpy);
 
 /**
 * match_strdup: - allocate a new string with the contents of a substring_t
@@ -230,10 +286,4 @@ char *match_strdup(const substring_t *s)
 	match_strlcpy(p, s, sz);
 	return p;
 }
-
-EXPORT_SYMBOL(match_token);
-EXPORT_SYMBOL(match_int);
-EXPORT_SYMBOL(match_octal);
-EXPORT_SYMBOL(match_hex);
-EXPORT_SYMBOL(match_strlcpy);
-EXPORT_SYMBOL(match_strdup);
+EXPORT_SYMBOL(match_strdup);
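match_wildcard() is the classic greedy '*'/'?' matcher: on a mismatch after a '*' it backs up to the character following the last star and retries one position further along the string, so no recursion or extra storage is needed. It can be exercised in user space with a verbatim copy of the body above (the test harness itself is illustrative):

	#include <assert.h>
	#include <stdbool.h>

	/* user-space copy of the match_wildcard() body added above */
	static bool match_wildcard(const char *pattern, const char *str)
	{
		const char *s = str, *p = pattern;
		bool star = false;

		while (*s) {
			switch (*p) {
			case '?':
				s++; p++;
				break;
			case '*':
				star = true;
				str = s;		/* remember restart point */
				if (!*++p)
					return true;	/* trailing '*' matches rest */
				pattern = p;
				break;
			default:
				if (*s == *p) {
					s++; p++;
				} else {
					if (!star)
						return false;
					str++;		/* retry one char further */
					s = str;
					p = pattern;
				}
				break;
			}
		}

		if (*p == '*')
			++p;
		return !*p;
	}

	int main(void)
	{
		assert(match_wildcard("usb*", "usbcore"));    /* '*' spans "core" */
		assert(match_wildcard("*core", "usbcore"));   /* leading '*' backtracks */
		assert(match_wildcard("usb?ore", "usbcore")); /* '?' eats exactly one char */
		assert(!match_wildcard("usb?", "usbcore"));   /* '?' cannot span four chars */
		return 0;
	}

This is what lets dynamic_debug queries such as 'file dev* +p' match whole groups of sources, per the ddebug_change() changes earlier in this diff.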
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 1a53d497a8c5..963b7034a51b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -120,6 +120,9 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 
 	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
 
+	WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
+		  atomic_read(&ref->count));
+
 	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
 	if (ref->confirm_kill)
 		ref->confirm_kill(ref);
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 9d054bf91d0f..93d145e5539c 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr,
 /*
 * Try to steal tags from a remote cpu's percpu freelist.
 *
- * We first check how many percpu freelists have tags - we don't steal tags
- * unless enough percpu freelists have tags on them that it's possible more than
- * half the total tags could be stuck on remote percpu freelists.
+ * We first check how many percpu freelists have tags
 *
 * Then we iterate through the cpus until we find some tags - we don't attempt
 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool,
 	struct percpu_ida_cpu *remote;
 
 	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
-	     cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
-	     cpus_have_tags--) {
+	     cpus_have_tags; cpus_have_tags--) {
 		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
 
 		if (cpu >= nr_cpu_ids) {
@@ -132,22 +129,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
 /**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
- * @gfp: gfp flags
+ * @state: task state for prepare_to_wait
 *
 * Returns a tag - an integer in the range [0..nr_tags) (passed to
 * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
 *
 * Safe to be called from interrupt context (assuming it isn't passed
- * __GFP_WAIT, of course).
+ * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
 *
 * @gfp indicates whether or not to wait until a free id is available (it's not
 * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
 * however long it takes until another thread frees an id (same semantics as a
 * mempool).
 *
- * Will not fail if passed __GFP_WAIT.
+ * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
 */
-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
+int percpu_ida_alloc(struct percpu_ida *pool, int state)
 {
 	DEFINE_WAIT(wait);
 	struct percpu_ida_cpu *tags;
@@ -174,7 +171,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 		 *
 		 * global lock held and irqs disabled, don't need percpu lock
 		 */
-		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
+		if (state != TASK_RUNNING)
+			prepare_to_wait(&pool->wait, &wait, state);
 
 		if (!tags->nr_free)
 			alloc_global_tags(pool, tags);
@@ -191,16 +189,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 		spin_unlock(&pool->lock);
 		local_irq_restore(flags);
 
-		if (tag >= 0 || !(gfp & __GFP_WAIT))
+		if (tag >= 0 || state == TASK_RUNNING)
 			break;
 
+		if (signal_pending_state(state, current)) {
+			tag = -ERESTARTSYS;
+			break;
+		}
+
 		schedule();
 
 		local_irq_save(flags);
 		tags = this_cpu_ptr(pool->tag_cpu);
 	}
+	if (state != TASK_RUNNING)
+		finish_wait(&pool->wait, &wait);
 
-	finish_wait(&pool->wait, &wait);
 	return tag;
 }
 EXPORT_SYMBOL_GPL(percpu_ida_alloc);
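With this change callers pass a task state rather than gfp flags: TASK_RUNNING means "don't sleep" (replacing the old !__GFP_WAIT case), while TASK_INTERRUPTIBLE additionally lets the wait be broken by a signal, in which case -ERESTARTSYS comes back. A caller-side sketch, assuming 'pool' is an already-initialised struct percpu_ida:

	/* Allocate a tag; may sleep, and honours signals. */
	int tag = percpu_ida_alloc(&pool, TASK_INTERRUPTIBLE);

	if (tag == -ERESTARTSYS)
		return tag;	/* interrupted by a signal */
	if (tag < 0)
		return -ENOSPC;	/* only possible for non-sleeping states */
	/* ... use tag ... */
	percpu_ida_free(&pool, tag);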
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7811ed3b4e70..bd4a8dfdf0b8 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1253,8 +1253,10 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
 
 		node = indirect_to_ptr(node);
 		max_index = radix_tree_maxindex(node->height);
-		if (cur_index > max_index)
+		if (cur_index > max_index) {
+			rcu_read_unlock();
 			break;
+		}
 
 		cur_index = __locate(node, item, cur_index, &found_index);
 		rcu_read_unlock();
diff --git a/lib/random32.c b/lib/random32.c
index 1e5b2df44291..614896778700 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -244,8 +244,19 @@ static void __prandom_reseed(bool late)
 	static bool latch = false;
 	static DEFINE_SPINLOCK(lock);
 
+	/* Asking for random bytes might result in bytes getting
+	 * moved into the nonblocking pool and thus marking it
+	 * as initialized. In this case we would double back into
+	 * this function and attempt to do a late reseed.
+	 * Ignore the pointless attempt to reseed again if we're
+	 * already waiting for bytes when the nonblocking pool
+	 * got initialized.
+	 */
+
 	/* only allow initial seeding (late == false) once */
-	spin_lock_irqsave(&lock, flags);
+	if (!spin_trylock_irqsave(&lock, flags))
+		return;
+
 	if (latch && !late)
 		goto out;
 	latch = true;
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 31dd4ccd3baa..8b3c9dc88262 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -8,8 +8,8 @@
 #define CHECK_LOOPS 100
 
 struct test_node {
-	struct rb_node rb;
 	u32 key;
+	struct rb_node rb;
 
 	/* following fields used for testing augmented rbtree functionality */
 	u32 val;
@@ -114,6 +114,16 @@ static int black_path_count(struct rb_node *rb)
 	return count;
 }
 
+static void check_postorder_foreach(int nr_nodes)
+{
+	struct test_node *cur, *n;
+	int count = 0;
+	rbtree_postorder_for_each_entry_safe(cur, n, &root, rb)
+		count++;
+
+	WARN_ON_ONCE(count != nr_nodes);
+}
+
 static void check_postorder(int nr_nodes)
 {
 	struct rb_node *rb;
@@ -148,6 +158,7 @@ static void check(int nr_nodes)
 	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);
 
 	check_postorder(nr_nodes);
+	check_postorder_foreach(nr_nodes);
 }
 
 static void check_augmented(int nr_nodes)
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
index 75510e94f7d0..464152410c51 100644
--- a/lib/reciprocal_div.c
+++ b/lib/reciprocal_div.c
@@ -1,11 +1,27 @@
1#include <linux/kernel.h>
1#include <asm/div64.h> 2#include <asm/div64.h>
2#include <linux/reciprocal_div.h> 3#include <linux/reciprocal_div.h>
3#include <linux/export.h> 4#include <linux/export.h>
4 5
5u32 reciprocal_value(u32 k) 6/*
7 * For a description of the algorithm please have a look at
8 * include/linux/reciprocal_div.h
9 */
10
11struct reciprocal_value reciprocal_value(u32 d)
6{ 12{
7 u64 val = (1LL << 32) + (k - 1); 13 struct reciprocal_value R;
8 do_div(val, k); 14 u64 m;
9 return (u32)val; 15 int l;
16
17 l = fls(d - 1);
18 m = ((1ULL << 32) * ((1ULL << l) - d));
19 do_div(m, d);
20 ++m;
21 R.m = (u32)m;
22 R.sh1 = min(l, 1);
23 R.sh2 = max(l - 1, 0);
24
25 return R;
10} 26}
11EXPORT_SYMBOL(reciprocal_value); 27EXPORT_SYMBOL(reciprocal_value);
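The new reciprocal_value() precomputes the (m, sh1, sh2) triple of a Granlund/Montgomery-style division-by-invariant-integer scheme, so each later division costs one widening multiply and a few shifts instead of a hardware divide. A user-space sketch pairing it with the divide step (the reciprocal_divide() below is assumed to match the one defined in include/linux/reciprocal_div.h in this series):

	#include <stdio.h>
	#include <stdint.h>

	struct reciprocal_value { uint32_t m; uint8_t sh1, sh2; };

	static int fls32(uint32_t x)		/* user-space stand-in for fls() */
	{
		int r = 0;
		while (x) { x >>= 1; r++; }
		return r;
	}

	static struct reciprocal_value reciprocal_value(uint32_t d)
	{
		struct reciprocal_value R;
		int l = fls32(d - 1);
		uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

		R.m = (uint32_t)m;
		R.sh1 = l > 1 ? 1 : l;		/* min(l, 1) */
		R.sh2 = l > 1 ? l - 1 : 0;	/* max(l - 1, 0) */
		return R;
	}

	/* divide step: t = hi32(a * m), result = (t + (a - t) >> sh1) >> sh2 */
	static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
	{
		uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

		return (t + ((a - t) >> R.sh1)) >> R.sh2;
	}

	int main(void)
	{
		struct reciprocal_value R = reciprocal_value(7);
		uint32_t a = 1000000;

		printf("%u / 7 = %u (expect %u)\n", a, reciprocal_divide(a, R), a / 7);
		return 0;	/* prints 142857 both ways */
	}

Worked through for d = 7: l = fls(6) = 3, m = floor(2^32 * 1 / 7) + 1 = 613566757, sh1 = 1, sh2 = 2; for a = 1000000, t = 142857, and (t + (a - t) >> 1) >> 2 = 142857, matching a / 7. This is why flex_array.c above now stores a struct reciprocal_value instead of a single u32.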
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d16fa295ae1d..3a8e8e8fb2a5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -495,7 +495,7 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
 *	true if @miter contains the valid mapping. false if end of sg
 *	list is reached.
 */
-static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
+bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
 {
 	sg_miter_stop(miter);
 
@@ -513,6 +513,7 @@ static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
 
 	return true;
 }
+EXPORT_SYMBOL(sg_miter_skip);
 
 /**
 * sg_miter_next - proceed mapping iterator to the next mapping
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 5847a4921b8e..09225796991a 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -17,9 +17,6 @@ void show_mem(unsigned int filter)
 	printk("Mem-Info:\n");
 	show_free_areas(filter);
 
-	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
-		return;
-
 	for_each_online_pgdat(pgdat) {
 		unsigned long flags;
 		int zoneid;
@@ -46,4 +43,7 @@ void show_mem(unsigned int filter)
 	printk("%lu pages in pagetable cache\n",
 		quicklist_total_size());
 #endif
+#ifdef CONFIG_MEMORY_FAILURE
+	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
+#endif
 }
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index e4399fa65ad6..b604b831f4d1 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -172,8 +172,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	v_overflow_buffer = alloc_bootmem_low_pages_nopanic(
-						PAGE_ALIGN(io_tlb_overflow));
+	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
+						PAGE_ALIGN(io_tlb_overflow),
+						PAGE_SIZE);
 	if (!v_overflow_buffer)
 		return -ENOMEM;
 
@@ -184,11 +185,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
+	io_tlb_list = memblock_virt_alloc(
+				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
+				PAGE_SIZE);
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
+	io_tlb_orig_addr = memblock_virt_alloc(
+				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
+				PAGE_SIZE);
 
 	if (verbose)
 		swiotlb_print_info();
@@ -215,13 +220,13 @@ swiotlb_init(int verbose)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	/* Get IO TLB memory from the low pages */
-	vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
+	vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
 		return;
 
 	if (io_tlb_start)
-		free_bootmem(io_tlb_start,
+		memblock_free_early(io_tlb_start,
 			     PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	pr_warn("Cannot allocate SWIOTLB buffer");
 	no_iotlb_memory = true;
 }
@@ -357,14 +362,14 @@ void __init swiotlb_free(void)
 		free_pages((unsigned long)phys_to_virt(io_tlb_start),
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
-		free_bootmem_late(io_tlb_overflow_buffer,
+		memblock_free_late(io_tlb_overflow_buffer,
 				  PAGE_ALIGN(io_tlb_overflow));
-		free_bootmem_late(__pa(io_tlb_orig_addr),
+		memblock_free_late(__pa(io_tlb_orig_addr),
 				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
-		free_bootmem_late(__pa(io_tlb_list),
+		memblock_free_late(__pa(io_tlb_list),
 				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
-		free_bootmem_late(io_tlb_start,
+		memblock_free_late(io_tlb_start,
 				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 	io_tlb_nslabs = 0;
 }
@@ -505,7 +510,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 
 not_found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
-	dev_warn(hwdev, "swiotlb buffer is full\n");
+	if (printk_ratelimit())
+		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
 	return SWIOTLB_MAP_ERROR;
 found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
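
Note: the swiotlb hunks above are a mechanical conversion from the old bootmem allocator to the memblock API; per the diff itself, the _nopanic variants still return NULL on failure (hence the unchanged NULL check on v_overflow_buffer). The correspondence used throughout, roughly:

	/*
	 * alloc_bootmem_pages(sz)             -> memblock_virt_alloc(sz, PAGE_SIZE)
	 * alloc_bootmem_low_pages_nopanic(sz) -> memblock_virt_alloc_low_nopanic(sz, PAGE_SIZE)
	 * free_bootmem(pa, sz)                -> memblock_free_early(pa, sz)
	 * free_bootmem_late(pa, sz)           -> memblock_free_late(pa, sz)
	 */
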
diff --git a/lib/test_module.c b/lib/test_module.c
new file mode 100644
index 000000000000..319b66f1ff61
--- /dev/null
+++ b/lib/test_module.c
@@ -0,0 +1,33 @@
+/*
+ * This module emits "Hello, world" on printk when loaded.
+ *
+ * It is designed to be used for basic evaluation of the module loading
+ * subsystem (for example when validating module signing/verification). It
+ * lacks any extra dependencies, and will not normally be loaded by the
+ * system unless explicitly requested by name.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+static int __init test_module_init(void)
+{
+	pr_warn("Hello, world\n");
+
+	return 0;
+}
+
+module_init(test_module_init);
+
+static void __exit test_module_exit(void)
+{
+	pr_warn("Goodbye\n");
+}
+
+module_exit(test_module_exit);
+
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
new file mode 100644
index 000000000000..0ecef3e4690e
--- /dev/null
+++ b/lib/test_user_copy.c
@@ -0,0 +1,110 @@
+/*
+ * Kernel module for testing copy_to/from_user infrastructure.
+ *
+ * Copyright 2013 Google Inc. All Rights Reserved
+ *
+ * Authors:
+ *      Kees Cook       <keescook@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#define test(condition, msg) \
+({ \
+	int cond = (condition); \
+	if (cond) \
+		pr_warn("%s\n", msg); \
+	cond; \
+})
+
+static int __init test_user_copy_init(void)
+{
+	int ret = 0;
+	char *kmem;
+	char __user *usermem;
+	char *bad_usermem;
+	unsigned long user_addr;
+	unsigned long value = 0x5A;
+
+	kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
+	if (!kmem)
+		return -ENOMEM;
+
+	user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
+			    PROT_READ | PROT_WRITE | PROT_EXEC,
+			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (user_addr >= (unsigned long)(TASK_SIZE)) {
+		pr_warn("Failed to allocate user memory\n");
+		kfree(kmem);
+		return -ENOMEM;
+	}
+
+	usermem = (char __user *)user_addr;
+	bad_usermem = (char *)user_addr;
+
+	/* Legitimate usage: none of these should fail. */
+	ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
+		    "legitimate copy_from_user failed");
+	ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
+		    "legitimate copy_to_user failed");
+	ret |= test(get_user(value, (unsigned long __user *)usermem),
+		    "legitimate get_user failed");
+	ret |= test(put_user(value, (unsigned long __user *)usermem),
+		    "legitimate put_user failed");
+
+	/* Invalid usage: none of these should succeed. */
+	ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
+				    PAGE_SIZE),
+		    "illegal all-kernel copy_from_user passed");
+	ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
+				    PAGE_SIZE),
+		    "illegal reversed copy_from_user passed");
+	ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
+				  PAGE_SIZE),
+		    "illegal all-kernel copy_to_user passed");
+	ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
+				  PAGE_SIZE),
+		    "illegal reversed copy_to_user passed");
+	ret |= test(!get_user(value, (unsigned long __user *)kmem),
+		    "illegal get_user passed");
+	ret |= test(!put_user(value, (unsigned long __user *)kmem),
+		    "illegal put_user passed");
+
+	vm_munmap(user_addr, PAGE_SIZE * 2);
+	kfree(kmem);
+
+	if (ret == 0) {
+		pr_info("tests passed.\n");
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+module_init(test_user_copy_init);
+
+static void __exit test_user_copy_exit(void)
+{
+	pr_info("unloaded.\n");
+}
+
+module_exit(test_user_copy_exit);
+
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 10909c571494..185b6d300ebc 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1155,6 +1155,30 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
 	return number(buf, end, *(const netdev_features_t *)addr, spec);
 }
 
+static noinline_for_stack
+char *address_val(char *buf, char *end, const void *addr,
+		  struct printf_spec spec, const char *fmt)
+{
+	unsigned long long num;
+
+	spec.flags |= SPECIAL | SMALL | ZEROPAD;
+	spec.base = 16;
+
+	switch (fmt[1]) {
+	case 'd':
+		num = *(const dma_addr_t *)addr;
+		spec.field_width = sizeof(dma_addr_t) * 2 + 2;
+		break;
+	case 'p':
+	default:
+		num = *(const phys_addr_t *)addr;
+		spec.field_width = sizeof(phys_addr_t) * 2 + 2;
+		break;
+	}
+
+	return number(buf, end, num, spec);
+}
+
 int kptr_restrict __read_mostly;
 
 /*
@@ -1218,7 +1242,8 @@ int kptr_restrict __read_mostly;
  *		N no separator
  *	      The maximum supported length is 64 bytes of the input. Consider
  *	      to use print_hex_dump() for the larger input.
- * - 'a' For a phys_addr_t type and its derivative types (passed by reference)
+ * - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives
+ *           (default assumed to be phys_addr_t, passed by reference)
  * - 'd[234]' For a dentry name (optionally 2-4 last components)
  * - 'D[234]' Same as 'd' but for a struct file
  *
@@ -1353,11 +1378,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 		}
 		break;
 	case 'a':
-		spec.flags |= SPECIAL | SMALL | ZEROPAD;
-		spec.field_width = sizeof(phys_addr_t) * 2 + 2;
-		spec.base = 16;
-		return number(buf, end,
-			      (unsigned long long) *((phys_addr_t *)ptr), spec);
+		return address_val(buf, end, ptr, spec, fmt);
 	case 'd':
 		return dentry_name(buf, end, ptr, spec, fmt);
 	case 'D':
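
Note: a usage sketch of the extended specifier with illustrative values; per the documentation hunk above, both forms take the address of the variable, not its value:

	phys_addr_t phys = 0x3f000000;
	dma_addr_t dma = 0x10000000;

	printk("phys %pa, dma %pad\n", &phys, &dma);	/* "%pa"/"%pap": phys_addr_t */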