author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 01:45:43 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 01:45:43 -0500
commit		5cbb3d216e2041700231bcfc383ee5f8b7fc8b74 (patch)
tree		a738fa82dbcefa9bd283c08bc67f38827be63937 /lib
parent		9bc9ccd7db1c9f043f75380b5a5b94912046a60e (diff)
parent		4e9b45a19241354daec281d7a785739829b52359 (diff)
Merge branch 'akpm' (patches from Andrew Morton)

Merge first patch-bomb from Andrew Morton:
 "Quite a lot of other stuff is banked up awaiting further
  next->mainline merging, but this batch contains:

   - Lots of random misc patches
   - OCFS2
   - Most of MM
   - backlight updates
   - lib/ updates
   - printk updates
   - checkpatch updates
   - epoll tweaking
   - rtc updates
   - hfs
   - hfsplus
   - documentation
   - procfs
   - update gcov to gcc-4.7 format
   - IPC"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (269 commits)
  ipc, msg: fix message length check for negative values
  ipc/util.c: remove unnecessary work pending test
  devpts: plug the memory leak in kill_sb
  ./Makefile: export initial ramdisk compression config option
  init/Kconfig: add option to disable kernel compression
  drivers: w1: make w1_slave::flags long to avoid memory corruption
  drivers/w1/masters/ds1wm.c: use dev_get_platdata()
  drivers/memstick/core/ms_block.c: fix unreachable state in h_msb_read_page()
  drivers/memstick/core/mspro_block.c: fix attributes array allocation
  drivers/pps/clients/pps-gpio.c: remove redundant of_match_ptr
  kernel/panic.c: reduce 1 byte usage for print tainted buffer
  gcov: reuse kbasename helper
  kernel/gcov/fs.c: use pr_warn()
  kernel/module.c: use pr_foo()
  gcov: compile specific gcov implementation based on gcc version
  gcov: add support for gcc 4.7 gcov format
  gcov: move gcov structs definitions to a gcc version specific file
  kernel/taskstats.c: return -ENOMEM when alloc memory fails in add_del_listener()
  kernel/taskstats.c: add nla_nest_cancel() for failure processing between nla_nest_start() and nla_nest_end()
  kernel/sysctl_binary.c: use scnprintf() instead of snprintf()
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug   |   9
-rw-r--r--  lib/Makefile        |   2
-rw-r--r--  lib/debugobjects.c  |   2
-rw-r--r--  lib/digsig.c        |   2
-rw-r--r--  lib/genalloc.c      |  28
-rw-r--r--  lib/percpu_test.c   | 138
-rw-r--r--  lib/show_mem.c      |  39
-rw-r--r--  lib/vsprintf.c      |  35
8 files changed, 222 insertions(+), 33 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ebef88f61b7d..db25707aa41b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1481,6 +1481,15 @@ config INTERVAL_TREE_TEST
 	help
 	  A benchmark measuring the performance of the interval tree library
 
+config PERCPU_TEST
+	tristate "Per cpu operations test"
+	depends on m && DEBUG_KERNEL
+	help
+	  Enable this option to build test module which validates per-cpu
+	  operations.
+
+	  If unsure, say N.
+
 config ATOMIC64_SELFTEST
 	bool "Perform an atomic64_t self-test at boot"
 	help
diff --git a/lib/Makefile b/lib/Makefile
index f3bb2cb98adf..bb016e116ba4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -157,6 +157,8 @@ obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
 
 interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
 
+obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
+
 obj-$(CONFIG_ASN1) += asn1_decoder.o
 
 obj-$(CONFIG_FONT_SUPPORT) += fonts/
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index bf2c8b1043d8..e0731c3db706 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -196,7 +196,7 @@ static void free_object(struct debug_obj *obj)
 	 * initialized:
 	 */
 	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
-		sched = keventd_up() && !work_pending(&debug_obj_work);
+		sched = keventd_up();
 	hlist_add_head(&obj->node, &obj_pool);
 	obj_pool_free++;
 	obj_pool_used--;
diff --git a/lib/digsig.c b/lib/digsig.c
index 2f31e6a45f0a..8793aeda30ca 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -209,7 +209,7 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
 		kref = keyring_search(make_key_ref(keyring, 1UL),
 						&key_type_user, name);
 		if (IS_ERR(kref))
-			key = ERR_PTR(PTR_ERR(kref));
+			key = ERR_CAST(kref);
 		else
 			key = key_ref_to_ptr(kref);
 	} else {
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 26cf20be72b7..dda31168844f 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -313,6 +313,34 @@ retry:
 EXPORT_SYMBOL(gen_pool_alloc);
 
 /**
+ * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: dma-view physical address
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
+{
+	unsigned long vaddr;
+
+	if (!pool)
+		return NULL;
+
+	vaddr = gen_pool_alloc(pool, size);
+	if (!vaddr)
+		return NULL;
+
+	*dma = gen_pool_virt_to_phys(pool, vaddr);
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc);
+
+/**
  * gen_pool_free - free allocated special memory back to the pool
  * @pool: pool to free to
  * @addr: starting address of memory to free back to pool
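
A minimal usage sketch for the new gen_pool_dma_alloc() helper (illustrative only, not part of the commit; the caller, pool and buffer size below are hypothetical):

#include <linux/types.h>
#include <linux/genalloc.h>

/* Hypothetical caller: carve a small DMA buffer out of an existing pool. */
static void *example_get_dma_buf(struct gen_pool *pool, dma_addr_t *dma)
{
	/*
	 * gen_pool_dma_alloc() returns the CPU address of the allocation and
	 * fills *dma with the matching physical/DMA address, replacing the
	 * former gen_pool_alloc() + gen_pool_virt_to_phys() pair.
	 */
	return gen_pool_dma_alloc(pool, 256, dma);
}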
diff --git a/lib/percpu_test.c b/lib/percpu_test.c
new file mode 100644
index 000000000000..0b5d14dadd1a
--- /dev/null
+++ b/lib/percpu_test.c
@@ -0,0 +1,138 @@
+#include <linux/module.h>
+
+/* validate @native and @pcp counter values match @expected */
+#define CHECK(native, pcp, expected)				\
+	do {							\
+		WARN((native) != (expected),			\
+		     "raw %ld (0x%lx) != expected %lld (0x%llx)",	\
+		     (native), (native),				\
+		     (long long)(expected), (long long)(expected));	\
+		WARN(__this_cpu_read(pcp) != (expected),		\
+		     "pcp %ld (0x%lx) != expected %lld (0x%llx)",	\
+		     __this_cpu_read(pcp), __this_cpu_read(pcp),	\
+		     (long long)(expected), (long long)(expected));	\
+	} while (0)
+
+static DEFINE_PER_CPU(long, long_counter);
+static DEFINE_PER_CPU(unsigned long, ulong_counter);
+
+static int __init percpu_test_init(void)
+{
+	/*
+	 * volatile prevents compiler from optimizing it uses, otherwise the
+	 * +ul_one/-ul_one below would replace with inc/dec instructions.
+	 */
+	volatile unsigned int ui_one = 1;
+	long l = 0;
+	unsigned long ul = 0;
+
+	pr_info("percpu test start\n");
+
+	preempt_disable();
+
+	l += -1;
+	__this_cpu_add(long_counter, -1);
+	CHECK(l, long_counter, -1);
+
+	l += 1;
+	__this_cpu_add(long_counter, 1);
+	CHECK(l, long_counter, 0);
+
+	ul = 0;
+	__this_cpu_write(ulong_counter, 0);
+
+	ul += 1UL;
+	__this_cpu_add(ulong_counter, 1UL);
+	CHECK(ul, ulong_counter, 1);
+
+	ul += -1UL;
+	__this_cpu_add(ulong_counter, -1UL);
+	CHECK(ul, ulong_counter, 0);
+
+	ul += -(unsigned long)1;
+	__this_cpu_add(ulong_counter, -(unsigned long)1);
+	CHECK(ul, ulong_counter, -1);
+
+	ul = 0;
+	__this_cpu_write(ulong_counter, 0);
+
+	ul -= 1;
+	__this_cpu_dec(ulong_counter);
+	CHECK(ul, ulong_counter, -1);
+	CHECK(ul, ulong_counter, ULONG_MAX);
+
+	l += -ui_one;
+	__this_cpu_add(long_counter, -ui_one);
+	CHECK(l, long_counter, 0xffffffff);
+
+	l += ui_one;
+	__this_cpu_add(long_counter, ui_one);
+	CHECK(l, long_counter, (long)0x100000000LL);
+
+
+	l = 0;
+	__this_cpu_write(long_counter, 0);
+
+	l -= ui_one;
+	__this_cpu_sub(long_counter, ui_one);
+	CHECK(l, long_counter, -1);
+
+	l = 0;
+	__this_cpu_write(long_counter, 0);
+
+	l += ui_one;
+	__this_cpu_add(long_counter, ui_one);
+	CHECK(l, long_counter, 1);
+
+	l += -ui_one;
+	__this_cpu_add(long_counter, -ui_one);
+	CHECK(l, long_counter, (long)0x100000000LL);
+
+	l = 0;
+	__this_cpu_write(long_counter, 0);
+
+	l -= ui_one;
+	this_cpu_sub(long_counter, ui_one);
+	CHECK(l, long_counter, -1);
+	CHECK(l, long_counter, ULONG_MAX);
+
+	ul = 0;
+	__this_cpu_write(ulong_counter, 0);
+
+	ul += ui_one;
+	__this_cpu_add(ulong_counter, ui_one);
+	CHECK(ul, ulong_counter, 1);
+
+	ul = 0;
+	__this_cpu_write(ulong_counter, 0);
+
+	ul -= ui_one;
+	__this_cpu_sub(ulong_counter, ui_one);
+	CHECK(ul, ulong_counter, -1);
+	CHECK(ul, ulong_counter, ULONG_MAX);
+
+	ul = 3;
+	__this_cpu_write(ulong_counter, 3);
+
+	ul = this_cpu_sub_return(ulong_counter, ui_one);
+	CHECK(ul, ulong_counter, 2);
+
+	ul = __this_cpu_sub_return(ulong_counter, ui_one);
+	CHECK(ul, ulong_counter, 1);
+
+	preempt_enable();
+
+	pr_info("percpu test done\n");
+	return -EAGAIN; /* Fail will directly unload the module */
+}
+
+static void __exit percpu_test_exit(void)
+{
+}
+
+module_init(percpu_test_init)
+module_exit(percpu_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Thelen");
+MODULE_DESCRIPTION("percpu operations test");
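
The module exercises both the this_cpu_*() and __this_cpu_*() operation families under preempt_disable(). A minimal sketch of the distinction being validated (illustrative only; demo_counter and demo() are hypothetical, not from the commit):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(long, demo_counter);

static void demo(void)
{
	/* this_cpu_add() is preemption- and interrupt-safe on its own. */
	this_cpu_add(demo_counter, 1);

	/*
	 * __this_cpu_add() is cheaper but the caller must keep the task on
	 * one CPU, e.g. by disabling preemption as percpu_test_init() does.
	 */
	preempt_disable();
	__this_cpu_add(demo_counter, 1);
	preempt_enable();
}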
diff --git a/lib/show_mem.c b/lib/show_mem.c
index b7c72311ad0c..5847a4921b8e 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -12,8 +12,7 @@
 void show_mem(unsigned int filter)
 {
 	pg_data_t *pgdat;
-	unsigned long total = 0, reserved = 0, shared = 0,
-		nonshared = 0, highmem = 0;
+	unsigned long total = 0, reserved = 0, highmem = 0;
 
 	printk("Mem-Info:\n");
 	show_free_areas(filter);
@@ -22,43 +21,27 @@ void show_mem(unsigned int filter)
 		return;
 
 	for_each_online_pgdat(pgdat) {
-		unsigned long i, flags;
+		unsigned long flags;
+		int zoneid;
 
 		pgdat_resize_lock(pgdat, &flags);
-		for (i = 0; i < pgdat->node_spanned_pages; i++) {
-			struct page *page;
-			unsigned long pfn = pgdat->node_start_pfn + i;
-
-			if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
-				touch_nmi_watchdog();
-
-			if (!pfn_valid(pfn))
+		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+			struct zone *zone = &pgdat->node_zones[zoneid];
+			if (!populated_zone(zone))
 				continue;
 
-			page = pfn_to_page(pfn);
-
-			if (PageHighMem(page))
-				highmem++;
+			total += zone->present_pages;
+			reserved = zone->present_pages - zone->managed_pages;
 
-			if (PageReserved(page))
-				reserved++;
-			else if (page_count(page) == 1)
-				nonshared++;
-			else if (page_count(page) > 1)
-				shared += page_count(page) - 1;
-
-			total++;
+			if (is_highmem_idx(zoneid))
+				highmem += zone->present_pages;
 		}
 		pgdat_resize_unlock(pgdat, &flags);
 	}
 
 	printk("%lu pages RAM\n", total);
-#ifdef CONFIG_HIGHMEM
-	printk("%lu pages HighMem\n", highmem);
-#endif
+	printk("%lu pages HighMem/MovableOnly\n", highmem);
 	printk("%lu pages reserved\n", reserved);
-	printk("%lu pages shared\n", shared);
-	printk("%lu pages non-shared\n", nonshared);
 #ifdef CONFIG_QUICKLIST
 	printk("%lu pages in pagetable cache\n",
 		quicklist_total_size());
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 26559bdb4c49..48586ac3a62e 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/ioport.h>
 #include <linux/dcache.h>
+#include <linux/cred.h>
 #include <net/addrconf.h>
 
 #include <asm/page.h> /* for PAGE_SIZE */
@@ -1218,6 +1219,8 @@ int kptr_restrict __read_mostly;
  * The maximum supported length is 64 bytes of the input. Consider
  * to use print_hex_dump() for the larger input.
  * - 'a' For a phys_addr_t type and its derivative types (passed by reference)
+ * - 'd[234]' For a dentry name (optionally 2-4 last components)
+ * - 'D[234]' Same as 'd' but for a struct file
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -1312,11 +1315,37 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 			spec.field_width = default_width;
 			return string(buf, end, "pK-error", spec);
 		}
-		if (!((kptr_restrict == 0) ||
-		      (kptr_restrict == 1 &&
-		       has_capability_noaudit(current, CAP_SYSLOG))))
+
+		switch (kptr_restrict) {
+		case 0:
+			/* Always print %pK values */
+			break;
+		case 1: {
+			/*
+			 * Only print the real pointer value if the current
+			 * process has CAP_SYSLOG and is running with the
+			 * same credentials it started with. This is because
+			 * access to files is checked at open() time, but %pK
+			 * checks permission at read() time. We don't want to
+			 * leak pointer values if a binary opens a file using
+			 * %pK and then elevates privileges before reading it.
+			 */
+			const struct cred *cred = current_cred();
+
+			if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+			    !uid_eq(cred->euid, cred->uid) ||
+			    !gid_eq(cred->egid, cred->gid))
+				ptr = NULL;
+			break;
+		}
+		case 2:
+		default:
+			/* Always print 0's for %pK */
 			ptr = NULL;
+			break;
+		}
 		break;
+
 	case 'N':
 		switch (fmt[1]) {
 		case 'F':
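
For illustration, a hedged sketch of how the specifiers touched by this hunk would be used from driver code (example_log() and its arguments are hypothetical, not from the commit):

#include <linux/printk.h>
#include <linux/dcache.h>
#include <linux/fs.h>

static void example_log(const void *obj, struct dentry *dentry, struct file *filp)
{
	/*
	 * With kptr_restrict == 1, %pK prints the real pointer only if the
	 * reading process has CAP_SYSLOG and still runs with its original
	 * uid/gid; otherwise zeros are printed, as for kptr_restrict == 2.
	 * %pd2 prints the last two components of a dentry name, %pD the
	 * name of a struct file, per the documentation lines added above.
	 */
	pr_info("obj %pK name %pd2 file %pD\n", obj, dentry, filp);
}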