about summary refs log tree commit diff stats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/842/842_decompress.c14
-rw-r--r--lib/Kconfig.debug76
-rw-r--r--lib/Makefile1
-rw-r--r--lib/atomic64_test.c124
-rw-r--r--lib/dynamic_debug.c11
-rw-r--r--lib/iov_iter.c11
-rw-r--r--lib/list_debug.c2
-rw-r--r--lib/mpi/mpicoder.c21
-rw-r--r--lib/netdev-notifier-error-inject.c55
-rw-r--r--lib/rhashtable.c6
-rw-r--r--lib/seq_buf.c6
-rw-r--r--lib/test_bpf.c120
-rw-r--r--lib/test_rhashtable.c76
-rw-r--r--lib/vsprintf.c29
14 files changed, 454 insertions, 98 deletions
diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c
index 8881dad2a6a0..a7f278d2ed8f 100644
--- a/lib/842/842_decompress.c
+++ b/lib/842/842_decompress.c
@@ -69,7 +69,7 @@ struct sw842_param {
69 ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \ 69 ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \
70 (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \ 70 (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \
71 (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \ 71 (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \
72 WARN(1, "pr_debug param err invalid size %x\n", s)) 72 0)
73 73
74static int next_bits(struct sw842_param *p, u64 *d, u8 n); 74static int next_bits(struct sw842_param *p, u64 *d, u8 n);
75 75
@@ -202,10 +202,14 @@ static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize)
202 return -EINVAL; 202 return -EINVAL;
203 } 203 }
204 204
205 pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n", 205 if (size != 2 && size != 4 && size != 8)
206 size, (unsigned long)index, (unsigned long)(index * size), 206 WARN(1, "__do_index invalid size %x\n", size);
207 (unsigned long)offset, (unsigned long)total, 207 else
208 (unsigned long)beN_to_cpu(&p->ostart[offset], size)); 208 pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n",
209 size, (unsigned long)index,
210 (unsigned long)(index * size), (unsigned long)offset,
211 (unsigned long)total,
212 (unsigned long)beN_to_cpu(&p->ostart[offset], size));
209 213
210 memcpy(p->out, &p->ostart[offset], size); 214 memcpy(p->out, &p->ostart[offset], size);
211 p->out += size; 215 p->out += size;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8c15b29d5adc..ee1ac1cc082c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -812,6 +812,17 @@ config BOOTPARAM_HUNG_TASK_PANIC_VALUE
812 default 0 if !BOOTPARAM_HUNG_TASK_PANIC 812 default 0 if !BOOTPARAM_HUNG_TASK_PANIC
813 default 1 if BOOTPARAM_HUNG_TASK_PANIC 813 default 1 if BOOTPARAM_HUNG_TASK_PANIC
814 814
815config WQ_WATCHDOG
816 bool "Detect Workqueue Stalls"
817 depends on DEBUG_KERNEL
818 help
819 Say Y here to enable stall detection on workqueues. If a
820 worker pool doesn't make forward progress on a pending work
821 item for over a given amount of time, 30s by default, a
822 warning message is printed along with dump of workqueue
823 state. This can be configured through kernel parameter
824 "workqueue.watchdog_thresh" and its sysfs counterpart.
825
815endmenu # "Debug lockups and hangs" 826endmenu # "Debug lockups and hangs"
816 827
817config PANIC_ON_OOPS 828config PANIC_ON_OOPS
@@ -1484,6 +1495,29 @@ config OF_RECONFIG_NOTIFIER_ERROR_INJECT
1484 1495
1485 If unsure, say N. 1496 If unsure, say N.
1486 1497
1498config NETDEV_NOTIFIER_ERROR_INJECT
1499 tristate "Netdev notifier error injection module"
1500 depends on NET && NOTIFIER_ERROR_INJECTION
1501 help
1502 This option provides the ability to inject artificial errors to
1503 netdevice notifier chain callbacks. It is controlled through debugfs
1504 interface /sys/kernel/debug/notifier-error-inject/netdev
1505
1506 If the notifier call chain should be failed with some events
1507 notified, write the error code to "actions/<notifier event>/error".
1508
1509 Example: Inject netdevice mtu change error (-22 = -EINVAL)
1510
1511 # cd /sys/kernel/debug/notifier-error-inject/netdev
1512 # echo -22 > actions/NETDEV_CHANGEMTU/error
1513 # ip link set eth0 mtu 1024
1514 RTNETLINK answers: Invalid argument
1515
1516 To compile this code as a module, choose M here: the module will
1517 be called netdev-notifier-error-inject.
1518
1519 If unsure, say N.
1520
1487config FAULT_INJECTION 1521config FAULT_INJECTION
1488 bool "Fault-injection framework" 1522 bool "Fault-injection framework"
1489 depends on DEBUG_KERNEL 1523 depends on DEBUG_KERNEL
@@ -1523,8 +1557,7 @@ config FAIL_IO_TIMEOUT
1523 1557
1524config FAIL_MMC_REQUEST 1558config FAIL_MMC_REQUEST
1525 bool "Fault-injection capability for MMC IO" 1559 bool "Fault-injection capability for MMC IO"
1526 select DEBUG_FS 1560 depends on FAULT_INJECTION_DEBUG_FS && MMC
1527 depends on FAULT_INJECTION && MMC
1528 help 1561 help
1529 Provide fault-injection capability for MMC IO. 1562 Provide fault-injection capability for MMC IO.
1530 This will make the mmc core return data errors. This is 1563 This will make the mmc core return data errors. This is
@@ -1853,3 +1886,42 @@ source "samples/Kconfig"
1853 1886
1854source "lib/Kconfig.kgdb" 1887source "lib/Kconfig.kgdb"
1855 1888
1889config ARCH_HAS_DEVMEM_IS_ALLOWED
1890 bool
1891
1892config STRICT_DEVMEM
1893 bool "Filter access to /dev/mem"
1894 depends on MMU
1895 depends on ARCH_HAS_DEVMEM_IS_ALLOWED
1896 default y if TILE || PPC
1897 ---help---
1898 If this option is disabled, you allow userspace (root) access to all
1899 of memory, including kernel and userspace memory. Accidental
1900 access to this is obviously disastrous, but specific access can
1901 be used by people debugging the kernel. Note that with PAT support
1902 enabled, even in this case there are restrictions on /dev/mem
1903 use due to the cache aliasing requirements.
1904
1905 If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
1906 file only allows userspace access to PCI space and the BIOS code and
1907 data regions. This is sufficient for dosemu and X and all common
1908 users of /dev/mem.
1909
1910 If in doubt, say Y.
1911
1912config IO_STRICT_DEVMEM
1913 bool "Filter I/O access to /dev/mem"
1914 depends on STRICT_DEVMEM
1915 default STRICT_DEVMEM
1916 ---help---
1917 If this option is disabled, you allow userspace (root) access to all
1918 io-memory regardless of whether a driver is actively using that
1919 range. Accidental access to this is obviously disastrous, but
1920 specific access can be used by people debugging kernel drivers.
1921
1922 If this option is switched on, the /dev/mem file only allows
1923 userspace access to *idle* io-memory ranges (see /proc/iomem) This
1924 may break traditional users of /dev/mem (dosemu, legacy X, etc...)
1925 if the driver using a given range cannot be disabled.
1926
1927 If in doubt, say Y.
diff --git a/lib/Makefile b/lib/Makefile
index 7f1de26613d2..180dd4d0dd41 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -120,6 +120,7 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
120obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o 120obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
121obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o 121obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
122obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o 122obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
123obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
123obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o 124obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
124obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \ 125obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
125 of-reconfig-notifier-error-inject.o 126 of-reconfig-notifier-error-inject.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 83c33a5bcffb..d62de8bf022d 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -16,6 +16,10 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/atomic.h> 17#include <linux/atomic.h>
18 18
19#ifdef CONFIG_X86
20#include <asm/processor.h> /* for boot_cpu_has below */
21#endif
22
19#define TEST(bit, op, c_op, val) \ 23#define TEST(bit, op, c_op, val) \
20do { \ 24do { \
21 atomic##bit##_set(&v, v0); \ 25 atomic##bit##_set(&v, v0); \
@@ -27,6 +31,65 @@ do { \
27 (unsigned long long)r); \ 31 (unsigned long long)r); \
28} while (0) 32} while (0)
29 33
34/*
35 * Test for a atomic operation family,
36 * @test should be a macro accepting parameters (bit, op, ...)
37 */
38
39#define FAMILY_TEST(test, bit, op, args...) \
40do { \
41 test(bit, op, ##args); \
42 test(bit, op##_acquire, ##args); \
43 test(bit, op##_release, ##args); \
44 test(bit, op##_relaxed, ##args); \
45} while (0)
46
47#define TEST_RETURN(bit, op, c_op, val) \
48do { \
49 atomic##bit##_set(&v, v0); \
50 r = v0; \
51 r c_op val; \
52 BUG_ON(atomic##bit##_##op(val, &v) != r); \
53 BUG_ON(atomic##bit##_read(&v) != r); \
54} while (0)
55
56#define RETURN_FAMILY_TEST(bit, op, c_op, val) \
57do { \
58 FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \
59} while (0)
60
61#define TEST_ARGS(bit, op, init, ret, expect, args...) \
62do { \
63 atomic##bit##_set(&v, init); \
64 BUG_ON(atomic##bit##_##op(&v, ##args) != ret); \
65 BUG_ON(atomic##bit##_read(&v) != expect); \
66} while (0)
67
68#define XCHG_FAMILY_TEST(bit, init, new) \
69do { \
70 FAMILY_TEST(TEST_ARGS, bit, xchg, init, init, new, new); \
71} while (0)
72
73#define CMPXCHG_FAMILY_TEST(bit, init, new, wrong) \
74do { \
75 FAMILY_TEST(TEST_ARGS, bit, cmpxchg, \
76 init, init, new, init, new); \
77 FAMILY_TEST(TEST_ARGS, bit, cmpxchg, \
78 init, init, init, wrong, new); \
79} while (0)
80
81#define INC_RETURN_FAMILY_TEST(bit, i) \
82do { \
83 FAMILY_TEST(TEST_ARGS, bit, inc_return, \
84 i, (i) + one, (i) + one); \
85} while (0)
86
87#define DEC_RETURN_FAMILY_TEST(bit, i) \
88do { \
89 FAMILY_TEST(TEST_ARGS, bit, dec_return, \
90 i, (i) - one, (i) - one); \
91} while (0)
92
30static __init void test_atomic(void) 93static __init void test_atomic(void)
31{ 94{
32 int v0 = 0xaaa31337; 95 int v0 = 0xaaa31337;
@@ -45,6 +108,18 @@ static __init void test_atomic(void)
45 TEST(, and, &=, v1); 108 TEST(, and, &=, v1);
46 TEST(, xor, ^=, v1); 109 TEST(, xor, ^=, v1);
47 TEST(, andnot, &= ~, v1); 110 TEST(, andnot, &= ~, v1);
111
112 RETURN_FAMILY_TEST(, add_return, +=, onestwos);
113 RETURN_FAMILY_TEST(, add_return, +=, -one);
114 RETURN_FAMILY_TEST(, sub_return, -=, onestwos);
115 RETURN_FAMILY_TEST(, sub_return, -=, -one);
116
117 INC_RETURN_FAMILY_TEST(, v0);
118 DEC_RETURN_FAMILY_TEST(, v0);
119
120 XCHG_FAMILY_TEST(, v0, v1);
121 CMPXCHG_FAMILY_TEST(, v0, v1, onestwos);
122
48} 123}
49 124
50#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) 125#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
@@ -74,25 +149,10 @@ static __init void test_atomic64(void)
74 TEST(64, xor, ^=, v1); 149 TEST(64, xor, ^=, v1);
75 TEST(64, andnot, &= ~, v1); 150 TEST(64, andnot, &= ~, v1);
76 151
77 INIT(v0); 152 RETURN_FAMILY_TEST(64, add_return, +=, onestwos);
78 r += onestwos; 153 RETURN_FAMILY_TEST(64, add_return, +=, -one);
79 BUG_ON(atomic64_add_return(onestwos, &v) != r); 154 RETURN_FAMILY_TEST(64, sub_return, -=, onestwos);
80 BUG_ON(v.counter != r); 155 RETURN_FAMILY_TEST(64, sub_return, -=, -one);
81
82 INIT(v0);
83 r += -one;
84 BUG_ON(atomic64_add_return(-one, &v) != r);
85 BUG_ON(v.counter != r);
86
87 INIT(v0);
88 r -= onestwos;
89 BUG_ON(atomic64_sub_return(onestwos, &v) != r);
90 BUG_ON(v.counter != r);
91
92 INIT(v0);
93 r -= -one;
94 BUG_ON(atomic64_sub_return(-one, &v) != r);
95 BUG_ON(v.counter != r);
96 156
97 INIT(v0); 157 INIT(v0);
98 atomic64_inc(&v); 158 atomic64_inc(&v);
@@ -100,33 +160,15 @@ static __init void test_atomic64(void)
100 BUG_ON(v.counter != r); 160 BUG_ON(v.counter != r);
101 161
102 INIT(v0); 162 INIT(v0);
103 r += one;
104 BUG_ON(atomic64_inc_return(&v) != r);
105 BUG_ON(v.counter != r);
106
107 INIT(v0);
108 atomic64_dec(&v); 163 atomic64_dec(&v);
109 r -= one; 164 r -= one;
110 BUG_ON(v.counter != r); 165 BUG_ON(v.counter != r);
111 166
112 INIT(v0); 167 INC_RETURN_FAMILY_TEST(64, v0);
113 r -= one; 168 DEC_RETURN_FAMILY_TEST(64, v0);
114 BUG_ON(atomic64_dec_return(&v) != r);
115 BUG_ON(v.counter != r);
116 169
117 INIT(v0); 170 XCHG_FAMILY_TEST(64, v0, v1);
118 BUG_ON(atomic64_xchg(&v, v1) != v0); 171 CMPXCHG_FAMILY_TEST(64, v0, v1, v2);
119 r = v1;
120 BUG_ON(v.counter != r);
121
122 INIT(v0);
123 BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
124 r = v1;
125 BUG_ON(v.counter != r);
126
127 INIT(v0);
128 BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
129 BUG_ON(v.counter != r);
130 172
131 INIT(v0); 173 INIT(v0);
132 BUG_ON(atomic64_add_unless(&v, one, v0)); 174 BUG_ON(atomic64_add_unless(&v, one, v0));
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e3952e9c8ec0..fe42b6ec3f0c 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -657,14 +657,9 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
657 pr_warn("expected <%d bytes into control\n", USER_BUF_PAGE); 657 pr_warn("expected <%d bytes into control\n", USER_BUF_PAGE);
658 return -E2BIG; 658 return -E2BIG;
659 } 659 }
660 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 660 tmpbuf = memdup_user_nul(ubuf, len);
661 if (!tmpbuf) 661 if (IS_ERR(tmpbuf))
662 return -ENOMEM; 662 return PTR_ERR(tmpbuf);
663 if (copy_from_user(tmpbuf, ubuf, len)) {
664 kfree(tmpbuf);
665 return -EFAULT;
666 }
667 tmpbuf[len] = '\0';
668 vpr_info("read %d bytes from userspace\n", (int)len); 663 vpr_info("read %d bytes from userspace\n", (int)len);
669 664
670 ret = ddebug_exec_queries(tmpbuf, NULL); 665 ret = ddebug_exec_queries(tmpbuf, NULL);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 75232ad0a5e7..5fecddc32b1b 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -369,7 +369,7 @@ static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t
369 kunmap_atomic(from); 369 kunmap_atomic(from);
370} 370}
371 371
372static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len) 372static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
373{ 373{
374 char *to = kmap_atomic(page); 374 char *to = kmap_atomic(page);
375 memcpy(to + offset, from, len); 375 memcpy(to + offset, from, len);
@@ -383,9 +383,9 @@ static void memzero_page(struct page *page, size_t offset, size_t len)
383 kunmap_atomic(addr); 383 kunmap_atomic(addr);
384} 384}
385 385
386size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i) 386size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
387{ 387{
388 char *from = addr; 388 const char *from = addr;
389 if (unlikely(bytes > i->count)) 389 if (unlikely(bytes > i->count))
390 bytes = i->count; 390 bytes = i->count;
391 391
@@ -704,10 +704,10 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
704} 704}
705EXPORT_SYMBOL(csum_and_copy_from_iter); 705EXPORT_SYMBOL(csum_and_copy_from_iter);
706 706
707size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, 707size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
708 struct iov_iter *i) 708 struct iov_iter *i)
709{ 709{
710 char *from = addr; 710 const char *from = addr;
711 __wsum sum, next; 711 __wsum sum, next;
712 size_t off = 0; 712 size_t off = 0;
713 if (unlikely(bytes > i->count)) 713 if (unlikely(bytes > i->count))
@@ -849,3 +849,4 @@ int import_single_range(int rw, void __user *buf, size_t len,
849 iov_iter_init(i, rw, iov, 1, len); 849 iov_iter_init(i, rw, iov, 1, len);
850 return 0; 850 return 0;
851} 851}
852EXPORT_SYMBOL(import_single_range);
diff --git a/lib/list_debug.c b/lib/list_debug.c
index c24c2f7e296f..3859bf63561c 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -37,7 +37,7 @@ void __list_add(struct list_head *new,
37 next->prev = new; 37 next->prev = new;
38 new->next = next; 38 new->next = next;
39 new->prev = prev; 39 new->prev = prev;
40 prev->next = new; 40 WRITE_ONCE(prev->next, new);
41} 41}
42EXPORT_SYMBOL(__list_add); 42EXPORT_SYMBOL(__list_add);
43 43
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 3db76b8c1115..ec533a6c77b5 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -135,7 +135,9 @@ EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
135 * @buf: bufer to which the output will be written to. Needs to be at 135 * @buf: bufer to which the output will be written to. Needs to be at
136 * leaset mpi_get_size(a) long. 136 * leaset mpi_get_size(a) long.
137 * @buf_len: size of the buf. 137 * @buf_len: size of the buf.
138 * @nbytes: receives the actual length of the data written. 138 * @nbytes: receives the actual length of the data written on success and
139 * the data to-be-written on -EOVERFLOW in case buf_len was too
140 * small.
139 * @sign: if not NULL, it will be set to the sign of a. 141 * @sign: if not NULL, it will be set to the sign of a.
140 * 142 *
141 * Return: 0 on success or error code in case of error 143 * Return: 0 on success or error code in case of error
@@ -148,7 +150,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
148 unsigned int n = mpi_get_size(a); 150 unsigned int n = mpi_get_size(a);
149 int i, lzeros = 0; 151 int i, lzeros = 0;
150 152
151 if (buf_len < n || !buf || !nbytes) 153 if (!buf || !nbytes)
152 return -EINVAL; 154 return -EINVAL;
153 155
154 if (sign) 156 if (sign)
@@ -163,6 +165,11 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
163 break; 165 break;
164 } 166 }
165 167
168 if (buf_len < n - lzeros) {
169 *nbytes = n - lzeros;
170 return -EOVERFLOW;
171 }
172
166 p = buf; 173 p = buf;
167 *nbytes = n - lzeros; 174 *nbytes = n - lzeros;
168 175
@@ -332,7 +339,8 @@ EXPORT_SYMBOL_GPL(mpi_set_buffer);
332 * @nbytes: in/out param - it has the be set to the maximum number of 339 * @nbytes: in/out param - it has the be set to the maximum number of
333 * bytes that can be written to sgl. This has to be at least 340 * bytes that can be written to sgl. This has to be at least
334 * the size of the integer a. On return it receives the actual 341 * the size of the integer a. On return it receives the actual
335 * length of the data written. 342 * length of the data written on success or the data that would
343 * be written if buffer was too small.
336 * @sign: if not NULL, it will be set to the sign of a. 344 * @sign: if not NULL, it will be set to the sign of a.
337 * 345 *
338 * Return: 0 on success or error code in case of error 346 * Return: 0 on success or error code in case of error
@@ -345,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
345 unsigned int n = mpi_get_size(a); 353 unsigned int n = mpi_get_size(a);
346 int i, x, y = 0, lzeros = 0, buf_len; 354 int i, x, y = 0, lzeros = 0, buf_len;
347 355
348 if (!nbytes || *nbytes < n) 356 if (!nbytes)
349 return -EINVAL; 357 return -EINVAL;
350 358
351 if (sign) 359 if (sign)
@@ -360,6 +368,11 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
360 break; 368 break;
361 } 369 }
362 370
371 if (*nbytes < n - lzeros) {
372 *nbytes = n - lzeros;
373 return -EOVERFLOW;
374 }
375
363 *nbytes = n - lzeros; 376 *nbytes = n - lzeros;
364 buf_len = sgl->length; 377 buf_len = sgl->length;
365 p2 = sg_virt(sgl); 378 p2 = sg_virt(sgl);
diff --git a/lib/netdev-notifier-error-inject.c b/lib/netdev-notifier-error-inject.c
new file mode 100644
index 000000000000..13e9c62e216f
--- /dev/null
+++ b/lib/netdev-notifier-error-inject.c
@@ -0,0 +1,55 @@
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/netdevice.h>
4
5#include "notifier-error-inject.h"
6
7static int priority;
8module_param(priority, int, 0);
9MODULE_PARM_DESC(priority, "specify netdevice notifier priority");
10
11static struct notifier_err_inject netdev_notifier_err_inject = {
12 .actions = {
13 { NOTIFIER_ERR_INJECT_ACTION(NETDEV_REGISTER) },
14 { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEMTU) },
15 { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGENAME) },
16 { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_UP) },
17 { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_TYPE_CHANGE) },
18 { NOTIFIER_ERR_INJECT_ACTION(NETDEV_POST_INIT) },
19 { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEMTU) },
20 { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEUPPER) },
21 { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEUPPER) },
22 {}
23 }
24};
25
26static struct dentry *dir;
27
28static int netdev_err_inject_init(void)
29{
30 int err;
31
32 dir = notifier_err_inject_init("netdev", notifier_err_inject_dir,
33 &netdev_notifier_err_inject, priority);
34 if (IS_ERR(dir))
35 return PTR_ERR(dir);
36
37 err = register_netdevice_notifier(&netdev_notifier_err_inject.nb);
38 if (err)
39 debugfs_remove_recursive(dir);
40
41 return err;
42}
43
44static void netdev_err_inject_exit(void)
45{
46 unregister_netdevice_notifier(&netdev_notifier_err_inject.nb);
47 debugfs_remove_recursive(dir);
48}
49
50module_init(netdev_err_inject_init);
51module_exit(netdev_err_inject_exit);
52
53MODULE_DESCRIPTION("Netdevice notifier error injection module");
54MODULE_LICENSE("GPL");
55MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>");
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index eb9240c458fa..cc808707d1cf 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -231,9 +231,6 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
231 */ 231 */
232 rcu_assign_pointer(old_tbl->future_tbl, new_tbl); 232 rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
233 233
234 /* Ensure the new table is visible to readers. */
235 smp_wmb();
236
237 spin_unlock_bh(old_tbl->locks); 234 spin_unlock_bh(old_tbl->locks);
238 235
239 return 0; 236 return 0;
@@ -519,7 +516,8 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
519 return -ENOMEM; 516 return -ENOMEM;
520 517
521 spin_lock(&ht->lock); 518 spin_lock(&ht->lock);
522 iter->walker->tbl = rht_dereference(ht->tbl, ht); 519 iter->walker->tbl =
520 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
523 list_add(&iter->walker->list, &iter->walker->tbl->walkers); 521 list_add(&iter->walker->list, &iter->walker->tbl->walkers);
524 spin_unlock(&ht->lock); 522 spin_unlock(&ht->lock);
525 523
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 5c94e1012a91..cb18469e1f49 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -306,10 +306,12 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
306 if (!cnt) 306 if (!cnt)
307 return 0; 307 return 0;
308 308
309 if (s->len <= s->readpos) 309 len = seq_buf_used(s);
310
311 if (len <= s->readpos)
310 return -EBUSY; 312 return -EBUSY;
311 313
312 len = seq_buf_used(s) - s->readpos; 314 len -= s->readpos;
313 if (cnt > len) 315 if (cnt > len)
314 cnt = len; 316 cnt = len;
315 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); 317 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 10cd1860e5b0..27a7a26b1ece 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1685,6 +1685,126 @@ static struct bpf_test tests[] = {
1685 { }, 1685 { },
1686 { { 0, 0x35d97ef2 } } 1686 { { 0, 0x35d97ef2 } }
1687 }, 1687 },
1688 { /* Mainly checking JIT here. */
1689 "MOV REG64",
1690 .u.insns_int = {
1691 BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
1692 BPF_MOV64_REG(R1, R0),
1693 BPF_MOV64_REG(R2, R1),
1694 BPF_MOV64_REG(R3, R2),
1695 BPF_MOV64_REG(R4, R3),
1696 BPF_MOV64_REG(R5, R4),
1697 BPF_MOV64_REG(R6, R5),
1698 BPF_MOV64_REG(R7, R6),
1699 BPF_MOV64_REG(R8, R7),
1700 BPF_MOV64_REG(R9, R8),
1701 BPF_ALU64_IMM(BPF_MOV, R0, 0),
1702 BPF_ALU64_IMM(BPF_MOV, R1, 0),
1703 BPF_ALU64_IMM(BPF_MOV, R2, 0),
1704 BPF_ALU64_IMM(BPF_MOV, R3, 0),
1705 BPF_ALU64_IMM(BPF_MOV, R4, 0),
1706 BPF_ALU64_IMM(BPF_MOV, R5, 0),
1707 BPF_ALU64_IMM(BPF_MOV, R6, 0),
1708 BPF_ALU64_IMM(BPF_MOV, R7, 0),
1709 BPF_ALU64_IMM(BPF_MOV, R8, 0),
1710 BPF_ALU64_IMM(BPF_MOV, R9, 0),
1711 BPF_ALU64_REG(BPF_ADD, R0, R0),
1712 BPF_ALU64_REG(BPF_ADD, R0, R1),
1713 BPF_ALU64_REG(BPF_ADD, R0, R2),
1714 BPF_ALU64_REG(BPF_ADD, R0, R3),
1715 BPF_ALU64_REG(BPF_ADD, R0, R4),
1716 BPF_ALU64_REG(BPF_ADD, R0, R5),
1717 BPF_ALU64_REG(BPF_ADD, R0, R6),
1718 BPF_ALU64_REG(BPF_ADD, R0, R7),
1719 BPF_ALU64_REG(BPF_ADD, R0, R8),
1720 BPF_ALU64_REG(BPF_ADD, R0, R9),
1721 BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
1722 BPF_EXIT_INSN(),
1723 },
1724 INTERNAL,
1725 { },
1726 { { 0, 0xfefe } }
1727 },
1728 { /* Mainly checking JIT here. */
1729 "MOV REG32",
1730 .u.insns_int = {
1731 BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
1732 BPF_MOV64_REG(R1, R0),
1733 BPF_MOV64_REG(R2, R1),
1734 BPF_MOV64_REG(R3, R2),
1735 BPF_MOV64_REG(R4, R3),
1736 BPF_MOV64_REG(R5, R4),
1737 BPF_MOV64_REG(R6, R5),
1738 BPF_MOV64_REG(R7, R6),
1739 BPF_MOV64_REG(R8, R7),
1740 BPF_MOV64_REG(R9, R8),
1741 BPF_ALU32_IMM(BPF_MOV, R0, 0),
1742 BPF_ALU32_IMM(BPF_MOV, R1, 0),
1743 BPF_ALU32_IMM(BPF_MOV, R2, 0),
1744 BPF_ALU32_IMM(BPF_MOV, R3, 0),
1745 BPF_ALU32_IMM(BPF_MOV, R4, 0),
1746 BPF_ALU32_IMM(BPF_MOV, R5, 0),
1747 BPF_ALU32_IMM(BPF_MOV, R6, 0),
1748 BPF_ALU32_IMM(BPF_MOV, R7, 0),
1749 BPF_ALU32_IMM(BPF_MOV, R8, 0),
1750 BPF_ALU32_IMM(BPF_MOV, R9, 0),
1751 BPF_ALU64_REG(BPF_ADD, R0, R0),
1752 BPF_ALU64_REG(BPF_ADD, R0, R1),
1753 BPF_ALU64_REG(BPF_ADD, R0, R2),
1754 BPF_ALU64_REG(BPF_ADD, R0, R3),
1755 BPF_ALU64_REG(BPF_ADD, R0, R4),
1756 BPF_ALU64_REG(BPF_ADD, R0, R5),
1757 BPF_ALU64_REG(BPF_ADD, R0, R6),
1758 BPF_ALU64_REG(BPF_ADD, R0, R7),
1759 BPF_ALU64_REG(BPF_ADD, R0, R8),
1760 BPF_ALU64_REG(BPF_ADD, R0, R9),
1761 BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
1762 BPF_EXIT_INSN(),
1763 },
1764 INTERNAL,
1765 { },
1766 { { 0, 0xfefe } }
1767 },
1768 { /* Mainly checking JIT here. */
1769 "LD IMM64",
1770 .u.insns_int = {
1771 BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
1772 BPF_MOV64_REG(R1, R0),
1773 BPF_MOV64_REG(R2, R1),
1774 BPF_MOV64_REG(R3, R2),
1775 BPF_MOV64_REG(R4, R3),
1776 BPF_MOV64_REG(R5, R4),
1777 BPF_MOV64_REG(R6, R5),
1778 BPF_MOV64_REG(R7, R6),
1779 BPF_MOV64_REG(R8, R7),
1780 BPF_MOV64_REG(R9, R8),
1781 BPF_LD_IMM64(R0, 0x0LL),
1782 BPF_LD_IMM64(R1, 0x0LL),
1783 BPF_LD_IMM64(R2, 0x0LL),
1784 BPF_LD_IMM64(R3, 0x0LL),
1785 BPF_LD_IMM64(R4, 0x0LL),
1786 BPF_LD_IMM64(R5, 0x0LL),
1787 BPF_LD_IMM64(R6, 0x0LL),
1788 BPF_LD_IMM64(R7, 0x0LL),
1789 BPF_LD_IMM64(R8, 0x0LL),
1790 BPF_LD_IMM64(R9, 0x0LL),
1791 BPF_ALU64_REG(BPF_ADD, R0, R0),
1792 BPF_ALU64_REG(BPF_ADD, R0, R1),
1793 BPF_ALU64_REG(BPF_ADD, R0, R2),
1794 BPF_ALU64_REG(BPF_ADD, R0, R3),
1795 BPF_ALU64_REG(BPF_ADD, R0, R4),
1796 BPF_ALU64_REG(BPF_ADD, R0, R5),
1797 BPF_ALU64_REG(BPF_ADD, R0, R6),
1798 BPF_ALU64_REG(BPF_ADD, R0, R7),
1799 BPF_ALU64_REG(BPF_ADD, R0, R8),
1800 BPF_ALU64_REG(BPF_ADD, R0, R9),
1801 BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
1802 BPF_EXIT_INSN(),
1803 },
1804 INTERNAL,
1805 { },
1806 { { 0, 0xfefe } }
1807 },
1688 { 1808 {
1689 "INT: ALU MIX", 1809 "INT: ALU MIX",
1690 .u.insns_int = { 1810 .u.insns_int = {
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 8c1ad1ced72c..270bf7289b1e 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -36,9 +36,9 @@ static int runs = 4;
36module_param(runs, int, 0); 36module_param(runs, int, 0);
37MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)"); 37MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
38 38
39static int max_size = 65536; 39static int max_size = 0;
40module_param(max_size, int, 0); 40module_param(max_size, int, 0);
41MODULE_PARM_DESC(runs, "Maximum table size (default: 65536)"); 41MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)");
42 42
43static bool shrinking = false; 43static bool shrinking = false;
44module_param(shrinking, bool, 0); 44module_param(shrinking, bool, 0);
@@ -52,6 +52,10 @@ static int tcount = 10;
52module_param(tcount, int, 0); 52module_param(tcount, int, 0);
53MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)"); 53MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
54 54
55static bool enomem_retry = false;
56module_param(enomem_retry, bool, 0);
57MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");
58
55struct test_obj { 59struct test_obj {
56 int value; 60 int value;
57 struct rhash_head node; 61 struct rhash_head node;
@@ -76,6 +80,28 @@ static struct rhashtable_params test_rht_params = {
76static struct semaphore prestart_sem; 80static struct semaphore prestart_sem;
77static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0); 81static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
78 82
83static int insert_retry(struct rhashtable *ht, struct rhash_head *obj,
84 const struct rhashtable_params params)
85{
86 int err, retries = -1, enomem_retries = 0;
87
88 do {
89 retries++;
90 cond_resched();
91 err = rhashtable_insert_fast(ht, obj, params);
92 if (err == -ENOMEM && enomem_retry) {
93 enomem_retries++;
94 err = -EBUSY;
95 }
96 } while (err == -EBUSY);
97
98 if (enomem_retries)
99 pr_info(" %u insertions retried after -ENOMEM\n",
100 enomem_retries);
101
102 return err ? : retries;
103}
104
79static int __init test_rht_lookup(struct rhashtable *ht) 105static int __init test_rht_lookup(struct rhashtable *ht)
80{ 106{
81 unsigned int i; 107 unsigned int i;
@@ -157,7 +183,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
157{ 183{
158 struct test_obj *obj; 184 struct test_obj *obj;
159 int err; 185 int err;
160 unsigned int i, insert_fails = 0; 186 unsigned int i, insert_retries = 0;
161 s64 start, end; 187 s64 start, end;
162 188
163 /* 189 /*
@@ -170,22 +196,16 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
170 struct test_obj *obj = &array[i]; 196 struct test_obj *obj = &array[i];
171 197
172 obj->value = i * 2; 198 obj->value = i * 2;
173 199 err = insert_retry(ht, &obj->node, test_rht_params);
174 err = rhashtable_insert_fast(ht, &obj->node, test_rht_params); 200 if (err > 0)
175 if (err == -ENOMEM || err == -EBUSY) { 201 insert_retries += err;
176 /* Mark failed inserts but continue */ 202 else if (err)
177 obj->value = TEST_INSERT_FAIL;
178 insert_fails++;
179 } else if (err) {
180 return err; 203 return err;
181 }
182
183 cond_resched();
184 } 204 }
185 205
186 if (insert_fails) 206 if (insert_retries)
187 pr_info(" %u insertions failed due to memory pressure\n", 207 pr_info(" %u insertions retried due to memory pressure\n",
188 insert_fails); 208 insert_retries);
189 209
190 test_bucket_stats(ht); 210 test_bucket_stats(ht);
191 rcu_read_lock(); 211 rcu_read_lock();
@@ -236,13 +256,15 @@ static int thread_lookup_test(struct thread_data *tdata)
236 obj->value, key); 256 obj->value, key);
237 err++; 257 err++;
238 } 258 }
259
260 cond_resched();
239 } 261 }
240 return err; 262 return err;
241} 263}
242 264
243static int threadfunc(void *data) 265static int threadfunc(void *data)
244{ 266{
245 int i, step, err = 0, insert_fails = 0; 267 int i, step, err = 0, insert_retries = 0;
246 struct thread_data *tdata = data; 268 struct thread_data *tdata = data;
247 269
248 up(&prestart_sem); 270 up(&prestart_sem);
@@ -251,20 +273,18 @@ static int threadfunc(void *data)
251 273
252 for (i = 0; i < entries; i++) { 274 for (i = 0; i < entries; i++) {
253 tdata->objs[i].value = (tdata->id << 16) | i; 275 tdata->objs[i].value = (tdata->id << 16) | i;
254 err = rhashtable_insert_fast(&ht, &tdata->objs[i].node, 276 err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params);
255 test_rht_params); 277 if (err > 0) {
256 if (err == -ENOMEM || err == -EBUSY) { 278 insert_retries += err;
257 tdata->objs[i].value = TEST_INSERT_FAIL;
258 insert_fails++;
259 } else if (err) { 279 } else if (err) {
260 pr_err(" thread[%d]: rhashtable_insert_fast failed\n", 280 pr_err(" thread[%d]: rhashtable_insert_fast failed\n",
261 tdata->id); 281 tdata->id);
262 goto out; 282 goto out;
263 } 283 }
264 } 284 }
265 if (insert_fails) 285 if (insert_retries)
266 pr_info(" thread[%d]: %d insert failures\n", 286 pr_info(" thread[%d]: %u insertions retried due to memory pressure\n",
267 tdata->id, insert_fails); 287 tdata->id, insert_retries);
268 288
269 err = thread_lookup_test(tdata); 289 err = thread_lookup_test(tdata);
270 if (err) { 290 if (err) {
@@ -285,6 +305,8 @@ static int threadfunc(void *data)
285 goto out; 305 goto out;
286 } 306 }
287 tdata->objs[i].value = TEST_INSERT_FAIL; 307 tdata->objs[i].value = TEST_INSERT_FAIL;
308
309 cond_resched();
288 } 310 }
289 err = thread_lookup_test(tdata); 311 err = thread_lookup_test(tdata);
290 if (err) { 312 if (err) {
@@ -311,7 +333,7 @@ static int __init test_rht_init(void)
311 entries = min(entries, MAX_ENTRIES); 333 entries = min(entries, MAX_ENTRIES);
312 334
313 test_rht_params.automatic_shrinking = shrinking; 335 test_rht_params.automatic_shrinking = shrinking;
314 test_rht_params.max_size = max_size; 336 test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
315 test_rht_params.nelem_hint = size; 337 test_rht_params.nelem_hint = size;
316 338
317 pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n", 339 pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
@@ -357,6 +379,8 @@ static int __init test_rht_init(void)
357 return -ENOMEM; 379 return -ENOMEM;
358 } 380 }
359 381
382 test_rht_params.max_size = max_size ? :
383 roundup_pow_of_two(tcount * entries);
360 err = rhashtable_init(&ht, &test_rht_params); 384 err = rhashtable_init(&ht, &test_rht_params);
361 if (err < 0) { 385 if (err < 0) {
362 pr_warn("Test failed: Unable to initialize hashtable: %d\n", 386 pr_warn("Test failed: Unable to initialize hashtable: %d\n",
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index f9cee8e1233c..ac3f9476b776 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -31,6 +31,9 @@
31#include <linux/dcache.h> 31#include <linux/dcache.h>
32#include <linux/cred.h> 32#include <linux/cred.h>
33#include <net/addrconf.h> 33#include <net/addrconf.h>
34#ifdef CONFIG_BLOCK
35#include <linux/blkdev.h>
36#endif
34 37
35#include <asm/page.h> /* for PAGE_SIZE */ 38#include <asm/page.h> /* for PAGE_SIZE */
36#include <asm/sections.h> /* for dereference_function_descriptor() */ 39#include <asm/sections.h> /* for dereference_function_descriptor() */
@@ -613,6 +616,26 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp
613 return buf; 616 return buf;
614} 617}
615 618
619#ifdef CONFIG_BLOCK
620static noinline_for_stack
621char *bdev_name(char *buf, char *end, struct block_device *bdev,
622 struct printf_spec spec, const char *fmt)
623{
624 struct gendisk *hd = bdev->bd_disk;
625
626 buf = string(buf, end, hd->disk_name, spec);
627 if (bdev->bd_part->partno) {
628 if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
629 if (buf < end)
630 *buf = 'p';
631 buf++;
632 }
633 buf = number(buf, end, bdev->bd_part->partno, spec);
634 }
635 return buf;
636}
637#endif
638
616static noinline_for_stack 639static noinline_for_stack
617char *symbol_string(char *buf, char *end, void *ptr, 640char *symbol_string(char *buf, char *end, void *ptr,
618 struct printf_spec spec, const char *fmt) 641 struct printf_spec spec, const char *fmt)
@@ -1443,6 +1466,7 @@ int kptr_restrict __read_mostly;
1443 * (default assumed to be phys_addr_t, passed by reference) 1466 * (default assumed to be phys_addr_t, passed by reference)
1444 * - 'd[234]' For a dentry name (optionally 2-4 last components) 1467 * - 'd[234]' For a dentry name (optionally 2-4 last components)
1445 * - 'D[234]' Same as 'd' but for a struct file 1468 * - 'D[234]' Same as 'd' but for a struct file
1469 * - 'g' For block_device name (gendisk + partition number)
1446 * - 'C' For a clock, it prints the name (Common Clock Framework) or address 1470 * - 'C' For a clock, it prints the name (Common Clock Framework) or address
1447 * (legacy clock framework) of the clock 1471 * (legacy clock framework) of the clock
1448 * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address 1472 * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
@@ -1600,6 +1624,11 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1600 return dentry_name(buf, end, 1624 return dentry_name(buf, end,
1601 ((const struct file *)ptr)->f_path.dentry, 1625 ((const struct file *)ptr)->f_path.dentry,
1602 spec, fmt); 1626 spec, fmt);
1627#ifdef CONFIG_BLOCK
1628 case 'g':
1629 return bdev_name(buf, end, ptr, spec, fmt);
1630#endif
1631
1603 } 1632 }
1604 spec.flags |= SMALL; 1633 spec.flags |= SMALL;
1605 if (spec.field_width == -1) { 1634 if (spec.field_width == -1) {