Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug          |  60
-rw-r--r--  lib/Makefile               |   2
-rw-r--r--  lib/div64.c                |   2
-rw-r--r--  lib/ioremap.c              |  53
-rw-r--r--  lib/iov_iter.c             | 851
-rw-r--r--  lib/kobject.c              |   7
-rw-r--r--  lib/lcm.c                  |  11
-rw-r--r--  lib/lockref.c              |   2
-rw-r--r--  lib/lz4/lz4_decompress.c   |  21
-rw-r--r--  lib/nlattr.c               |   2
-rw-r--r--  lib/seq_buf.c              |   4
11 files changed, 988 insertions(+), 27 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c5cefb3c009c..17670573dda8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -865,6 +865,19 @@ config SCHED_STACK_END_CHECK
 	  data corruption or a sporadic crash at a later stage once the region
 	  is examined. The runtime overhead introduced is minimal.
 
+config DEBUG_TIMEKEEPING
+	bool "Enable extra timekeeping sanity checking"
+	help
+	  This option enables additional timekeeping sanity checks
+	  which may be helpful when diagnosing issues where timekeeping
+	  problems are suspected.
+
+	  This may include checks in the timekeeping hotpaths, so this
+	  option may have a (very small) performance impact on some
+	  workloads.
+
+	  If unsure, say N.
+
 config TIMER_STATS
 	bool "Collect kernel timers statistics"
 	depends on DEBUG_KERNEL && PROC_FS
@@ -1180,16 +1193,7 @@ config DEBUG_CREDENTIALS
 menu "RCU Debugging"
 
 config PROVE_RCU
-	bool "RCU debugging: prove RCU correctness"
-	depends on PROVE_LOCKING
-	default n
-	help
-	 This feature enables lockdep extensions that check for correct
-	 use of RCU APIs.  This is currently under development.  Say Y
-	 if you want to debug RCU usage or help work on the PROVE_RCU
-	 feature.
-
-	 Say N if you are unsure.
+	def_bool PROVE_LOCKING
 
 config PROVE_RCU_REPEATEDLY
 	bool "RCU debugging: don't disable PROVE_RCU on first splat"
@@ -1257,6 +1261,30 @@ config RCU_TORTURE_TEST_RUNNABLE
 	  Say N here if you want the RCU torture tests to start only
 	  after being manually enabled via /proc.
 
+config RCU_TORTURE_TEST_SLOW_INIT
+	bool "Slow down RCU grace-period initialization to expose races"
+	depends on RCU_TORTURE_TEST
+	help
+	  This option makes grace-period initialization block for a
+	  few jiffies between initializing each pair of consecutive
+	  rcu_node structures.  This helps to expose races involving
+	  grace-period initialization; in other words, it makes your
+	  kernel less stable.  It can also greatly increase grace-period
+	  latency, especially on systems with large numbers of CPUs.
+	  This is useful when torture-testing RCU, but in almost no
+	  other circumstance.
+
+	  Say Y here if you want your system to crash and hang more often.
+	  Say N if you want a sane system.
+
+config RCU_TORTURE_TEST_SLOW_INIT_DELAY
+	int "How much to slow down RCU grace-period initialization"
+	range 0 5
+	default 3
+	help
+	  This option specifies the number of jiffies to wait between
+	  each rcu_node structure initialization.
+
 config RCU_CPU_STALL_TIMEOUT
 	int "RCU CPU stall timeout in seconds"
 	depends on RCU_STALL_COMMON
@@ -1732,6 +1760,18 @@ config TEST_UDELAY
 
 	  If unsure, say N.
 
+config MEMTEST
+	bool "Memtest"
+	depends on HAVE_MEMBLOCK
+	---help---
+	  This option adds the kernel parameter 'memtest', which selects
+	  how many memory test patterns are run at boot:
+		memtest=0, disabled -- the default
+		memtest=1, run 1 test pattern
+		...
+		memtest=17, run 17 test patterns
+	  If you are unsure how to answer this question, answer N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
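
The MEMTEST option above only wires up a boot parameter; nothing runs unless memtest= is passed on the kernel command line. As a usage sketch (the boot-loader syntax, kernel path, and root device are assumptions; only the memtest= parameter comes from the help text), a GRUB-style entry requesting four test patterns:

	linux /boot/vmlinuz root=/dev/sda1 memtest=4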
diff --git a/lib/Makefile b/lib/Makefile
index 87eb3bffc283..58f74d2dd396 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -24,7 +24,7 @@ obj-y += lockref.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
-	 gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \
+	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
diff --git a/lib/div64.c b/lib/div64.c
index 4382ad77777e..19ea7ed4b948 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -127,7 +127,7 @@ EXPORT_SYMBOL(div64_u64_rem);
  * by the book 'Hacker's Delight'. The original source and full proof
  * can be found here and is available for use without restriction.
  *
- * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
+ * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
  */
 #ifndef div64_u64
 u64 div64_u64(u64 dividend, u64 divisor)
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 0c9216c48762..86c8911b0e3a 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -13,6 +13,43 @@
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static int __read_mostly ioremap_pud_capable;
+static int __read_mostly ioremap_pmd_capable;
+static int __read_mostly ioremap_huge_disabled;
+
+static int __init set_nohugeiomap(char *str)
+{
+	ioremap_huge_disabled = 1;
+	return 0;
+}
+early_param("nohugeiomap", set_nohugeiomap);
+
+void __init ioremap_huge_init(void)
+{
+	if (!ioremap_huge_disabled) {
+		if (arch_ioremap_pud_supported())
+			ioremap_pud_capable = 1;
+		if (arch_ioremap_pmd_supported())
+			ioremap_pmd_capable = 1;
+	}
+}
+
+static inline int ioremap_pud_enabled(void)
+{
+	return ioremap_pud_capable;
+}
+
+static inline int ioremap_pmd_enabled(void)
+{
+	return ioremap_pmd_capable;
+}
+
+#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int ioremap_pud_enabled(void) { return 0; }
+static inline int ioremap_pmd_enabled(void) { return 0; }
+#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
 static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
 		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
@@ -43,6 +80,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
 		return -ENOMEM;
 	do {
 		next = pmd_addr_end(addr, end);
+
+		if (ioremap_pmd_enabled() &&
+		    ((next - addr) == PMD_SIZE) &&
+		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
+			if (pmd_set_huge(pmd, phys_addr + addr, prot))
+				continue;
+		}
+
 		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
 			return -ENOMEM;
 	} while (pmd++, addr = next, addr != end);
@@ -61,6 +106,14 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
 		return -ENOMEM;
 	do {
 		next = pud_addr_end(addr, end);
+
+		if (ioremap_pud_enabled() &&
+		    ((next - addr) == PUD_SIZE) &&
+		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
+			if (pud_set_huge(pud, phys_addr + addr, prot))
+				continue;
+		}
+
 		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
 			return -ENOMEM;
 	} while (pud++, addr = next, addr != end);
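
The two hunks above apply the same eligibility test at PMD and then PUD level before falling back to smaller page-table entries. A minimal standalone sketch of that test at the PMD level, assuming the x86-64 value of PMD_SIZE (both macros are redefined here to mirror the kernel's, not taken from it):

	#include <stdbool.h>
	#include <stdint.h>

	#define PMD_SIZE		(2UL * 1024 * 1024)	/* assumption: 2 MiB, as on x86-64 */
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	/*
	 * A PMD-sized huge mapping is attempted only when the remaining
	 * virtual range covers one full PMD and the physical address that
	 * would back it is PMD-aligned; otherwise the walk falls through
	 * to ordinary PTE-sized mappings.
	 */
	static bool can_map_pmd(uintptr_t addr, uintptr_t next, uint64_t phys_addr)
	{
		return (next - addr) == PMD_SIZE &&
		       IS_ALIGNED(phys_addr + addr, PMD_SIZE);
	}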
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
new file mode 100644
index 000000000000..75232ad0a5e7
--- /dev/null
+++ b/lib/iov_iter.c
@@ -0,0 +1,851 @@
+#include <linux/export.h>
+#include <linux/uio.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <net/checksum.h>
+
+#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
+	size_t left;					\
+	size_t wanted = n;				\
+	__p = i->iov;					\
+	__v.iov_len = min(n, __p->iov_len - skip);	\
+	if (likely(__v.iov_len)) {			\
+		__v.iov_base = __p->iov_base + skip;	\
+		left = (STEP);				\
+		__v.iov_len -= left;			\
+		skip += __v.iov_len;			\
+		n -= __v.iov_len;			\
+	} else {					\
+		left = 0;				\
+	}						\
+	while (unlikely(!left && n)) {			\
+		__p++;					\
+		__v.iov_len = min(n, __p->iov_len);	\
+		if (unlikely(!__v.iov_len))		\
+			continue;			\
+		__v.iov_base = __p->iov_base;		\
+		left = (STEP);				\
+		__v.iov_len -= left;			\
+		skip = __v.iov_len;			\
+		n -= __v.iov_len;			\
+	}						\
+	n = wanted - n;					\
+}
+
+#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
+	size_t wanted = n;				\
+	__p = i->kvec;					\
+	__v.iov_len = min(n, __p->iov_len - skip);	\
+	if (likely(__v.iov_len)) {			\
+		__v.iov_base = __p->iov_base + skip;	\
+		(void)(STEP);				\
+		skip += __v.iov_len;			\
+		n -= __v.iov_len;			\
+	}						\
+	while (unlikely(n)) {				\
+		__p++;					\
+		__v.iov_len = min(n, __p->iov_len);	\
+		if (unlikely(!__v.iov_len))		\
+			continue;			\
+		__v.iov_base = __p->iov_base;		\
+		(void)(STEP);				\
+		skip = __v.iov_len;			\
+		n -= __v.iov_len;			\
+	}						\
+	n = wanted;					\
+}
+
+#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
+	size_t wanted = n;				\
+	__p = i->bvec;					\
+	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
+	if (likely(__v.bv_len)) {			\
+		__v.bv_page = __p->bv_page;		\
+		__v.bv_offset = __p->bv_offset + skip; 	\
+		(void)(STEP);				\
+		skip += __v.bv_len;			\
+		n -= __v.bv_len;			\
+	}						\
+	while (unlikely(n)) {				\
+		__p++;					\
+		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
+		if (unlikely(!__v.bv_len))		\
+			continue;			\
+		__v.bv_page = __p->bv_page;		\
+		__v.bv_offset = __p->bv_offset;		\
+		(void)(STEP);				\
+		skip = __v.bv_len;			\
+		n -= __v.bv_len;			\
+	}						\
+	n = wanted;					\
+}
+
+#define iterate_all_kinds(i, n, v, I, B, K) {		\
+	size_t skip = i->iov_offset;			\
+	if (unlikely(i->type & ITER_BVEC)) {		\
+		const struct bio_vec *bvec;		\
+		struct bio_vec v;			\
+		iterate_bvec(i, n, v, bvec, skip, (B))	\
+	} else if (unlikely(i->type & ITER_KVEC)) {	\
+		const struct kvec *kvec;		\
+		struct kvec v;				\
+		iterate_kvec(i, n, v, kvec, skip, (K))	\
+	} else {					\
+		const struct iovec *iov;		\
+		struct iovec v;				\
+		iterate_iovec(i, n, v, iov, skip, (I))	\
+	}						\
+}
+
+#define iterate_and_advance(i, n, v, I, B, K) {		\
+	size_t skip = i->iov_offset;			\
+	if (unlikely(i->type & ITER_BVEC)) {		\
+		const struct bio_vec *bvec;		\
+		struct bio_vec v;			\
+		iterate_bvec(i, n, v, bvec, skip, (B))	\
+		if (skip == bvec->bv_len) {		\
+			bvec++;				\
+			skip = 0;			\
+		}					\
+		i->nr_segs -= bvec - i->bvec;		\
+		i->bvec = bvec;				\
+	} else if (unlikely(i->type & ITER_KVEC)) {	\
+		const struct kvec *kvec;		\
+		struct kvec v;				\
+		iterate_kvec(i, n, v, kvec, skip, (K))	\
+		if (skip == kvec->iov_len) {		\
+			kvec++;				\
+			skip = 0;			\
+		}					\
+		i->nr_segs -= kvec - i->kvec;		\
+		i->kvec = kvec;				\
+	} else {					\
+		const struct iovec *iov;		\
+		struct iovec v;				\
+		iterate_iovec(i, n, v, iov, skip, (I))	\
+		if (skip == iov->iov_len) {		\
+			iov++;				\
+			skip = 0;			\
+		}					\
+		i->nr_segs -= iov - i->iov;		\
+		i->iov = iov;				\
+	}						\
+	i->count -= n;					\
+	i->iov_offset = skip;				\
+}
+
+static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	size_t skip, copy, left, wanted;
+	const struct iovec *iov;
+	char __user *buf;
+	void *kaddr, *from;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	iov = i->iov;
+	skip = i->iov_offset;
+	buf = iov->iov_base + skip;
+	copy = min(bytes, iov->iov_len - skip);
+
+	if (!fault_in_pages_writeable(buf, copy)) {
+		kaddr = kmap_atomic(page);
+		from = kaddr + offset;
+
+		/* first chunk, usually the only one */
+		left = __copy_to_user_inatomic(buf, from, copy);
+		copy -= left;
+		skip += copy;
+		from += copy;
+		bytes -= copy;
+
+		while (unlikely(!left && bytes)) {
+			iov++;
+			buf = iov->iov_base;
+			copy = min(bytes, iov->iov_len);
+			left = __copy_to_user_inatomic(buf, from, copy);
+			copy -= left;
+			skip = copy;
+			from += copy;
+			bytes -= copy;
+		}
+		if (likely(!bytes)) {
+			kunmap_atomic(kaddr);
+			goto done;
+		}
+		offset = from - kaddr;
+		buf += copy;
+		kunmap_atomic(kaddr);
+		copy = min(bytes, iov->iov_len - skip);
+	}
+	/* Too bad - revert to non-atomic kmap */
+	kaddr = kmap(page);
+	from = kaddr + offset;
+	left = __copy_to_user(buf, from, copy);
+	copy -= left;
+	skip += copy;
+	from += copy;
+	bytes -= copy;
+	while (unlikely(!left && bytes)) {
+		iov++;
+		buf = iov->iov_base;
+		copy = min(bytes, iov->iov_len);
+		left = __copy_to_user(buf, from, copy);
+		copy -= left;
+		skip = copy;
+		from += copy;
+		bytes -= copy;
+	}
+	kunmap(page);
+done:
+	if (skip == iov->iov_len) {
+		iov++;
+		skip = 0;
+	}
+	i->count -= wanted - bytes;
+	i->nr_segs -= iov - i->iov;
+	i->iov = iov;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+
+static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	size_t skip, copy, left, wanted;
+	const struct iovec *iov;
+	char __user *buf;
+	void *kaddr, *to;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	iov = i->iov;
+	skip = i->iov_offset;
+	buf = iov->iov_base + skip;
+	copy = min(bytes, iov->iov_len - skip);
+
+	if (!fault_in_pages_readable(buf, copy)) {
+		kaddr = kmap_atomic(page);
+		to = kaddr + offset;
+
+		/* first chunk, usually the only one */
+		left = __copy_from_user_inatomic(to, buf, copy);
+		copy -= left;
+		skip += copy;
+		to += copy;
+		bytes -= copy;
+
+		while (unlikely(!left && bytes)) {
+			iov++;
+			buf = iov->iov_base;
+			copy = min(bytes, iov->iov_len);
+			left = __copy_from_user_inatomic(to, buf, copy);
+			copy -= left;
+			skip = copy;
+			to += copy;
+			bytes -= copy;
+		}
+		if (likely(!bytes)) {
+			kunmap_atomic(kaddr);
+			goto done;
+		}
+		offset = to - kaddr;
+		buf += copy;
+		kunmap_atomic(kaddr);
+		copy = min(bytes, iov->iov_len - skip);
+	}
+	/* Too bad - revert to non-atomic kmap */
+	kaddr = kmap(page);
+	to = kaddr + offset;
+	left = __copy_from_user(to, buf, copy);
+	copy -= left;
+	skip += copy;
+	to += copy;
+	bytes -= copy;
+	while (unlikely(!left && bytes)) {
+		iov++;
+		buf = iov->iov_base;
+		copy = min(bytes, iov->iov_len);
+		left = __copy_from_user(to, buf, copy);
+		copy -= left;
+		skip = copy;
+		to += copy;
+		bytes -= copy;
+	}
+	kunmap(page);
+done:
+	if (skip == iov->iov_len) {
+		iov++;
+		skip = 0;
+	}
+	i->count -= wanted - bytes;
+	i->nr_segs -= iov - i->iov;
+	i->iov = iov;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (i.e. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
+		char __user *buf = i->iov->iov_base + i->iov_offset;
+		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+		return fault_in_pages_readable(buf, bytes);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+/*
+ * Fault in one or more iovecs of the given iov_iter, to a maximum length of
+ * bytes. For each iovec, fault in each page that constitutes the iovec.
+ *
+ * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
+ * because it is an invalid address).
+ */
+int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
+{
+	size_t skip = i->iov_offset;
+	const struct iovec *iov;
+	int err;
+	struct iovec v;
+
+	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
+		iterate_iovec(i, bytes, v, iov, skip, ({
+			err = fault_in_multipages_readable(v.iov_base,
+					v.iov_len);
+			if (unlikely(err))
+				return err;
+		0;}))
+	}
+	return 0;
+}
+EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
+
+void iov_iter_init(struct iov_iter *i, int direction,
+			const struct iovec *iov, unsigned long nr_segs,
+			size_t count)
+{
+	/* It will get better.  Eventually... */
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		direction |= ITER_KVEC;
+		i->type = direction;
+		i->kvec = (struct kvec *)iov;
+	} else {
+		i->type = direction;
+		i->iov = iov;
+	}
+	i->nr_segs = nr_segs;
+	i->iov_offset = 0;
+	i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_init);
+
+static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
+{
+	char *from = kmap_atomic(page);
+	memcpy(to, from + offset, len);
+	kunmap_atomic(from);
+}
+
+static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
+{
+	char *to = kmap_atomic(page);
+	memcpy(to + offset, from, len);
+	kunmap_atomic(to);
+}
+
+static void memzero_page(struct page *page, size_t offset, size_t len)
+{
+	char *addr = kmap_atomic(page);
+	memset(addr + offset, 0, len);
+	kunmap_atomic(addr);
+}
+
+size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *from = addr;
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	iterate_and_advance(i, bytes, v,
+		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
+			       v.iov_len),
+		memcpy_to_page(v.bv_page, v.bv_offset,
+			       (from += v.bv_len) - v.bv_len, v.bv_len),
+		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
+	)
+
+	return bytes;
+}
+EXPORT_SYMBOL(copy_to_iter);
+
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	iterate_and_advance(i, bytes, v,
+		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
+				 v.iov_len),
+		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+
+	return bytes;
+}
+EXPORT_SYMBOL(copy_from_iter);
+
+size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	iterate_and_advance(i, bytes, v,
+		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+					 v.iov_base, v.iov_len),
+		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+
+	return bytes;
+}
+EXPORT_SYMBOL(copy_from_iter_nocache);
+
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	if (i->type & (ITER_BVEC|ITER_KVEC)) {
+		void *kaddr = kmap_atomic(page);
+		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
+		kunmap_atomic(kaddr);
+		return wanted;
+	} else
+		return copy_page_to_iter_iovec(page, offset, bytes, i);
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	if (i->type & (ITER_BVEC|ITER_KVEC)) {
+		void *kaddr = kmap_atomic(page);
+		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+		kunmap_atomic(kaddr);
+		return wanted;
+	} else
+		return copy_page_from_iter_iovec(page, offset, bytes, i);
+}
+EXPORT_SYMBOL(copy_page_from_iter);
+
+size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+{
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	iterate_and_advance(i, bytes, v,
+		__clear_user(v.iov_base, v.iov_len),
+		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
+		memset(v.iov_base, 0, v.iov_len)
+	)
+
+	return bytes;
+}
+EXPORT_SYMBOL(iov_iter_zero);
+
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+	iterate_all_kinds(i, bytes, v,
+		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
+					  v.iov_base, v.iov_len),
+		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+	kunmap_atomic(kaddr);
+	return bytes;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+void iov_iter_advance(struct iov_iter *i, size_t size)
+{
+	iterate_and_advance(i, size, v, 0, 0, 0)
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+	if (i->nr_segs == 1)
+		return i->count;
+	else if (i->type & ITER_BVEC)
+		return min(i->count, i->bvec->bv_len - i->iov_offset);
+	else
+		return min(i->count, i->iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
+
+void iov_iter_kvec(struct iov_iter *i, int direction,
+			const struct kvec *kvec, unsigned long nr_segs,
+			size_t count)
+{
+	BUG_ON(!(direction & ITER_KVEC));
+	i->type = direction;
+	i->kvec = kvec;
+	i->nr_segs = nr_segs;
+	i->iov_offset = 0;
+	i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_kvec);
+
+void iov_iter_bvec(struct iov_iter *i, int direction,
+			const struct bio_vec *bvec, unsigned long nr_segs,
+			size_t count)
+{
+	BUG_ON(!(direction & ITER_BVEC));
+	i->type = direction;
+	i->bvec = bvec;
+	i->nr_segs = nr_segs;
+	i->iov_offset = 0;
+	i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_bvec);
+
+unsigned long iov_iter_alignment(const struct iov_iter *i)
+{
+	unsigned long res = 0;
+	size_t size = i->count;
+
+	if (!size)
+		return 0;
+
+	iterate_all_kinds(i, size, v,
+		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
+		res |= v.bv_offset | v.bv_len,
+		res |= (unsigned long)v.iov_base | v.iov_len
+	)
+	return res;
+}
+EXPORT_SYMBOL(iov_iter_alignment);
+
+ssize_t iov_iter_get_pages(struct iov_iter *i,
+		   struct page **pages, size_t maxsize, unsigned maxpages,
+		   size_t *start)
+{
+	if (maxsize > i->count)
+		maxsize = i->count;
+
+	if (!maxsize)
+		return 0;
+
+	iterate_all_kinds(i, maxsize, v, ({
+		unsigned long addr = (unsigned long)v.iov_base;
+		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
+		int n;
+		int res;
+
+		if (len > maxpages * PAGE_SIZE)
+			len = maxpages * PAGE_SIZE;
+		addr &= ~(PAGE_SIZE - 1);
+		n = DIV_ROUND_UP(len, PAGE_SIZE);
+		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
+		if (unlikely(res < 0))
+			return res;
+		return (res == n ? len : res * PAGE_SIZE) - *start;
+	0;}),({
+		/* can't be more than PAGE_SIZE */
+		*start = v.bv_offset;
+		get_page(*pages = v.bv_page);
+		return v.bv_len;
+	}),({
+		return -EFAULT;
+	})
+	)
+	return 0;
+}
+EXPORT_SYMBOL(iov_iter_get_pages);
+
+static struct page **get_pages_array(size_t n)
+{
+	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
+	if (!p)
+		p = vmalloc(n * sizeof(struct page *));
+	return p;
+}
+
+ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
+		   struct page ***pages, size_t maxsize,
+		   size_t *start)
+{
+	struct page **p;
+
+	if (maxsize > i->count)
+		maxsize = i->count;
+
+	if (!maxsize)
+		return 0;
+
+	iterate_all_kinds(i, maxsize, v, ({
+		unsigned long addr = (unsigned long)v.iov_base;
+		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
+		int n;
+		int res;
+
+		addr &= ~(PAGE_SIZE - 1);
+		n = DIV_ROUND_UP(len, PAGE_SIZE);
+		p = get_pages_array(n);
+		if (!p)
+			return -ENOMEM;
+		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
+		if (unlikely(res < 0)) {
+			kvfree(p);
+			return res;
+		}
+		*pages = p;
+		return (res == n ? len : res * PAGE_SIZE) - *start;
+	0;}),({
+		/* can't be more than PAGE_SIZE */
+		*start = v.bv_offset;
+		*pages = p = get_pages_array(1);
+		if (!p)
+			return -ENOMEM;
+		get_page(*p = v.bv_page);
+		return v.bv_len;
+	}),({
+		return -EFAULT;
+	})
+	)
+	return 0;
+}
+EXPORT_SYMBOL(iov_iter_get_pages_alloc);
+
+size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
+			       struct iov_iter *i)
+{
+	char *to = addr;
+	__wsum sum, next;
+	size_t off = 0;
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	sum = *csum;
+	iterate_and_advance(i, bytes, v, ({
+		int err = 0;
+		next = csum_and_copy_from_user(v.iov_base,
+					       (to += v.iov_len) - v.iov_len,
+					       v.iov_len, 0, &err);
+		if (!err) {
+			sum = csum_block_add(sum, next, off);
+			off += v.iov_len;
+		}
+		err ? v.iov_len : 0;
+	}), ({
+		char *p = kmap_atomic(v.bv_page);
+		next = csum_partial_copy_nocheck(p + v.bv_offset,
+						 (to += v.bv_len) - v.bv_len,
+						 v.bv_len, 0);
+		kunmap_atomic(p);
+		sum = csum_block_add(sum, next, off);
+		off += v.bv_len;
+	}),({
+		next = csum_partial_copy_nocheck(v.iov_base,
+						 (to += v.iov_len) - v.iov_len,
+						 v.iov_len, 0);
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+	})
+	)
+	*csum = sum;
+	return bytes;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter);
+
+size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
+			     struct iov_iter *i)
+{
+	char *from = addr;
+	__wsum sum, next;
+	size_t off = 0;
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	sum = *csum;
+	iterate_and_advance(i, bytes, v, ({
+		int err = 0;
+		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
+					     v.iov_base,
+					     v.iov_len, 0, &err);
+		if (!err) {
+			sum = csum_block_add(sum, next, off);
+			off += v.iov_len;
+		}
+		err ? v.iov_len : 0;
+	}), ({
+		char *p = kmap_atomic(v.bv_page);
+		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
+						 p + v.bv_offset,
+						 v.bv_len, 0);
+		kunmap_atomic(p);
+		sum = csum_block_add(sum, next, off);
+		off += v.bv_len;
+	}),({
+		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
+						 v.iov_base,
+						 v.iov_len, 0);
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+	})
+	)
+	*csum = sum;
+	return bytes;
+}
+EXPORT_SYMBOL(csum_and_copy_to_iter);
+
+int iov_iter_npages(const struct iov_iter *i, int maxpages)
+{
+	size_t size = i->count;
+	int npages = 0;
+
+	if (!size)
+		return 0;
+
+	iterate_all_kinds(i, size, v, ({
+		unsigned long p = (unsigned long)v.iov_base;
+		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
+			- p / PAGE_SIZE;
+		if (npages >= maxpages)
+			return maxpages;
+	0;}),({
+		npages++;
+		if (npages >= maxpages)
+			return maxpages;
+	}),({
+		unsigned long p = (unsigned long)v.iov_base;
+		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
+			- p / PAGE_SIZE;
+		if (npages >= maxpages)
+			return maxpages;
+	})
+	)
+	return npages;
+}
+EXPORT_SYMBOL(iov_iter_npages);
+
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
+{
+	*new = *old;
+	if (new->type & ITER_BVEC)
+		return new->bvec = kmemdup(new->bvec,
+				    new->nr_segs * sizeof(struct bio_vec),
+				    flags);
+	else
+		/* iovec and kvec have identical layout */
+		return new->iov = kmemdup(new->iov,
+				   new->nr_segs * sizeof(struct iovec),
+				   flags);
+}
+EXPORT_SYMBOL(dup_iter);
+
+int import_iovec(int type, const struct iovec __user * uvector,
+		 unsigned nr_segs, unsigned fast_segs,
+		 struct iovec **iov, struct iov_iter *i)
+{
+	ssize_t n;
+	struct iovec *p;
+	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
+				  *iov, &p);
+	if (n < 0) {
+		if (p != *iov)
+			kfree(p);
+		*iov = NULL;
+		return n;
+	}
+	iov_iter_init(i, type, p, nr_segs, n);
+	*iov = p == *iov ? NULL : p;
+	return 0;
+}
+EXPORT_SYMBOL(import_iovec);
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
+		 unsigned nr_segs, unsigned fast_segs,
+		 struct iovec **iov, struct iov_iter *i)
+{
+	ssize_t n;
+	struct iovec *p;
+	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
+				  *iov, &p);
+	if (n < 0) {
+		if (p != *iov)
+			kfree(p);
+		*iov = NULL;
+		return n;
+	}
+	iov_iter_init(i, type, p, nr_segs, n);
+	*iov = p == *iov ? NULL : p;
+	return 0;
+}
+#endif
+
+int import_single_range(int rw, void __user *buf, size_t len,
+		 struct iovec *iov, struct iov_iter *i)
+{
+	if (len > MAX_RW_COUNT)
+		len = MAX_RW_COUNT;
+	if (unlikely(!access_ok(!rw, buf, len)))
+		return -EFAULT;
+
+	iov->iov_base = buf;
+	iov->iov_len = len;
+	iov_iter_init(i, rw, iov, 1, len);
+	return 0;
+}
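
One idiom recurs throughout the new file: (from += v.iov_len) - v.iov_len bumps the flat-buffer cursor and yields its previous value in a single expression, so each macro-expanded step copies the next consecutive chunk. A standalone sketch of just that idiom (the segment lengths are made up):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char src[] = "abcdefgh";
		char dst[sizeof(src)] = "";
		char *from = src;		/* flat-buffer cursor, as in copy_to_iter() */
		char *pos = dst;
		size_t seg[] = { 3, 5 };	/* two iovec-like segment lengths */

		for (int k = 0; k < 2; k++) {
			size_t len = seg[k];

			/* bump the cursor, copy from where it used to point */
			memcpy(pos, (from += len) - len, len);
			pos += len;
		}
		printf("%s\n", dst);		/* prints "abcdefgh" */
		return 0;
	}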
diff --git a/lib/kobject.c b/lib/kobject.c
index 03d4ab349fa7..3b841b97fccd 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -576,8 +576,13 @@ void kobject_del(struct kobject *kobj)
  */
 struct kobject *kobject_get(struct kobject *kobj)
 {
-	if (kobj)
+	if (kobj) {
+		if (!kobj->state_initialized)
+			WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
+			     "initialized, yet kobject_get() is being "
+			     "called.\n", kobject_name(kobj), kobj);
 		kref_get(&kobj->kref);
+	}
 	return kobj;
 }
 
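
A userspace analogue of the check added above, purely illustrative: taking a reference before initialization is exactly the misuse the new WARN() is meant to flag.

	#include <stdbool.h>
	#include <stdio.h>

	struct object {
		bool initialized;
		int refcount;
	};

	static struct object *object_get(struct object *obj)
	{
		if (obj) {
			if (!obj->initialized)
				fprintf(stderr, "object %p: get before init\n",
					(void *)obj);
			obj->refcount++;	/* stands in for kref_get() */
		}
		return obj;
	}

	int main(void)
	{
		struct object o = { .initialized = false, .refcount = 1 };

		object_get(&o);			/* prints the warning */
		return 0;
	}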
diff --git a/lib/lcm.c b/lib/lcm.c
index e97dbd51e756..03d7fcb420b5 100644
--- a/lib/lcm.c
+++ b/lib/lcm.c
@@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(lcm);
+
+unsigned long lcm_not_zero(unsigned long a, unsigned long b)
+{
+	unsigned long l = lcm(a, b);
+
+	if (l)
+		return l;
+
+	return (b ? : a);
+}
+EXPORT_SYMBOL_GPL(lcm_not_zero);
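
A userspace mirror of the new lcm_not_zero() showing its fallback semantics; the Euclidean gcd() below is an assumption standing in for the kernel's lib/gcd.c, and lcm() is transcribed from lib/lcm.c:

	#include <assert.h>

	static unsigned long gcd(unsigned long a, unsigned long b)
	{
		while (b) {
			unsigned long t = a % b;

			a = b;
			b = t;
		}
		return a;
	}

	static unsigned long lcm(unsigned long a, unsigned long b)
	{
		if (a && b)
			return (a / gcd(a, b)) * b;
		return 0;
	}

	static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
	{
		unsigned long l = lcm(a, b);

		return l ? l : (b ? b : a);	/* fall back to the non-zero argument */
	}

	int main(void)
	{
		assert(lcm_not_zero(4, 6) == 12);
		assert(lcm_not_zero(0, 6) == 6);	/* lcm() would give 0 */
		assert(lcm_not_zero(4, 0) == 4);
		return 0;
	}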
diff --git a/lib/lockref.c b/lib/lockref.c
index ecb9a665ec19..494994bf17c8 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -18,7 +18,7 @@
 #define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
 	struct lockref old;							\
 	BUILD_BUG_ON(sizeof(old) != 8);						\
-	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
+	old.lock_count = READ_ONCE(lockref->lock_count);			\
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
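
A sketch of what the READ_ONCE() swap above guarantees, with the macro re-expressed for userspace (scalar case only; the kernel's READ_ONCE()/WRITE_ONCE() also handle non-scalar types, which is part of why ACCESS_ONCE() was being phased out):

	#include <stdint.h>

	/* userspace stand-in for the kernel macro */
	#define READ_ONCE_U64(x)	(*(const volatile uint64_t *)&(x))

	/*
	 * One real load per call: the volatile access keeps the compiler
	 * from tearing, refetching, or caching the value across calls,
	 * which the cmpxchg loop's snapshot of lock_count relies on.
	 */
	uint64_t snapshot(const uint64_t *lock_count)
	{
		return READ_ONCE_U64(*lock_count);
	}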
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 7a85967060a5..26cc6029b280 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -47,6 +47,11 @@
 
 #include "lz4defs.h"
 
+static const int dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
+#if LZ4_ARCH64
+static const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
+#endif
+
 static int lz4_uncompress(const char *source, char *dest, int osize)
 {
 	const BYTE *ip = (const BYTE *) source;
@@ -56,10 +61,6 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 	BYTE *cpy;
 	unsigned token;
 	size_t length;
-	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
-#if LZ4_ARCH64
-	size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
-#endif
 
 	while (1) {
 
@@ -116,7 +117,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 		/* copy repeated sequence */
 		if (unlikely((op - ref) < STEPSIZE)) {
 #if LZ4_ARCH64
-			size_t dec64 = dec64table[op - ref];
+			int dec64 = dec64table[op - ref];
 #else
 			const int dec64 = 0;
 #endif
@@ -139,6 +140,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 		/* Error: request to write beyond destination buffer */
 		if (cpy > oend)
 			goto _output_error;
+		if ((ref + COPYLENGTH) > oend ||
+		    (op + COPYLENGTH) > oend)
+			goto _output_error;
 		LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
 		while (op < cpy)
 			*op++ = *ref++;
@@ -174,11 +178,6 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
 	BYTE * const oend = op + maxoutputsize;
 	BYTE *cpy;
 
-	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
-#if LZ4_ARCH64
-	size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
-#endif
-
 	/* Main Loop */
 	while (ip < iend) {
 
@@ -246,7 +245,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
 		/* copy repeated sequence */
 		if (unlikely((op - ref) < STEPSIZE)) {
 #if LZ4_ARCH64
-			size_t dec64 = dec64table[op - ref];
+			int dec64 = dec64table[op - ref];
 #else
 			const int dec64 = 0;
 #endif
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 76a1b59523ab..f5907d23272d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -279,6 +279,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
 	int minlen = min_t(int, count, nla_len(src));
 
 	memcpy(dest, nla_data(src), minlen);
+	if (count > minlen)
+		memset(dest + minlen, 0, count - minlen);
 
 	return minlen;
 }
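
A userspace sketch of nla_memcpy() after the hunk above: when the attribute payload is shorter than the caller's buffer, the tail is now zeroed rather than left holding stale bytes (min_t is replaced by a plain ternary here):

	#include <stdio.h>
	#include <string.h>

	static int copy_and_pad(void *dest, const void *src, int srclen, int count)
	{
		int minlen = srclen < count ? srclen : count;

		memcpy(dest, src, minlen);
		if (count > minlen)
			memset((char *)dest + minlen, 0, count - minlen);
		return minlen;
	}

	int main(void)
	{
		char buf[8];

		memset(buf, 0xAA, sizeof(buf));		/* simulate stale contents */
		copy_and_pad(buf, "hi", 2, sizeof(buf));
		printf("tail byte: %d\n", buf[7]);	/* 0: the padding took effect */
		return 0;
	}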
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 88c0854bd752..5c94e1012a91 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
 
 	if (s->len < s->size) {
 		len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
-		if (seq_buf_can_fit(s, len)) {
+		if (s->len + len < s->size) {
 			s->len += len;
 			return 0;
 		}
@@ -118,7 +118,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
 
 	if (s->len < s->size) {
 		ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
-		if (seq_buf_can_fit(s, ret)) {
+		if (s->len + ret < s->size) {
 			s->len += ret;
 			return 0;
 		}
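
The strict '<' in both hunks is doing the real work: vsnprintf() and bstr_printf() return the length the output wanted, and the write only counts as fitting when that length plus the existing content still leaves a byte for the terminating NUL. A userspace sketch of the same test:

	#include <stdio.h>

	int main(void)
	{
		char buffer[8];
		size_t len = 0, size = sizeof(buffer);
		int n = snprintf(buffer + len, size - len, "%s", "too-long!");

		if (len + n < size)
			len += n;	/* fits: commit the write */
		else
			printf("overflow: needed %d bytes, have %zu\n",
			       n, size - len);
		return 0;
	}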