-rw-r--r--   12  MAINTAINERS
-rw-r--r--   11  arch/Kconfig
-rw-r--r--   12  arch/ia64/include/asm/uaccess.h
-rw-r--r--   19  arch/powerpc/include/asm/uaccess.h
-rw-r--r--    9  arch/sparc/include/asm/uaccess_32.h
-rw-r--r--    7  arch/sparc/include/asm/uaccess_64.h
-rw-r--r--   10  arch/um/kernel/skas/syscall.c
-rw-r--r--    4  arch/x86/include/asm/uaccess.h
-rw-r--r--    3  arch/x86/um/ptrace_32.c
-rw-r--r--    4  arch/x86/um/ptrace_64.c
-rw-r--r--    3  crypto/cryptd.c
-rw-r--r--   38  drivers/base/regmap/regcache-rbtree.c
-rw-r--r--    5  drivers/base/regmap/regcache.c
-rw-r--r--    2  drivers/base/regmap/regmap.c
-rw-r--r--   77  drivers/crypto/caam/caamalg.c
-rw-r--r--   13  drivers/infiniband/core/multicast.c
-rw-r--r--    2  drivers/infiniband/hw/cxgb4/qp.c
-rw-r--r--   92  drivers/infiniband/hw/hfi1/chip.c
-rw-r--r--    1  drivers/infiniband/hw/hfi1/chip.h
-rw-r--r--  132  drivers/infiniband/hw/hfi1/debugfs.c
-rw-r--r--    4  drivers/infiniband/hw/hfi1/hfi.h
-rw-r--r--    3  drivers/infiniband/hw/hfi1/init.c
-rw-r--r--   12  drivers/infiniband/hw/hfi1/mad.c
-rw-r--r--   12  drivers/infiniband/hw/hfi1/pio_copy.c
-rw-r--r--    5  drivers/infiniband/hw/hfi1/user_sdma.c
-rw-r--r--    1  drivers/infiniband/hw/i40iw/i40iw_hw.c
-rw-r--r--    8  drivers/infiniband/hw/i40iw/i40iw_main.c
-rw-r--r--   26  drivers/infiniband/hw/mlx4/cq.c
-rw-r--r--   22  drivers/infiniband/hw/mlx5/cq.c
-rw-r--r--    6  drivers/infiniband/hw/mlx5/main.c
-rw-r--r--    6  drivers/infiniband/hw/mlx5/mem.c
-rw-r--r--    1  drivers/infiniband/hw/mlx5/mlx5_ib.h
-rw-r--r--   13  drivers/infiniband/hw/mlx5/qp.c
-rw-r--r--    1  drivers/infiniband/ulp/ipoib/ipoib.h
-rw-r--r--   16  drivers/infiniband/ulp/ipoib/ipoib_cm.c
-rw-r--r--    2  drivers/infiniband/ulp/ipoib/ipoib_main.c
-rw-r--r--   23  drivers/infiniband/ulp/isert/ib_isert.c
-rw-r--r--    2  drivers/infiniband/ulp/isert/ib_isert.h
-rw-r--r--    1  drivers/mailbox/Kconfig
-rw-r--r--   11  drivers/mailbox/bcm-pdc-mailbox.c
-rw-r--r--   25  drivers/misc/lkdtm_usercopy.c
-rw-r--r--    5  drivers/pinctrl/intel/pinctrl-cherryview.c
-rw-r--r--   12  drivers/pinctrl/pinctrl-pistachio.c
-rw-r--r--    4  drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
-rw-r--r--    4  drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
-rw-r--r--    4  drivers/regulator/max14577-regulator.c
-rw-r--r--    4  drivers/regulator/max77693-regulator.c
-rw-r--r--   30  drivers/regulator/qcom_smd-regulator.c
-rw-r--r--    5  drivers/scsi/constants.c
-rw-r--r--    4  drivers/scsi/scsi_devinfo.c
-rw-r--r--   16  drivers/scsi/scsi_transport_sas.c
-rw-r--r--    2  drivers/scsi/ses.c
-rw-r--r--    2  drivers/spi/spi-img-spfi.c
-rw-r--r--    1  drivers/spi/spi-mt65xx.c
-rw-r--r--    1  drivers/spi/spi-pxa2xx-pci.c
-rw-r--r--    1  drivers/spi/spi-qup.c
-rw-r--r--    3  drivers/spi/spi-sh-msiof.c
-rw-r--r--   10  drivers/spi/spi.c
-rw-r--r--    7  include/linux/thread_info.h
-rw-r--r--    5  include/scsi/scsi_transport_sas.h
-rw-r--r--   61  mm/usercopy.c
-rwxr-xr-x    4  scripts/package/builddeb
-rw-r--r--   11  security/Kconfig

63 files changed, 469 insertions, 383 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index db814a89599c..1fbd77d2de77 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1644,7 +1644,6 @@ F:	drivers/*/*s3c64xx*
 F:	drivers/*/*s5pv210*
 F:	drivers/memory/samsung/*
 F:	drivers/soc/samsung/*
-F:	drivers/spi/spi-s3c*
 F:	Documentation/arm/Samsung/
 F:	Documentation/devicetree/bindings/arm/samsung/
 F:	Documentation/devicetree/bindings/sram/samsung-sram.txt
@@ -10254,6 +10253,17 @@ S:	Supported
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 F:	drivers/clk/samsung/
 
+SAMSUNG SPI DRIVERS
+M:	Kukjin Kim <kgene@kernel.org>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
+M:	Andi Shyti <andi.shyti@samsung.com>
+L:	linux-spi@vger.kernel.org
+L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	Documentation/devicetree/bindings/spi/spi-samsung.txt
+F:	drivers/spi/spi-s3c*
+F:	include/linux/platform_data/spi-s3c64xx.h
+
 SAMSUNG SXGBE DRIVERS
 M:	Byungho An <bh74.an@samsung.com>
 M:	Girish K S <ks.giri@samsung.com>
diff --git a/arch/Kconfig b/arch/Kconfig
index e9c9334507dd..fd6e9712af81 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -336,17 +336,6 @@ config HAVE_ARCH_SECCOMP_FILTER
 	    results in the system call being skipped immediately.
 	  - seccomp syscall wired up
 
-	  For best performance, an arch should use seccomp_phase1 and
-	  seccomp_phase2 directly.  It should call seccomp_phase1 for all
-	  syscalls if TIF_SECCOMP is set, but seccomp_phase1 does not
-	  need to be called from a ptrace-safe context.  It must then
-	  call seccomp_phase2 if seccomp_phase1 returns anything other
-	  than SECCOMP_PHASE1_OK or SECCOMP_PHASE1_SKIP.
-
-	  As an additional optimization, an arch may provide seccomp_data
-	  directly to seccomp_phase1; this avoids multiple calls
-	  to the syscall_xyz helpers for every syscall.
-
 config SECCOMP_FILTER
 	def_bool y
 	depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 465c70982f40..0472927ebb9b 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -241,8 +241,7 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
-	if (!__builtin_constant_p(count))
-		check_object_size(from, count, true);
+	check_object_size(from, count, true);
 
 	return __copy_user(to, (__force void __user *) from, count);
 }
@@ -250,8 +249,7 @@ __copy_to_user (void __user *to, const void *from, unsigned long count)
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
-	if (!__builtin_constant_p(count))
-		check_object_size(to, count, false);
+	check_object_size(to, count, false);
 
 	return __copy_user((__force void __user *) to, from, count);
 }
@@ -265,8 +263,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 	long __cu_len = (n);							\
 										\
 	if (__access_ok(__cu_to, __cu_len, get_fs())) {				\
-		if (!__builtin_constant_p(n))					\
-			check_object_size(__cu_from, __cu_len, true);		\
+		check_object_size(__cu_from, __cu_len, true);			\
 		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
 	}									\
 	__cu_len;								\
@@ -280,8 +277,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 										\
 	__chk_user_ptr(__cu_from);						\
 	if (__access_ok(__cu_from, __cu_len, get_fs())) {			\
-		if (!__builtin_constant_p(n))					\
-			check_object_size(__cu_to, __cu_len, false);		\
+		check_object_size(__cu_to, __cu_len, false);			\
 		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);	\
 	}									\
 	__cu_len;								\
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index c1dc6c14deb8..f1e382498bbb 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -311,14 +311,12 @@ static inline unsigned long copy_from_user(void *to,
 	unsigned long over;
 
 	if (access_ok(VERIFY_READ, from, n)) {
-		if (!__builtin_constant_p(n))
-			check_object_size(to, n, false);
+		check_object_size(to, n, false);
 		return __copy_tofrom_user((__force void __user *)to, from, n);
 	}
 	if ((unsigned long)from < TASK_SIZE) {
 		over = (unsigned long)from + n - TASK_SIZE;
-		if (!__builtin_constant_p(n - over))
-			check_object_size(to, n - over, false);
+		check_object_size(to, n - over, false);
 		return __copy_tofrom_user((__force void __user *)to, from,
 				n - over) + over;
 	}
@@ -331,14 +329,12 @@ static inline unsigned long copy_to_user(void __user *to,
 	unsigned long over;
 
 	if (access_ok(VERIFY_WRITE, to, n)) {
-		if (!__builtin_constant_p(n))
-			check_object_size(from, n, true);
+		check_object_size(from, n, true);
 		return __copy_tofrom_user(to, (__force void __user *)from, n);
 	}
 	if ((unsigned long)to < TASK_SIZE) {
 		over = (unsigned long)to + n - TASK_SIZE;
-		if (!__builtin_constant_p(n))
-			check_object_size(from, n - over, true);
+		check_object_size(from, n - over, true);
 		return __copy_tofrom_user(to, (__force void __user *)from,
 				n - over) + over;
 	}
@@ -383,8 +379,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
 		return 0;
 	}
 
-	if (!__builtin_constant_p(n))
-		check_object_size(to, n, false);
+	check_object_size(to, n, false);
 
 	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
@@ -412,8 +407,8 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
 		if (ret == 0)
 			return 0;
 	}
-	if (!__builtin_constant_p(n))
-		check_object_size(from, n, true);
+
+	check_object_size(from, n, true);
 
 	return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 341a5a133f48..e722c510bb1b 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -249,8 +249,7 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	if (n && __access_ok((unsigned long) to, n)) {
-		if (!__builtin_constant_p(n))
-			check_object_size(from, n, true);
+		check_object_size(from, n, true);
 		return __copy_user(to, (__force void __user *) from, n);
 	} else
 		return n;
@@ -258,16 +257,14 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (!__builtin_constant_p(n))
-		check_object_size(from, n, true);
+	check_object_size(from, n, true);
 	return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	if (n && __access_ok((unsigned long) from, n)) {
-		if (!__builtin_constant_p(n))
-			check_object_size(to, n, false);
+		check_object_size(to, n, false);
 		return __copy_user((__force void __user *) to, from, n);
 	} else
 		return n;
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 8bda94fab8e8..37a315d0ddd4 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -212,8 +212,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
 	unsigned long ret;
 
-	if (!__builtin_constant_p(size))
-		check_object_size(to, size, false);
+	check_object_size(to, size, false);
 
 	ret = ___copy_from_user(to, from, size);
 	if (unlikely(ret))
@@ -233,8 +232,8 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
 	unsigned long ret;
 
-	if (!__builtin_constant_p(size))
-		check_object_size(from, size, true);
+	check_object_size(from, size, true);
+
 	ret = ___copy_to_user(to, from, size);
 	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index ef4b8f949b51..b783ac87d98a 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -21,21 +21,17 @@ void handle_syscall(struct uml_pt_regs *r)
 	PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
 
 	if (syscall_trace_enter(regs))
-		return;
+		goto out;
 
 	/* Do the seccomp check after ptrace; failures should be fast. */
 	if (secure_computing(NULL) == -1)
-		return;
+		goto out;
 
-	/* Update the syscall number after orig_ax has potentially been updated
-	 * with ptrace.
-	 */
-	UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
 	syscall = UPT_SYSCALL_NR(r);
-
 	if (syscall >= 0 && syscall <= __NR_syscall_max)
 		PT_REGS_SET_SYSCALL_RETURN(regs,
 					   EXECUTE_SYSCALL(syscall, regs));
 
+out:
 	syscall_trace_leave(regs);
 }
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index c3f291195294..e3af86f58eaf 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -705,7 +705,7 @@ static inline void copy_user_overflow(int size, unsigned long count)
 	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	int sz = __compiletime_object_size(to);
@@ -725,7 +725,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	return n;
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	int sz = __compiletime_object_size(from);
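The uaccess hunks above (ia64, powerpc, sparc, x86) all reduce to the same shape: the __builtin_constant_p() guard is dropped so that check_object_size() runs for every copy, not only for non-constant sizes. A minimal, hypothetical sketch of that shape follows; example_copy_to_user() is an invented name for illustration, while check_object_size() (declared in include/linux/thread_info.h, also touched by this series) and __copy_to_user() are the real kernel interfaces.

/* Hypothetical example, not part of the patch. */
#include <linux/thread_info.h>	/* check_object_size() */
#include <linux/uaccess.h>

static inline unsigned long
example_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/* always let hardened usercopy vet the kernel object */
	check_object_size(from, n, true);	/* true: copying to user space */
	return __copy_to_user(to, from, n);	/* arch-specific fast path */
}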
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index ebd4dd6ef73b..a7ef7b131e25 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -84,7 +84,10 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
 	case EAX:
 	case EIP:
 	case UESP:
+		break;
 	case ORIG_EAX:
+		/* Update the syscall number. */
+		UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
 		break;
 	case FS:
 		if (value && (value & 3) != 3)
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index faab418876ce..0b5c184dd5b3 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -78,7 +78,11 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
 	case RSI:
 	case RDI:
 	case RBP:
+		break;
+
 	case ORIG_RAX:
+		/* Update the syscall number. */
+		UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
 		break;
 
 	case FS:
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index cf8037a87b2d..77207b41940c 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -733,13 +733,14 @@ static void cryptd_aead_crypt(struct aead_request *req,
 	rctx = aead_request_ctx(req);
 	compl = rctx->complete;
 
+	tfm = crypto_aead_reqtfm(req);
+
 	if (unlikely(err == -EINPROGRESS))
 		goto out;
 	aead_request_set_tfm(req, child);
 	err = crypt( req );
 
 out:
-	tfm = crypto_aead_reqtfm(req);
 	ctx = crypto_aead_ctx(tfm);
 	refcnt = atomic_read(&ctx->refcnt);
 
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index aa56af87d941..b11af3f2c1db 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -404,6 +404,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 	unsigned int new_base_reg, new_top_reg;
 	unsigned int min, max;
 	unsigned int max_dist;
+	unsigned int dist, best_dist = UINT_MAX;
 
 	max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
 		map->cache_word_size;
@@ -423,24 +424,41 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 						 &base_reg, &top_reg);
 
 		if (base_reg <= max && top_reg >= min) {
-			new_base_reg = min(reg, base_reg);
-			new_top_reg = max(reg, top_reg);
-		} else {
-			if (max < base_reg)
-				node = node->rb_left;
+			if (reg < base_reg)
+				dist = base_reg - reg;
+			else if (reg > top_reg)
+				dist = reg - top_reg;
 			else
-				node = node->rb_right;
-
-			continue;
+				dist = 0;
+			if (dist < best_dist) {
+				rbnode = rbnode_tmp;
+				best_dist = dist;
+				new_base_reg = min(reg, base_reg);
+				new_top_reg = max(reg, top_reg);
+			}
 		}
 
-		ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
+		/*
+		 * Keep looking, we want to choose the closest block,
+		 * otherwise we might end up creating overlapping
+		 * blocks, which breaks the rbtree.
+		 */
+		if (reg < base_reg)
+			node = node->rb_left;
+		else if (reg > top_reg)
+			node = node->rb_right;
+		else
+			break;
+	}
+
+	if (rbnode) {
+		ret = regcache_rbtree_insert_to_block(map, rbnode,
 						      new_base_reg,
 						      new_top_reg, reg,
 						      value);
 		if (ret)
			return ret;
-		rbtree_ctx->cached_rbnode = rbnode_tmp;
+		rbtree_ctx->cached_rbnode = rbnode;
 		return 0;
 	}
 
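For reference, the selection rule added above can be read in isolation: the write path now measures how far the target register lies from each candidate block and remembers the closest one, only extending that block after the tree walk finishes, so an extension can never overlap a neighbouring node. A standalone sketch of the distance metric follows; block_distance() is an invented name, not part of the driver.

/* Illustrative helper only; mirrors the dist computation in the hunk above. */
static unsigned int block_distance(unsigned int reg,
				   unsigned int base_reg,
				   unsigned int top_reg)
{
	if (reg < base_reg)
		return base_reg - reg;	/* register lies below the block */
	if (reg > top_reg)
		return reg - top_reg;	/* register lies above the block */
	return 0;			/* register is already covered */
}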
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index df7ff7290821..4e582561e1e7 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -38,10 +38,11 @@ static int regcache_hw_init(struct regmap *map)
 
 	/* calculate the size of reg_defaults */
 	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
-		if (!regmap_volatile(map, i * map->reg_stride))
+		if (regmap_readable(map, i * map->reg_stride) &&
+		    !regmap_volatile(map, i * map->reg_stride))
 			count++;
 
-	/* all registers are volatile, so just bypass */
+	/* all registers are unreadable or volatile, so just bypass */
 	if (!count) {
 		map->cache_bypass = true;
 		return 0;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 51fa7d66a393..25d26bb18970 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1474,6 +1474,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 			ret = map->bus->write(map->bus_context, buf, len);
 
 			kfree(buf);
+	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
+		regcache_drop_region(map, reg, reg + 1);
 	}
 
 	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 6dc597126b79..b3044219772c 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -556,7 +556,10 @@ skip_enc:
 
 	/* Read and write assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	if (alg->caam.geniv)
+		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+	else
+		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
 	/* Skip assoc data */
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -565,6 +568,14 @@ skip_enc:
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);
 
+	if (alg->caam.geniv) {
+		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+				LDST_SRCDST_BYTE_CONTEXT |
+				(ctx1_iv_off << LDST_OFFSET_SHIFT));
+		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+	}
+
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -2150,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
 
 	init_aead_job(req, edesc, all_contig, encrypt);
 
-	if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
 		append_load_as_imm(desc, req->iv, ivsize,
 				   LDST_CLASS_1_CCB |
 				   LDST_SRCDST_BYTE_CONTEXT |
@@ -2537,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
 	return ret;
 }
 
-static int aead_givdecrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	unsigned int ivsize = crypto_aead_ivsize(aead);
-
-	if (req->cryptlen < ivsize)
-		return -EINVAL;
-
-	req->cryptlen -= ivsize;
-	req->assoclen += ivsize;
-
-	return aead_decrypt(req);
-}
-
 /*
  * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
@@ -3210,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -3256,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -3302,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -3348,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -3394,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -3440,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
@@ -3486,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -3534,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -3582,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -3630,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -3678,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -3726,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
@@ -3772,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -3818,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -3864,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -3910,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -3956,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -4002,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
@@ -4051,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -4102,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -4153,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -4204,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -4255,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -4306,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 3a3c5d73bbfc..51c79b2fb0b8 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -106,7 +106,6 @@ struct mcast_group {
 	atomic_t		refcount;
 	enum mcast_group_state	state;
 	struct ib_sa_query	*query;
-	int			query_id;
 	u16			pkey_index;
 	u8			leave_state;
 	int			retries;
@@ -340,11 +339,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
 				       member->multicast.comp_mask,
 				       3000, GFP_KERNEL, join_handler, group,
 				       &group->query);
-	if (ret >= 0) {
-		group->query_id = ret;
-		ret = 0;
-	}
-	return ret;
+	return (ret > 0) ? 0 : ret;
 }
 
 static int send_leave(struct mcast_group *group, u8 leave_state)
@@ -364,11 +359,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
 				       IB_SA_MCMEMBER_REC_JOIN_STATE,
 				       3000, GFP_KERNEL, leave_handler,
 				       group, &group->query);
-	if (ret >= 0) {
-		group->query_id = ret;
-		ret = 0;
-	}
-	return ret;
+	return (ret > 0) ? 0 : ret;
 }
 
 static void join_group(struct mcast_group *group, struct mcast_member *member,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index edb1172b6f54..690435229be7 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -683,7 +683,7 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
 	return 0;
 }
 
-void _free_qp(struct kref *kref)
+static void _free_qp(struct kref *kref)
 {
 	struct c4iw_qp *qhp;
 
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index b32638d58ae8..cc38004cea42 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -9490,6 +9490,78 @@ static void init_lcb(struct hfi1_devdata *dd)
 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
 }
 
+/*
+ * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
+ * on error.
+ */
+static int test_qsfp_read(struct hfi1_pportdata *ppd)
+{
+	int ret;
+	u8 status;
+
+	/* report success if not a QSFP */
+	if (ppd->port_type != PORT_TYPE_QSFP)
+		return 0;
+
+	/* read byte 2, the status byte */
+	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
+	if (ret < 0)
+		return ret;
+	if (ret != 1)
+		return -EIO;
+
+	return 0; /* success */
+}
+
+/*
+ * Values for QSFP retry.
+ *
+ * Give up after 10s (20 x 500ms).  The overall timeout was empirically
+ * arrived at from experience on a large cluster.
+ */
+#define MAX_QSFP_RETRIES 20
+#define QSFP_RETRY_WAIT 500 /* msec */
+
+/*
+ * Try a QSFP read.  If it fails, schedule a retry for later.
+ * Called on first link activation after driver load.
+ */
+static void try_start_link(struct hfi1_pportdata *ppd)
+{
+	if (test_qsfp_read(ppd)) {
+		/* read failed */
+		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
+			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
+			return;
+		}
+		dd_dev_info(ppd->dd,
+			    "QSFP not responding, waiting and retrying %d\n",
+			    (int)ppd->qsfp_retry_count);
+		ppd->qsfp_retry_count++;
+		queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
+				   msecs_to_jiffies(QSFP_RETRY_WAIT));
+		return;
+	}
+	ppd->qsfp_retry_count = 0;
+
+	/*
+	 * Tune the SerDes to a ballpark setting for optimal signal and bit
+	 * error rate.  Needs to be done before starting the link.
+	 */
+	tune_serdes(ppd);
+	start_link(ppd);
+}
+
+/*
+ * Workqueue function to start the link after a delay.
+ */
+void handle_start_link(struct work_struct *work)
+{
+	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+						  start_link_work.work);
+	try_start_link(ppd);
+}
+
 int bringup_serdes(struct hfi1_pportdata *ppd)
 {
 	struct hfi1_devdata *dd = ppd->dd;
@@ -9525,14 +9597,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
 		set_qsfp_int_n(ppd, 1);
 	}
 
-	/*
-	 * Tune the SerDes to a ballpark setting for
-	 * optimal signal and bit error rate
-	 * Needs to be done before starting the link
-	 */
-	tune_serdes(ppd);
-
-	return start_link(ppd);
+	try_start_link(ppd);
+	return 0;
 }
 
 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
@@ -9549,6 +9615,10 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
 	ppd->driver_link_ready = 0;
 	ppd->link_enabled = 0;
 
+	ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
+	flush_delayed_work(&ppd->start_link_work);
+	cancel_delayed_work_sync(&ppd->start_link_work);
+
 	ppd->offline_disabled_reason =
 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
 	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
@@ -12865,7 +12935,7 @@ fail:
  */
 static int set_up_context_variables(struct hfi1_devdata *dd)
 {
-	int num_kernel_contexts;
+	unsigned long num_kernel_contexts;
 	int total_contexts;
 	int ret;
 	unsigned ngroups;
@@ -12894,9 +12964,9 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
 	 */
 	if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
 		dd_dev_err(dd,
-			   "Reducing # kernel rcv contexts to: %d, from %d\n",
+			   "Reducing # kernel rcv contexts to: %d, from %lu\n",
 			   (int)(dd->chip_send_contexts - num_vls - 1),
-			   (int)num_kernel_contexts);
+			   num_kernel_contexts);
 		num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
 	}
 	/*
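The QSFP start-link change above is an instance of a common deferred-retry pattern: on failure, re-queue a delayed work item until a retry budget runs out, and cancel the work on teardown. A compressed, hypothetical sketch follows; my_dev, my_probe_failed() and my_start() are made-up stand-ins, while delayed_work, schedule_delayed_work()/queue_delayed_work(), msecs_to_jiffies() and container_of() are the real kernel facilities the patch uses.

/* Hypothetical sketch, not part of the patch. */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_MAX_RETRIES	20	/* give up after 20 attempts */
#define MY_RETRY_WAIT	500	/* msec between attempts */

struct my_dev {
	struct delayed_work	retry_work;
	unsigned int		retry_count;
};

static bool my_probe_failed(struct my_dev *dev);	/* hypothetical */
static void my_start(struct my_dev *dev);		/* hypothetical */

static void my_try_start(struct my_dev *dev)
{
	if (my_probe_failed(dev)) {
		if (dev->retry_count++ >= MY_MAX_RETRIES)
			return;				/* give up */
		schedule_delayed_work(&dev->retry_work,
				      msecs_to_jiffies(MY_RETRY_WAIT));
		return;
	}
	dev->retry_count = 0;
	my_start(dev);
}

static void my_retry_worker(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  retry_work.work);
	my_try_start(dev);
}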
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index ed11107c50fe..e29573769efc 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -706,6 +706,7 @@ void handle_link_up(struct work_struct *work);
 void handle_link_down(struct work_struct *work);
 void handle_link_downgrade(struct work_struct *work);
 void handle_link_bounce(struct work_struct *work);
+void handle_start_link(struct work_struct *work);
 void handle_sma_message(struct work_struct *work);
 void reset_qsfp(struct hfi1_pportdata *ppd);
 void qsfp_event(struct work_struct *work);
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index a49cc88f08a2..5e9be16f6cd3 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -59,6 +59,40 @@
 
 static struct dentry *hfi1_dbg_root;
 
+/* wrappers to enforce srcu in seq file */
+static ssize_t hfi1_seq_read(
+	struct file *file,
+	char __user *buf,
+	size_t size,
+	loff_t *ppos)
+{
+	struct dentry *d = file->f_path.dentry;
+	int srcu_idx;
+	ssize_t r;
+
+	r = debugfs_use_file_start(d, &srcu_idx);
+	if (likely(!r))
+		r = seq_read(file, buf, size, ppos);
+	debugfs_use_file_finish(srcu_idx);
+	return r;
+}
+
+static loff_t hfi1_seq_lseek(
+	struct file *file,
+	loff_t offset,
+	int whence)
+{
+	struct dentry *d = file->f_path.dentry;
+	int srcu_idx;
+	loff_t r;
+
+	r = debugfs_use_file_start(d, &srcu_idx);
+	if (likely(!r))
+		r = seq_lseek(file, offset, whence);
+	debugfs_use_file_finish(srcu_idx);
+	return r;
+}
+
 #define private2dd(file) (file_inode(file)->i_private)
 #define private2ppd(file) (file_inode(file)->i_private)
 
@@ -87,8 +121,8 @@ static int _##name##_open(struct inode *inode, struct file *s) \
 static const struct file_operations _##name##_file_ops = { \
 	.owner   = THIS_MODULE, \
 	.open    = _##name##_open, \
-	.read    = seq_read, \
-	.llseek  = seq_lseek, \
+	.read    = hfi1_seq_read, \
+	.llseek  = hfi1_seq_lseek, \
 	.release = seq_release \
 }
 
@@ -105,11 +139,9 @@ do { \
 	DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
 
 static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
 	struct hfi1_opcode_stats_perctx *opstats;
 
-	rcu_read_lock();
 	if (*pos >= ARRAY_SIZE(opstats->stats))
 		return NULL;
 	return pos;
@@ -126,9 +158,7 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-	rcu_read_unlock();
 }
 
 static int _opcode_stats_seq_show(struct seq_file *s, void *v)
@@ -285,12 +315,10 @@ DEBUGFS_SEQ_FILE_OPEN(qp_stats)
 DEBUGFS_FILE_OPS(qp_stats);
 
 static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
 	struct hfi1_ibdev *ibd;
 	struct hfi1_devdata *dd;
 
-	rcu_read_lock();
 	ibd = (struct hfi1_ibdev *)s->private;
 	dd = dd_from_dev(ibd);
 	if (!dd->per_sdma || *pos >= dd->num_sdma)
@@ -310,9 +338,7 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _sdes_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-	rcu_read_unlock();
 }
 
 static int _sdes_seq_show(struct seq_file *s, void *v)
@@ -339,11 +365,9 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf,
 	struct hfi1_devdata *dd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	dd = private2dd(file);
 	avail = hfi1_read_cntrs(dd, NULL, &counters);
 	rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -356,11 +380,9 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
 	struct hfi1_devdata *dd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	dd = private2dd(file);
 	avail = hfi1_read_cntrs(dd, &names, NULL);
 	rval = simple_read_from_buffer(buf, count, ppos, names, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -383,11 +405,9 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
 	struct hfi1_devdata *dd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	dd = private2dd(file);
 	avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
 	rval = simple_read_from_buffer(buf, count, ppos, names, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -400,11 +420,9 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
 	struct hfi1_pportdata *ppd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	avail = hfi1_read_portcntrs(ppd, NULL, &counters);
 	rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -434,16 +452,13 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
 	int used;
 	int i;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	dd = ppd->dd;
 	size = PAGE_SIZE;
 	used = 0;
 	tmp = kmalloc(size, GFP_KERNEL);
-	if (!tmp) {
-		rcu_read_unlock();
+	if (!tmp)
 		return -ENOMEM;
-	}
 
 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
 	used += scnprintf(tmp + used, size - used,
@@ -470,7 +485,6 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
 	used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
 
 	ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
-	rcu_read_unlock();
 	kfree(tmp);
 	return ret;
 }
@@ -486,15 +500,12 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
 	u64 scratch0;
 	u64 clear;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	dd = ppd->dd;
 
 	buff = kmalloc(count + 1, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto do_return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	ret = copy_from_user(buff, buf, count);
 	if (ret > 0) {
@@ -527,8 +538,6 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
 
  do_free:
 	kfree(buff);
- do_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -542,18 +551,14 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
 	char *tmp;
 	int ret;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!tmp) {
-		rcu_read_unlock();
+	if (!tmp)
 		return -ENOMEM;
-	}
 
 	ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
 	if (ret > 0)
 		ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
-	rcu_read_unlock();
 	kfree(tmp);
 	return ret;
 }
@@ -569,7 +574,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 	int offset;
 	int total_written;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 
 	/* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -577,16 +581,12 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 	offset = *ppos & 0xffff;
 
 	/* explicitly reject invalid address 0 to catch cp and cat */
-	if (i2c_addr == 0) {
-		ret = -EINVAL;
-		goto _return;
-	}
+	if (i2c_addr == 0)
+		return -EINVAL;
 
 	buff = kmalloc(count, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto _return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	ret = copy_from_user(buff, buf, count);
 	if (ret > 0) {
@@ -606,8 +606,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 
 _free:
 	kfree(buff);
-_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -636,7 +634,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 	int offset;
 	int total_read;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 
 	/* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -644,16 +641,12 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 	offset = *ppos & 0xffff;
 
 	/* explicitly reject invalid address 0 to catch cp and cat */
-	if (i2c_addr == 0) {
-		ret = -EINVAL;
-		goto _return;
-	}
+	if (i2c_addr == 0)
+		return -EINVAL;
 
 	buff = kmalloc(count, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto _return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
 	if (total_read < 0) {
@@ -673,8 +666,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 
 _free:
 	kfree(buff);
-_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -701,26 +692,20 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
701 int ret; 692 int ret;
702 int total_written; 693 int total_written;
703 694
704 rcu_read_lock(); 695 if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */
705 if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */ 696 return -EINVAL;
706 ret = -EINVAL;
707 goto _return;
708 }
709 697
710 ppd = private2ppd(file); 698 ppd = private2ppd(file);
711 699
712 buff = kmalloc(count, GFP_KERNEL); 700 buff = kmalloc(count, GFP_KERNEL);
713 if (!buff) { 701 if (!buff)
714 ret = -ENOMEM; 702 return -ENOMEM;
715 goto _return;
716 }
717 703
718 ret = copy_from_user(buff, buf, count); 704 ret = copy_from_user(buff, buf, count);
719 if (ret > 0) { 705 if (ret > 0) {
720 ret = -EFAULT; 706 ret = -EFAULT;
721 goto _free; 707 goto _free;
722 } 708 }
723
724 total_written = qsfp_write(ppd, target, *ppos, buff, count); 709 total_written = qsfp_write(ppd, target, *ppos, buff, count);
725 if (total_written < 0) { 710 if (total_written < 0) {
726 ret = total_written; 711 ret = total_written;
@@ -733,8 +718,6 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
733 718
734 _free: 719 _free:
735 kfree(buff); 720 kfree(buff);
736 _return:
737 rcu_read_unlock();
738 return ret; 721 return ret;
739} 722}
740 723
@@ -761,7 +744,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
761 int ret; 744 int ret;
762 int total_read; 745 int total_read;
763 746
764 rcu_read_lock();
765 if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */ 747 if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
766 ret = -EINVAL; 748 ret = -EINVAL;
767 goto _return; 749 goto _return;
@@ -794,7 +776,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
794 _free: 776 _free:
795 kfree(buff); 777 kfree(buff);
796 _return: 778 _return:
797 rcu_read_unlock();
798 return ret; 779 return ret;
799} 780}
800 781
@@ -1010,7 +991,6 @@ void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
1010 debugfs_remove_recursive(ibd->hfi1_ibdev_dbg); 991 debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
1011out: 992out:
1012 ibd->hfi1_ibdev_dbg = NULL; 993 ibd->hfi1_ibdev_dbg = NULL;
1013 synchronize_rcu();
1014} 994}
1015 995
1016/* 996/*
@@ -1035,9 +1015,7 @@ static const char * const hfi1_statnames[] = {
1035}; 1015};
1036 1016
1037static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos) 1017static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
1038__acquires(RCU)
1039{ 1018{
1040 rcu_read_lock();
1041 if (*pos >= ARRAY_SIZE(hfi1_statnames)) 1019 if (*pos >= ARRAY_SIZE(hfi1_statnames))
1042 return NULL; 1020 return NULL;
1043 return pos; 1021 return pos;
@@ -1055,9 +1033,7 @@ static void *_driver_stats_names_seq_next(
1055} 1033}
1056 1034
1057static void _driver_stats_names_seq_stop(struct seq_file *s, void *v) 1035static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
1058__releases(RCU)
1059{ 1036{
1060 rcu_read_unlock();
1061} 1037}
1062 1038
1063static int _driver_stats_names_seq_show(struct seq_file *s, void *v) 1039static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
@@ -1073,9 +1049,7 @@ DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
1073DEBUGFS_FILE_OPS(driver_stats_names); 1049DEBUGFS_FILE_OPS(driver_stats_names);
1074 1050
1075static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos) 1051static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
1076__acquires(RCU)
1077{ 1052{
1078 rcu_read_lock();
1079 if (*pos >= ARRAY_SIZE(hfi1_statnames)) 1053 if (*pos >= ARRAY_SIZE(hfi1_statnames))
1080 return NULL; 1054 return NULL;
1081 return pos; 1055 return pos;
@@ -1090,9 +1064,7 @@ static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
1090} 1064}
1091 1065
1092static void _driver_stats_seq_stop(struct seq_file *s, void *v) 1066static void _driver_stats_seq_stop(struct seq_file *s, void *v)
1093__releases(RCU)
1094{ 1067{
1095 rcu_read_unlock();
1096} 1068}
1097 1069
1098static u64 hfi1_sps_ints(void) 1070static u64 hfi1_sps_ints(void)
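With the rcu_read_lock()/rcu_read_unlock() pairs, the __acquires/__releases(RCU) annotations and the final synchronize_rcu() gone from these debugfs handlers, there is nothing left to release on an error path, so the goto-based "_return" unwinding collapses into direct returns. A minimal userspace sketch of that shape; the function and the work it fakes are made up, not taken from the driver:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: with no read lock held across the body, the error
 * path can return directly instead of jumping to a shared "unlock and
 * return" label, which is the shape the hunks above keep. */
static int dump_state(char *dst, size_t len)
{
	char *tmp = malloc(len);

	if (!tmp)
		return -1;		/* was: goto _return (unlock + return) */

	memset(tmp, 'x', len);		/* stand-in for qsfp_dump()/i2c_read() */
	memcpy(dst, tmp, len);
	free(tmp);
	return 0;
}

int main(void)
{
	char buf[8];

	return dump_state(buf, sizeof(buf)) ? 1 : 0;
}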
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index a021e660d482..325ec211370f 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -605,6 +605,7 @@ struct hfi1_pportdata {
605 struct work_struct freeze_work; 605 struct work_struct freeze_work;
606 struct work_struct link_downgrade_work; 606 struct work_struct link_downgrade_work;
607 struct work_struct link_bounce_work; 607 struct work_struct link_bounce_work;
608 struct delayed_work start_link_work;
608 /* host link state variables */ 609 /* host link state variables */
609 struct mutex hls_lock; 610 struct mutex hls_lock;
610 u32 host_link_state; 611 u32 host_link_state;
@@ -659,6 +660,7 @@ struct hfi1_pportdata {
659 u8 linkinit_reason; 660 u8 linkinit_reason;
660 u8 local_tx_rate; /* rate given to 8051 firmware */ 661 u8 local_tx_rate; /* rate given to 8051 firmware */
661 u8 last_pstate; /* info only */ 662 u8 last_pstate; /* info only */
663 u8 qsfp_retry_count;
662 664
663 /* placeholders for IB MAD packet settings */ 665 /* placeholders for IB MAD packet settings */
664 u8 overrun_threshold; 666 u8 overrun_threshold;
@@ -1804,7 +1806,7 @@ extern unsigned int hfi1_max_mtu;
1804extern unsigned int hfi1_cu; 1806extern unsigned int hfi1_cu;
1805extern unsigned int user_credit_return_threshold; 1807extern unsigned int user_credit_return_threshold;
1806extern int num_user_contexts; 1808extern int num_user_contexts;
1807extern unsigned n_krcvqs; 1809extern unsigned long n_krcvqs;
1808extern uint krcvqs[]; 1810extern uint krcvqs[];
1809extern int krcvqsset; 1811extern int krcvqsset;
1810extern uint kdeth_qp; 1812extern uint kdeth_qp;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index b7935451093c..384b43d2fd49 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -94,7 +94,7 @@ module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
94MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL"); 94MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
95 95
96/* computed based on above array */ 96/* computed based on above array */
97unsigned n_krcvqs; 97unsigned long n_krcvqs;
98 98
99static unsigned hfi1_rcvarr_split = 25; 99static unsigned hfi1_rcvarr_split = 25;
100module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO); 100module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
@@ -500,6 +500,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
500 INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade); 500 INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
501 INIT_WORK(&ppd->sma_message_work, handle_sma_message); 501 INIT_WORK(&ppd->sma_message_work, handle_sma_message);
502 INIT_WORK(&ppd->link_bounce_work, handle_link_bounce); 502 INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
503 INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
503 INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work); 504 INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
504 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); 505 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
505 506
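This definition change pairs with the hfi.h hunk above that widens the extern declaration: the two must name the same type, since declaring the object as plain unsigned in one translation unit while defining it as unsigned long in another is undefined behavior. The matching pair looks like this; the file names are for orientation only:

/* hfi.h (declaration): must use the exact type of the definition */
extern unsigned long n_krcvqs;

/* init.c (definition) */
unsigned long n_krcvqs;		/* previously "unsigned", now matches */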
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 39e42c373a01..7ffc14f21523 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -2604,7 +2604,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2604 u8 lq, num_vls; 2604 u8 lq, num_vls;
2605 u8 res_lli, res_ler; 2605 u8 res_lli, res_ler;
2606 u64 port_mask; 2606 u64 port_mask;
2607 unsigned long port_num; 2607 u8 port_num;
2608 unsigned long vl; 2608 unsigned long vl;
2609 u32 vl_select_mask; 2609 u32 vl_select_mask;
2610 int vfi; 2610 int vfi;
@@ -2638,9 +2638,9 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2638 */ 2638 */
2639 port_mask = be64_to_cpu(req->port_select_mask[3]); 2639 port_mask = be64_to_cpu(req->port_select_mask[3]);
2640 port_num = find_first_bit((unsigned long *)&port_mask, 2640 port_num = find_first_bit((unsigned long *)&port_mask,
2641 sizeof(port_mask)); 2641 sizeof(port_mask) * 8);
2642 2642
2643 if ((u8)port_num != port) { 2643 if (port_num != port) {
2644 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 2644 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2645 return reply((struct ib_mad_hdr *)pmp); 2645 return reply((struct ib_mad_hdr *)pmp);
2646 } 2646 }
@@ -2842,7 +2842,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
2842 */ 2842 */
2843 port_mask = be64_to_cpu(req->port_select_mask[3]); 2843 port_mask = be64_to_cpu(req->port_select_mask[3]);
2844 port_num = find_first_bit((unsigned long *)&port_mask, 2844 port_num = find_first_bit((unsigned long *)&port_mask,
2845 sizeof(port_mask)); 2845 sizeof(port_mask) * 8);
2846 2846
2847 if (port_num != port) { 2847 if (port_num != port) {
2848 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 2848 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3015,7 +3015,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
3015 */ 3015 */
3016 port_mask = be64_to_cpu(req->port_select_mask[3]); 3016 port_mask = be64_to_cpu(req->port_select_mask[3]);
3017 port_num = find_first_bit((unsigned long *)&port_mask, 3017 port_num = find_first_bit((unsigned long *)&port_mask,
3018 sizeof(port_mask)); 3018 sizeof(port_mask) * 8);
3019 3019
3020 if (port_num != port) { 3020 if (port_num != port) {
3021 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 3021 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3252,7 +3252,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
3252 */ 3252 */
3253 port_mask = be64_to_cpu(req->port_select_mask[3]); 3253 port_mask = be64_to_cpu(req->port_select_mask[3]);
3254 port_num = find_first_bit((unsigned long *)&port_mask, 3254 port_num = find_first_bit((unsigned long *)&port_mask,
3255 sizeof(port_mask)); 3255 sizeof(port_mask) * 8);
3256 3256
3257 if (port_num != port) { 3257 if (port_num != port) {
3258 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 3258 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
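All four mad.c hunks fix the same call: find_first_bit() takes its size argument in bits, and port_mask is a u64, so passing sizeof(port_mask) limited the scan to bits 0..7; sizeof(port_mask) * 8 covers all 64 bits. A rough userspace analogue follows, where my_find_first_bit() is a stand-in with the same contract as the kernel helper (it returns the size when no bit is set), not its real implementation:

#include <stdio.h>

/* Stand-in for find_first_bit(): "nbits" is a count of BITS, and nbits
 * is returned when no bit in the range is set. */
static unsigned int my_find_first_bit(const unsigned long *addr, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (addr[i / (8 * sizeof(unsigned long))] &
		    (1UL << (i % (8 * sizeof(unsigned long)))))
			return i;
	return nbits;
}

int main(void)
{
	unsigned long port_mask = 1UL << 20;	/* port bit well above bit 7 */

	/* Bug: sizeof() counts bytes, so only the low bits get scanned. */
	printf("bytes as size: %u\n",
	       my_find_first_bit(&port_mask, sizeof(port_mask)));
	/* Fix: convert to a bit count before calling. */
	printf("bits as size:  %u\n",
	       my_find_first_bit(&port_mask, sizeof(port_mask) * 8));
	return 0;
}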
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 8c25e1b58849..3a1ef3056282 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -771,6 +771,9 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
771 read_extra_bytes(pbuf, from, to_fill); 771 read_extra_bytes(pbuf, from, to_fill);
772 from += to_fill; 772 from += to_fill;
773 nbytes -= to_fill; 773 nbytes -= to_fill;
774 /* may not be enough valid bytes left to align */
775 if (extra > nbytes)
776 extra = nbytes;
774 777
775 /* ...now write carry */ 778 /* ...now write carry */
776 dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); 779 dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
@@ -798,6 +801,15 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
798 read_low_bytes(pbuf, from, extra); 801 read_low_bytes(pbuf, from, extra);
799 from += extra; 802 from += extra;
800 nbytes -= extra; 803 nbytes -= extra;
804 /*
805 * If no bytes are left, return early - we are done.
806 * NOTE: This short-circuit is *required* because
807 * "extra" may have been reduced in size and "from"
808 * is not aligned, as required when leaving this
809 * if block.
810 */
811 if (nbytes == 0)
812 return;
801 } 813 }
802 814
803 /* at this point, from is QW aligned */ 815 /* at this point, from is QW aligned */
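The pio_copy.c additions protect the carry-write path when the caller has fewer bytes left than are needed to reach quad-word alignment: "extra" is clamped to the remaining count and, if that consumes everything, the function bails out before the code that assumes "from" is aligned. A tiny sketch of the clamp, with the buffer handling stripped out and the names made up:

#include <stdio.h>

/* Consume up to "extra" carry bytes, but never more than remain; the
 * caller must return early when nothing is left for the aligned path. */
static size_t consume_carry(size_t extra, size_t *nbytes)
{
	if (extra > *nbytes)		/* may not be enough bytes to align */
		extra = *nbytes;
	*nbytes -= extra;
	return extra;
}

int main(void)
{
	size_t nbytes = 3;
	size_t used = consume_carry(5, &nbytes);

	printf("consumed %zu, left %zu\n", used, nbytes);	/* consumed 3, left 0 */
	return 0;
}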
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 0ecf27903dc2..1694037d1eee 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -114,6 +114,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
114#define KDETH_HCRC_LOWER_SHIFT 24 114#define KDETH_HCRC_LOWER_SHIFT 24
115#define KDETH_HCRC_LOWER_MASK 0xff 115#define KDETH_HCRC_LOWER_MASK 0xff
116 116
117#define AHG_KDETH_INTR_SHIFT 12
118
117#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4) 119#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
118#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff) 120#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
119 121
@@ -1480,7 +1482,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
1480 /* Clear KDETH.SH on last packet */ 1482 /* Clear KDETH.SH on last packet */
1481 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) { 1483 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
1482 val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset, 1484 val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
1483 INTR) >> 16); 1485 INTR) <<
1486 AHG_KDETH_INTR_SHIFT);
1484 val &= cpu_to_le16(~(1U << 13)); 1487 val &= cpu_to_le16(~(1U << 13));
1485 AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val); 1488 AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
1486 } else { 1489 } else {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 3ee0cad96bc6..0c92a40b3e86 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -265,6 +265,7 @@ void i40iw_next_iw_state(struct i40iw_qp *iwqp,
265 info.dont_send_fin = false; 265 info.dont_send_fin = false;
266 if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR)) 266 if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
267 info.reset_tcp_conn = true; 267 info.reset_tcp_conn = true;
268 iwqp->hw_iwarp_state = state;
268 i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0); 269 i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
269} 270}
270 271
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 0cbbe4038298..445e230d5ff8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -100,7 +100,7 @@ static struct notifier_block i40iw_net_notifier = {
100 .notifier_call = i40iw_net_event 100 .notifier_call = i40iw_net_event
101}; 101};
102 102
103static int i40iw_notifiers_registered; 103static atomic_t i40iw_notifiers_registered;
104 104
105/** 105/**
106 * i40iw_find_i40e_handler - find a handler given a client info 106 * i40iw_find_i40e_handler - find a handler given a client info
@@ -1342,12 +1342,11 @@ exit:
1342 */ 1342 */
1343static void i40iw_register_notifiers(void) 1343static void i40iw_register_notifiers(void)
1344{ 1344{
1345 if (!i40iw_notifiers_registered) { 1345 if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
1346 register_inetaddr_notifier(&i40iw_inetaddr_notifier); 1346 register_inetaddr_notifier(&i40iw_inetaddr_notifier);
1347 register_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1347 register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1348 register_netevent_notifier(&i40iw_net_notifier); 1348 register_netevent_notifier(&i40iw_net_notifier);
1349 } 1349 }
1350 i40iw_notifiers_registered++;
1351} 1350}
1352 1351
1353/** 1352/**
@@ -1429,8 +1428,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
1429 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); 1428 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
1430 /* fallthrough */ 1429 /* fallthrough */
1431 case INET_NOTIFIER: 1430 case INET_NOTIFIER:
1432 if (i40iw_notifiers_registered > 0) { 1431 if (!atomic_dec_return(&i40iw_notifiers_registered)) {
1433 i40iw_notifiers_registered--;
1434 unregister_netevent_notifier(&i40iw_net_notifier); 1432 unregister_netevent_notifier(&i40iw_net_notifier);
1435 unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); 1433 unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
1436 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1434 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
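The i40iw change swaps a plain int counter, which was tested and bumped non-atomically, for an atomic_t: registration happens only on the 0 to 1 transition of atomic_inc_return(), teardown only when atomic_dec_return() reaches zero. A loose userspace analogue using C11 <stdatomic.h> in place of the kernel atomic API; the puts() calls stand in for the real notifier registration:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int notifiers_registered;

static void register_notifiers(void)
{
	/* First user does the real work (kernel: atomic_inc_return() == 1). */
	if (atomic_fetch_add(&notifiers_registered, 1) == 0)
		puts("register inetaddr/inet6addr/netevent notifiers");
}

static void unregister_notifiers(void)
{
	/* Last user tears down (kernel: !atomic_dec_return()). */
	if (atomic_fetch_sub(&notifiers_registered, 1) == 1)
		puts("unregister notifiers");
}

int main(void)
{
	register_notifiers();
	register_notifiers();	/* no-op: already registered */
	unregister_notifiers();
	unregister_notifiers();	/* last reference: tear down */
	return 0;
}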
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 006db6436e3b..5df63dacaaa3 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -687,12 +687,6 @@ repoll:
687 is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == 687 is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
688 MLX4_CQE_OPCODE_ERROR; 688 MLX4_CQE_OPCODE_ERROR;
689 689
690 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
691 is_send)) {
692 pr_warn("Completion for NOP opcode detected!\n");
693 return -EAGAIN;
694 }
695
696 /* Resize CQ in progress */ 690 /* Resize CQ in progress */
697 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) { 691 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
698 if (cq->resize_buf) { 692 if (cq->resize_buf) {
@@ -718,12 +712,6 @@ repoll:
718 */ 712 */
719 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, 713 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
720 be32_to_cpu(cqe->vlan_my_qpn)); 714 be32_to_cpu(cqe->vlan_my_qpn));
721 if (unlikely(!mqp)) {
722 pr_warn("CQ %06x with entry for unknown QPN %06x\n",
723 cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
724 return -EAGAIN;
725 }
726
727 *cur_qp = to_mibqp(mqp); 715 *cur_qp = to_mibqp(mqp);
728 } 716 }
729 717
@@ -736,11 +724,6 @@ repoll:
736 /* SRQ is also in the radix tree */ 724 /* SRQ is also in the radix tree */
737 msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, 725 msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
738 srq_num); 726 srq_num);
739 if (unlikely(!msrq)) {
740 pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
741 cq->mcq.cqn, srq_num);
742 return -EAGAIN;
743 }
744 } 727 }
745 728
746 if (is_send) { 729 if (is_send) {
@@ -891,7 +874,6 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
891 struct mlx4_ib_qp *cur_qp = NULL; 874 struct mlx4_ib_qp *cur_qp = NULL;
892 unsigned long flags; 875 unsigned long flags;
893 int npolled; 876 int npolled;
894 int err = 0;
895 struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); 877 struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
896 878
897 spin_lock_irqsave(&cq->lock, flags); 879 spin_lock_irqsave(&cq->lock, flags);
@@ -901,8 +883,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
901 } 883 }
902 884
903 for (npolled = 0; npolled < num_entries; ++npolled) { 885 for (npolled = 0; npolled < num_entries; ++npolled) {
904 err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled); 886 if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
905 if (err)
906 break; 887 break;
907 } 888 }
908 889
@@ -911,10 +892,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
911out: 892out:
912 spin_unlock_irqrestore(&cq->lock, flags); 893 spin_unlock_irqrestore(&cq->lock, flags);
913 894
914 if (err == 0 || err == -EAGAIN) 895 return npolled;
915 return npolled;
916 else
917 return err;
918} 896}
919 897
920int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) 898int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 308a358e5b46..e4fac9292e4a 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -553,12 +553,6 @@ repoll:
553 * from the table. 553 * from the table.
554 */ 554 */
555 mqp = __mlx5_qp_lookup(dev->mdev, qpn); 555 mqp = __mlx5_qp_lookup(dev->mdev, qpn);
556 if (unlikely(!mqp)) {
557 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
558 cq->mcq.cqn, qpn);
559 return -EINVAL;
560 }
561
562 *cur_qp = to_mibqp(mqp); 556 *cur_qp = to_mibqp(mqp);
563 } 557 }
564 558
@@ -619,13 +613,6 @@ repoll:
619 read_lock(&dev->mdev->priv.mkey_table.lock); 613 read_lock(&dev->mdev->priv.mkey_table.lock);
620 mmkey = __mlx5_mr_lookup(dev->mdev, 614 mmkey = __mlx5_mr_lookup(dev->mdev,
621 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); 615 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
622 if (unlikely(!mmkey)) {
623 read_unlock(&dev->mdev->priv.mkey_table.lock);
624 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
625 cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
626 return -EINVAL;
627 }
628
629 mr = to_mibmr(mmkey); 616 mr = to_mibmr(mmkey);
630 get_sig_err_item(sig_err_cqe, &mr->sig->err_item); 617 get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
631 mr->sig->sig_err_exists = true; 618 mr->sig->sig_err_exists = true;
@@ -676,7 +663,6 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
676 unsigned long flags; 663 unsigned long flags;
677 int soft_polled = 0; 664 int soft_polled = 0;
678 int npolled; 665 int npolled;
679 int err = 0;
680 666
681 spin_lock_irqsave(&cq->lock, flags); 667 spin_lock_irqsave(&cq->lock, flags);
682 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 668 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -688,8 +674,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
688 soft_polled = poll_soft_wc(cq, num_entries, wc); 674 soft_polled = poll_soft_wc(cq, num_entries, wc);
689 675
690 for (npolled = 0; npolled < num_entries - soft_polled; npolled++) { 676 for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
691 err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled); 677 if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
692 if (err)
693 break; 678 break;
694 } 679 }
695 680
@@ -698,10 +683,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
698out: 683out:
699 spin_unlock_irqrestore(&cq->lock, flags); 684 spin_unlock_irqrestore(&cq->lock, flags);
700 685
701 if (err == 0 || err == -EAGAIN) 686 return soft_polled + npolled;
702 return soft_polled + npolled;
703 else
704 return err;
705} 687}
706 688
707int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) 689int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
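Both the mlx4 and mlx5 CQ hunks stop reporting per-entry failures as a negative return from ib_poll_cq(): the warning-and-return branches go away, the loop simply breaks at the first entry the helper cannot complete, and the function always returns how many work completions were written. A generic sketch of that contract, with poll_one() as a made-up placeholder for mlx4_ib_poll_one()/mlx5_poll_one():

#include <stdio.h>

struct wc { int status; };

static int cqes_available = 3;

/* Placeholder: non-zero means "stop polling", whether the CQ is empty
 * or an entry could not be resolved. */
static int poll_one(struct wc *entry)
{
	if (!cqes_available)
		return -1;
	cqes_available--;
	entry->status = 0;
	return 0;
}

static int poll_cq(struct wc *wc, int num_entries)
{
	int npolled;

	for (npolled = 0; npolled < num_entries; npolled++)
		if (poll_one(wc + npolled))
			break;

	/* Report entries written to wc[], never a negative errno. */
	return npolled;
}

int main(void)
{
	struct wc wc[8];

	printf("polled %d completions\n", poll_cq(wc, 8));	/* 3 */
	return 0;
}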
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 1b4094baa2de..8150ea372c53 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1849,6 +1849,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
1849 int domain) 1849 int domain)
1850{ 1850{
1851 struct mlx5_ib_dev *dev = to_mdev(qp->device); 1851 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1852 struct mlx5_ib_qp *mqp = to_mqp(qp);
1852 struct mlx5_ib_flow_handler *handler = NULL; 1853 struct mlx5_ib_flow_handler *handler = NULL;
1853 struct mlx5_flow_destination *dst = NULL; 1854 struct mlx5_flow_destination *dst = NULL;
1854 struct mlx5_ib_flow_prio *ft_prio; 1855 struct mlx5_ib_flow_prio *ft_prio;
@@ -1875,7 +1876,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
1875 } 1876 }
1876 1877
1877 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1878 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1878 dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn; 1879 if (mqp->flags & MLX5_IB_QP_RSS)
1880 dst->tir_num = mqp->rss_qp.tirn;
1881 else
1882 dst->tir_num = mqp->raw_packet_qp.rq.tirn;
1879 1883
1880 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 1884 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1881 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { 1885 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 40df2cca0609..996b54e366b0 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -71,7 +71,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
71 71
72 addr = addr >> page_shift; 72 addr = addr >> page_shift;
73 tmp = (unsigned long)addr; 73 tmp = (unsigned long)addr;
74 m = find_first_bit(&tmp, sizeof(tmp)); 74 m = find_first_bit(&tmp, BITS_PER_LONG);
75 skip = 1 << m; 75 skip = 1 << m;
76 mask = skip - 1; 76 mask = skip - 1;
77 i = 0; 77 i = 0;
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
81 for (k = 0; k < len; k++) { 81 for (k = 0; k < len; k++) {
82 if (!(i & mask)) { 82 if (!(i & mask)) {
83 tmp = (unsigned long)pfn; 83 tmp = (unsigned long)pfn;
84 m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp))); 84 m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
85 skip = 1 << m; 85 skip = 1 << m;
86 mask = skip - 1; 86 mask = skip - 1;
87 base = pfn; 87 base = pfn;
@@ -89,7 +89,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
89 } else { 89 } else {
90 if (base + p != pfn) { 90 if (base + p != pfn) {
91 tmp = (unsigned long)p; 91 tmp = (unsigned long)p;
92 m = find_first_bit(&tmp, sizeof(tmp)); 92 m = find_first_bit(&tmp, BITS_PER_LONG);
93 skip = 1 << m; 93 skip = 1 << m;
94 mask = skip - 1; 94 mask = skip - 1;
95 base = pfn; 95 base = pfn;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 372385d0f993..95146f4aa3e3 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -402,6 +402,7 @@ enum mlx5_ib_qp_flags {
402 /* QP uses 1 as its source QP number */ 402 /* QP uses 1 as its source QP number */
403 MLX5_IB_QP_SQPN_QP1 = 1 << 6, 403 MLX5_IB_QP_SQPN_QP1 = 1 << 6,
404 MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7, 404 MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
405 MLX5_IB_QP_RSS = 1 << 8,
405}; 406};
406 407
407struct mlx5_umr_wr { 408struct mlx5_umr_wr {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0dd7d93cac95..affc3f6598ca 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1449,6 +1449,7 @@ create_tir:
1449 kvfree(in); 1449 kvfree(in);
1450 /* qpn is reserved for that QP */ 1450 /* qpn is reserved for that QP */
1451 qp->trans_qp.base.mqp.qpn = 0; 1451 qp->trans_qp.base.mqp.qpn = 0;
1452 qp->flags |= MLX5_IB_QP_RSS;
1452 return 0; 1453 return 0;
1453 1454
1454err: 1455err:
@@ -3658,12 +3659,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
3658 struct ib_send_wr *wr, unsigned *idx, 3659 struct ib_send_wr *wr, unsigned *idx,
3659 int *size, int nreq) 3660 int *size, int nreq)
3660{ 3661{
3661 int err = 0; 3662 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
3662 3663 return -ENOMEM;
3663 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
3664 err = -ENOMEM;
3665 return err;
3666 }
3667 3664
3668 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); 3665 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
3669 *seg = mlx5_get_send_wqe(qp, *idx); 3666 *seg = mlx5_get_send_wqe(qp, *idx);
@@ -3679,7 +3676,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
3679 *seg += sizeof(**ctrl); 3676 *seg += sizeof(**ctrl);
3680 *size = sizeof(**ctrl) / 16; 3677 *size = sizeof(**ctrl) / 16;
3681 3678
3682 return err; 3679 return 0;
3683} 3680}
3684 3681
3685static void finish_wqe(struct mlx5_ib_qp *qp, 3682static void finish_wqe(struct mlx5_ib_qp *qp,
@@ -3758,7 +3755,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3758 num_sge = wr->num_sge; 3755 num_sge = wr->num_sge;
3759 if (unlikely(num_sge > qp->sq.max_gs)) { 3756 if (unlikely(num_sge > qp->sq.max_gs)) {
3760 mlx5_ib_warn(dev, "\n"); 3757 mlx5_ib_warn(dev, "\n");
3761 err = -ENOMEM; 3758 err = -EINVAL;
3762 *bad_wr = wr; 3759 *bad_wr = wr;
3763 goto out; 3760 goto out;
3764 } 3761 }
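The three mlx5 changes above cooperate: mlx5_ib.h defines a new MLX5_IB_QP_RSS flag bit, qp.c sets it once the RSS TIR is created, and main.c tests it so a flow rule's destination uses rss_qp.tirn instead of raw_packet_qp.rq.tirn. A compact sketch of the flag-bit selection; apart from the two flag values, every name here is made up:

#include <stdio.h>

enum qp_flags {
	QP_CAP_SCATTER_FCS	= 1 << 7,	/* existing bit from the header */
	QP_RSS			= 1 << 8,	/* new: QP is an RSS QP */
};

struct qp {
	unsigned int flags;
	unsigned int rss_tirn;
	unsigned int rq_tirn;
};

/* Pick the steering destination the way the main.c hunk does. */
static unsigned int flow_dest_tir(const struct qp *qp)
{
	return (qp->flags & QP_RSS) ? qp->rss_tirn : qp->rq_tirn;
}

int main(void)
{
	struct qp qp = { .flags = QP_RSS, .rss_tirn = 7, .rq_tirn = 3 };

	printf("tir=%u\n", flow_dest_tir(&qp));	/* 7 */
	return 0;
}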
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 4f7d9b48df64..9dbfcc0ab577 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -478,6 +478,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
478 struct ipoib_ah *address, u32 qpn); 478 struct ipoib_ah *address, u32 qpn);
479void ipoib_reap_ah(struct work_struct *work); 479void ipoib_reap_ah(struct work_struct *work);
480 480
481struct ipoib_path *__path_find(struct net_device *dev, void *gid);
481void ipoib_mark_paths_invalid(struct net_device *dev); 482void ipoib_mark_paths_invalid(struct net_device *dev);
482void ipoib_flush_paths(struct net_device *dev); 483void ipoib_flush_paths(struct net_device *dev);
483int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv); 484int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 951d9abcca8b..4ad297d3de89 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1318,6 +1318,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
1318 } 1318 }
1319} 1319}
1320 1320
1321#define QPN_AND_OPTIONS_OFFSET 4
1322
1321static void ipoib_cm_tx_start(struct work_struct *work) 1323static void ipoib_cm_tx_start(struct work_struct *work)
1322{ 1324{
1323 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 1325 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
@@ -1326,6 +1328,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1326 struct ipoib_neigh *neigh; 1328 struct ipoib_neigh *neigh;
1327 struct ipoib_cm_tx *p; 1329 struct ipoib_cm_tx *p;
1328 unsigned long flags; 1330 unsigned long flags;
1331 struct ipoib_path *path;
1329 int ret; 1332 int ret;
1330 1333
1331 struct ib_sa_path_rec pathrec; 1334 struct ib_sa_path_rec pathrec;
@@ -1338,7 +1341,19 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1338 p = list_entry(priv->cm.start_list.next, typeof(*p), list); 1341 p = list_entry(priv->cm.start_list.next, typeof(*p), list);
1339 list_del_init(&p->list); 1342 list_del_init(&p->list);
1340 neigh = p->neigh; 1343 neigh = p->neigh;
1344
1341 qpn = IPOIB_QPN(neigh->daddr); 1345 qpn = IPOIB_QPN(neigh->daddr);
1346 /*
1347 * As long as the search is with these 2 locks,
1348 * path existence indicates its validity.
1349 */
1350 path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
1351 if (!path) {
1352 pr_info("%s ignore not valid path %pI6\n",
1353 __func__,
1354 neigh->daddr + QPN_AND_OPTIONS_OFFSET);
1355 goto free_neigh;
1356 }
1342 memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); 1357 memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
1343 1358
1344 spin_unlock_irqrestore(&priv->lock, flags); 1359 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1350,6 +1365,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1350 spin_lock_irqsave(&priv->lock, flags); 1365 spin_lock_irqsave(&priv->lock, flags);
1351 1366
1352 if (ret) { 1367 if (ret) {
1368free_neigh:
1353 neigh = p->neigh; 1369 neigh = p->neigh;
1354 if (neigh) { 1370 if (neigh) {
1355 neigh->cm = NULL; 1371 neigh->cm = NULL;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 74bcaa064226..cc1c1b062ea5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -485,7 +485,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
485 return -EINVAL; 485 return -EINVAL;
486} 486}
487 487
488static struct ipoib_path *__path_find(struct net_device *dev, void *gid) 488struct ipoib_path *__path_find(struct net_device *dev, void *gid)
489{ 489{
490 struct ipoib_dev_priv *priv = netdev_priv(dev); 490 struct ipoib_dev_priv *priv = netdev_priv(dev);
491 struct rb_node *n = priv->path_tree.rb_node; 491 struct rb_node *n = priv->path_tree.rb_node;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 7914c14478cd..cae9bbcc27e7 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -403,6 +403,7 @@ isert_init_conn(struct isert_conn *isert_conn)
403 INIT_LIST_HEAD(&isert_conn->node); 403 INIT_LIST_HEAD(&isert_conn->node);
404 init_completion(&isert_conn->login_comp); 404 init_completion(&isert_conn->login_comp);
405 init_completion(&isert_conn->login_req_comp); 405 init_completion(&isert_conn->login_req_comp);
406 init_waitqueue_head(&isert_conn->rem_wait);
406 kref_init(&isert_conn->kref); 407 kref_init(&isert_conn->kref);
407 mutex_init(&isert_conn->mutex); 408 mutex_init(&isert_conn->mutex);
408 INIT_WORK(&isert_conn->release_work, isert_release_work); 409 INIT_WORK(&isert_conn->release_work, isert_release_work);
@@ -578,7 +579,8 @@ isert_connect_release(struct isert_conn *isert_conn)
578 BUG_ON(!device); 579 BUG_ON(!device);
579 580
580 isert_free_rx_descriptors(isert_conn); 581 isert_free_rx_descriptors(isert_conn);
581 if (isert_conn->cm_id) 582 if (isert_conn->cm_id &&
583 !isert_conn->dev_removed)
582 rdma_destroy_id(isert_conn->cm_id); 584 rdma_destroy_id(isert_conn->cm_id);
583 585
584 if (isert_conn->qp) { 586 if (isert_conn->qp) {
@@ -593,7 +595,10 @@ isert_connect_release(struct isert_conn *isert_conn)
593 595
594 isert_device_put(device); 596 isert_device_put(device);
595 597
596 kfree(isert_conn); 598 if (isert_conn->dev_removed)
599 wake_up_interruptible(&isert_conn->rem_wait);
600 else
601 kfree(isert_conn);
597} 602}
598 603
599static void 604static void
@@ -753,6 +758,7 @@ static int
753isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 758isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
754{ 759{
755 struct isert_np *isert_np = cma_id->context; 760 struct isert_np *isert_np = cma_id->context;
761 struct isert_conn *isert_conn;
756 int ret = 0; 762 int ret = 0;
757 763
758 isert_info("%s (%d): status %d id %p np %p\n", 764 isert_info("%s (%d): status %d id %p np %p\n",
@@ -773,10 +779,21 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
773 break; 779 break;
774 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ 780 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
775 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ 781 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
776 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
777 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 782 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
778 ret = isert_disconnected_handler(cma_id, event->event); 783 ret = isert_disconnected_handler(cma_id, event->event);
779 break; 784 break;
785 case RDMA_CM_EVENT_DEVICE_REMOVAL:
786 isert_conn = cma_id->qp->qp_context;
787 isert_conn->dev_removed = true;
788 isert_disconnected_handler(cma_id, event->event);
789 wait_event_interruptible(isert_conn->rem_wait,
790 isert_conn->state == ISER_CONN_DOWN);
791 kfree(isert_conn);
792 /*
793 * return non-zero from the callback to destroy
794 * the rdma cm id
795 */
796 return 1;
780 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ 797 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
781 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ 798 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
782 case RDMA_CM_EVENT_CONNECT_ERROR: 799 case RDMA_CM_EVENT_CONNECT_ERROR:
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index fc791efe3a10..c02ada57d7f5 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -158,6 +158,8 @@ struct isert_conn {
158 struct work_struct release_work; 158 struct work_struct release_work;
159 bool logout_posted; 159 bool logout_posted;
160 bool snd_w_inv; 160 bool snd_w_inv;
161 wait_queue_head_t rem_wait;
162 bool dev_removed;
161}; 163};
162 164
163#define ISERT_MAX_CQ 64 165#define ISERT_MAX_CQ 64
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 97c372908e78..7817d40d81e7 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -127,6 +127,7 @@ config XGENE_SLIMPRO_MBOX
127config BCM_PDC_MBOX 127config BCM_PDC_MBOX
128 tristate "Broadcom PDC Mailbox" 128 tristate "Broadcom PDC Mailbox"
129 depends on ARM64 || COMPILE_TEST 129 depends on ARM64 || COMPILE_TEST
130 depends on HAS_DMA
130 default ARCH_BCM_IPROC 131 default ARCH_BCM_IPROC
131 help 132 help
132 Mailbox implementation for the Broadcom PDC ring manager, 133 Mailbox implementation for the Broadcom PDC ring manager,
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index cbe0c1ee4ba9..c19dd820ea9b 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -469,7 +469,7 @@ static const struct file_operations pdc_debugfs_stats = {
469 * this directory for a SPU. 469 * this directory for a SPU.
470 * @pdcs: PDC state structure 470 * @pdcs: PDC state structure
471 */ 471 */
472void pdc_setup_debugfs(struct pdc_state *pdcs) 472static void pdc_setup_debugfs(struct pdc_state *pdcs)
473{ 473{
474 char spu_stats_name[16]; 474 char spu_stats_name[16];
475 475
@@ -485,7 +485,7 @@ void pdc_setup_debugfs(struct pdc_state *pdcs)
485 &pdc_debugfs_stats); 485 &pdc_debugfs_stats);
486} 486}
487 487
488void pdc_free_debugfs(void) 488static void pdc_free_debugfs(void)
489{ 489{
490 if (debugfs_dir && simple_empty(debugfs_dir)) { 490 if (debugfs_dir && simple_empty(debugfs_dir)) {
491 debugfs_remove_recursive(debugfs_dir); 491 debugfs_remove_recursive(debugfs_dir);
@@ -1191,10 +1191,11 @@ static void pdc_shutdown(struct mbox_chan *chan)
1191{ 1191{
1192 struct pdc_state *pdcs = chan->con_priv; 1192 struct pdc_state *pdcs = chan->con_priv;
1193 1193
1194 if (pdcs) 1194 if (!pdcs)
1195 dev_dbg(&pdcs->pdev->dev, 1195 return;
1196 "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
1197 1196
1197 dev_dbg(&pdcs->pdev->dev,
1198 "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
1198 pdc_ring_free(pdcs); 1199 pdc_ring_free(pdcs);
1199} 1200}
1200 1201
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c
index 5525a204db93..1dd611423d8b 100644
--- a/drivers/misc/lkdtm_usercopy.c
+++ b/drivers/misc/lkdtm_usercopy.c
@@ -9,7 +9,15 @@
9#include <linux/uaccess.h> 9#include <linux/uaccess.h>
10#include <asm/cacheflush.h> 10#include <asm/cacheflush.h>
11 11
12static size_t cache_size = 1024; 12/*
13 * Many of the tests here end up using const sizes, but those would
14 * normally be ignored by hardened usercopy, so force the compiler
15 * into choosing the non-const path to make sure we trigger the
16 * hardened usercopy checks by adding "unconst" to all the const copies,
17 * and making sure "cache_size" isn't optimized into a const.
18 */
19static volatile size_t unconst = 0;
20static volatile size_t cache_size = 1024;
13static struct kmem_cache *bad_cache; 21static struct kmem_cache *bad_cache;
14 22
15static const unsigned char test_text[] = "This is a test.\n"; 23static const unsigned char test_text[] = "This is a test.\n";
@@ -67,14 +75,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
67 if (to_user) { 75 if (to_user) {
68 pr_info("attempting good copy_to_user of local stack\n"); 76 pr_info("attempting good copy_to_user of local stack\n");
69 if (copy_to_user((void __user *)user_addr, good_stack, 77 if (copy_to_user((void __user *)user_addr, good_stack,
70 sizeof(good_stack))) { 78 unconst + sizeof(good_stack))) {
71 pr_warn("copy_to_user failed unexpectedly?!\n"); 79 pr_warn("copy_to_user failed unexpectedly?!\n");
72 goto free_user; 80 goto free_user;
73 } 81 }
74 82
75 pr_info("attempting bad copy_to_user of distant stack\n"); 83 pr_info("attempting bad copy_to_user of distant stack\n");
76 if (copy_to_user((void __user *)user_addr, bad_stack, 84 if (copy_to_user((void __user *)user_addr, bad_stack,
77 sizeof(good_stack))) { 85 unconst + sizeof(good_stack))) {
78 pr_warn("copy_to_user failed, but lacked Oops\n"); 86 pr_warn("copy_to_user failed, but lacked Oops\n");
79 goto free_user; 87 goto free_user;
80 } 88 }
@@ -88,14 +96,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
88 96
89 pr_info("attempting good copy_from_user of local stack\n"); 97 pr_info("attempting good copy_from_user of local stack\n");
90 if (copy_from_user(good_stack, (void __user *)user_addr, 98 if (copy_from_user(good_stack, (void __user *)user_addr,
91 sizeof(good_stack))) { 99 unconst + sizeof(good_stack))) {
92 pr_warn("copy_from_user failed unexpectedly?!\n"); 100 pr_warn("copy_from_user failed unexpectedly?!\n");
93 goto free_user; 101 goto free_user;
94 } 102 }
95 103
96 pr_info("attempting bad copy_from_user of distant stack\n"); 104 pr_info("attempting bad copy_from_user of distant stack\n");
97 if (copy_from_user(bad_stack, (void __user *)user_addr, 105 if (copy_from_user(bad_stack, (void __user *)user_addr,
98 sizeof(good_stack))) { 106 unconst + sizeof(good_stack))) {
99 pr_warn("copy_from_user failed, but lacked Oops\n"); 107 pr_warn("copy_from_user failed, but lacked Oops\n");
100 goto free_user; 108 goto free_user;
101 } 109 }
@@ -109,7 +117,7 @@ static void do_usercopy_heap_size(bool to_user)
109{ 117{
110 unsigned long user_addr; 118 unsigned long user_addr;
111 unsigned char *one, *two; 119 unsigned char *one, *two;
112 const size_t size = 1024; 120 size_t size = unconst + 1024;
113 121
114 one = kmalloc(size, GFP_KERNEL); 122 one = kmalloc(size, GFP_KERNEL);
115 two = kmalloc(size, GFP_KERNEL); 123 two = kmalloc(size, GFP_KERNEL);
@@ -285,13 +293,14 @@ void lkdtm_USERCOPY_KERNEL(void)
285 293
286 pr_info("attempting good copy_to_user from kernel rodata\n"); 294 pr_info("attempting good copy_to_user from kernel rodata\n");
287 if (copy_to_user((void __user *)user_addr, test_text, 295 if (copy_to_user((void __user *)user_addr, test_text,
288 sizeof(test_text))) { 296 unconst + sizeof(test_text))) {
289 pr_warn("copy_to_user failed unexpectedly?!\n"); 297 pr_warn("copy_to_user failed unexpectedly?!\n");
290 goto free_user; 298 goto free_user;
291 } 299 }
292 300
293 pr_info("attempting bad copy_to_user from kernel text\n"); 301 pr_info("attempting bad copy_to_user from kernel text\n");
294 if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) { 302 if (copy_to_user((void __user *)user_addr, vm_mmap,
303 unconst + PAGE_SIZE)) {
295 pr_warn("copy_to_user failed, but lacked Oops\n"); 304 pr_warn("copy_to_user failed, but lacked Oops\n");
296 goto free_user; 305 goto free_user;
297 } 306 }
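As the new comment explains, hardened usercopy skips its runtime check when the copy length is a compile-time constant, so the tests add a volatile zero ("unconst") to every size to force the checked, non-const path. The same effect can be observed in userspace with the GCC/Clang __builtin_constant_p() builtin, used here purely as a stand-in for the kernel's constant-size detection:

#include <stdio.h>

static volatile size_t unconst;	/* always zero, but the compiler must not assume so */

int main(void)
{
	char buf[16];

	/* Plain sizeof() is a compile-time constant, so checks keyed on
	 * __builtin_constant_p() take the "const" (unchecked) path. */
	printf("sizeof(buf) constant?            %d\n",
	       __builtin_constant_p(sizeof(buf)));

	/* Adding the volatile zero changes the value by nothing, but the
	 * length must now be computed at run time, keeping the check in. */
	printf("unconst + sizeof(buf) constant?  %d\n",
	       __builtin_constant_p(unconst + sizeof(buf)));

	(void)buf;
	return 0;
}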
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 5749a4eee746..0fe8fad25e4d 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1539,12 +1539,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1539 offset += range->npins; 1539 offset += range->npins;
1540 } 1540 }
1541 1541
1542 /* Mask and clear all interrupts */ 1542 /* Clear all interrupts */
1543 chv_writel(0, pctrl->regs + CHV_INTMASK);
1544 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); 1543 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
1545 1544
1546 ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, 1545 ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
1547 handle_simple_irq, IRQ_TYPE_NONE); 1546 handle_bad_irq, IRQ_TYPE_NONE);
1548 if (ret) { 1547 if (ret) {
1549 dev_err(pctrl->dev, "failed to add IRQ chip\n"); 1548 dev_err(pctrl->dev, "failed to add IRQ chip\n");
1550 goto fail; 1549 goto fail;
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 7bad200bd67c..55375b1b3cc8 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -809,17 +809,17 @@ static const struct pistachio_pin_group pistachio_groups[] = {
809 PADS_FUNCTION_SELECT2, 12, 0x3), 809 PADS_FUNCTION_SELECT2, 12, 0x3),
810 MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, 810 MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
811 PADS_FUNCTION_SELECT2, 14, 0x3), 811 PADS_FUNCTION_SELECT2, 14, 0x3),
812 MFIO_MUX_PIN_GROUP(84, SYS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, 812 MFIO_MUX_PIN_GROUP(84, AUDIO_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
813 PADS_FUNCTION_SELECT2, 16, 0x3), 813 PADS_FUNCTION_SELECT2, 16, 0x3),
814 MFIO_MUX_PIN_GROUP(85, WIFI_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, 814 MFIO_MUX_PIN_GROUP(85, RPU_V_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
815 PADS_FUNCTION_SELECT2, 18, 0x3), 815 PADS_FUNCTION_SELECT2, 18, 0x3),
816 MFIO_MUX_PIN_GROUP(86, BT_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, 816 MFIO_MUX_PIN_GROUP(86, RPU_L_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
817 PADS_FUNCTION_SELECT2, 20, 0x3), 817 PADS_FUNCTION_SELECT2, 20, 0x3),
818 MFIO_MUX_PIN_GROUP(87, RPU_V_PLL_LOCK, DREQ2, SOCIF_DEBUG, 818 MFIO_MUX_PIN_GROUP(87, SYS_PLL_LOCK, DREQ2, SOCIF_DEBUG,
819 PADS_FUNCTION_SELECT2, 22, 0x3), 819 PADS_FUNCTION_SELECT2, 22, 0x3),
820 MFIO_MUX_PIN_GROUP(88, RPU_L_PLL_LOCK, DREQ3, SOCIF_DEBUG, 820 MFIO_MUX_PIN_GROUP(88, WIFI_PLL_LOCK, DREQ3, SOCIF_DEBUG,
821 PADS_FUNCTION_SELECT2, 24, 0x3), 821 PADS_FUNCTION_SELECT2, 24, 0x3),
822 MFIO_MUX_PIN_GROUP(89, AUDIO_PLL_LOCK, DREQ4, DREQ5, 822 MFIO_MUX_PIN_GROUP(89, BT_PLL_LOCK, DREQ4, DREQ5,
823 PADS_FUNCTION_SELECT2, 26, 0x3), 823 PADS_FUNCTION_SELECT2, 26, 0x3),
824 PIN_GROUP(TCK, "tck"), 824 PIN_GROUP(TCK, "tck"),
825 PIN_GROUP(TRSTN, "trstn"), 825 PIN_GROUP(TRSTN, "trstn"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
index ce483b03a263..f9d661e5c14a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -485,12 +485,12 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = {
485 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), 485 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
486 SUNXI_FUNCTION(0x0, "gpio_in"), 486 SUNXI_FUNCTION(0x0, "gpio_in"),
487 SUNXI_FUNCTION(0x1, "gpio_out"), 487 SUNXI_FUNCTION(0x1, "gpio_out"),
488 SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ 488 SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
489 SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */ 489 SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */
490 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), 490 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
491 SUNXI_FUNCTION(0x0, "gpio_in"), 491 SUNXI_FUNCTION(0x0, "gpio_in"),
492 SUNXI_FUNCTION(0x1, "gpio_out"), 492 SUNXI_FUNCTION(0x1, "gpio_out"),
493 SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ 493 SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
494 SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */ 494 SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */
495 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), 495 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
496 SUNXI_FUNCTION(0x0, "gpio_in"), 496 SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 3040abe6f73a..3131cac2b76f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -407,12 +407,12 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = {
407 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), 407 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
408 SUNXI_FUNCTION(0x0, "gpio_in"), 408 SUNXI_FUNCTION(0x0, "gpio_in"),
409 SUNXI_FUNCTION(0x1, "gpio_out"), 409 SUNXI_FUNCTION(0x1, "gpio_out"),
410 SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ 410 SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
411 SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)), /* PG_EINT8 */ 411 SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)), /* PG_EINT8 */
412 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), 412 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
413 SUNXI_FUNCTION(0x0, "gpio_in"), 413 SUNXI_FUNCTION(0x0, "gpio_in"),
414 SUNXI_FUNCTION(0x1, "gpio_out"), 414 SUNXI_FUNCTION(0x1, "gpio_out"),
415 SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ 415 SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
416 SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)), /* PG_EINT9 */ 416 SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)), /* PG_EINT9 */
417 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), 417 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
418 SUNXI_FUNCTION(0x0, "gpio_in"), 418 SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index b2daa6641417..c9ff26199711 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -2,7 +2,7 @@
2 * max14577.c - Regulator driver for the Maxim 14577/77836 2 * max14577.c - Regulator driver for the Maxim 14577/77836
3 * 3 *
4 * Copyright (C) 2013,2014 Samsung Electronics 4 * Copyright (C) 2013,2014 Samsung Electronics
5 * Krzysztof Kozlowski <k.kozlowski@samsung.com> 5 * Krzysztof Kozlowski <krzk@kernel.org>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -331,7 +331,7 @@ static void __exit max14577_regulator_exit(void)
331} 331}
332module_exit(max14577_regulator_exit); 332module_exit(max14577_regulator_exit);
333 333
334MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>"); 334MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
335MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver"); 335MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
336MODULE_LICENSE("GPL"); 336MODULE_LICENSE("GPL");
337MODULE_ALIAS("platform:max14577-regulator"); 337MODULE_ALIAS("platform:max14577-regulator");
diff --git a/drivers/regulator/max77693-regulator.c b/drivers/regulator/max77693-regulator.c
index de730fd3f8a5..cfbb9512e486 100644
--- a/drivers/regulator/max77693-regulator.c
+++ b/drivers/regulator/max77693-regulator.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2013-2015 Samsung Electronics 4 * Copyright (C) 2013-2015 Samsung Electronics
5 * Jonghwa Lee <jonghwa3.lee@samsung.com> 5 * Jonghwa Lee <jonghwa3.lee@samsung.com>
6 * Krzysztof Kozlowski <k.kozlowski.k@gmail.com> 6 * Krzysztof Kozlowski <krzk@kernel.org>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -314,5 +314,5 @@ module_exit(max77693_pmic_cleanup);
314 314
315MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver"); 315MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver");
316MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>"); 316MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
317MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>"); 317MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
318MODULE_LICENSE("GPL"); 318MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 5022fa8d10c6..8ed46a9a55c8 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -178,20 +178,21 @@ static const struct regulator_desc pma8084_hfsmps = {
178static const struct regulator_desc pma8084_ftsmps = { 178static const struct regulator_desc pma8084_ftsmps = {
179 .linear_ranges = (struct regulator_linear_range[]) { 179 .linear_ranges = (struct regulator_linear_range[]) {
180 REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000), 180 REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
181 REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000), 181 REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
182 }, 182 },
183 .n_linear_ranges = 2, 183 .n_linear_ranges = 2,
184 .n_voltages = 340, 184 .n_voltages = 262,
185 .ops = &rpm_smps_ldo_ops, 185 .ops = &rpm_smps_ldo_ops,
186}; 186};
187 187
188static const struct regulator_desc pma8084_pldo = { 188static const struct regulator_desc pma8084_pldo = {
189 .linear_ranges = (struct regulator_linear_range[]) { 189 .linear_ranges = (struct regulator_linear_range[]) {
190 REGULATOR_LINEAR_RANGE(750000, 0, 30, 25000), 190 REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
191 REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000), 191 REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
192 REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
192 }, 193 },
193 .n_linear_ranges = 2, 194 .n_linear_ranges = 3,
194 .n_voltages = 100, 195 .n_voltages = 164,
195 .ops = &rpm_smps_ldo_ops, 196 .ops = &rpm_smps_ldo_ops,
196}; 197};
197 198
@@ -221,29 +222,30 @@ static const struct regulator_desc pm8x41_hfsmps = {
221static const struct regulator_desc pm8841_ftsmps = { 222static const struct regulator_desc pm8841_ftsmps = {
222 .linear_ranges = (struct regulator_linear_range[]) { 223 .linear_ranges = (struct regulator_linear_range[]) {
223 REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000), 224 REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
224 REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000), 225 REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
225 }, 226 },
226 .n_linear_ranges = 2, 227 .n_linear_ranges = 2,
227 .n_voltages = 340, 228 .n_voltages = 262,
228 .ops = &rpm_smps_ldo_ops, 229 .ops = &rpm_smps_ldo_ops,
229}; 230};
230 231
231static const struct regulator_desc pm8941_boost = { 232static const struct regulator_desc pm8941_boost = {
232 .linear_ranges = (struct regulator_linear_range[]) { 233 .linear_ranges = (struct regulator_linear_range[]) {
233 REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000), 234 REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
234 }, 235 },
235 .n_linear_ranges = 1, 236 .n_linear_ranges = 1,
236 .n_voltages = 16, 237 .n_voltages = 31,
237 .ops = &rpm_smps_ldo_ops, 238 .ops = &rpm_smps_ldo_ops,
238}; 239};
239 240
240static const struct regulator_desc pm8941_pldo = { 241static const struct regulator_desc pm8941_pldo = {
241 .linear_ranges = (struct regulator_linear_range[]) { 242 .linear_ranges = (struct regulator_linear_range[]) {
242 REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000), 243 REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
243 REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000), 244 REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
245 REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
244 }, 246 },
245 .n_linear_ranges = 2, 247 .n_linear_ranges = 3,
246 .n_voltages = 100, 248 .n_voltages = 164,
247 .ops = &rpm_smps_ldo_ops, 249 .ops = &rpm_smps_ldo_ops,
248}; 250};
249 251
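The regulator tables are REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) entries, so the corrected FTSMPS description maps selectors 185..261 to 1.28 V plus 10 mV per step, and n_voltages has to be the highest selector plus one (262 rather than 340). A small userspace check of that arithmetic; the struct and helper are stand-ins for the regulator core, not its API:

#include <stdio.h>

struct linear_range {
	unsigned int min_uV, min_sel, max_sel, step_uV;
};

/* pma8084/pm8841 FTSMPS ranges from the hunks above */
static const struct linear_range ftsmps[] = {
	{  350000,   0, 184,  5000 },
	{ 1280000, 185, 261, 10000 },
};

static long sel_to_uV(const struct linear_range *r, size_t n, unsigned int sel)
{
	for (size_t i = 0; i < n; i++)
		if (sel >= r[i].min_sel && sel <= r[i].max_sel)
			return r[i].min_uV + (sel - r[i].min_sel) * r[i].step_uV;
	return -1;	/* selector out of range: hence n_voltages = 262 */
}

int main(void)
{
	printf("%ld\n", sel_to_uV(ftsmps, 2, 184));	/* 1270000 */
	printf("%ld\n", sel_to_uV(ftsmps, 2, 185));	/* 1280000 */
	printf("%ld\n", sel_to_uV(ftsmps, 2, 261));	/* 2040000 */
	printf("%ld\n", sel_to_uV(ftsmps, 2, 262));	/* -1: past the last selector */
	return 0;
}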
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 83458f7a2824..6dc96c8dfe75 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -361,8 +361,9 @@ static const char * const snstext[] = {
361 361
362/* Get sense key string or NULL if not available */ 362/* Get sense key string or NULL if not available */
363const char * 363const char *
364scsi_sense_key_string(unsigned char key) { 364scsi_sense_key_string(unsigned char key)
365 if (key <= 0xE) 365{
366 if (key < ARRAY_SIZE(snstext))
366 return snstext[key]; 367 return snstext[key];
367 return NULL; 368 return NULL;
368} 369}
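The constants.c fix bounds the lookup by the table itself instead of the hard-coded 0xE, so the check cannot drift out of sync if sense keys are added. The same pattern in plain C, with the table abbreviated to its first entries:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const char *const snstext[] = {
	"No Sense", "Recovered Error", "Not Ready", "Medium Error",
	/* abbreviated: the real table has one string per sense key */
};

static const char *scsi_sense_key_string(unsigned char key)
{
	if (key < ARRAY_SIZE(snstext))	/* bound follows the table */
		return snstext[key];
	return NULL;
}

int main(void)
{
	printf("%s\n", scsi_sense_key_string(2));			/* "Not Ready" */
	printf("%s\n", scsi_sense_key_string(0x20) ? "hit" : "out of range");
	return 0;
}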
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index eaccd651ccda..246456925335 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -246,6 +246,10 @@ static struct {
246 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 246 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
247 {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 247 {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
248 {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 248 {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
249 {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
250 {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
251 {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
252 {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
249 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, 253 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
250 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, 254 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
251 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ 255 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 3f0ff072184b..60b651bfaa01 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -341,22 +341,6 @@ static int do_sas_phy_delete(struct device *dev, void *data)
341} 341}
342 342
343/** 343/**
344 * is_sas_attached - check if device is SAS attached
345 * @sdev: scsi device to check
346 *
347 * returns true if the device is SAS attached
348 */
349int is_sas_attached(struct scsi_device *sdev)
350{
351 struct Scsi_Host *shost = sdev->host;
352
353 return shost->transportt->host_attrs.ac.class ==
354 &sas_host_class.class;
355}
356EXPORT_SYMBOL(is_sas_attached);
357
358
359/**
360 * sas_remove_children - tear down a devices SAS data structures 344 * sas_remove_children - tear down a devices SAS data structures
361 * @dev: device belonging to the sas object 345 * @dev: device belonging to the sas object
362 * 346 *
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 0e8601aa877a..8c9a35c91705 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 
 	ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
-	if (is_sas_attached(sdev))
+	if (scsi_is_sas_rphy(&sdev->sdev_gendev))
 		efd.addr = sas_get_address(sdev);
 
 	if (efd.addr) {
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 823cbc92d1e7..7a37090dabbe 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -720,8 +720,6 @@ static int img_spfi_remove(struct platform_device *pdev)
 		clk_disable_unprepare(spfi->sys_clk);
 	}
 
-	spi_master_put(master);
-
 	return 0;
 }
 
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 0be89e052428..899d7a8f0889 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -685,7 +685,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
 	pm_runtime_disable(&pdev->dev);
 
 	mtk_spi_reset(mdata);
-	spi_master_put(master);
 
 	return 0;
 }
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index f3df522db93b..58d2d48e16a5 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -214,6 +214,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
 		return PTR_ERR(ssp->clk);
 
 	memset(&pi, 0, sizeof(pi));
+	pi.fwnode = dev->dev.fwnode;
 	pi.parent = &dev->dev;
 	pi.name = "pxa2xx-spi";
 	pi.id = ssp->port_id;
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index c338ef1136f6..7f1555621f8e 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1030,7 +1030,6 @@ static int spi_qup_remove(struct platform_device *pdev)
 
 	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-	spi_master_put(master);
 
 	return 0;
 }
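
The three SPI remove() hunks above (spi-img-spfi, spi-mt65xx, spi-qup) all drop a spi_master_put() call. Presumably — this is inferred from the visible pattern, not quoted from the commit messages — these masters are registered with devm_spi_register_master(), so the registration reference is released by the devm machinery after remove() returns, and an extra put here drops the last reference too early. A kernel-context sketch of the shape remove() converges on, using a hypothetical "foo" driver and a hypothetical struct foo_spi; it is not compilable outside the kernel tree.

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

/* Hypothetical driver state; stands in for the real drivers' private data. */
struct foo_spi {
        struct clk *clk;
};

static int foo_spi_remove(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct foo_spi *foo = spi_master_get_devdata(master);

        pm_runtime_disable(&pdev->dev);
        clk_disable_unprepare(foo->clk);

        /*
         * No spi_master_put() here: assuming devm_spi_register_master() was
         * used in probe, the core still holds the registration reference and
         * releases it itself; an extra put risks a use-after-free later.
         */
        return 0;
}
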
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0f83ad1d5a58..1de3a772eb7d 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -262,6 +262,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 
 	for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
 		brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+		/* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+		if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
+			continue;
 		if (brps <= 32) /* max of brdv is 32 */
 			break;
 	}
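
The added guard skips the divide-by-1 table entry whenever the resulting prescaler would exceed 2, per the hardware rule in the new comment. A standalone sketch of the same search loop with that constraint; the divider column (1, 2, 4, 8, 16, 32) and the prescaler ceiling of 32 are assumptions mirroring the driver, and the register encodings are left out entirely.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static const unsigned int div_table[] = { 1, 2, 4, 8, 16, 32 };   /* assumed */

static int pick_divisors(unsigned long parent_rate, unsigned long spi_hz,
                         unsigned int *brdv_div, unsigned int *brps)
{
        unsigned long div = DIV_ROUND_UP(parent_rate, spi_hz);

        for (unsigned int k = 0; k < sizeof(div_table) / sizeof(div_table[0]); k++) {
                unsigned int b = DIV_ROUND_UP(div, div_table[k]);

                /* Divide-by-1 is only usable with a prescaler of 1 or 2. */
                if (div_table[k] == 1 && b > 2)
                        continue;
                if (b <= 32) {          /* prescaler limited to 32 */
                        *brdv_div = div_table[k];
                        *brps = b;
                        return 0;
                }
        }
        return -1;      /* requested rate too low for any combination */
}

int main(void)
{
        unsigned int d, b;

        /* 48 MHz parent, 1 MHz target: div-by-1 would need brps=48, so it is
         * skipped and the search settles on div=2, brps=24. */
        if (!pick_divisors(48000000, 1000000, &d, &b))
                printf("brdv div=%u brps=%u\n", d, b);
        return 0;
}
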
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 51ad42fad567..200ca228d885 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -960,7 +960,7 @@ static int spi_transfer_one_message(struct spi_master *master,
 	struct spi_transfer *xfer;
 	bool keep_cs = false;
 	int ret = 0;
-	unsigned long ms = 1;
+	unsigned long long ms = 1;
 	struct spi_statistics *statm = &master->statistics;
 	struct spi_statistics *stats = &msg->spi->statistics;
 
@@ -991,9 +991,13 @@ static int spi_transfer_one_message(struct spi_master *master,
 
 		if (ret > 0) {
 			ret = 0;
-			ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+			ms = 8LL * 1000LL * xfer->len;
+			do_div(ms, xfer->speed_hz);
 			ms += ms + 100; /* some tolerance */
 
+			if (ms > UINT_MAX)
+				ms = UINT_MAX;
+
 			ms = wait_for_completion_timeout(&master->xfer_completion,
 							 msecs_to_jiffies(ms));
 		}
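
On a 32-bit unsigned long, the old expression xfer->len * 8 * 1000 wraps once len passes roughly 536 kB (2^32 / 8000), so large, slow transfers got a timeout far shorter than the transfer needs. The hunk above does the multiply in 64 bits, divides with do_div(), and clamps to UINT_MAX before handing the value to msecs_to_jiffies(). A standalone sketch of the difference, with uint32_t/uint64_t standing in for unsigned long / unsigned long long.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t timeout_ms_32(uint32_t len, uint32_t speed_hz)
{
        uint32_t ms = len * 8u * 1000u / speed_hz;      /* wraps for len > ~536 kB */
        return ms + ms + 100;                           /* "some tolerance" */
}

static uint32_t timeout_ms_64(uint32_t len, uint32_t speed_hz)
{
        uint64_t ms = 8ull * 1000ull * len;     /* like 8LL * 1000LL * xfer->len */
        ms /= speed_hz;                         /* stands in for do_div(ms, speed_hz) */
        ms += ms + 100;
        return ms > UINT_MAX ? UINT_MAX : (uint32_t)ms; /* clamp before msecs_to_jiffies */
}

int main(void)
{
        /* 600 kB at 115.2 kHz: ~83 s of timeout, but the wrapped math yields ~9 s. */
        printf("32-bit: %u ms\n", timeout_ms_32(600000, 115200));
        printf("64-bit: %u ms\n", timeout_ms_64(600000, 115200));
        return 0;
}
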
@@ -1159,6 +1163,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 		if (ret < 0) {
 			dev_err(&master->dev, "Failed to power device: %d\n",
 				ret);
+			mutex_unlock(&master->io_mutex);
 			return;
 		}
 	}
@@ -1174,6 +1179,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
 		if (master->auto_runtime_pm)
 			pm_runtime_put(master->dev.parent);
+		mutex_unlock(&master->io_mutex);
 		return;
 	}
 }
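
Both __spi_pump_messages() hunks above add a mutex_unlock() in front of an early return; presumably (hedged: the lock acquisition is outside these hunks) master->io_mutex is taken near the top of the function and released on the normal exit, so these were the only paths leaving it held. A standalone sketch of the lock-balance rule, with a pthread mutex standing in for io_mutex.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t io_mutex = PTHREAD_MUTEX_INITIALIZER;

static int power_up(int fail) { return fail ? -1 : 0; }

static void pump_messages(int fail_power)
{
        pthread_mutex_lock(&io_mutex);

        if (power_up(fail_power)) {
                fprintf(stderr, "failed to power device\n");
                pthread_mutex_unlock(&io_mutex);   /* the fix: unlock on the early return */
                return;
        }

        /* ... transfer work would happen here, under io_mutex ... */

        pthread_mutex_unlock(&io_mutex);           /* normal exit */
}

int main(void)
{
        pump_messages(1);       /* error path */
        pump_messages(0);       /* normal path */
        /* If the error path forgot to unlock, this second call would deadlock. */
        return 0;
}
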
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index cbd8990e2e77..2b5b10eed74f 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -118,10 +118,11 @@ static inline int arch_within_stack_frames(const void * const stack,
 extern void __check_object_size(const void *ptr, unsigned long n,
 				bool to_user);
 
-static inline void check_object_size(const void *ptr, unsigned long n,
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
 				     bool to_user)
 {
-	__check_object_size(ptr, n, to_user);
+	if (!__builtin_constant_p(n))
+		__check_object_size(ptr, n, to_user);
 }
 #else
 static inline void check_object_size(const void *ptr, unsigned long n,
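
With the helper forced inline and guarded by __builtin_constant_p(n), a copy whose length is a compile-time constant skips the runtime object-size check entirely: the compiler folds the branch away, so hardened usercopy only adds work for variable-length copies (constant sizes being assumed to be caught by compile-time checks elsewhere). A standalone sketch of the technique; runtime_check() is a stand-in, not the kernel's __check_object_size().

#include <stdio.h>

static void runtime_check(const void *ptr, unsigned long n)
{
        printf("runtime check: %p, %lu bytes\n", ptr, n);
}

static __attribute__((always_inline)) inline
void check_object_size(const void *ptr, unsigned long n)
{
        /* Folded away whenever n is a compile-time constant. */
        if (!__builtin_constant_p(n))
                runtime_check(ptr, n);
}

int main(int argc, char **argv)
{
        char buf[64];

        (void)argv;
        check_object_size(buf, sizeof(buf));            /* constant size: no call emitted */
        check_object_size(buf, (unsigned long)argc);    /* variable size: check runs */
        return 0;
}
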
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 13c0b2ba1b6c..73d870918939 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -11,12 +11,12 @@ struct sas_rphy;
 struct request;
 
 #if !IS_ENABLED(CONFIG_SCSI_SAS_ATTRS)
-static inline int is_sas_attached(struct scsi_device *sdev)
+static inline int scsi_is_sas_rphy(const struct device *sdev)
 {
 	return 0;
 }
 #else
-extern int is_sas_attached(struct scsi_device *sdev);
+extern int scsi_is_sas_rphy(const struct device *);
 #endif
 
 static inline int sas_protocol_ata(enum sas_protocol proto)
@@ -202,7 +202,6 @@ extern int sas_rphy_add(struct sas_rphy *);
 extern void sas_rphy_remove(struct sas_rphy *);
 extern void sas_rphy_delete(struct sas_rphy *);
 extern void sas_rphy_unlink(struct sas_rphy *);
-extern int scsi_is_sas_rphy(const struct device *);
 
 struct sas_port *sas_port_alloc(struct device *, int);
 struct sas_port *sas_port_alloc_num(struct device *);
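
Moving the scsi_is_sas_rphy() declaration into the #if/#else block gives every caller a definition either way: a static inline stub returning 0 when CONFIG_SCSI_SAS_ATTRS is off, the real extern otherwise, which is what the ses.c hunk above relies on. A standalone sketch of the config-gated stub pattern with hypothetical names (foo_is_special, CONFIG_FOO_ATTRS); build it with or without -DCONFIG_FOO_ATTRS.

#include <stdio.h>

struct device { const char *name; };

#ifndef CONFIG_FOO_ATTRS
/* Feature compiled out: callers still build and simply see "false". */
static inline int foo_is_special(const struct device *dev)
{
        (void)dev;
        return 0;
}
#else
/* In a kernel header this branch would only be "extern int foo_is_special(...);"
 * with the definition in the module; defined here so the sketch builds standalone. */
static int foo_is_special(const struct device *dev)
{
        return dev->name[0] == 's';     /* stand-in for the real class check */
}
#endif

int main(void)
{
        struct device dev = { "sda" };

        /* Caller code is identical whether or not the feature is built in. */
        printf("%d\n", foo_is_special(&dev));
        return 0;
}
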
diff --git a/mm/usercopy.c b/mm/usercopy.c
index a3cc3052f830..089328f2b920 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -134,31 +134,16 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+					  struct page *page, bool to_user)
 {
-	struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
+	struct page *endpage;
 	bool is_reserved, is_cma;
 
 	/*
-	 * Some architectures (arm64) return true for virt_addr_valid() on
-	 * vmalloced addresses. Work around this by checking for vmalloc
-	 * first.
-	 */
-	if (is_vmalloc_addr(ptr))
-		return NULL;
-
-	if (!virt_addr_valid(ptr))
-		return NULL;
-
-	page = virt_to_head_page(ptr);
-
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
-	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
 	 * check below). And sometimes [_sdata,_edata) does not cover
 	 * rodata and/or bss, so check each range explicitly.
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	    ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return NULL;
 
-	/* Allow if start and end are inside the same compound page. */
+	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
 		return NULL;
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		goto reject;
+		return "<spans multiple pages>";
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			goto reject;
+			return "<spans Reserved and non-Reserved pages>";
 		if (is_cma && !is_migrate_cma_page(page))
-			goto reject;
+			return "<spans CMA and non-CMA pages>";
 	}
+#endif
 
 	return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 */
+	if (is_vmalloc_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
 
-reject:
-	return "<spans multiple pages>";
+	/* Verify object does not incorrectly span multiple pages. */
+	return check_page_span(ptr, n, page, to_user);
 }
 
 /*
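
After the split, check_heap_object() keeps the cheap, always-on ordering (vmalloc first because of the arm64 virt_addr_valid() quirk noted in the comment, then virt_addr_valid(), then the slab check) and pushes the multi-page heuristics into check_page_span(), whose body compiles away without CONFIG_HARDENED_USERCOPY_PAGESPAN. A standalone sketch of that control flow; every helper below is a trivial stand-in for the kernel function of the similar name, and the page model is deliberately minimal.

#include <stddef.h>
#include <stdio.h>

struct page { int is_slab; };

/* Stand-ins for is_vmalloc_addr(), virt_addr_valid(), virt_to_head_page(),
 * __check_heap_object() and the real page-span heuristics. */
static int is_vmalloc_addr(const void *p) { (void)p; return 0; }
static int virt_addr_valid(const void *p) { return p != NULL; }
static struct page *virt_to_head_page(const void *p) { static struct page pg; (void)p; return &pg; }
static const char *check_slab_object(const void *p, unsigned long n, struct page *pg)
{ (void)p; (void)n; (void)pg; return NULL; }

static const char *check_page_span(const void *ptr, unsigned long n,
                                   struct page *page, int to_user)
{
#ifdef HARDENED_USERCOPY_PAGESPAN
        /* Reserved/CMA/compound-page heuristics would live here. */
#endif
        (void)ptr; (void)n; (void)page; (void)to_user;
        return NULL;                            /* NULL means "copy allowed" */
}

static const char *check_heap_object(const void *ptr, unsigned long n, int to_user)
{
        struct page *page;

        if (is_vmalloc_addr(ptr))               /* vmalloc first (arm64 quirk) */
                return NULL;
        if (!virt_addr_valid(ptr))              /* not a linear-map address */
                return NULL;

        page = virt_to_head_page(ptr);
        if (page->is_slab)                      /* slab objects get exact size checks */
                return check_slab_object(ptr, n, page);

        return check_page_span(ptr, n, page, to_user);  /* optional heuristics */
}

int main(void)
{
        char buf[16];
        const char *report = check_heap_object(buf, sizeof(buf), 1);

        printf("%s\n", report ? report : "copy allowed");
        return 0;
}
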
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index e1c09e2f9be7..8ea9fd2b6573 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -332,7 +332,9 @@ if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
 	(cd $objtree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrobjfiles"
 fi
 (cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
-(cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
+if grep -q '^CONFIG_GCC_PLUGINS=y' $KCONFIG_CONFIG ; then
+	(cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
+fi
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
 (cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
diff --git a/security/Kconfig b/security/Kconfig
index da10d9b573a4..118f4549404e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -147,6 +147,17 @@ config HARDENED_USERCOPY
 	  or are part of the kernel text. This kills entire classes
 	  of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_PAGESPAN
+	bool "Refuse to copy allocations that span multiple pages"
+	depends on HARDENED_USERCOPY
+	depends on EXPERT
+	help
+	  When a multi-page allocation is done without __GFP_COMP,
+	  hardened usercopy will reject attempts to copy it. There are,
+	  however, several cases of this in the kernel that have not all
+	  been removed. This config is intended to be used only while
+	  trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
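
The new option documents its own limitation: multi-page allocations made without __GFP_COMP are rejected, and callers like that still exist, which is why it is EXPERT-only and intended for finding them. For code that genuinely needs to copy a multi-page, non-slab buffer to or from user space under this check, allocating it as a compound page is the usual way to satisfy check_page_span(); a hedged kernel-context sketch follows (not from this patch, not compilable standalone).

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hedged sketch: a buffer destined for copy_to_user()/copy_from_user(),
 * allocated as a compound page so start and end share one head page. */
static void *alloc_copy_buffer(unsigned int order)
{
        /* __GFP_COMP marks the whole allocation as a single compound page,
         * which the pagespan check above treats as one object. */
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, order);

        return page ? page_address(page) : NULL;
}
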