Diffstat
-rw-r--r--  arch/arm/Kconfig.debug | 6
-rw-r--r--  arch/arm/Makefile | 4
-rw-r--r--  arch/arm/boot/compressed/head.S | 4
-rw-r--r--  arch/arm/include/asm/assembler.h | 8
-rw-r--r--  arch/arm/include/asm/memory.h | 3
-rw-r--r--  arch/arm/include/asm/tlb.h | 4
-rw-r--r--  arch/arm/include/asm/uaccess.h | 58
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 62
-rw-r--r--  arch/arm/kernel/traps.c | 11
-rw-r--r--  arch/arm/lib/delay.c | 1
-rw-r--r--  arch/arm/lib/getuser.S | 23
-rw-r--r--  arch/arm/lib/putuser.S | 6
-rw-r--r--  arch/arm/mm/context.c | 7
-rw-r--r--  arch/arm/mm/mm.h | 3
-rw-r--r--  arch/arm/mm/mmu.c | 8
-rw-r--r--  arch/blackfin/Kconfig | 1
-rw-r--r--  arch/blackfin/Makefile | 1
-rw-r--r--  arch/blackfin/include/asm/smp.h | 2
-rw-r--r--  arch/blackfin/mach-common/smp.c | 223
-rw-r--r--  arch/x86/kvm/i8259.c | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 23
-rw-r--r--  arch/x86/kvm/x86.c | 13
-rw-r--r--  crypto/authenc.c | 4
-rw-r--r--  drivers/crypto/caam/key_gen.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 3
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 13
-rw-r--r--  drivers/scsi/scsi_error.c | 10
-rw-r--r--  drivers/scsi/scsi_lib.c | 5
-rw-r--r--  drivers/scsi/scsi_scan.c | 10
-rw-r--r--  fs/fuse/control.c | 4
-rw-r--r--  fs/fuse/cuse.c | 4
-rw-r--r--  fs/fuse/dev.c | 1
-rw-r--r--  fs/fuse/inode.c | 12
-rw-r--r--  fs/nfs/file.c | 4
-rw-r--r--  fs/nfs/inode.c | 2
-rw-r--r--  fs/nfs/nfs3proc.c | 2
-rw-r--r--  fs/nfs/nfs4file.c | 4
-rw-r--r--  fs/nfs/nfs4proc.c | 55
-rw-r--r--  fs/nfs/nfs4xdr.c | 17
-rw-r--r--  fs/nfs/super.c | 2
-rw-r--r--  include/linux/nfs_fs.h | 5
-rw-r--r--  include/linux/nfs_xdr.h | 2
-rw-r--r--  include/linux/sunrpc/xprt.h | 3
-rw-r--r--  kernel/workqueue.c | 110
-rw-r--r--  lib/digsig.c | 6
-rw-r--r--  net/sunrpc/xprt.c | 34
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 1
-rw-r--r--  net/sunrpc/xprtsock.c | 3
-rw-r--r--  scripts/link-vmlinux.sh | 7
49 files changed, 466 insertions(+), 331 deletions(-)
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f15f82bf3a50..e968a52e4881 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -356,15 +356,15 @@ choice
       is nothing connected to read from the DCC.
 
 config DEBUG_SEMIHOSTING
-    bool "Kernel low-level debug output via semihosting I"
+    bool "Kernel low-level debug output via semihosting I/O"
     help
-      Semihosting enables code running on an ARM target to use
-      the I/O facilities on a host debugger/emulator through a
-      simple SVC calls. The host debugger or emulator must have
+      Semihosting enables code running on an ARM target to use
+      the I/O facilities on a host debugger/emulator through a
+      simple SVC call. The host debugger or emulator must have
       semihosting enabled for the special svc call to be trapped
       otherwise the kernel will crash.
 
-      This is known to work with OpenOCD, as wellas
+      This is known to work with OpenOCD, as well as
       ARM's Fast Models, or any other controlling environment
       that implements semihosting.
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30eae87ead6d..a051dfbdd7db 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
     $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb:
+%.dtb: scripts
     $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-dtbs:
+dtbs: scripts
     $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index b8c64b80bafc..81769c1341fa 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -659,10 +659,14 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_CPU_ENDIAN_BE8
     orr r0, r0, #1 << 25        @ big-endian page tables
 #endif
+    mrcne   p15, 0, r6, c2, c0, 2   @ read ttb control reg
     orrne   r0, r0, #1          @ MMU enabled
     movne   r1, #0xfffffffd     @ domain 0 = client
+    bic r6, r6, #1 << 31        @ 32-bit translation system
+    bic r6, r6, #3 << 0         @ use only ttbr0
     mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
     mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
+    mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
 #endif
     mcr p15, 0, r0, c7, c5, 4   @ ISB
     mcr p15, 0, r0, c1, c0, 0   @ load control register
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 03fb93621d0d..5c8b3bf4d825 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -320,4 +320,12 @@
     .size \name , . - \name
     .endm
 
+    .macro  check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+    adds    \tmp, \addr, #\size - 1
+    sbcccs  \tmp, \tmp, \limit
+    bcs \bad
+#endif
+    .endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
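The check_uaccess macro above is the core of this uaccess series: three instructions that verify [addr, addr + size) fits below the thread's address limit, branching to the fixup label otherwise. The adds sets the carry flag if addr + size - 1 wraps, and the conditional sbcccs turns "end past limit" into a carry as well. A minimal C sketch of the check it performs, assuming the limit register holds addr_limit - 1 (the names below are illustrative, not from the patch):

    /* Sketch of the range check, not the kernel's implementation.
     * 'limit' is addr_limit - 1, the highest valid user address.
     * adds/sbcccs/bcs in the macro compute exactly this, using the
     * carry flag to catch the wrap-around of addr + size - 1. */
    static inline int uaccess_out_of_range(unsigned long addr,
                                           unsigned long size,
                                           unsigned long limit)
    {
        unsigned long last = addr + size - 1;

        return last < addr || last > limit;  /* wrapped, or past limit */
    }

The getuser.S and putuser.S hunks further down invoke this macro at every __get_user_N/__put_user_N entry point, which is why those routines now receive the limit in r1.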
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e965f1b560f1..5f6ddcc56452 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define __phys_to_virt(x)   ((x) - PHYS_OFFSET + PAGE_OFFSET)
 #endif
 #endif
+#endif /* __ASSEMBLY__ */
 
 #ifndef PHYS_OFFSET
 #ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 #endif
 
+#ifndef __ASSEMBLY__
+
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 314d4664eae7..99a19512ee26 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
     pgtable_page_dtor(pte);
 
+#ifdef CONFIG_ARM_LPAE
+    tlb_add_flush(tlb, addr);
+#else
     /*
      * With the classic ARM MMU, a pte page has two corresponding pmd
      * entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
     addr &= PMD_MASK;
     tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
     tlb_add_flush(tlb, addr + SZ_1M);
+#endif
 
     tlb_remove_page(tlb, pte);
 }
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 479a6352e0b5..77bd79f2ffdb 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
 
-#define __get_user_x(__r2,__p,__e,__s,__i...) \
+#define __GUP_CLOBBER_1 "lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2 "ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4 "lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s) \
     __asm__ __volatile__ ( \
         __asmeq("%0", "r0") __asmeq("%1", "r2") \
+        __asmeq("%3", "r1") \
         "bl __get_user_" #__s \
         : "=&r" (__e), "=r" (__r2) \
-        : "0" (__p) \
-        : __i, "cc")
+        : "0" (__p), "r" (__l) \
+        : __GUP_CLOBBER_##__s)
 
-#define get_user(x,p) \
+#define __get_user_check(x,p) \
     ({ \
+        unsigned long __limit = current_thread_info()->addr_limit - 1; \
         register const typeof(*(p)) __user *__p asm("r0") = (p); \
         register unsigned long __r2 asm("r2"); \
+        register unsigned long __l asm("r1") = __limit; \
         register int __e asm("r0"); \
         switch (sizeof(*(__p))) { \
         case 1: \
-            __get_user_x(__r2, __p, __e, 1, "lr"); \
+            __get_user_x(__r2, __p, __e, __l, 1); \
             break; \
         case 2: \
-            __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
+            __get_user_x(__r2, __p, __e, __l, 2); \
             break; \
         case 4: \
-            __get_user_x(__r2, __p, __e, 4, "lr"); \
+            __get_user_x(__r2, __p, __e, __l, 4); \
             break; \
         default: __e = __get_user_bad(); break; \
         } \
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
         __e; \
     })
 
+#define get_user(x,p) \
+    ({ \
+        might_fault(); \
+        __get_user_check(x,p); \
+    })
+
 extern int __put_user_1(void *, unsigned int);
 extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);
 
-#define __put_user_x(__r2,__p,__e,__s) \
+#define __put_user_x(__r2,__p,__e,__l,__s) \
     __asm__ __volatile__ ( \
         __asmeq("%0", "r0") __asmeq("%2", "r2") \
+        __asmeq("%3", "r1") \
         "bl __put_user_" #__s \
         : "=&r" (__e) \
-        : "0" (__p), "r" (__r2) \
+        : "0" (__p), "r" (__r2), "r" (__l) \
         : "ip", "lr", "cc")
 
-#define put_user(x,p) \
+#define __put_user_check(x,p) \
     ({ \
+        unsigned long __limit = current_thread_info()->addr_limit - 1; \
         register const typeof(*(p)) __r2 asm("r2") = (x); \
         register const typeof(*(p)) __user *__p asm("r0") = (p); \
+        register unsigned long __l asm("r1") = __limit; \
         register int __e asm("r0"); \
         switch (sizeof(*(__p))) { \
         case 1: \
-            __put_user_x(__r2, __p, __e, 1); \
+            __put_user_x(__r2, __p, __e, __l, 1); \
             break; \
         case 2: \
-            __put_user_x(__r2, __p, __e, 2); \
+            __put_user_x(__r2, __p, __e, __l, 2); \
             break; \
         case 4: \
-            __put_user_x(__r2, __p, __e, 4); \
+            __put_user_x(__r2, __p, __e, __l, 4); \
            break; \
         case 8: \
-            __put_user_x(__r2, __p, __e, 8); \
+            __put_user_x(__r2, __p, __e, __l, 8); \
            break; \
         default: __e = __put_user_bad(); break; \
         } \
         __e; \
     })
 
+#define put_user(x,p) \
+    ({ \
+        might_fault(); \
+        __put_user_check(x,p); \
+    })
+
 #else /* CONFIG_MMU */
 
 /*
@@ -219,6 +245,7 @@ do { \
     unsigned long __gu_addr = (unsigned long)(ptr); \
     unsigned long __gu_val; \
     __chk_user_ptr(ptr); \
+    might_fault(); \
     switch (sizeof(*(ptr))) { \
     case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
     case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
@@ -300,6 +327,7 @@ do { \
     unsigned long __pu_addr = (unsigned long)(ptr); \
     __typeof__(*(ptr)) __pu_val = (x); \
    __chk_user_ptr(ptr); \
+    might_fault(); \
     switch (sizeof(*(ptr))) { \
     case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
     case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
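With get_user() and put_user() now passing the thread's address limit in r1 and returning -EFAULT when the check_uaccess test fails (plus calling might_fault() up front, since these can sleep), callers must actually look at the return value; that is exactly what the traps.c hunk further down starts doing. A minimal usage sketch, with a hypothetical helper name:

    /* Hypothetical caller, not from the patch: reads a 16-bit Thumb
     * opcode from user space and propagates the fault instead of
     * silently using stale data. */
    static int read_thumb_opcode(u16 __user *pc, u32 *insn)
    {
        u16 op;

        if (get_user(op, pc))   /* 0 on success, -EFAULT on bad address */
            return -EFAULT;

        *insn = op;
        return 0;
    }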
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index ba386bd94107..281bf3301241 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
        arch >= ARM_DEBUG_ARCH_V7_1;
 }
 
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+    return 0;
+}
+
 /* Determine number of WRP registers available. */
 static int get_num_wrp_resources(void)
 {
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
         /* Aligned */
         break;
     case 1:
-        /* Allow single byte watchpoint. */
-        if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
-            break;
     case 2:
         /* Allow halfword watchpoints and breakpoints. */
         if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
             break;
+    case 3:
+        /* Allow single byte watchpoint. */
+        if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+            break;
     default:
         ret = -EINVAL;
         goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
     info->address &= ~alignment_mask;
     info->ctrl.len <<= offset;
 
-    /*
-     * Currently we rely on an overflow handler to take
-     * care of single-stepping the breakpoint when it fires.
-     * In the case of userspace breakpoints on a core with V7 debug,
-     * we can use the mismatch feature as a poor-man's hardware
-     * single-step, but this only works for per-task breakpoints.
-     */
-    if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
-        !core_has_mismatch_brps() || !bp->hw.bp_target)) {
-        pr_warning("overflow handler required but none found\n");
-        ret = -EINVAL;
+    if (!bp->overflow_handler) {
+        /*
+         * Mismatch breakpoints are required for single-stepping
+         * breakpoints.
+         */
+        if (!core_has_mismatch_brps())
+            return -EINVAL;
+
+        /* We don't allow mismatch breakpoints in kernel space. */
+        if (arch_check_bp_in_kernelspace(bp))
+            return -EPERM;
+
+        /*
+         * Per-cpu breakpoints are not supported by our stepping
+         * mechanism.
+         */
+        if (!bp->hw.bp_target)
+            return -EINVAL;
+
+        /*
+         * We only support specific access types if the fsr
+         * reports them.
+         */
+        if (!debug_exception_updates_fsr() &&
+            (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+             info->ctrl.type == ARM_BREAKPOINT_STORE))
+            return -EINVAL;
     }
+
 out:
     return ret;
 }
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
             goto unlock;
 
         /* Check that the access type matches. */
-        access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
-             HW_BREAKPOINT_R;
-        if (!(access & hw_breakpoint_type(wp)))
-            goto unlock;
+        if (debug_exception_updates_fsr()) {
+            access = (fsr & ARM_FSR_ACCESS_MASK) ?
+                  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+            if (!(access & hw_breakpoint_type(wp)))
+                goto unlock;
+        }
 
         /* We have a winner. */
         info->trigger = addr;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f7945218b8c6..b0179b89a04c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
         instr = *(u32 *) pc;
     } else if (thumb_mode(regs)) {
-        get_user(instr, (u16 __user *)pc);
+        if (get_user(instr, (u16 __user *)pc))
+            goto die_sig;
         if (is_wide_instruction(instr)) {
             unsigned int instr2;
-            get_user(instr2, (u16 __user *)pc+1);
+            if (get_user(instr2, (u16 __user *)pc+1))
+                goto die_sig;
             instr <<= 16;
             instr |= instr2;
         }
-    } else {
-        get_user(instr, (u32 __user *)pc);
+    } else if (get_user(instr, (u32 __user *)pc)) {
+        goto die_sig;
     }
 
     if (call_undef_hook(regs, instr) == 0)
         return;
 
+die_sig:
 #ifdef CONFIG_DEBUG_USER
     if (user_debug & UDBG_UNDEFINED) {
         printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index d6dacc69254e..395d5fbb8fa2 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
 {
     pr_info("Switching to timer-based delay loop\n");
     lpj_fine = freq / HZ;
+    loops_per_jiffy = lpj_fine;
     arm_delay_ops.delay = __timer_delay;
     arm_delay_ops.const_udelay = __timer_const_udelay;
     arm_delay_ops.udelay = __timer_udelay;
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 11093a7c3e32..9b06bb41fca6 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -16,8 +16,9 @@
  * __get_user_X
  *
  * Inputs:  r0 contains the address
+ *          r1 contains the address limit, which must be preserved
  * Outputs: r0 is the error code
- *          r2, r3 contains the zero-extended value
+ *          r2 contains the zero-extended value
  *          lr corrupted
  *
  * No other registers must be altered. (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
  * Note also that it is intended that __get_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__get_user_1)
+    check_uaccess r0, 1, r1, r2, __get_user_bad
 1: TUSER(ldrb) r2, [r0]
     mov r0, #0
     mov pc, lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb) r2, [r0]
-3: TUSER(ldrb) r3, [r0, #1]
+    check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb  .req    ip
+2:  ldrbt   r2, [r0], #1
+3:  ldrbt   rb, [r0], #0
 #else
-2: TUSER(ldrb) r2, [r0], #1
-3: TUSER(ldrb) r3, [r0]
+rb  .req    r0
+2:  ldrb    r2, [r0]
+3:  ldrb    rb, [r0, #1]
 #endif
 #ifndef __ARMEB__
-    orr r2, r2, r3, lsl #8
+    orr r2, r2, rb, lsl #8
 #else
-    orr r2, r3, r2, lsl #8
+    orr r2, rb, r2, lsl #8
 #endif
     mov r0, #0
     mov pc, lr
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
+    check_uaccess r0, 4, r1, r2, __get_user_bad
 4: TUSER(ldr)  r2, [r0]
     mov r0, #0
     mov pc, lr
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 7db25990c589..3d73dcb959b0 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -16,6 +16,7 @@
  * __put_user_X
  *
  * Inputs:  r0 contains the address
+ *          r1 contains the address limit, which must be preserved
  *          r2, r3 contains the value
  * Outputs: r0 is the error code
  *          lr corrupted
@@ -27,16 +28,19 @@
  * Note also that it is intended that __put_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__put_user_1)
+    check_uaccess r0, 1, r1, ip, __put_user_bad
 1: TUSER(strb) r2, [r0]
     mov r0, #0
     mov pc, lr
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
+    check_uaccess r0, 2, r1, ip, __put_user_bad
     mov ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
+    check_uaccess r0, 4, r1, ip, __put_user_bad
 4: TUSER(str)  r2, [r0]
     mov r0, #0
     mov pc, lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
+    check_uaccess r0, 8, r1, ip, __put_user_bad
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(str)  r2, [r0]
 6: TUSER(str)  r3, [r0, #4]
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 119bc52ab93e..4e07eec1270d 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
     pid = task_pid_nr(thread->task) << ASID_BITS;
     asm volatile(
     "   mrc p15, 0, %0, c13, c0, 1\n"
-    "   bfi %1, %0, #0, %2\n"
-    "   mcr p15, 0, %1, c13, c0, 1\n"
+    "   and %0, %0, %2\n"
+    "   orr %0, %0, %1\n"
+    "   mcr p15, 0, %0, c13, c0, 1\n"
     : "=r" (contextidr), "+r" (pid)
-    : "I" (ASID_BITS));
+    : "I" (~ASID_MASK));
     isb();
 
     return NOTIFY_OK;
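The old bfi sequence inserted the ASID into the pid register and wrote pid back to CONTEXTIDR, clobbering an input operand and dropping the new PID field in the process; the rewritten sequence masks and ors in the output register instead. A C model of the fixed update, purely illustrative (the macro values below are assumptions, not taken from the patch):

    #define ASID_BITS 8                      /* illustrative */
    #define ASID_MASK (~0UL << ASID_BITS)    /* illustrative: PID field bits */

    /* Keep the hardware ASID in the low bits, install the PID above it;
     * the kernel pre-shifts pid before the asm, as the hunk shows. */
    static unsigned long make_contextidr(unsigned long old, unsigned long pid)
    {
        return (old & ~ASID_MASK) | (pid << ASID_BITS);
    }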
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6776160618ef..a8ee92da3544 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* permanent static mappings from iotable_init() */
 #define VM_ARM_STATIC_MAPPING  0x40000000
 
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING   0x20000000
+
 /* mapping type (attributes) for permanent static mappings */
 #define VM_ARM_MTYPE(mt)       ((mt) << 20)
 #define VM_ARM_MTYPE_MASK      (0x1f << 20)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c2d0451e84a..c2fa21d0103e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
     vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
     vm->addr = (void *)addr;
     vm->size = SECTION_SIZE;
-    vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+    vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
     vm->caller = pmd_empty_section_gap;
     vm_area_add_early(vm);
 }
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
 
     /* we're still single threaded hence no lock needed here */
     for (vm = vmlist; vm; vm = vm->next) {
-        if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+        if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
             continue;
         addr = (unsigned long)vm->addr;
         if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
          * Check whether this memory bank would partially overlap
          * the vmalloc area.
          */
-        if (__va(bank->start + bank->size) > vmalloc_min ||
-            __va(bank->start + bank->size) < __va(bank->start)) {
+        if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+            __va(bank->start + bank->size - 1) <= __va(bank->start)) {
             unsigned long newsize = vmalloc_min - __va(bank->start);
             printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
                    "to -%.8llx (vmalloc region overlap).\n",
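The sanity_check_meminfo() change is the classic end-of-range off-by-one: for a bank that ends exactly at the top of the address space, bank->start + bank->size wraps to zero and the old comparison misfires. Testing the address of the last byte avoids ever computing the one-past-the-end address. A generic sketch of the pattern, with illustrative names:

    /* Overflow-safe "does [start, start + size) reach past limit?" check.
     * Comparing the last byte instead of the one-past-the-end address
     * works even when start + size would wrap to zero; the second test
     * catches the wrap itself, mirroring the hunk above. */
    static int range_overlaps_limit(unsigned long start, unsigned long size,
                                    unsigned long limit)
    {
        unsigned long last = start + size - 1;

        return last >= limit || last <= start;
    }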
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f34861920634..c7092e6057c5 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -38,6 +38,7 @@ config BLACKFIN
     select GENERIC_ATOMIC64
     select GENERIC_IRQ_PROBE
     select IRQ_PER_CPU if SMP
+    select USE_GENERIC_SMP_HELPERS if SMP
     select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
     select GENERIC_SMP_IDLE_THREAD
     select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index d3d7e64ca96d..66cf00095b84 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -20,7 +20,6 @@ endif
 KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
 KBUILD_CFLAGS_MODULE += -mlong-calls
 LDFLAGS += -m elf32bfin
-KALLSYMS += --symbol-prefix=_
 
 KBUILD_DEFCONFIG := BF537-STAMP_defconfig
 
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index dc3d144b4bb5..9631598dcc5d 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -18,6 +18,8 @@
 #define raw_smp_processor_id() blackfin_core_id()
 
 extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
 asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 00bbe672b3b3..a40151306b77 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 
 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_TIMER 0
-#define BFIN_IPI_RESCHEDULE 1
-#define BFIN_IPI_CALL_FUNC 2
-#define BFIN_IPI_CPU_STOP 3
+enum ipi_message_type {
+    BFIN_IPI_TIMER,
+    BFIN_IPI_RESCHEDULE,
+    BFIN_IPI_CALL_FUNC,
+    BFIN_IPI_CALL_FUNC_SINGLE,
+    BFIN_IPI_CPU_STOP,
+};
 
 struct blackfin_flush_data {
     unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
 
 void *secondary_stack;
 
-
-struct smp_call_struct {
-    void (*func)(void *info);
-    void *info;
-    int wait;
-    cpumask_t *waitmask;
-};
-
 static struct blackfin_flush_data smp_flush_data;
 
 static DEFINE_SPINLOCK(stop_lock);
 
-struct ipi_message {
-    unsigned long type;
-    struct smp_call_struct call_struct;
-};
-
 /* A magic number - stress test shows this is safe for common cases */
 #define BFIN_IPI_MSGQ_LEN 5
 
 /* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
-    spinlock_t lock;
+struct ipi_data {
     unsigned long count;
-    unsigned long head; /* head of the queue */
-    struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+    unsigned long bits;
 };
 
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
 
 static void ipi_cpu_stop(unsigned int cpu)
 {
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
     blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
-    int wait;
-    void (*func)(void *info);
-    void *info;
-    func = msg->call_struct.func;
-    info = msg->call_struct.info;
-    wait = msg->call_struct.wait;
-    func(info);
-    if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
-        /*
-         * 'wait' usually means synchronization between CPUs.
-         * Invalidate D cache in case shared data was changed
-         * by func() to ensure cache coherence.
-         */
-        resync_core_dcache();
-#endif
-        cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
-    }
-}
-
 /* Use IRQ_SUPPLE_0 to request reschedule.
  * When returning from interrupt to user space,
  * there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
 
 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
-    struct ipi_message *msg;
-    struct ipi_message_queue *msg_queue;
+    struct ipi_data *bfin_ipi_data;
     unsigned int cpu = smp_processor_id();
-    unsigned long flags;
+    unsigned long pending;
+    unsigned long msg;
 
     platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
-    msg_queue = &__get_cpu_var(ipi_msg_queue);
-
-    spin_lock_irqsave(&msg_queue->lock, flags);
-
-    while (msg_queue->count) {
-        msg = &msg_queue->ipi_message[msg_queue->head];
-        switch (msg->type) {
-        case BFIN_IPI_TIMER:
-            ipi_timer();
-            break;
-        case BFIN_IPI_RESCHEDULE:
-            scheduler_ipi();
-            break;
-        case BFIN_IPI_CALL_FUNC:
-            ipi_call_function(cpu, msg);
-            break;
-        case BFIN_IPI_CPU_STOP:
-            ipi_cpu_stop(cpu);
-            break;
-        default:
-            printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
-                   cpu, msg->type);
-            break;
-        }
-        msg_queue->head++;
-        msg_queue->head %= BFIN_IPI_MSGQ_LEN;
-        msg_queue->count--;
+    bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+
+    while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+        msg = 0;
+        do {
+            msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+            switch (msg) {
+            case BFIN_IPI_TIMER:
+                ipi_timer();
+                break;
+            case BFIN_IPI_RESCHEDULE:
+                scheduler_ipi();
+                break;
+            case BFIN_IPI_CALL_FUNC:
+                generic_smp_call_function_interrupt();
+                break;
+
+            case BFIN_IPI_CALL_FUNC_SINGLE:
+                generic_smp_call_function_single_interrupt();
+                break;
+
+            case BFIN_IPI_CPU_STOP:
+                ipi_cpu_stop(cpu);
+                break;
+            }
+        } while (msg < BITS_PER_LONG);
+
+        smp_mb();
     }
-    spin_unlock_irqrestore(&msg_queue->lock, flags);
     return IRQ_HANDLED;
 }
 
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
 {
     unsigned int cpu;
-    struct ipi_message_queue *msg_queue;
+    struct ipi_data *bfin_ipi_data;
     for_each_possible_cpu(cpu) {
-        msg_queue = &per_cpu(ipi_msg_queue, cpu);
-        spin_lock_init(&msg_queue->lock);
-        msg_queue->count = 0;
-        msg_queue->head = 0;
+        bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+        bfin_ipi_data->bits = 0;
+        bfin_ipi_data->count = 0;
     }
 }
 
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
-                    void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 {
     unsigned int cpu;
-    struct ipi_message_queue *msg_queue;
-    struct ipi_message *msg;
-    unsigned long flags, next_msg;
-    cpumask_t waitmask; /* waitmask is shared by all cpus */
-
-    cpumask_copy(&waitmask, &callmap);
-    for_each_cpu(cpu, &callmap) {
-        msg_queue = &per_cpu(ipi_msg_queue, cpu);
-        spin_lock_irqsave(&msg_queue->lock, flags);
-        if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
-            next_msg = (msg_queue->head + msg_queue->count)
-                    % BFIN_IPI_MSGQ_LEN;
-            msg = &msg_queue->ipi_message[next_msg];
-            msg->type = type;
-            if (type == BFIN_IPI_CALL_FUNC) {
-                msg->call_struct.func = func;
-                msg->call_struct.info = info;
-                msg->call_struct.wait = wait;
-                msg->call_struct.waitmask = &waitmask;
-            }
-            msg_queue->count++;
-        } else
-            panic("IPI message queue overflow\n");
-        spin_unlock_irqrestore(&msg_queue->lock, flags);
+    struct ipi_data *bfin_ipi_data;
+    unsigned long flags;
+
+    local_irq_save(flags);
+
+    for_each_cpu(cpu, cpumask) {
+        bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+        smp_mb();
+        set_bit(msg, &bfin_ipi_data->bits);
+        bfin_ipi_data->count++;
         platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
     }
 
-    if (wait) {
-        while (!cpumask_empty(&waitmask))
-            blackfin_dcache_invalidate_range(
-                (unsigned long)(&waitmask),
-                (unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-        /*
-         * Invalidate D cache in case shared data was changed by
-         * other processors to ensure cache coherence.
-         */
-        resync_core_dcache();
-#endif
-    }
+    local_irq_restore(flags);
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-    cpumask_t callmap;
-
-    preempt_disable();
-    cpumask_copy(&callmap, cpu_online_mask);
-    cpumask_clear_cpu(smp_processor_id(), &callmap);
-    if (!cpumask_empty(&callmap))
-        smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-    preempt_enable();
-
-    return 0;
+    send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-                int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-    unsigned int cpu = cpuid;
-    cpumask_t callmap;
-
-    if (cpu_is_offline(cpu))
-        return 0;
-    cpumask_clear(&callmap);
-    cpumask_set_cpu(cpu, &callmap);
-
-    smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-    return 0;
+    send_ipi(mask, BFIN_IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-    cpumask_t callmap;
-    /* simply trigger an ipi */
-
-    cpumask_clear(&callmap);
-    cpumask_set_cpu(cpu, &callmap);
-
-    smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+    send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
 
     return;
 }
 
 void smp_send_msg(const struct cpumask *mask, unsigned long type)
 {
-    smp_send_message(*mask, type, NULL, NULL, 0);
+    send_ipi(mask, type);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
     cpumask_copy(&callmap, cpu_online_mask);
     cpumask_clear_cpu(smp_processor_id(), &callmap);
     if (!cpumask_empty(&callmap))
-        smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+        send_ipi(&callmap, BFIN_IPI_CPU_STOP);
 
     preempt_enable();
 
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
     platform_prepare_cpus(max_cpus);
-    ipi_queue_init();
+    bfin_ipi_init();
     platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
     platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
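This rework replaces the locked FIFO of ipi_message structs with a single per-CPU bitmask: senders set a bit and ring the doorbell, the handler xchg()s the word to zero and dispatches each set bit, and smp_call_function() moves over to the generic SMP helpers. Pending IPIs of the same type coalesce into one bit, so the queue can no longer overflow and no spinlock is needed in the interrupt path. A self-contained sketch of the idiom, using ordinary C11 atomics instead of the kernel's set_bit/xchg:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { IPI_TIMER, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_NR };

    static _Atomic unsigned long ipi_bits;  /* the kernel keeps one per CPU */

    static void ipi_send(int msg)
    {
        atomic_fetch_or(&ipi_bits, 1UL << msg);
        /* ...then raise the doorbell interrupt on the target core */
    }

    static void ipi_handle(void)
    {
        /* Claim every message posted so far in one atomic step; senders
         * that race with us simply start a fresh word for the next pass. */
        unsigned long pending = atomic_exchange(&ipi_bits, 0);

        for (int msg = 0; msg < IPI_NR; msg++)
            if (pending & (1UL << msg))
                printf("dispatch IPI %d\n", msg);  /* handler call in the kernel */
    }

The trade-off is that a burst of N identical IPIs collapses into one delivery, which is fine here because every message type is idempotent.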
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index e498b18f010c..9fc9aa7ac703 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
     if (val & 0x10) {
         u8 edge_irr = s->irr & ~s->elcr;
         int i;
-        bool found;
+        bool found = false;
         struct kvm_vcpu *vcpu;
 
         s->init4 = val & 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c00f03de1b79..b1eb202ee76a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3619,6 +3619,7 @@ static void seg_setup(int seg)
 
 static int alloc_apic_access_page(struct kvm *kvm)
 {
+    struct page *page;
     struct kvm_userspace_memory_region kvm_userspace_mem;
     int r = 0;
 
@@ -3633,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
     if (r)
         goto out;
 
-    kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+    page = gfn_to_page(kvm, 0xfee00);
+    if (is_error_page(page)) {
+        r = -EFAULT;
+        goto out;
+    }
+
+    kvm->arch.apic_access_page = page;
 out:
     mutex_unlock(&kvm->slots_lock);
     return r;
@@ -3641,6 +3648,7 @@ out:
 
 static int alloc_identity_pagetable(struct kvm *kvm)
 {
+    struct page *page;
     struct kvm_userspace_memory_region kvm_userspace_mem;
     int r = 0;
 
@@ -3656,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)
     if (r)
         goto out;
 
-    kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
-            kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+    page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+    if (is_error_page(page)) {
+        r = -EFAULT;
+        goto out;
+    }
+
+    kvm->arch.ept_identity_pagetable = page;
 out:
     mutex_unlock(&kvm->slots_lock);
     return r;
@@ -6575,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
     /* Exposing INVPCID only when PCID is exposed */
     best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
     if (vmx_invpcid_supported() &&
-        best && (best->ecx & bit(X86_FEATURE_INVPCID)) &&
+        best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
         guest_cpuid_has_pcid(vcpu)) {
         exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
         vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
@@ -6585,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
         vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
             exec_control);
         if (best)
-            best->ecx &= ~bit(X86_FEATURE_INVPCID);
+            best->ebx &= ~bit(X86_FEATURE_INVPCID);
     }
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 148ed666e311..2966c847d489 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5113,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
         !kvm_event_needs_reinjection(vcpu);
 }
 
-static void vapic_enter(struct kvm_vcpu *vcpu)
+static int vapic_enter(struct kvm_vcpu *vcpu)
 {
     struct kvm_lapic *apic = vcpu->arch.apic;
     struct page *page;
 
     if (!apic || !apic->vapic_addr)
-        return;
+        return 0;
 
     page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+    if (is_error_page(page))
+        return -EFAULT;
 
     vcpu->arch.apic->vapic_page = page;
+    return 0;
 }
 
 static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -5430,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
     }
 
     vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-    vapic_enter(vcpu);
+    r = vapic_enter(vcpu);
+    if (r) {
+        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+        return r;
+    }
 
     r = 1;
     while (r > 0) {
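All three KVM hunks fix the same latent bug: gfn_to_page() returns an error page rather than NULL on failure, so its result must be screened with is_error_page() before being cached, and the failure must be propagated. The shape of the fix, reduced to a sketch (the wrapper name and field pointer are stand-ins, not a KVM API):

    /* Sketch of the pattern the three hunks apply: never cache the
     * result of gfn_to_page() without checking for an error page.
     * 'cache_guest_page' is a hypothetical helper for illustration. */
    static int cache_guest_page(struct kvm *kvm, gfn_t gfn, struct page **slot)
    {
        struct page *page = gfn_to_page(kvm, gfn);

        if (is_error_page(page))
            return -EFAULT;   /* propagate instead of storing junk */

        *slot = page;
        return 0;
    }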
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 5ef7ba6b6a76..d0583a4489e6 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -336,7 +336,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
         cryptlen += ivsize;
     }
 
-    if (sg_is_last(assoc)) {
+    if (req->assoclen && sg_is_last(assoc)) {
         authenc_ahash_fn = crypto_authenc_ahash;
         sg_init_table(asg, 2);
         sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
@@ -490,7 +490,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
         cryptlen += ivsize;
     }
 
-    if (sg_is_last(assoc)) {
+    if (req->assoclen && sg_is_last(assoc)) {
         authenc_ahash_fn = crypto_authenc_ahash;
         sg_init_table(asg, 2);
         sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 002888185f17..d216cd3cc569 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -120,3 +120,4 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 
     return ret;
 }
+EXPORT_SYMBOL(gen_split_key);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index dc27598785e5..ed38454228c6 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -4066,7 +4066,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
     spin_lock_init(&instance->cmd_pool_lock);
     spin_lock_init(&instance->hba_lock);
     spin_lock_init(&instance->completion_lock);
-    spin_lock_init(&poll_aen_lock);
 
     mutex_init(&instance->aen_mutex);
     mutex_init(&instance->reset_mutex);
@@ -5392,6 +5391,8 @@ static int __init megasas_init(void)
     printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
            MEGASAS_EXT_VERSION);
 
+    spin_lock_init(&poll_aen_lock);
+
     support_poll_for_event = 2;
     support_device_change = 1;
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 9d46fcbe7755..b25757d1e91b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -2424,10 +2424,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
     }
 
     /* command line tunables for max controller queue depth */
-    if (max_queue_depth != -1)
-        max_request_credit = (max_queue_depth < facts->RequestCredit)
-            ? max_queue_depth : facts->RequestCredit;
-    else
+    if (max_queue_depth != -1 && max_queue_depth != 0) {
+        max_request_credit = min_t(u16, max_queue_depth +
+            ioc->hi_priority_depth + ioc->internal_depth,
+            facts->RequestCredit);
+        if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+            max_request_credit = MAX_HBA_QUEUE_DEPTH;
+    } else
         max_request_credit = min_t(u16, facts->RequestCredit,
             MAX_HBA_QUEUE_DEPTH);
 
@@ -2502,7 +2505,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
     /* set the scsi host can_queue depth
      * with some internal commands that could be outstanding
      */
-    ioc->shost->can_queue = ioc->scsiio_depth - (2);
+    ioc->shost->can_queue = ioc->scsiio_depth;
     dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
         "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
 
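The queue-depth hunk effectively reinterprets the max_queue_depth module parameter as a count of SCSI I/O commands only, so the driver adds its own high-priority and internal credits back on top before clamping against the firmware's RequestCredit and the driver ceiling; this reading is inferred from the arithmetic in the hunk, not stated in it. A plain-C sketch of that double clamp, with illustrative names:

    /* Double clamp: user value plus driver-internal slots, bounded by
     * what the firmware offers and by a hard driver ceiling. */
    static unsigned int effective_credit(unsigned int user_depth,
                                         unsigned int internal_slots,
                                         unsigned int fw_credit,
                                         unsigned int hard_max)
    {
        unsigned int credit = user_depth + internal_slots;

        if (credit > fw_credit)
            credit = fw_credit;
        if (credit > hard_max)
            credit = hard_max;
        return credit;
    }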
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 4a6381c87253..de2337f255a7 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -42,6 +42,8 @@
 
 #include <trace/events/scsi.h>
 
+static void scsi_eh_done(struct scsi_cmnd *scmd);
+
 #define SENSE_TIMEOUT (10*HZ)
 
 /*
@@ -241,6 +243,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
     if (! scsi_command_normalize_sense(scmd, &sshdr))
         return FAILED; /* no valid sense data */
 
+    if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
+        /*
+         * nasty: for mid-layer issued TURs, we need to return the
+         * actual sense data without any recovery attempt. For eh
+         * issued ones, we need to try to recover and interpret
+         */
+        return SUCCESS;
+
     if (scsi_sense_is_deferred(&sshdr))
         return NEEDS_RETRY;
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ffd77739ae3e..faa790fba134 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -776,7 +776,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
     }
 
     if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
-        req->errors = result;
         if (result) {
             if (sense_valid && req->sense) {
                 /*
@@ -792,6 +791,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
             if (!sense_deferred)
                 error = __scsi_error_from_host_byte(cmd, result);
         }
+        /*
+         * __scsi_error_from_host_byte may have reset the host_byte
+         */
+        req->errors = cmd->result;
 
         req->resid_len = scsi_get_resid(cmd);
 
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 56a93794c470..d947ffc20ceb 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -764,6 +764,16 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
     sdev->model = (char *) (sdev->inquiry + 16);
     sdev->rev = (char *) (sdev->inquiry + 32);
 
+    if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
+        /*
+         * sata emulation layer device.  This is a hack to work around
+         * the SATL power management specifications which state that
+         * when the SATL detects the device has gone into standby
+         * mode, it shall respond with NOT READY.
+         */
+        sdev->allow_restart = 1;
+    }
+
     if (*bflags & BLIST_ISROM) {
         sdev->type = TYPE_ROM;
         sdev->removable = 1;
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 03ff5b1eba93..75a20c092dd4 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -117,7 +117,7 @@ static ssize_t fuse_conn_max_background_write(struct file *file,
                       const char __user *buf,
                       size_t count, loff_t *ppos)
 {
-    unsigned val;
+    unsigned uninitialized_var(val);
     ssize_t ret;
 
     ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
@@ -154,7 +154,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
                             const char __user *buf,
                             size_t count, loff_t *ppos)
 {
-    unsigned val;
+    unsigned uninitialized_var(val);
     ssize_t ret;
 
     ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 3426521f3205..ee8d55042298 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -396,7 +396,7 @@ err_device:
 err_region:
     unregister_chrdev_region(devt, 1);
 err:
-    fc->conn_error = 1;
+    fuse_conn_kill(fc);
     goto out;
 }
 
@@ -532,8 +532,6 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
         cdev_del(cc->cdev);
     }
 
-    /* kill connection and shutdown channel */
-    fuse_conn_kill(&cc->fc);
     rc = fuse_dev_release(inode, file); /* puts the base reference */
 
     return rc;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 7df2b5e8fbe1..f4246cfc8d87 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1576,6 +1576,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
         req->pages[req->num_pages] = page;
         req->num_pages++;
 
+        offset = 0;
         num -= this_num;
         total_len += this_num;
         index++;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index ce0a2838ccd0..fca222dabe3c 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -367,11 +367,6 @@ void fuse_conn_kill(struct fuse_conn *fc)
     wake_up_all(&fc->waitq);
     wake_up_all(&fc->blocked_waitq);
     wake_up_all(&fc->reserved_req_waitq);
-    mutex_lock(&fuse_mutex);
-    list_del(&fc->entry);
-    fuse_ctl_remove_conn(fc);
-    mutex_unlock(&fuse_mutex);
-    fuse_bdi_destroy(fc);
 }
 EXPORT_SYMBOL_GPL(fuse_conn_kill);
 
@@ -380,7 +375,14 @@ static void fuse_put_super(struct super_block *sb)
     struct fuse_conn *fc = get_fuse_conn_super(sb);
 
     fuse_send_destroy(fc);
+
     fuse_conn_kill(fc);
+    mutex_lock(&fuse_mutex);
+    list_del(&fc->entry);
+    fuse_ctl_remove_conn(fc);
+    mutex_unlock(&fuse_mutex);
+    fuse_bdi_destroy(fc);
+
     fuse_conn_put(fc);
 }
 
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 75d6d0a3d32e..6a7fcab7ecb3 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -287,10 +287,12 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
     struct inode *inode = file->f_path.dentry->d_inode;
 
     ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+    if (ret != 0)
+        goto out;
     mutex_lock(&inode->i_mutex);
     ret = nfs_file_fsync_commit(file, start, end, datasync);
     mutex_unlock(&inode->i_mutex);
-
+out:
     return ret;
 }
 
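Both NFS fsync hunks (here and in nfs4file.c below) apply the same fix: if writeback itself failed, return that error immediately instead of taking i_mutex and letting the commit path overwrite it. The control flow, reduced to a sketch under the same helper names the patch uses (the body itself is illustrative):

    /* Sketch of the fsync ordering both hunks establish: a writeback
     * error must short-circuit before the commit can clobber 'ret'. */
    static int fsync_sketch(struct file *file, loff_t start, loff_t end,
                            int datasync)
    {
        int ret = filemap_write_and_wait_range(file->f_mapping, start, end);

        if (ret != 0)
            return ret;              /* don't mask the writeback error */

        return nfs_file_fsync_commit(file, start, end, datasync);
    }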
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c6e895f0fbf3..9b47610338f5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -154,7 +154,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
     nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
     nfsi->attrtimeo_timestamp = jiffies;
 
-    memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
+    memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
     if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
         nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
     else
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d6b3b5f2d779..69322096c325 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -643,7 +643,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
643 u64 cookie, struct page **pages, unsigned int count, int plus) 643 u64 cookie, struct page **pages, unsigned int count, int plus)
644{ 644{
645 struct inode *dir = dentry->d_inode; 645 struct inode *dir = dentry->d_inode;
646 __be32 *verf = NFS_COOKIEVERF(dir); 646 __be32 *verf = NFS_I(dir)->cookieverf;
647 struct nfs3_readdirargs arg = { 647 struct nfs3_readdirargs arg = {
648 .fh = NFS_FH(dir), 648 .fh = NFS_FH(dir),
649 .cookie = cookie, 649 .cookie = cookie,
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index acb65e7887f8..eb5eb8eef4d3 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -96,13 +96,15 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
96 struct inode *inode = file->f_path.dentry->d_inode; 96 struct inode *inode = file->f_path.dentry->d_inode;
97 97
98 ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 98 ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
99 if (ret != 0)
100 goto out;
99 mutex_lock(&inode->i_mutex); 101 mutex_lock(&inode->i_mutex);
100 ret = nfs_file_fsync_commit(file, start, end, datasync); 102 ret = nfs_file_fsync_commit(file, start, end, datasync);
101 if (!ret && !datasync) 103 if (!ret && !datasync)
102 /* application has asked for meta-data sync */ 104 /* application has asked for meta-data sync */
103 ret = pnfs_layoutcommit_inode(inode, true); 105 ret = pnfs_layoutcommit_inode(inode, true);
104 mutex_unlock(&inode->i_mutex); 106 mutex_unlock(&inode->i_mutex);
105 107out:
106 return ret; 108 return ret;
107} 109}
108 110
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 635274140b18..1e50326d00dd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3215,11 +3215,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3215 dentry->d_parent->d_name.name, 3215 dentry->d_parent->d_name.name,
3216 dentry->d_name.name, 3216 dentry->d_name.name,
3217 (unsigned long long)cookie); 3217 (unsigned long long)cookie);
3218 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); 3218 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3219 res.pgbase = args.pgbase; 3219 res.pgbase = args.pgbase;
3220 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 3220 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3221 if (status >= 0) { 3221 if (status >= 0) {
3222 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); 3222 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3223 status += args.pgbase; 3223 status += args.pgbase;
3224 } 3224 }
3225 3225
@@ -3653,11 +3653,11 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
3653 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); 3653 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3654} 3654}
3655 3655
3656/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that 3656/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
3657 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on 3657 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
3658 * the stack. 3658 * the stack.
3659 */ 3659 */
3660#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT) 3660#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
3661 3661
3662static int buf_to_pages_noslab(const void *buf, size_t buflen, 3662static int buf_to_pages_noslab(const void *buf, size_t buflen,
3663 struct page **pages, unsigned int *pgbase) 3663 struct page **pages, unsigned int *pgbase)
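
The NFS4ACL_MAXPAGES hunk swaps a right shift, which rounds down, for DIV_ROUND_UP, which rounds up, so an ACL whose size is not an exact multiple of the page size still gets a page for its tail. The kernel macro is equivalent to this self-contained check (the sizes are illustrative):

#include <assert.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        /* When the size is an exact multiple, both forms agree... */
        assert(DIV_ROUND_UP(65536, 4096) == 65536 / 4096);
        /* ...but with any remainder the shift would drop the tail page. */
        assert(DIV_ROUND_UP(65537, 4096) == 17);
        assert(65537 / 4096 == 16);     /* what the old >> computed */
        return 0;
}
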
@@ -3668,7 +3668,7 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen,
3668 spages = pages; 3668 spages = pages;
3669 3669
3670 do { 3670 do {
3671 len = min_t(size_t, PAGE_CACHE_SIZE, buflen); 3671 len = min_t(size_t, PAGE_SIZE, buflen);
3672 newpage = alloc_page(GFP_KERNEL); 3672 newpage = alloc_page(GFP_KERNEL);
3673 3673
3674 if (newpage == NULL) 3674 if (newpage == NULL)
@@ -3739,7 +3739,7 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size
3739 struct nfs4_cached_acl *acl; 3739 struct nfs4_cached_acl *acl;
3740 size_t buflen = sizeof(*acl) + acl_len; 3740 size_t buflen = sizeof(*acl) + acl_len;
3741 3741
3742 if (pages && buflen <= PAGE_SIZE) { 3742 if (buflen <= PAGE_SIZE) {
3743 acl = kmalloc(buflen, GFP_KERNEL); 3743 acl = kmalloc(buflen, GFP_KERNEL);
3744 if (acl == NULL) 3744 if (acl == NULL)
3745 goto out; 3745 goto out;
@@ -3782,17 +3782,15 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3782 .rpc_argp = &args, 3782 .rpc_argp = &args,
3783 .rpc_resp = &res, 3783 .rpc_resp = &res,
3784 }; 3784 };
3785 int ret = -ENOMEM, npages, i; 3785 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
3786 size_t acl_len = 0; 3786 int ret = -ENOMEM, i;
3787 3787
3788 npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3789 /* As long as we're doing a round trip to the server anyway, 3788 /* As long as we're doing a round trip to the server anyway,
3790 * let's be prepared for a page of acl data. */ 3789 * let's be prepared for a page of acl data. */
3791 if (npages == 0) 3790 if (npages == 0)
3792 npages = 1; 3791 npages = 1;
3793 3792 if (npages > ARRAY_SIZE(pages))
3794 /* Add an extra page to handle the bitmap returned */ 3793 return -ERANGE;
3795 npages++;
3796 3794
3797 for (i = 0; i < npages; i++) { 3795 for (i = 0; i < npages; i++) {
3798 pages[i] = alloc_page(GFP_KERNEL); 3796 pages[i] = alloc_page(GFP_KERNEL);
@@ -3808,11 +3806,6 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3808 args.acl_len = npages * PAGE_SIZE; 3806 args.acl_len = npages * PAGE_SIZE;
3809 args.acl_pgbase = 0; 3807 args.acl_pgbase = 0;
3810 3808
3811 /* Let decode_getfacl know not to fail if the ACL data is larger than
3812 * the page we send as a guess */
3813 if (buf == NULL)
3814 res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3815
3816 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 3809 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
3817 __func__, buf, buflen, npages, args.acl_len); 3810 __func__, buf, buflen, npages, args.acl_len);
3818 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 3811 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
@@ -3820,20 +3813,19 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3820 if (ret) 3813 if (ret)
3821 goto out_free; 3814 goto out_free;
3822 3815
3823 acl_len = res.acl_len; 3816 /* Handle the case where the passed-in buffer is too short */
3824 if (acl_len > args.acl_len) 3817 if (res.acl_flags & NFS4_ACL_TRUNC) {
3825 nfs4_write_cached_acl(inode, NULL, 0, acl_len); 3818 /* Did the user only issue a request for the acl length? */
3826 else 3819 if (buf == NULL)
3827 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, 3820 goto out_ok;
3828 acl_len);
3829 if (buf) {
3830 ret = -ERANGE; 3821 ret = -ERANGE;
3831 if (acl_len > buflen) 3822 goto out_free;
3832 goto out_free;
3833 _copy_from_pages(buf, pages, res.acl_data_offset,
3834 acl_len);
3835 } 3823 }
3836 ret = acl_len; 3824 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
3825 if (buf)
3826 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
3827out_ok:
3828 ret = res.acl_len;
3837out_free: 3829out_free:
3838 for (i = 0; i < npages; i++) 3830 for (i = 0; i < npages; i++)
3839 if (pages[i]) 3831 if (pages[i])
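
The rewritten __nfs4_get_acl_uncached() reduces the old guess-and-retry logic to a simple contract: the XDR decoder raises NFS4_ACL_TRUNC when the reply did not fit in the pages sized from buflen, a length-only probe (buf == NULL) still succeeds and reports the full length, and a real read into a too-small buffer fails with -ERANGE. A user-space sketch of that contract; the types and names are illustrative:

#include <errno.h>
#include <stddef.h>
#include <string.h>

#define ACL_TRUNC 0x0001                /* mirrors NFS4_ACL_TRUNC */

struct acl_res {
        size_t len;                     /* full length per the server */
        unsigned int flags;
        const char *data;
};

/* If the reply was not truncated, `len` fits the caller's buffer by
 * construction: the receive pages were sized from buflen. */
static long get_acl(const struct acl_res *res, char *buf, size_t buflen)
{
        (void)buflen;
        if (res->flags & ACL_TRUNC) {
                if (buf == NULL)        /* length-only probe still succeeds */
                        return (long)res->len;
                return -ERANGE;         /* real read, buffer too small */
        }
        if (buf)
                memcpy(buf, res->data, res->len);
        return (long)res->len;
}
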
@@ -3891,10 +3883,13 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
3891 .rpc_argp = &arg, 3883 .rpc_argp = &arg,
3892 .rpc_resp = &res, 3884 .rpc_resp = &res,
3893 }; 3885 };
3886 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
3894 int ret, i; 3887 int ret, i;
3895 3888
3896 if (!nfs4_server_supports_acls(server)) 3889 if (!nfs4_server_supports_acls(server))
3897 return -EOPNOTSUPP; 3890 return -EOPNOTSUPP;
3891 if (npages > ARRAY_SIZE(pages))
3892 return -ERANGE;
3898 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3893 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3899 if (i < 0) 3894 if (i < 0)
3900 return i; 3895 return i;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1bfbd67c556d..8dba6bd48557 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5072,18 +5072,14 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
5072 * are stored with the acl data to handle the problem of 5072 * are stored with the acl data to handle the problem of
5073 * variable length bitmaps. */ 5073 * variable length bitmaps. */
5074 res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset; 5074 res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset;
5075
5076 /* We ignore &savep and don't do consistency checks on
5077 * the attr length. Let userspace figure it out.... */
5078 res->acl_len = attrlen; 5075 res->acl_len = attrlen;
5079 if (attrlen > (xdr->nwords << 2)) { 5076
5080 if (res->acl_flags & NFS4_ACL_LEN_REQUEST) { 5077 /* Check for receive buffer overflow */
5081 /* getxattr interface called with a NULL buf */ 5078 if (res->acl_len > (xdr->nwords << 2) ||
5082 goto out; 5079 res->acl_len + res->acl_data_offset > xdr->buf->page_len) {
5083 } 5080 res->acl_flags |= NFS4_ACL_TRUNC;
5084 dprintk("NFS: acl reply: attrlen %u > page_len %u\n", 5081 dprintk("NFS: acl reply: attrlen %u > page_len %u\n",
5085 attrlen, xdr->nwords << 2); 5082 attrlen, xdr->nwords << 2);
5086 return -EINVAL;
5087 } 5083 }
5088 } else 5084 } else
5089 status = -EOPNOTSUPP; 5085 status = -EOPNOTSUPP;
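
decode_getacl() now bounds the server's claimed attribute length against what was actually received, on two axes: the words left in the XDR stream and the page buffer itself. The same belt-and-braces test in plain C; the parameter names map onto xdr->nwords << 2 and xdr->buf->page_len:

#include <stddef.h>

/* Nonzero when a claimed payload would overrun the received data. */
static int reply_overflows(size_t claimed_len, size_t data_offset,
                           size_t avail, size_t page_len)
{
        return claimed_len > avail ||
               claimed_len + data_offset > page_len;
}
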
@@ -6229,7 +6225,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
6229 status = decode_open(xdr, res); 6225 status = decode_open(xdr, res);
6230 if (status) 6226 if (status)
6231 goto out; 6227 goto out;
6232 if (decode_getfh(xdr, &res->fh) != 0) 6228 status = decode_getfh(xdr, &res->fh);
6229 if (status)
6233 goto out; 6230 goto out;
6234 decode_getfattr(xdr, res->f_attr, res->server); 6231 decode_getfattr(xdr, res->f_attr, res->server);
6235out: 6232out:
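
The nfs4_xdr_dec_open() hunk fixes a swallowed error: decode_getfh()'s result was tested but never stored, so a failed GETFH decode still returned the success status of the preceding op. The rule the fix enforces, in miniature; step1 and step2 are hypothetical stand-ins for the decode calls:

/* Wrong (the old code, roughly): if (step2() != 0) goto out;
 * detects the failure but leaves `status` holding step1()'s 0. */
static int decode_all(int (*step1)(void), int (*step2)(void))
{
        int status = step1();

        if (status)
                goto out;
        status = step2();               /* capture it, then branch */
out:
        return status;
}
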
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 239aff7338eb..b8eda700584b 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1867,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options,
1867 1867
1868 memcpy(sap, &data->addr, sizeof(data->addr)); 1868 memcpy(sap, &data->addr, sizeof(data->addr));
1869 args->nfs_server.addrlen = sizeof(data->addr); 1869 args->nfs_server.addrlen = sizeof(data->addr);
1870 args->nfs_server.port = ntohs(data->addr.sin_port);
1870 if (!nfs_verify_server_address(sap)) 1871 if (!nfs_verify_server_address(sap))
1871 goto out_no_address; 1872 goto out_no_address;
1872 1873
@@ -2564,6 +2565,7 @@ static int nfs4_validate_mount_data(void *options,
2564 return -EFAULT; 2565 return -EFAULT;
2565 if (!nfs_verify_server_address(sap)) 2566 if (!nfs_verify_server_address(sap))
2566 goto out_no_address; 2567 goto out_no_address;
2568 args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2567 2569
2568 if (data->auth_flavourlen) { 2570 if (data->auth_flavourlen) {
2569 if (data->auth_flavourlen > 1) 2571 if (data->auth_flavourlen > 1)
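
Both mount-data validators now record the port carried inside the sockaddr, converting it from network to host byte order with ntohs(). A self-contained round-trip example; nothing here is NFS-specific:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
        struct sockaddr_in sin = { .sin_family = AF_INET };

        sin.sin_port = htons(2049);     /* stored in network byte order */
        printf("port %u\n", ntohs(sin.sin_port));  /* back to host order */
        return 0;
}
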
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 1f8fc7f9bcd8..4b03f56e280e 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -265,11 +265,6 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
265 return NFS_SERVER(inode)->nfs_client->rpc_ops; 265 return NFS_SERVER(inode)->nfs_client->rpc_ops;
266} 266}
267 267
268static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
269{
270 return NFS_I(inode)->cookieverf;
271}
272
273static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode) 268static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
274{ 269{
275 struct nfs_server *nfss = NFS_SERVER(inode); 270 struct nfs_server *nfss = NFS_SERVER(inode);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index ac7c8ae254f2..be9cf3c7e79e 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -652,7 +652,7 @@ struct nfs_getaclargs {
652}; 652};
653 653
654/* getxattr ACL interface flags */ 654/* getxattr ACL interface flags */
655#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */ 655#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */
656struct nfs_getaclres { 656struct nfs_getaclres {
657 size_t acl_len; 657 size_t acl_len;
658 size_t acl_data_offset; 658 size_t acl_data_offset;
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index cff40aa7db62..bf8c49ff7530 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -114,6 +114,7 @@ struct rpc_xprt_ops {
114 void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); 114 void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
115 int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); 115 int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
116 void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); 116 void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
117 void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
117 void (*rpcbind)(struct rpc_task *task); 118 void (*rpcbind)(struct rpc_task *task);
118 void (*set_port)(struct rpc_xprt *xprt, unsigned short port); 119 void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
119 void (*connect)(struct rpc_task *task); 120 void (*connect)(struct rpc_task *task);
@@ -281,6 +282,8 @@ void xprt_connect(struct rpc_task *task);
281void xprt_reserve(struct rpc_task *task); 282void xprt_reserve(struct rpc_task *task);
282int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); 283int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
283int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); 284int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
285void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
286void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
284int xprt_prepare_transmit(struct rpc_task *task); 287int xprt_prepare_transmit(struct rpc_task *task);
285void xprt_transmit(struct rpc_task *task); 288void xprt_transmit(struct rpc_task *task);
286void xprt_end_transmit(struct rpc_task *task); 289void xprt_end_transmit(struct rpc_task *task);
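
The new ->alloc_slot() method lets each transport pick its slot-allocation policy: datagram-style transports take a slot immediately, while TCP first takes the transport write lock so allocation is throttled during reconnects (see the xprtsock.c wiring below). The ops-table dispatch itself, reduced to a compilable sketch with stand-in names:

#include <stdio.h>

struct xprt;                            /* opaque in this sketch */

struct xprt_ops {
        void (*alloc_slot)(struct xprt *xprt);
};

static void alloc_now(struct xprt *xprt)
{
        (void)xprt;
        puts("allocate a request slot immediately (UDP/RDMA style)");
}

static void lock_then_alloc(struct xprt *xprt)
{
        puts("take the write lock first (TCP-style throttling)");
        alloc_now(xprt);
}

static const struct xprt_ops udp_like = { .alloc_slot = alloc_now };
static const struct xprt_ops tcp_like = { .alloc_slot = lock_then_alloc };

int main(void)
{
        udp_like.alloc_slot(NULL);      /* dispatch through the table, */
        tcp_like.alloc_slot(NULL);      /* as xprt_reserve() now does  */
        return 0;
}
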
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 692d97628a10..1e1373bcb3e3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -66,6 +66,7 @@ enum {
66 66
67 /* pool flags */ 67 /* pool flags */
68 POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ 68 POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
69 POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */
69 70
70 /* worker flags */ 71 /* worker flags */
71 WORKER_STARTED = 1 << 0, /* started */ 72 WORKER_STARTED = 1 << 0, /* started */
@@ -652,7 +653,7 @@ static bool need_to_manage_workers(struct worker_pool *pool)
652/* Do we have too many workers and should some go away? */ 653/* Do we have too many workers and should some go away? */
653static bool too_many_workers(struct worker_pool *pool) 654static bool too_many_workers(struct worker_pool *pool)
654{ 655{
655 bool managing = mutex_is_locked(&pool->manager_mutex); 656 bool managing = pool->flags & POOL_MANAGING_WORKERS;
656 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 657 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
657 int nr_busy = pool->nr_workers - nr_idle; 658 int nr_busy = pool->nr_workers - nr_idle;
658 659
@@ -1326,6 +1327,15 @@ static void idle_worker_rebind(struct worker *worker)
1326 1327
1327 /* we did our part, wait for rebind_workers() to finish up */ 1328 /* we did our part, wait for rebind_workers() to finish up */
1328 wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND)); 1329 wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND));
1330
1331 /*
1332 * rebind_workers() shouldn't finish until all workers passed the
1333 * above WORKER_REBIND wait. Tell it when done.
1334 */
1335 spin_lock_irq(&worker->pool->gcwq->lock);
1336 if (!--worker->idle_rebind->cnt)
1337 complete(&worker->idle_rebind->done);
1338 spin_unlock_irq(&worker->pool->gcwq->lock);
1329} 1339}
1330 1340
1331/* 1341/*
@@ -1396,12 +1406,15 @@ retry:
1396 /* set REBIND and kick idle ones, we'll wait for these later */ 1406 /* set REBIND and kick idle ones, we'll wait for these later */
1397 for_each_worker_pool(pool, gcwq) { 1407 for_each_worker_pool(pool, gcwq) {
1398 list_for_each_entry(worker, &pool->idle_list, entry) { 1408 list_for_each_entry(worker, &pool->idle_list, entry) {
1409 unsigned long worker_flags = worker->flags;
1410
1399 if (worker->flags & WORKER_REBIND) 1411 if (worker->flags & WORKER_REBIND)
1400 continue; 1412 continue;
1401 1413
1402 /* morph UNBOUND to REBIND */ 1414 /* morph UNBOUND to REBIND atomically */
1403 worker->flags &= ~WORKER_UNBOUND; 1415 worker_flags &= ~WORKER_UNBOUND;
1404 worker->flags |= WORKER_REBIND; 1416 worker_flags |= WORKER_REBIND;
1417 ACCESS_ONCE(worker->flags) = worker_flags;
1405 1418
1406 idle_rebind.cnt++; 1419 idle_rebind.cnt++;
1407 worker->idle_rebind = &idle_rebind; 1420 worker->idle_rebind = &idle_rebind;
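
Both rebind loops stop mutating worker->flags with two separate read-modify-write statements and instead compose the new value in a local, publishing it with a single ACCESS_ONCE() store so no observer can see the half-morphed value with neither UNBOUND nor REBIND set. A user-space equivalent, assuming GNU C typeof; the kernel's ACCESS_ONCE() of this era was the same volatile cast:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#define F_UNBOUND       (1u << 0)
#define F_REBIND        (1u << 1)

static unsigned int worker_flags = F_UNBOUND;

static void morph_unbound_to_rebind(void)
{
        unsigned int v = worker_flags;  /* compose the update locally */

        v &= ~F_UNBOUND;
        v |= F_REBIND;
        ACCESS_ONCE(worker_flags) = v;  /* publish it with one store */
}
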
@@ -1419,25 +1432,15 @@ retry:
1419 goto retry; 1432 goto retry;
1420 } 1433 }
1421 1434
1422 /* 1435 /* all idle workers are rebound, rebind busy workers */
1423 * All idle workers are rebound and waiting for %WORKER_REBIND to
1424 * be cleared inside idle_worker_rebind(). Clear and release.
1425 * Clearing %WORKER_REBIND from this foreign context is safe
1426 * because these workers are still guaranteed to be idle.
1427 */
1428 for_each_worker_pool(pool, gcwq)
1429 list_for_each_entry(worker, &pool->idle_list, entry)
1430 worker->flags &= ~WORKER_REBIND;
1431
1432 wake_up_all(&gcwq->rebind_hold);
1433
1434 /* rebind busy workers */
1435 for_each_busy_worker(worker, i, pos, gcwq) { 1436 for_each_busy_worker(worker, i, pos, gcwq) {
1436 struct work_struct *rebind_work = &worker->rebind_work; 1437 struct work_struct *rebind_work = &worker->rebind_work;
1438 unsigned long worker_flags = worker->flags;
1437 1439
1438 /* morph UNBOUND to REBIND */ 1440 /* morph UNBOUND to REBIND atomically */
1439 worker->flags &= ~WORKER_UNBOUND; 1441 worker_flags &= ~WORKER_UNBOUND;
1440 worker->flags |= WORKER_REBIND; 1442 worker_flags |= WORKER_REBIND;
1443 ACCESS_ONCE(worker->flags) = worker_flags;
1441 1444
1442 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, 1445 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
1443 work_data_bits(rebind_work))) 1446 work_data_bits(rebind_work)))
@@ -1449,6 +1452,34 @@ retry:
1449 worker->scheduled.next, 1452 worker->scheduled.next,
1450 work_color_to_flags(WORK_NO_COLOR)); 1453 work_color_to_flags(WORK_NO_COLOR));
1451 } 1454 }
1455
1456 /*
1457 * All idle workers are rebound and waiting for %WORKER_REBIND to
1458 * be cleared inside idle_worker_rebind(). Clear and release.
1459 * Clearing %WORKER_REBIND from this foreign context is safe
1460 * because these workers are still guaranteed to be idle.
1461 *
1462 * We need to make sure all idle workers passed the WORKER_REBIND wait
1463 * in idle_worker_rebind() before returning; otherwise, workers can
1464 * get stuck at the wait if the hotplug cycle repeats.
1465 */
1466 idle_rebind.cnt = 1;
1467 INIT_COMPLETION(idle_rebind.done);
1468
1469 for_each_worker_pool(pool, gcwq) {
1470 list_for_each_entry(worker, &pool->idle_list, entry) {
1471 worker->flags &= ~WORKER_REBIND;
1472 idle_rebind.cnt++;
1473 }
1474 }
1475
1476 wake_up_all(&gcwq->rebind_hold);
1477
1478 if (--idle_rebind.cnt) {
1479 spin_unlock_irq(&gcwq->lock);
1480 wait_for_completion(&idle_rebind.done);
1481 spin_lock_irq(&gcwq->lock);
1482 }
1452} 1483}
1453 1484
1454static struct worker *alloc_worker(void) 1485static struct worker *alloc_worker(void)
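
The restructured rebind path is a counted handshake: rebind_workers() re-arms idle_rebind.done, every idle worker decrements the count as it passes its wait, and the last one out completes the manager, which is what keeps workers from being left stuck across repeated hotplug cycles. A pthread sketch of the same wait-until-N-have-passed shape; the kernel uses a completion under gcwq->lock, this uses a condition variable:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  all_done = PTHREAD_COND_INITIALIZER;
static int cnt;

static void arm(int n)                  /* manager: before waking waiters */
{
        pthread_mutex_lock(&lock);
        cnt = n;
        pthread_mutex_unlock(&lock);
}

static void passed_the_wait(void)       /* each idle worker, as in the
                                         * block added to idle_worker_rebind() */
{
        pthread_mutex_lock(&lock);
        if (--cnt == 0)
                pthread_cond_signal(&all_done); /* complete(&idle_rebind.done) */
        pthread_mutex_unlock(&lock);
}

static void wait_for_all(void)          /* manager: wait_for_completion() */
{
        pthread_mutex_lock(&lock);
        while (cnt > 0)
                pthread_cond_wait(&all_done, &lock);
        pthread_mutex_unlock(&lock);
}
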
@@ -1794,9 +1825,45 @@ static bool manage_workers(struct worker *worker)
1794 struct worker_pool *pool = worker->pool; 1825 struct worker_pool *pool = worker->pool;
1795 bool ret = false; 1826 bool ret = false;
1796 1827
1797 if (!mutex_trylock(&pool->manager_mutex)) 1828 if (pool->flags & POOL_MANAGING_WORKERS)
1798 return ret; 1829 return ret;
1799 1830
1831 pool->flags |= POOL_MANAGING_WORKERS;
1832
1833 /*
1834 * To simplify both worker management and CPU hotplug, hold off
1835 * management while hotplug is in progress. CPU hotplug path can't
1836 * grab %POOL_MANAGING_WORKERS to achieve this because that can
1837 * lead to idle worker depletion (all become busy thinking someone
1838 * else is managing) which in turn can result in deadlock under
1839 * extreme circumstances. Use @pool->manager_mutex to synchronize
1840 * manager against CPU hotplug.
1841 *
1842 * manager_mutex would always be free unless CPU hotplug is in
1843 * progress. trylock first without dropping @gcwq->lock.
1844 */
1845 if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
1846 spin_unlock_irq(&pool->gcwq->lock);
1847 mutex_lock(&pool->manager_mutex);
1848 /*
1849 * CPU hotplug could have happened while we were waiting
1850 * for manager_mutex. Hotplug itself can't handle us
1851 * because manager isn't either on idle or busy list, and
1852 * @gcwq's state and ours could have deviated.
1853 *
1854 * As hotplug is now excluded via manager_mutex, we can
1855 * simply try to bind. It will succeed or fail depending
1856 * on @gcwq's current state. Try it and adjust
1857 * %WORKER_UNBOUND accordingly.
1858 */
1859 if (worker_maybe_bind_and_lock(worker))
1860 worker->flags &= ~WORKER_UNBOUND;
1861 else
1862 worker->flags |= WORKER_UNBOUND;
1863
1864 ret = true;
1865 }
1866
1800 pool->flags &= ~POOL_MANAGE_WORKERS; 1867 pool->flags &= ~POOL_MANAGE_WORKERS;
1801 1868
1802 /* 1869 /*
@@ -1806,6 +1873,7 @@ static bool manage_workers(struct worker *worker)
1806 ret |= maybe_destroy_workers(pool); 1873 ret |= maybe_destroy_workers(pool);
1807 ret |= maybe_create_worker(pool); 1874 ret |= maybe_create_worker(pool);
1808 1875
1876 pool->flags &= ~POOL_MANAGING_WORKERS;
1809 mutex_unlock(&pool->manager_mutex); 1877 mutex_unlock(&pool->manager_mutex);
1810 return ret; 1878 return ret;
1811} 1879}
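
manage_workers() now uses two mechanisms with distinct jobs: POOL_MANAGING_WORKERS is a cheap there-is-already-a-manager test made under gcwq->lock (it is also what too_many_workers() now reads), while manager_mutex exists only to exclude CPU hotplug. When the trylock fails, the function drops the spinlock, sleeps on the mutex, and must re-validate its CPU binding afterwards, because hotplug may have run in between. The general shape, sketched with pthreads and illustrative names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;     /* gcwq->lock */
static pthread_mutex_t hotplug_mutex = PTHREAD_MUTEX_INITIALIZER; /* manager_mutex */
static bool managing;                          /* POOL_MANAGING_WORKERS */

static bool manage(void)
{
        bool rebound = false;

        pthread_mutex_lock(&pool_lock);
        if (managing) {                 /* someone else is already manager */
                pthread_mutex_unlock(&pool_lock);
                return false;
        }
        managing = true;

        if (pthread_mutex_trylock(&hotplug_mutex) != 0) {
                /* hotplug holds it: drop the "spinlock" and sleep */
                pthread_mutex_unlock(&pool_lock);
                pthread_mutex_lock(&hotplug_mutex);
                pthread_mutex_lock(&pool_lock);
                rebound = true;         /* state may have changed: re-bind */
        }

        /* ... maybe_destroy_workers() / maybe_create_worker() ... */

        managing = false;
        pthread_mutex_unlock(&hotplug_mutex);
        pthread_mutex_unlock(&pool_lock);
        return rebound;
}
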
diff --git a/lib/digsig.c b/lib/digsig.c
index 286d558033e2..8c0e62975c88 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -163,9 +163,11 @@ static int digsig_verify_rsa(struct key *key,
163 memcpy(out1 + head, p, l); 163 memcpy(out1 + head, p, l);
164 164
165 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); 165 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
166 if (err)
167 goto err;
166 168
167 if (!err && len == hlen) 169 if (len != hlen || memcmp(out2, h, hlen))
168 err = memcmp(out2, h, hlen); 170 err = -EINVAL;
169 171
170err: 172err:
171 mpi_free(in); 173 mpi_free(in);
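
The digsig fix closes two holes at once: a decode failure is no longer masked by the comparison that followed it, and memcmp()'s zero-or-nonzero result is no longer returned as though it were an errno; any mismatch, including a wrong length, now maps to -EINVAL. The same discipline as a standalone checker:

#include <errno.h>
#include <string.h>

/* memcmp() returns "zero or nonzero", not an error code, so translate
 * any mismatch, including a length mismatch, to -EINVAL as the patch does. */
static int check_digest(const unsigned char *got, size_t got_len,
                        const unsigned char *want, size_t want_len)
{
        if (got_len != want_len || memcmp(got, want, want_len))
                return -EINVAL;
        return 0;
}
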
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index a5a402a7d21f..5d7f61d7559c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
969 return false; 969 return false;
970} 970}
971 971
972static void xprt_alloc_slot(struct rpc_task *task) 972void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
973{ 973{
974 struct rpc_xprt *xprt = task->tk_xprt;
975 struct rpc_rqst *req; 974 struct rpc_rqst *req;
976 975
976 spin_lock(&xprt->reserve_lock);
977 if (!list_empty(&xprt->free)) { 977 if (!list_empty(&xprt->free)) {
978 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); 978 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
979 list_del(&req->rq_list); 979 list_del(&req->rq_list);
@@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task)
994 default: 994 default:
995 task->tk_status = -EAGAIN; 995 task->tk_status = -EAGAIN;
996 } 996 }
997 spin_unlock(&xprt->reserve_lock);
997 return; 998 return;
998out_init_req: 999out_init_req:
999 task->tk_status = 0; 1000 task->tk_status = 0;
1000 task->tk_rqstp = req; 1001 task->tk_rqstp = req;
1001 xprt_request_init(task, xprt); 1002 xprt_request_init(task, xprt);
1003 spin_unlock(&xprt->reserve_lock);
1004}
1005EXPORT_SYMBOL_GPL(xprt_alloc_slot);
1006
1007void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1008{
1009 /* Note: grabbing the xprt_lock_write() ensures that we throttle
1010 * new slot allocation if the transport is congested (i.e. when
1011 * reconnecting a stream transport or when out of socket write
1012 * buffer space).
1013 */
1014 if (xprt_lock_write(xprt, task)) {
1015 xprt_alloc_slot(xprt, task);
1016 xprt_release_write(xprt, task);
1017 }
1002} 1018}
1019EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
1003 1020
1004static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) 1021static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1005{ 1022{
@@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task)
1083 if (task->tk_rqstp != NULL) 1100 if (task->tk_rqstp != NULL)
1084 return; 1101 return;
1085 1102
1086 /* Note: grabbing the xprt_lock_write() here is not strictly needed,
1087 * but ensures that we throttle new slot allocation if the transport
1088 * is congested (e.g. if reconnecting or if we're out of socket
1089 * write buffer space).
1090 */
1091 task->tk_timeout = 0; 1103 task->tk_timeout = 0;
1092 task->tk_status = -EAGAIN; 1104 task->tk_status = -EAGAIN;
1093 if (!xprt_lock_write(xprt, task)) 1105 xprt->ops->alloc_slot(xprt, task);
1094 return;
1095
1096 spin_lock(&xprt->reserve_lock);
1097 xprt_alloc_slot(task);
1098 spin_unlock(&xprt->reserve_lock);
1099 xprt_release_write(xprt, task);
1100} 1106}
1101 1107
1102static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) 1108static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
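
After the hunks above, xprt_reserve() hard-codes no policy at all: it simply calls xprt->ops->alloc_slot(). The two exported helpers carry the policy instead; xprt_alloc_slot() now takes reserve_lock itself so every caller is covered, and xprt_lock_and_alloc_slot() wraps it in the transport write lock. A condensed sketch of the two call shapes, with stand-in names rather than the SunRPC API:

#include <pthread.h>

static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;

static int  try_write_lock(void) { return 1; }  /* stand-in: always wins */
static void release_write_lock(void) { }

/* The lock now lives inside the allocator, so every ops variant may
 * call it directly, as xprt_alloc_slot() does after the patch. */
static void alloc_slot(void)
{
        pthread_mutex_lock(&reserve_lock);
        /* take a request slot off the free list, or queue for one */
        pthread_mutex_unlock(&reserve_lock);
}

/* TCP-style variant: allocate only while holding the write lock, which
 * throttles new slots while the transport is reconnecting. */
static void lock_and_alloc_slot(void)
{
        if (try_write_lock()) {
                alloc_slot();
                release_write_lock();
        }
}
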
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 06cdbff79e4a..5d9202dc7cb1 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
713static struct rpc_xprt_ops xprt_rdma_procs = { 713static struct rpc_xprt_ops xprt_rdma_procs = {
714 .reserve_xprt = xprt_rdma_reserve_xprt, 714 .reserve_xprt = xprt_rdma_reserve_xprt,
715 .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ 715 .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */
716 .alloc_slot = xprt_alloc_slot,
716 .release_request = xprt_release_rqst_cong, /* ditto */ 717 .release_request = xprt_release_rqst_cong, /* ditto */
717 .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ 718 .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */
718 .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ 719 .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 400567243f84..a35b8e52e551 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2473,6 +2473,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
2473static struct rpc_xprt_ops xs_local_ops = { 2473static struct rpc_xprt_ops xs_local_ops = {
2474 .reserve_xprt = xprt_reserve_xprt, 2474 .reserve_xprt = xprt_reserve_xprt,
2475 .release_xprt = xs_tcp_release_xprt, 2475 .release_xprt = xs_tcp_release_xprt,
2476 .alloc_slot = xprt_alloc_slot,
2476 .rpcbind = xs_local_rpcbind, 2477 .rpcbind = xs_local_rpcbind,
2477 .set_port = xs_local_set_port, 2478 .set_port = xs_local_set_port,
2478 .connect = xs_connect, 2479 .connect = xs_connect,
@@ -2489,6 +2490,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
2489 .set_buffer_size = xs_udp_set_buffer_size, 2490 .set_buffer_size = xs_udp_set_buffer_size,
2490 .reserve_xprt = xprt_reserve_xprt_cong, 2491 .reserve_xprt = xprt_reserve_xprt_cong,
2491 .release_xprt = xprt_release_xprt_cong, 2492 .release_xprt = xprt_release_xprt_cong,
2493 .alloc_slot = xprt_alloc_slot,
2492 .rpcbind = rpcb_getport_async, 2494 .rpcbind = rpcb_getport_async,
2493 .set_port = xs_set_port, 2495 .set_port = xs_set_port,
2494 .connect = xs_connect, 2496 .connect = xs_connect,
@@ -2506,6 +2508,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
2506static struct rpc_xprt_ops xs_tcp_ops = { 2508static struct rpc_xprt_ops xs_tcp_ops = {
2507 .reserve_xprt = xprt_reserve_xprt, 2509 .reserve_xprt = xprt_reserve_xprt,
2508 .release_xprt = xs_tcp_release_xprt, 2510 .release_xprt = xs_tcp_release_xprt,
2511 .alloc_slot = xprt_lock_and_alloc_slot,
2509 .rpcbind = rpcb_getport_async, 2512 .rpcbind = rpcb_getport_async,
2510 .set_port = xs_set_port, 2513 .set_port = xs_set_port,
2511 .connect = xs_connect, 2514 .connect = xs_connect,
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 4235a6361fec..b3d907eb93a9 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -74,8 +74,13 @@ kallsyms()
74 info KSYM ${2} 74 info KSYM ${2}
75 local kallsymopt; 75 local kallsymopt;
76 76
77 if [ -n "${CONFIG_SYMBOL_PREFIX}" ]; then
78 kallsymopt="${kallsymopt} \
79 --symbol-prefix=${CONFIG_SYMBOL_PREFIX}"
80 fi
81
77 if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then 82 if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then
78 kallsymopt=--all-symbols 83 kallsymopt="${kallsymopt} --all-symbols"
79 fi 84 fi
80 85
81 local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ 86 local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \