author    Arnd Bergmann <arnd@arndb.de>  2013-03-14 18:55:59 -0400
committer Arnd Bergmann <arnd@arndb.de>  2013-03-14 18:55:59 -0400
commit    7ac6c89189c669fd5f883937f291f18d324d470b (patch)
tree      5f0712bdedac45a40148ede0064d4e6eddb4260e /arch
parent    6fdd496e07f511a94ba27e4a1433038b32d6af05 (diff)
parent    0ed66befaae893e82c9f016238282d73ef9fd6c7 (diff)
Merge tag 'at91-fixes' of git://github.com/at91linux/linux-at91 into fixes
From Nicolas Ferre <nicolas.ferre@atmel.com>:

Two patches for Device Tree on at91sam9x5/NAND. Two more for fixing
PM suspend/resume IRQ on AIC5 and GPIO used with pinctrl.

* tag 'at91-fixes' of git://github.com/at91linux/linux-at91:
  ARM: at91: fix infinite loop in at91_irq_suspend/resume
  ARM: at91: add gpio suspend/resume support when using pinctrl
  ARM: at91: dt: at91sam9x5: complete NAND pinctrl
  ARM: at91: dt: at91sam9x5: correct NAND pins comments

Includes an update to -rc2

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
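The first fix listed above changes the find_next_bit() iteration in arch/arm/mach-at91/irq.c (see the hunk further down): find_next_bit() returns the first set bit at or after the given offset, so restarting the next search at the bit that was just found keeps returning the same bit forever. Below is a minimal user-space sketch of the corrected iteration pattern; the find_next_bit() helper here is a simplified stand-in for the kernel's, and N_IRQS and the bitmap contents are invented purely for illustration.

/*
 * Sketch of the fixed loop shape used in at91_irq_suspend()/resume():
 * each search resumes at bit + 1, so every set bit is visited exactly
 * once and the loop terminates.  Toy user-space code, not the kernel
 * implementation.
 */
#include <stdio.h>

#define N_IRQS 32UL                     /* made-up size for the example */

/* Simplified stand-in for the kernel's find_next_bit():
 * first set bit >= offset, or size if none. */
static unsigned long find_next_bit(const unsigned long *addr,
                                   unsigned long size, unsigned long offset)
{
        unsigned long bit;

        for (bit = offset; bit < size; bit++)
                if (addr[bit / (8 * sizeof(unsigned long))] &
                    (1UL << (bit % (8 * sizeof(unsigned long)))))
                        return bit;
        return size;
}

int main(void)
{
        unsigned long backups[1] = { (1UL << 3) | (1UL << 17) }; /* example pending IRQs */
        int bit = -1;

        /* Fixed form: advance past the bit we just handled. */
        while ((bit = find_next_bit(backups, N_IRQS, bit + 1)) < N_IRQS)
                printf("would disable irq %d\n", bit);

        return 0;
}

The same "bit = -1" / "bit + 1" idiom is applied to all four loops in at91_irq_suspend() and at91_irq_resume() in the patch.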
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/boot/head.S | 1
-rw-r--r--  arch/arm/boot/compressed/Makefile | 2
-rw-r--r--  arch/arm/boot/dts/at91sam9x5.dtsi | 28
-rw-r--r--  arch/arm/include/asm/mmu.h | 8
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 2
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 34
-rw-r--r--  arch/arm/include/uapi/asm/unistd.h | 2
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 2
-rw-r--r--  arch/arm/kernel/calls.S | 2
-rw-r--r--  arch/arm/kernel/head.S | 26
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/arm/kernel/perf_event.c | 4
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 2
-rw-r--r--  arch/arm/kernel/smp.c | 1
-rw-r--r--  arch/arm/kernel/smp_tlb.c | 12
-rw-r--r--  arch/arm/kernel/smp_twd.c | 4
-rw-r--r--  arch/arm/kernel/suspend.c | 1
-rw-r--r--  arch/arm/lib/memset.S | 85
-rw-r--r--  arch/arm/mach-at91/include/mach/gpio.h | 8
-rw-r--r--  arch/arm/mach-at91/irq.c | 20
-rw-r--r--  arch/arm/mach-at91/pm.c | 10
-rw-r--r--  arch/arm/mach-netx/generic.c | 2
-rw-r--r--  arch/arm/mach-netx/include/mach/irqs.h | 64
-rw-r--r--  arch/arm/mm/context.c | 29
-rw-r--r--  arch/arm/mm/idmap.c | 1
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c | 1
-rw-r--r--  arch/metag/include/asm/elf.h | 3
-rw-r--r--  arch/metag/mm/Kconfig | 1
-rw-r--r--  arch/powerpc/crypto/sha1-powerpc-asm.S | 4
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 2
-rw-r--r--  arch/powerpc/include/asm/reg.h | 3
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 1
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S | 5
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/hvcserver.c | 5
-rw-r--r--  arch/s390/hypfs/inode.c | 1
-rw-r--r--  arch/tile/include/asm/compat.h | 3
-rw-r--r--  arch/tile/kernel/compat.c | 42
-rw-r--r--  arch/x86/include/asm/bootparam_utils.h | 20
-rw-r--r--  arch/x86/kernel/setup.c | 10
-rw-r--r--  arch/x86/kernel/smpboot.c | 3
-rw-r--r--  arch/x86/mm/init.c | 5
-rw-r--r--  arch/x86/mm/pat.c | 7
47 files changed, 316 insertions, 162 deletions
diff --git a/arch/alpha/boot/head.S b/arch/alpha/boot/head.S
index b06812bcac83..8efb26686d47 100644
--- a/arch/alpha/boot/head.S
+++ b/arch/alpha/boot/head.S
@@ -4,6 +4,7 @@
4 * initial bootloader stuff.. 4 * initial bootloader stuff..
5 */ 5 */
6 6
7#include <asm/pal.h>
7 8
8 .set noreorder 9 .set noreorder
9 .globl __start 10 .globl __start
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 5cad8a6dadb0..afed28e37ea5 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -120,7 +120,7 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
120KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) 120KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
121endif 121endif
122 122
123ccflags-y := -fpic -fno-builtin -I$(obj) 123ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
124asflags-y := -Wa,-march=all -DZIMAGE 124asflags-y := -Wa,-march=all -DZIMAGE
125 125
126# Supply kernel BSS size to the decompressor via a linker symbol. 126# Supply kernel BSS size to the decompressor via a linker symbol.
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index aa98e641931f..a98c0d50fbbe 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -238,8 +238,32 @@
238 nand { 238 nand {
239 pinctrl_nand: nand-0 { 239 pinctrl_nand: nand-0 {
240 atmel,pins = 240 atmel,pins =
241 <3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */ 241 <3 0 0x1 0x0 /* PD0 periph A Read Enable */
242 3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */ 242 3 1 0x1 0x0 /* PD1 periph A Write Enable */
243 3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */
244 3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */
245 3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */
246 3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */
247 3 6 0x1 0x0 /* PD6 periph A Data bit 0 */
248 3 7 0x1 0x0 /* PD7 periph A Data bit 1 */
249 3 8 0x1 0x0 /* PD8 periph A Data bit 2 */
250 3 9 0x1 0x0 /* PD9 periph A Data bit 3 */
251 3 10 0x1 0x0 /* PD10 periph A Data bit 4 */
252 3 11 0x1 0x0 /* PD11 periph A Data bit 5 */
253 3 12 0x1 0x0 /* PD12 periph A Data bit 6 */
254 3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */
255 };
256
257 pinctrl_nand_16bits: nand_16bits-0 {
258 atmel,pins =
259 <3 14 0x1 0x0 /* PD14 periph A Data bit 8 */
260 3 15 0x1 0x0 /* PD15 periph A Data bit 9 */
261 3 16 0x1 0x0 /* PD16 periph A Data bit 10 */
262 3 17 0x1 0x0 /* PD17 periph A Data bit 11 */
263 3 18 0x1 0x0 /* PD18 periph A Data bit 12 */
264 3 19 0x1 0x0 /* PD19 periph A Data bit 13 */
265 3 20 0x1 0x0 /* PD20 periph A Data bit 14 */
266 3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */
243 }; 267 };
244 }; 268 };
245 269
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 9f77e7804f3b..e3d55547e755 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,15 +5,15 @@
5 5
6typedef struct { 6typedef struct {
7#ifdef CONFIG_CPU_HAS_ASID 7#ifdef CONFIG_CPU_HAS_ASID
8 u64 id; 8 atomic64_t id;
9#endif 9#endif
10 unsigned int vmalloc_seq; 10 unsigned int vmalloc_seq;
11} mm_context_t; 11} mm_context_t;
12 12
13#ifdef CONFIG_CPU_HAS_ASID 13#ifdef CONFIG_CPU_HAS_ASID
14#define ASID_BITS 8 14#define ASID_BITS 8
15#define ASID_MASK ((~0ULL) << ASID_BITS) 15#define ASID_MASK ((~0ULL) << ASID_BITS)
16#define ASID(mm) ((mm)->context.id & ~ASID_MASK) 16#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
17#else 17#else
18#define ASID(mm) (0) 18#define ASID(mm) (0)
19#endif 19#endif
@@ -26,7 +26,7 @@ typedef struct {
26 * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com> 26 * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
27 */ 27 */
28typedef struct { 28typedef struct {
29 unsigned long end_brk; 29 unsigned long end_brk;
30} mm_context_t; 30} mm_context_t;
31 31
32#endif 32#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index e1f644bc7cc5..863a6611323c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
25#ifdef CONFIG_CPU_HAS_ASID 25#ifdef CONFIG_CPU_HAS_ASID
26 26
27void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); 27void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
28#define init_new_context(tsk,mm) ({ mm->context.id = 0; }) 28#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
29 29
30#else /* !CONFIG_CPU_HAS_ASID */ 30#else /* !CONFIG_CPU_HAS_ASID */
31 31
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 6e924d3a77eb..4db8c8820f0d 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -34,10 +34,13 @@
34#define TLB_V6_D_ASID (1 << 17) 34#define TLB_V6_D_ASID (1 << 17)
35#define TLB_V6_I_ASID (1 << 18) 35#define TLB_V6_I_ASID (1 << 18)
36 36
37#define TLB_V6_BP (1 << 19)
38
37/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */ 39/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
38#define TLB_V7_UIS_PAGE (1 << 19) 40#define TLB_V7_UIS_PAGE (1 << 20)
39#define TLB_V7_UIS_FULL (1 << 20) 41#define TLB_V7_UIS_FULL (1 << 21)
40#define TLB_V7_UIS_ASID (1 << 21) 42#define TLB_V7_UIS_ASID (1 << 22)
43#define TLB_V7_UIS_BP (1 << 23)
41 44
42#define TLB_BARRIER (1 << 28) 45#define TLB_BARRIER (1 << 28)
43#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ 46#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
@@ -150,7 +153,8 @@
150#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ 153#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
151 TLB_V6_I_FULL | TLB_V6_D_FULL | \ 154 TLB_V6_I_FULL | TLB_V6_D_FULL | \
152 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ 155 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
153 TLB_V6_I_ASID | TLB_V6_D_ASID) 156 TLB_V6_I_ASID | TLB_V6_D_ASID | \
157 TLB_V6_BP)
154 158
155#ifdef CONFIG_CPU_TLB_V6 159#ifdef CONFIG_CPU_TLB_V6
156# define v6wbi_possible_flags v6wbi_tlb_flags 160# define v6wbi_possible_flags v6wbi_tlb_flags
@@ -166,9 +170,11 @@
166#endif 170#endif
167 171
168#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ 172#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
169 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) 173 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
174 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
170#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ 175#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
171 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) 176 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
177 TLB_V6_U_ASID | TLB_V6_BP)
172 178
173#ifdef CONFIG_CPU_TLB_V7 179#ifdef CONFIG_CPU_TLB_V7
174 180
@@ -430,6 +436,20 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
430 } 436 }
431} 437}
432 438
439static inline void local_flush_bp_all(void)
440{
441 const int zero = 0;
442 const unsigned int __tlb_flag = __cpu_tlb_flags;
443
444 if (tlb_flag(TLB_V7_UIS_BP))
445 asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
446 else if (tlb_flag(TLB_V6_BP))
447 asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
448
449 if (tlb_flag(TLB_BARRIER))
450 isb();
451}
452
433/* 453/*
434 * flush_pmd_entry 454 * flush_pmd_entry
435 * 455 *
@@ -480,6 +500,7 @@ static inline void clean_pmd_entry(void *pmd)
480#define flush_tlb_kernel_page local_flush_tlb_kernel_page 500#define flush_tlb_kernel_page local_flush_tlb_kernel_page
481#define flush_tlb_range local_flush_tlb_range 501#define flush_tlb_range local_flush_tlb_range
482#define flush_tlb_kernel_range local_flush_tlb_kernel_range 502#define flush_tlb_kernel_range local_flush_tlb_kernel_range
503#define flush_bp_all local_flush_bp_all
483#else 504#else
484extern void flush_tlb_all(void); 505extern void flush_tlb_all(void);
485extern void flush_tlb_mm(struct mm_struct *mm); 506extern void flush_tlb_mm(struct mm_struct *mm);
@@ -487,6 +508,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
487extern void flush_tlb_kernel_page(unsigned long kaddr); 508extern void flush_tlb_kernel_page(unsigned long kaddr);
488extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); 509extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
489extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 510extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
511extern void flush_bp_all(void);
490#endif 512#endif
491 513
492/* 514/*
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 4da7cde70b5d..af33b44990ed 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -404,7 +404,7 @@
404#define __NR_setns (__NR_SYSCALL_BASE+375) 404#define __NR_setns (__NR_SYSCALL_BASE+375)
405#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376) 405#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
406#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) 406#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
407 /* 378 for kcmp */ 407#define __NR_kcmp (__NR_SYSCALL_BASE+378)
408#define __NR_finit_module (__NR_SYSCALL_BASE+379) 408#define __NR_finit_module (__NR_SYSCALL_BASE+379)
409 409
410/* 410/*
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 5ce738b43508..923eec7105cf 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -110,7 +110,7 @@ int main(void)
110 BLANK(); 110 BLANK();
111#endif 111#endif
112#ifdef CONFIG_CPU_HAS_ASID 112#ifdef CONFIG_CPU_HAS_ASID
113 DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id)); 113 DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
114 BLANK(); 114 BLANK();
115#endif 115#endif
116 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); 116 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 0cc57611fc4f..c6ca7e376773 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -387,7 +387,7 @@
387/* 375 */ CALL(sys_setns) 387/* 375 */ CALL(sys_setns)
388 CALL(sys_process_vm_readv) 388 CALL(sys_process_vm_readv)
389 CALL(sys_process_vm_writev) 389 CALL(sys_process_vm_writev)
390 CALL(sys_ni_syscall) /* reserved for sys_kcmp */ 390 CALL(sys_kcmp)
391 CALL(sys_finit_module) 391 CALL(sys_finit_module)
392#ifndef syscalls_counted 392#ifndef syscalls_counted
393.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 393.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 486a15ae9011..e0eb9a1cae77 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -184,13 +184,22 @@ __create_page_tables:
184 orr r3, r3, #3 @ PGD block type 184 orr r3, r3, #3 @ PGD block type
185 mov r6, #4 @ PTRS_PER_PGD 185 mov r6, #4 @ PTRS_PER_PGD
186 mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER 186 mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER
1871: str r3, [r0], #4 @ set bottom PGD entry bits 1871:
188#ifdef CONFIG_CPU_ENDIAN_BE8
188 str r7, [r0], #4 @ set top PGD entry bits 189 str r7, [r0], #4 @ set top PGD entry bits
190 str r3, [r0], #4 @ set bottom PGD entry bits
191#else
192 str r3, [r0], #4 @ set bottom PGD entry bits
193 str r7, [r0], #4 @ set top PGD entry bits
194#endif
189 add r3, r3, #0x1000 @ next PMD table 195 add r3, r3, #0x1000 @ next PMD table
190 subs r6, r6, #1 196 subs r6, r6, #1
191 bne 1b 197 bne 1b
192 198
193 add r4, r4, #0x1000 @ point to the PMD tables 199 add r4, r4, #0x1000 @ point to the PMD tables
200#ifdef CONFIG_CPU_ENDIAN_BE8
201 add r4, r4, #4 @ we only write the bottom word
202#endif
194#endif 203#endif
195 204
196 ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags 205 ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
@@ -258,6 +267,11 @@ __create_page_tables:
258 addne r6, r6, #1 << SECTION_SHIFT 267 addne r6, r6, #1 << SECTION_SHIFT
259 strne r6, [r3] 268 strne r6, [r3]
260 269
270#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
271 sub r4, r4, #4 @ Fixup page table pointer
272 @ for 64-bit descriptors
273#endif
274
261#ifdef CONFIG_DEBUG_LL 275#ifdef CONFIG_DEBUG_LL
262#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING) 276#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
263 /* 277 /*
@@ -276,13 +290,17 @@ __create_page_tables:
276 orr r3, r7, r3, lsl #SECTION_SHIFT 290 orr r3, r7, r3, lsl #SECTION_SHIFT
277#ifdef CONFIG_ARM_LPAE 291#ifdef CONFIG_ARM_LPAE
278 mov r7, #1 << (54 - 32) @ XN 292 mov r7, #1 << (54 - 32) @ XN
293#ifdef CONFIG_CPU_ENDIAN_BE8
294 str r7, [r0], #4
295 str r3, [r0], #4
279#else 296#else
280 orr r3, r3, #PMD_SECT_XN
281#endif
282 str r3, [r0], #4 297 str r3, [r0], #4
283#ifdef CONFIG_ARM_LPAE
284 str r7, [r0], #4 298 str r7, [r0], #4
285#endif 299#endif
300#else
301 orr r3, r3, #PMD_SECT_XN
302 str r3, [r0], #4
303#endif
286 304
287#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */ 305#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
288 /* we don't need any serial debugging mappings */ 306 /* we don't need any serial debugging mappings */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5eae53e7a2e1..96093b75ab90 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1023,7 +1023,7 @@ out_mdbgen:
1023static int __cpuinit dbg_reset_notify(struct notifier_block *self, 1023static int __cpuinit dbg_reset_notify(struct notifier_block *self,
1024 unsigned long action, void *cpu) 1024 unsigned long action, void *cpu)
1025{ 1025{
1026 if (action == CPU_ONLINE) 1026 if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
1027 smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1); 1027 smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
1028 1028
1029 return NOTIFY_OK; 1029 return NOTIFY_OK;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 31e0eb353cd8..146157dfe27c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -400,7 +400,7 @@ __hw_perf_event_init(struct perf_event *event)
400 } 400 }
401 401
402 if (event->group_leader != event) { 402 if (event->group_leader != event) {
403 if (validate_group(event) != 0); 403 if (validate_group(event) != 0)
404 return -EINVAL; 404 return -EINVAL;
405 } 405 }
406 406
@@ -484,7 +484,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = {
484 SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL) 484 SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
485}; 485};
486 486
487static void __init armpmu_init(struct arm_pmu *armpmu) 487static void armpmu_init(struct arm_pmu *armpmu)
488{ 488{
489 atomic_set(&armpmu->active_events, 0); 489 atomic_set(&armpmu->active_events, 0);
490 mutex_init(&armpmu->reserve_mutex); 490 mutex_init(&armpmu->reserve_mutex);
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8c79a9e70b83..039cffb053a7 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
774/* 774/*
775 * PMXEVTYPER: Event selection reg 775 * PMXEVTYPER: Event selection reg
776 */ 776 */
777#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */ 777#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
778#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ 778#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
779 779
780/* 780/*
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1bdfd87c8e41..31644f1978d5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
285 * switch away from it before attempting any exclusive accesses. 285 * switch away from it before attempting any exclusive accesses.
286 */ 286 */
287 cpu_switch_mm(mm->pgd, mm); 287 cpu_switch_mm(mm->pgd, mm);
288 local_flush_bp_all();
288 enter_lazy_tlb(mm, current); 289 enter_lazy_tlb(mm, current);
289 local_flush_tlb_all(); 290 local_flush_tlb_all();
290 291
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 02c5d2ce23bf..bd0300531399 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -64,6 +64,11 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
64 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); 64 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
65} 65}
66 66
67static inline void ipi_flush_bp_all(void *ignored)
68{
69 local_flush_bp_all();
70}
71
67void flush_tlb_all(void) 72void flush_tlb_all(void)
68{ 73{
69 if (tlb_ops_need_broadcast()) 74 if (tlb_ops_need_broadcast())
@@ -127,3 +132,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
127 local_flush_tlb_kernel_range(start, end); 132 local_flush_tlb_kernel_range(start, end);
128} 133}
129 134
135void flush_bp_all(void)
136{
137 if (tlb_ops_need_broadcast())
138 on_each_cpu(ipi_flush_bp_all, NULL, 1);
139 else
140 local_flush_bp_all();
141}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index c092115d903a..3f2565037480 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -22,6 +22,7 @@
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23#include <linux/of_address.h> 23#include <linux/of_address.h>
24 24
25#include <asm/smp_plat.h>
25#include <asm/smp_twd.h> 26#include <asm/smp_twd.h>
26#include <asm/localtimer.h> 27#include <asm/localtimer.h>
27 28
@@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void)
373 struct device_node *np; 374 struct device_node *np;
374 int err; 375 int err;
375 376
377 if (!is_smp() || !setup_max_cpus)
378 return;
379
376 np = of_find_matching_node(NULL, twd_of_match); 380 np = of_find_matching_node(NULL, twd_of_match);
377 if (!np) 381 if (!np)
378 return; 382 return;
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 358bca3a995e..c59c97ea8268 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
68 ret = __cpu_suspend(arg, fn); 68 ret = __cpu_suspend(arg, fn);
69 if (ret == 0) { 69 if (ret == 0) {
70 cpu_switch_mm(mm->pgd, mm); 70 cpu_switch_mm(mm->pgd, mm);
71 local_flush_bp_all();
71 local_flush_tlb_all(); 72 local_flush_tlb_all();
72 } 73 }
73 74
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 650d5923ab83..d912e7397ecc 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -19,9 +19,9 @@
191: subs r2, r2, #4 @ 1 do we have enough 191: subs r2, r2, #4 @ 1 do we have enough
20 blt 5f @ 1 bytes to align with? 20 blt 5f @ 1 bytes to align with?
21 cmp r3, #2 @ 1 21 cmp r3, #2 @ 1
22 strltb r1, [r0], #1 @ 1 22 strltb r1, [ip], #1 @ 1
23 strleb r1, [r0], #1 @ 1 23 strleb r1, [ip], #1 @ 1
24 strb r1, [r0], #1 @ 1 24 strb r1, [ip], #1 @ 1
25 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) 25 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
26/* 26/*
27 * The pointer is now aligned and the length is adjusted. Try doing the 27 * The pointer is now aligned and the length is adjusted. Try doing the
@@ -29,10 +29,14 @@
29 */ 29 */
30 30
31ENTRY(memset) 31ENTRY(memset)
32 ands r3, r0, #3 @ 1 unaligned? 32/*
33 * Preserve the contents of r0 for the return value.
34 */
35 mov ip, r0
36 ands r3, ip, #3 @ 1 unaligned?
33 bne 1b @ 1 37 bne 1b @ 1
34/* 38/*
35 * we know that the pointer in r0 is aligned to a word boundary. 39 * we know that the pointer in ip is aligned to a word boundary.
36 */ 40 */
37 orr r1, r1, r1, lsl #8 41 orr r1, r1, r1, lsl #8
38 orr r1, r1, r1, lsl #16 42 orr r1, r1, r1, lsl #16
@@ -43,29 +47,28 @@ ENTRY(memset)
43#if ! CALGN(1)+0 47#if ! CALGN(1)+0
44 48
45/* 49/*
46 * We need an extra register for this loop - save the return address and 50 * We need 2 extra registers for this loop - use r8 and the LR
47 * use the LR
48 */ 51 */
49 str lr, [sp, #-4]! 52 stmfd sp!, {r8, lr}
50 mov ip, r1 53 mov r8, r1
51 mov lr, r1 54 mov lr, r1
52 55
532: subs r2, r2, #64 562: subs r2, r2, #64
54 stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time. 57 stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
55 stmgeia r0!, {r1, r3, ip, lr} 58 stmgeia ip!, {r1, r3, r8, lr}
56 stmgeia r0!, {r1, r3, ip, lr} 59 stmgeia ip!, {r1, r3, r8, lr}
57 stmgeia r0!, {r1, r3, ip, lr} 60 stmgeia ip!, {r1, r3, r8, lr}
58 bgt 2b 61 bgt 2b
59 ldmeqfd sp!, {pc} @ Now <64 bytes to go. 62 ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
60/* 63/*
61 * No need to correct the count; we're only testing bits from now on 64 * No need to correct the count; we're only testing bits from now on
62 */ 65 */
63 tst r2, #32 66 tst r2, #32
64 stmneia r0!, {r1, r3, ip, lr} 67 stmneia ip!, {r1, r3, r8, lr}
65 stmneia r0!, {r1, r3, ip, lr} 68 stmneia ip!, {r1, r3, r8, lr}
66 tst r2, #16 69 tst r2, #16
67 stmneia r0!, {r1, r3, ip, lr} 70 stmneia ip!, {r1, r3, r8, lr}
68 ldr lr, [sp], #4 71 ldmfd sp!, {r8, lr}
69 72
70#else 73#else
71 74
@@ -74,54 +77,54 @@ ENTRY(memset)
74 * whole cache lines at once. 77 * whole cache lines at once.
75 */ 78 */
76 79
77 stmfd sp!, {r4-r7, lr} 80 stmfd sp!, {r4-r8, lr}
78 mov r4, r1 81 mov r4, r1
79 mov r5, r1 82 mov r5, r1
80 mov r6, r1 83 mov r6, r1
81 mov r7, r1 84 mov r7, r1
82 mov ip, r1 85 mov r8, r1
83 mov lr, r1 86 mov lr, r1
84 87
85 cmp r2, #96 88 cmp r2, #96
86 tstgt r0, #31 89 tstgt ip, #31
87 ble 3f 90 ble 3f
88 91
89 and ip, r0, #31 92 and r8, ip, #31
90 rsb ip, ip, #32 93 rsb r8, r8, #32
91 sub r2, r2, ip 94 sub r2, r2, r8
92 movs ip, ip, lsl #(32 - 4) 95 movs r8, r8, lsl #(32 - 4)
93 stmcsia r0!, {r4, r5, r6, r7} 96 stmcsia ip!, {r4, r5, r6, r7}
94 stmmiia r0!, {r4, r5} 97 stmmiia ip!, {r4, r5}
95 tst ip, #(1 << 30) 98 tst r8, #(1 << 30)
96 mov ip, r1 99 mov r8, r1
97 strne r1, [r0], #4 100 strne r1, [ip], #4
98 101
993: subs r2, r2, #64 1023: subs r2, r2, #64
100 stmgeia r0!, {r1, r3-r7, ip, lr} 103 stmgeia ip!, {r1, r3-r8, lr}
101 stmgeia r0!, {r1, r3-r7, ip, lr} 104 stmgeia ip!, {r1, r3-r8, lr}
102 bgt 3b 105 bgt 3b
103 ldmeqfd sp!, {r4-r7, pc} 106 ldmeqfd sp!, {r4-r8, pc}
104 107
105 tst r2, #32 108 tst r2, #32
106 stmneia r0!, {r1, r3-r7, ip, lr} 109 stmneia ip!, {r1, r3-r8, lr}
107 tst r2, #16 110 tst r2, #16
108 stmneia r0!, {r4-r7} 111 stmneia ip!, {r4-r7}
109 ldmfd sp!, {r4-r7, lr} 112 ldmfd sp!, {r4-r8, lr}
110 113
111#endif 114#endif
112 115
1134: tst r2, #8 1164: tst r2, #8
114 stmneia r0!, {r1, r3} 117 stmneia ip!, {r1, r3}
115 tst r2, #4 118 tst r2, #4
116 strne r1, [r0], #4 119 strne r1, [ip], #4
117/* 120/*
118 * When we get here, we've got less than 4 bytes to zero. We 121 * When we get here, we've got less than 4 bytes to zero. We
119 * may have an unaligned pointer as well. 122 * may have an unaligned pointer as well.
120 */ 123 */
1215: tst r2, #2 1245: tst r2, #2
122 strneb r1, [r0], #1 125 strneb r1, [ip], #1
123 strneb r1, [r0], #1 126 strneb r1, [ip], #1
124 tst r2, #1 127 tst r2, #1
125 strneb r1, [r0], #1 128 strneb r1, [ip], #1
126 mov pc, lr 129 mov pc, lr
127ENDPROC(memset) 130ENDPROC(memset)
diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h
index eed465ab0dd7..5fc23771c154 100644
--- a/arch/arm/mach-at91/include/mach/gpio.h
+++ b/arch/arm/mach-at91/include/mach/gpio.h
@@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin);
209extern void at91_gpio_suspend(void); 209extern void at91_gpio_suspend(void);
210extern void at91_gpio_resume(void); 210extern void at91_gpio_resume(void);
211 211
212#ifdef CONFIG_PINCTRL_AT91
213extern void at91_pinctrl_gpio_suspend(void);
214extern void at91_pinctrl_gpio_resume(void);
215#else
216static inline void at91_pinctrl_gpio_suspend(void) {}
217static inline void at91_pinctrl_gpio_resume(void) {}
218#endif
219
212#endif /* __ASSEMBLY__ */ 220#endif /* __ASSEMBLY__ */
213 221
214#endif 222#endif
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index 8e210262aeee..e0ca59171022 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value)
92 92
93void at91_irq_suspend(void) 93void at91_irq_suspend(void)
94{ 94{
95 int i = 0, bit; 95 int bit = -1;
96 96
97 if (has_aic5()) { 97 if (has_aic5()) {
98 /* disable enabled irqs */ 98 /* disable enabled irqs */
99 while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { 99 while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
100 at91_aic_write(AT91_AIC5_SSR, 100 at91_aic_write(AT91_AIC5_SSR,
101 bit & AT91_AIC5_INTSEL_MSK); 101 bit & AT91_AIC5_INTSEL_MSK);
102 at91_aic_write(AT91_AIC5_IDCR, 1); 102 at91_aic_write(AT91_AIC5_IDCR, 1);
103 i = bit;
104 } 103 }
105 /* enable wakeup irqs */ 104 /* enable wakeup irqs */
106 i = 0; 105 bit = -1;
107 while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { 106 while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
108 at91_aic_write(AT91_AIC5_SSR, 107 at91_aic_write(AT91_AIC5_SSR,
109 bit & AT91_AIC5_INTSEL_MSK); 108 bit & AT91_AIC5_INTSEL_MSK);
110 at91_aic_write(AT91_AIC5_IECR, 1); 109 at91_aic_write(AT91_AIC5_IECR, 1);
111 i = bit;
112 } 110 }
113 } else { 111 } else {
114 at91_aic_write(AT91_AIC_IDCR, *backups); 112 at91_aic_write(AT91_AIC_IDCR, *backups);
@@ -118,23 +116,21 @@ void at91_irq_suspend(void)
118 116
119void at91_irq_resume(void) 117void at91_irq_resume(void)
120{ 118{
121 int i = 0, bit; 119 int bit = -1;
122 120
123 if (has_aic5()) { 121 if (has_aic5()) {
124 /* disable wakeup irqs */ 122 /* disable wakeup irqs */
125 while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { 123 while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
126 at91_aic_write(AT91_AIC5_SSR, 124 at91_aic_write(AT91_AIC5_SSR,
127 bit & AT91_AIC5_INTSEL_MSK); 125 bit & AT91_AIC5_INTSEL_MSK);
128 at91_aic_write(AT91_AIC5_IDCR, 1); 126 at91_aic_write(AT91_AIC5_IDCR, 1);
129 i = bit;
130 } 127 }
131 /* enable irqs disabled for suspend */ 128 /* enable irqs disabled for suspend */
132 i = 0; 129 bit = -1;
133 while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { 130 while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
134 at91_aic_write(AT91_AIC5_SSR, 131 at91_aic_write(AT91_AIC5_SSR,
135 bit & AT91_AIC5_INTSEL_MSK); 132 bit & AT91_AIC5_INTSEL_MSK);
136 at91_aic_write(AT91_AIC5_IECR, 1); 133 at91_aic_write(AT91_AIC5_IECR, 1);
137 i = bit;
138 } 134 }
139 } else { 135 } else {
140 at91_aic_write(AT91_AIC_IDCR, *wakeups); 136 at91_aic_write(AT91_AIC_IDCR, *wakeups);
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index adb6db888a1f..73f1f250403a 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz;
201 201
202static int at91_pm_enter(suspend_state_t state) 202static int at91_pm_enter(suspend_state_t state)
203{ 203{
204 at91_gpio_suspend(); 204 if (of_have_populated_dt())
205 at91_pinctrl_gpio_suspend();
206 else
207 at91_gpio_suspend();
205 at91_irq_suspend(); 208 at91_irq_suspend();
206 209
207 pr_debug("AT91: PM - wake mask %08x, pm state %d\n", 210 pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
@@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state)
286error: 289error:
287 target_state = PM_SUSPEND_ON; 290 target_state = PM_SUSPEND_ON;
288 at91_irq_resume(); 291 at91_irq_resume();
289 at91_gpio_resume(); 292 if (of_have_populated_dt())
293 at91_pinctrl_gpio_resume();
294 else
295 at91_gpio_resume();
290 return 0; 296 return 0;
291} 297}
292 298
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 27c2cb7ab813..1504b68f4c66 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -168,7 +168,7 @@ void __init netx_init_irq(void)
168{ 168{
169 int irq; 169 int irq;
170 170
171 vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0); 171 vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);
172 172
173 for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) { 173 for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
174 irq_set_chip_and_handler(irq, &netx_hif_chip, 174 irq_set_chip_and_handler(irq, &netx_hif_chip,
diff --git a/arch/arm/mach-netx/include/mach/irqs.h b/arch/arm/mach-netx/include/mach/irqs.h
index 6ce914d54a30..8f74a844a775 100644
--- a/arch/arm/mach-netx/include/mach/irqs.h
+++ b/arch/arm/mach-netx/include/mach/irqs.h
@@ -17,42 +17,42 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20#define NETX_IRQ_VIC_START 0 20#define NETX_IRQ_VIC_START 64
21#define NETX_IRQ_SOFTINT 0 21#define NETX_IRQ_SOFTINT (NETX_IRQ_VIC_START + 0)
22#define NETX_IRQ_TIMER0 1 22#define NETX_IRQ_TIMER0 (NETX_IRQ_VIC_START + 1)
23#define NETX_IRQ_TIMER1 2 23#define NETX_IRQ_TIMER1 (NETX_IRQ_VIC_START + 2)
24#define NETX_IRQ_TIMER2 3 24#define NETX_IRQ_TIMER2 (NETX_IRQ_VIC_START + 3)
25#define NETX_IRQ_SYSTIME_NS 4 25#define NETX_IRQ_SYSTIME_NS (NETX_IRQ_VIC_START + 4)
26#define NETX_IRQ_SYSTIME_S 5 26#define NETX_IRQ_SYSTIME_S (NETX_IRQ_VIC_START + 5)
27#define NETX_IRQ_GPIO_15 6 27#define NETX_IRQ_GPIO_15 (NETX_IRQ_VIC_START + 6)
28#define NETX_IRQ_WATCHDOG 7 28#define NETX_IRQ_WATCHDOG (NETX_IRQ_VIC_START + 7)
29#define NETX_IRQ_UART0 8 29#define NETX_IRQ_UART0 (NETX_IRQ_VIC_START + 8)
30#define NETX_IRQ_UART1 9 30#define NETX_IRQ_UART1 (NETX_IRQ_VIC_START + 9)
31#define NETX_IRQ_UART2 10 31#define NETX_IRQ_UART2 (NETX_IRQ_VIC_START + 10)
32#define NETX_IRQ_USB 11 32#define NETX_IRQ_USB (NETX_IRQ_VIC_START + 11)
33#define NETX_IRQ_SPI 12 33#define NETX_IRQ_SPI (NETX_IRQ_VIC_START + 12)
34#define NETX_IRQ_I2C 13 34#define NETX_IRQ_I2C (NETX_IRQ_VIC_START + 13)
35#define NETX_IRQ_LCD 14 35#define NETX_IRQ_LCD (NETX_IRQ_VIC_START + 14)
36#define NETX_IRQ_HIF 15 36#define NETX_IRQ_HIF (NETX_IRQ_VIC_START + 15)
37#define NETX_IRQ_GPIO_0_14 16 37#define NETX_IRQ_GPIO_0_14 (NETX_IRQ_VIC_START + 16)
38#define NETX_IRQ_XPEC0 17 38#define NETX_IRQ_XPEC0 (NETX_IRQ_VIC_START + 17)
39#define NETX_IRQ_XPEC1 18 39#define NETX_IRQ_XPEC1 (NETX_IRQ_VIC_START + 18)
40#define NETX_IRQ_XPEC2 19 40#define NETX_IRQ_XPEC2 (NETX_IRQ_VIC_START + 19)
41#define NETX_IRQ_XPEC3 20 41#define NETX_IRQ_XPEC3 (NETX_IRQ_VIC_START + 20)
42#define NETX_IRQ_XPEC(no) (17 + (no)) 42#define NETX_IRQ_XPEC(no) (NETX_IRQ_VIC_START + 17 + (no))
43#define NETX_IRQ_MSYNC0 21 43#define NETX_IRQ_MSYNC0 (NETX_IRQ_VIC_START + 21)
44#define NETX_IRQ_MSYNC1 22 44#define NETX_IRQ_MSYNC1 (NETX_IRQ_VIC_START + 22)
45#define NETX_IRQ_MSYNC2 23 45#define NETX_IRQ_MSYNC2 (NETX_IRQ_VIC_START + 23)
46#define NETX_IRQ_MSYNC3 24 46#define NETX_IRQ_MSYNC3 (NETX_IRQ_VIC_START + 24)
47#define NETX_IRQ_IRQ_PHY 25 47#define NETX_IRQ_IRQ_PHY (NETX_IRQ_VIC_START + 25)
48#define NETX_IRQ_ISO_AREA 26 48#define NETX_IRQ_ISO_AREA (NETX_IRQ_VIC_START + 26)
49/* int 27 is reserved */ 49/* int 27 is reserved */
50/* int 28 is reserved */ 50/* int 28 is reserved */
51#define NETX_IRQ_TIMER3 29 51#define NETX_IRQ_TIMER3 (NETX_IRQ_VIC_START + 29)
52#define NETX_IRQ_TIMER4 30 52#define NETX_IRQ_TIMER4 (NETX_IRQ_VIC_START + 30)
53/* int 31 is reserved */ 53/* int 31 is reserved */
54 54
55#define NETX_IRQS 32 55#define NETX_IRQS (NETX_IRQ_VIC_START + 32)
56 56
57/* for multiplexed irqs on gpio 0..14 */ 57/* for multiplexed irqs on gpio 0..14 */
58#define NETX_IRQ_GPIO(x) (NETX_IRQS + (x)) 58#define NETX_IRQ_GPIO(x) (NETX_IRQS + (x))
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a0511191f6b..a5a4b2bc42ba 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
152 return 0; 152 return 0;
153} 153}
154 154
155static void new_context(struct mm_struct *mm, unsigned int cpu) 155static u64 new_context(struct mm_struct *mm, unsigned int cpu)
156{ 156{
157 u64 asid = mm->context.id; 157 u64 asid = atomic64_read(&mm->context.id);
158 u64 generation = atomic64_read(&asid_generation); 158 u64 generation = atomic64_read(&asid_generation);
159 159
160 if (asid != 0 && is_reserved_asid(asid)) { 160 if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
181 cpumask_clear(mm_cpumask(mm)); 181 cpumask_clear(mm_cpumask(mm));
182 } 182 }
183 183
184 mm->context.id = asid; 184 return asid;
185} 185}
186 186
187void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) 187void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
188{ 188{
189 unsigned long flags; 189 unsigned long flags;
190 unsigned int cpu = smp_processor_id(); 190 unsigned int cpu = smp_processor_id();
191 u64 asid;
191 192
192 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) 193 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
193 __check_vmalloc_seq(mm); 194 __check_vmalloc_seq(mm);
@@ -198,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
198 */ 199 */
199 cpu_set_reserved_ttbr0(); 200 cpu_set_reserved_ttbr0();
200 201
201 if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) 202 asid = atomic64_read(&mm->context.id);
202 && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id)) 203 if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
204 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
203 goto switch_mm_fastpath; 205 goto switch_mm_fastpath;
204 206
205 raw_spin_lock_irqsave(&cpu_asid_lock, flags); 207 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
206 /* Check that our ASID belongs to the current generation. */ 208 /* Check that our ASID belongs to the current generation. */
207 if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) 209 asid = atomic64_read(&mm->context.id);
208 new_context(mm, cpu); 210 if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
209 211 asid = new_context(mm, cpu);
210 atomic64_set(&per_cpu(active_asids, cpu), mm->context.id); 212 atomic64_set(&mm->context.id, asid);
211 cpumask_set_cpu(cpu, mm_cpumask(mm)); 213 }
212 214
213 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) 215 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
216 local_flush_bp_all();
214 local_flush_tlb_all(); 217 local_flush_tlb_all();
218 }
219
220 atomic64_set(&per_cpu(active_asids, cpu), asid);
221 cpumask_set_cpu(cpu, mm_cpumask(mm));
215 raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); 222 raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
216 223
217switch_mm_fastpath: 224switch_mm_fastpath:
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2dffc010cc41..5ee505c937d1 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
141{ 141{
142 /* Switch to the identity mapping. */ 142 /* Switch to the identity mapping. */
143 cpu_switch_mm(idmap_pgd, &init_mm); 143 cpu_switch_mm(idmap_pgd, &init_mm);
144 local_flush_bp_all();
144 145
145#ifdef CONFIG_CPU_HAS_ASID 146#ifdef CONFIG_CPU_HAS_ASID
146 /* 147 /*
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 50bf1dafc9ea..6ffd78c0f9ab 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -48,7 +48,7 @@
48ENTRY(cpu_v7_switch_mm) 48ENTRY(cpu_v7_switch_mm)
49#ifdef CONFIG_MMU 49#ifdef CONFIG_MMU
50 mmid r1, r1 @ get mm->context.id 50 mmid r1, r1 @ get mm->context.id
51 and r3, r1, #0xff 51 asid r3, r1
52 mov r3, r3, lsl #(48 - 32) @ ASID 52 mov r3, r3, lsl #(48 - 32) @ ASID
53 mcrr p15, 0, r0, r3, c2 @ set TTB 0 53 mcrr p15, 0, r0, r3, c2 @ set TTB 0
54 isb 54 isb
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 433f5e8a2cd1..2eda28414abb 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -619,6 +619,7 @@ static struct file_system_type pfm_fs_type = {
619 .mount = pfmfs_mount, 619 .mount = pfmfs_mount,
620 .kill_sb = kill_anon_super, 620 .kill_sb = kill_anon_super,
621}; 621};
622MODULE_ALIAS_FS("pfmfs");
622 623
623DEFINE_PER_CPU(unsigned long, pfm_syst_info); 624DEFINE_PER_CPU(unsigned long, pfm_syst_info);
624DEFINE_PER_CPU(struct task_struct *, pmu_owner); 625DEFINE_PER_CPU(struct task_struct *, pmu_owner);
diff --git a/arch/metag/include/asm/elf.h b/arch/metag/include/asm/elf.h
index d63b9d0e57dd..d2baf6961794 100644
--- a/arch/metag/include/asm/elf.h
+++ b/arch/metag/include/asm/elf.h
@@ -100,9 +100,6 @@ typedef unsigned long elf_fpregset_t;
100 100
101#define ELF_PLATFORM (NULL) 101#define ELF_PLATFORM (NULL)
102 102
103#define SET_PERSONALITY(ex) \
104 set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
105
106#define STACK_RND_MASK (0) 103#define STACK_RND_MASK (0)
107 104
108#ifdef CONFIG_METAG_USER_TCM 105#ifdef CONFIG_METAG_USER_TCM
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig
index cd7f2f2ad416..975f2f4e3ecf 100644
--- a/arch/metag/mm/Kconfig
+++ b/arch/metag/mm/Kconfig
@@ -40,6 +40,7 @@ endchoice
40 40
41config NUMA 41config NUMA
42 bool "Non Uniform Memory Access (NUMA) Support" 42 bool "Non Uniform Memory Access (NUMA) Support"
43 select ARCH_WANT_NUMA_VARIABLE_LOCALITY
43 help 44 help
44 Some Meta systems have MMU-mappable on-chip memories with 45 Some Meta systems have MMU-mappable on-chip memories with
45 lower latencies than main memory. This enables support for 46 lower latencies than main memory. This enables support for
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index a5f8264d2d3c..125e16520061 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -113,7 +113,7 @@
113 STEPUP4((t)+16, fn) 113 STEPUP4((t)+16, fn)
114 114
115_GLOBAL(powerpc_sha_transform) 115_GLOBAL(powerpc_sha_transform)
116 PPC_STLU r1,-STACKFRAMESIZE(r1) 116 PPC_STLU r1,-INT_FRAME_SIZE(r1)
117 SAVE_8GPRS(14, r1) 117 SAVE_8GPRS(14, r1)
118 SAVE_10GPRS(22, r1) 118 SAVE_10GPRS(22, r1)
119 119
@@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)
175 175
176 REST_8GPRS(14, r1) 176 REST_8GPRS(14, r1)
177 REST_10GPRS(22, r1) 177 REST_10GPRS(22, r1)
178 addi r1,r1,STACKFRAMESIZE 178 addi r1,r1,INT_FRAME_SIZE
179 blr 179 blr
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index ef918a2328bb..08bd299c75b1 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -52,8 +52,6 @@
52#define smp_mb__before_clear_bit() smp_mb() 52#define smp_mb__before_clear_bit() smp_mb()
53#define smp_mb__after_clear_bit() smp_mb() 53#define smp_mb__after_clear_bit() smp_mb()
54 54
55#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
56
57/* Macro for generating the ***_bits() functions */ 55/* Macro for generating the ***_bits() functions */
58#define DEFINE_BITOP(fn, op, prefix, postfix) \ 56#define DEFINE_BITOP(fn, op, prefix, postfix) \
59static __inline__ void fn(unsigned long mask, \ 57static __inline__ void fn(unsigned long mask, \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e66586122030..c9c67fc888c9 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -266,7 +266,8 @@
266#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ 266#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
267#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ 267#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
268#define SPRN_FSCR 0x099 /* Facility Status & Control Register */ 268#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
269#define FSCR_TAR (1<<8) /* Enable Target Adress Register */ 269#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
270#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
270#define SPRN_TAR 0x32f /* Target Address Register */ 271#define SPRN_TAR 0x32f /* Target Address Register */
271#define SPRN_LPCR 0x13E /* LPAR Control Register */ 272#define SPRN_LPCR 0x13E /* LPAR Control Register */
272#define LPCR_VPM0 (1ul << (63-0)) 273#define LPCR_VPM0 (1ul << (63-0))
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 535b6d8a41cc..ebbec52d21bd 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
358COMPAT_SYS(process_vm_readv) 358COMPAT_SYS(process_vm_readv)
359COMPAT_SYS(process_vm_writev) 359COMPAT_SYS(process_vm_writev)
360SYSCALL(finit_module) 360SYSCALL(finit_module)
361SYSCALL(ni_syscall) /* sys_kcmp */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f25b5c45c435..1487f0f12293 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 354 15#define __NR_syscalls 355
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 8c478c6c6b1e..74cb4d72d673 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -376,6 +376,7 @@
376#define __NR_process_vm_readv 351 376#define __NR_process_vm_readv 351
377#define __NR_process_vm_writev 352 377#define __NR_process_vm_writev 352
378#define __NR_finit_module 353 378#define __NR_finit_module 353
379#define __NR_kcmp 354
379 380
380 381
381#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 382#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index d29facbf9a28..ea847abb0d0a 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)
48 48
49_GLOBAL(__setup_cpu_power8) 49_GLOBAL(__setup_cpu_power8)
50 mflr r11 50 mflr r11
51 bl __init_FSCR
51 bl __init_hvmode_206 52 bl __init_hvmode_206
52 mtlr r11 53 mtlr r11
53 beqlr 54 beqlr
@@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
56 mfspr r3,SPRN_LPCR 57 mfspr r3,SPRN_LPCR
57 oris r3, r3, LPCR_AIL_3@h 58 oris r3, r3, LPCR_AIL_3@h
58 bl __init_LPCR 59 bl __init_LPCR
59 bl __init_FSCR
60 bl __init_TLB 60 bl __init_TLB
61 mtlr r11 61 mtlr r11
62 blr 62 blr
63 63
64_GLOBAL(__restore_cpu_power8) 64_GLOBAL(__restore_cpu_power8)
65 mflr r11 65 mflr r11
66 bl __init_FSCR
66 mfmsr r3 67 mfmsr r3
67 rldicl. r0,r3,4,63 68 rldicl. r0,r3,4,63
68 beqlr 69 beqlr
@@ -115,7 +116,7 @@ __init_LPCR:
115 116
116__init_FSCR: 117__init_FSCR:
117 mfspr r3,SPRN_FSCR 118 mfspr r3,SPRN_FSCR
118 ori r3,r3,FSCR_TAR 119 ori r3,r3,FSCR_TAR|FSCR_DSCR
119 mtspr SPRN_FSCR,r3 120 mtspr SPRN_FSCR,r3
120 blr 121 blr
121 122
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a8a5361fb70c..87ef8f5ee5bc 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
74 mflr r10 ; \ 74 mflr r10 ; \
75 ld r12,PACAKBASE(r13) ; \ 75 ld r12,PACAKBASE(r13) ; \
76 LOAD_HANDLER(r12, system_call_entry_direct) ; \ 76 LOAD_HANDLER(r12, system_call_entry_direct) ; \
77 mtlr r12 ; \ 77 mtctr r12 ; \
78 mfspr r12,SPRN_SRR1 ; \ 78 mfspr r12,SPRN_SRR1 ; \
79 /* Re-use of r13... No spare regs to do this */ \ 79 /* Re-use of r13... No spare regs to do this */ \
80 li r13,MSR_RI ; \ 80 li r13,MSR_RI ; \
81 mtmsrd r13,1 ; \ 81 mtmsrd r13,1 ; \
82 GET_PACA(r13) ; /* get r13 back */ \ 82 GET_PACA(r13) ; /* get r13 back */ \
83 blr ; 83 bctr ;
84#else 84#else
85 /* We can branch directly */ 85 /* We can branch directly */
86#define SYSCALL_PSERIES_2_DIRECT \ 86#define SYSCALL_PSERIES_2_DIRECT \
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 863184b182f4..3f3bb4cdbbec 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -749,6 +749,7 @@ static struct file_system_type spufs_type = {
749 .mount = spufs_mount, 749 .mount = spufs_mount,
750 .kill_sb = kill_litter_super, 750 .kill_sb = kill_litter_super,
751}; 751};
752MODULE_ALIAS_FS("spufs");
752 753
753static int __init spufs_init(void) 754static int __init spufs_init(void)
754{ 755{
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index fcf4b4cbeaf3..4557e91626c4 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -23,6 +23,7 @@
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/string.h>
26 27
27#include <asm/hvcall.h> 28#include <asm/hvcall.h>
28#include <asm/hvcserver.h> 29#include <asm/hvcserver.h>
@@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
188 = (unsigned int)last_p_partition_ID; 189 = (unsigned int)last_p_partition_ID;
189 190
190 /* copy the Null-term char too */ 191 /* copy the Null-term char too */
191 strncpy(&next_partner_info->location_code[0], 192 strlcpy(&next_partner_info->location_code[0],
192 (char *)&pi_buff[2], 193 (char *)&pi_buff[2],
193 strlen((char *)&pi_buff[2]) + 1); 194 sizeof(next_partner_info->location_code));
194 195
195 list_add_tail(&(next_partner_info->node), head); 196 list_add_tail(&(next_partner_info->node), head);
196 next_partner_info = NULL; 197 next_partner_info = NULL;
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 8538015ed4a0..5f7d7ba2874c 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -456,6 +456,7 @@ static struct file_system_type hypfs_type = {
456 .mount = hypfs_mount, 456 .mount = hypfs_mount,
457 .kill_sb = hypfs_kill_super 457 .kill_sb = hypfs_kill_super
458}; 458};
459MODULE_ALIAS_FS("s390_hypfs");
459 460
460static const struct super_operations hypfs_s_ops = { 461static const struct super_operations hypfs_s_ops = {
461 .statfs = simple_statfs, 462 .statfs = simple_statfs,
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 001d418a8957..78f1f2ded86c 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -288,6 +288,9 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags,
288long compat_sys_fallocate(int fd, int mode, 288long compat_sys_fallocate(int fd, int mode,
289 u32 offset_lo, u32 offset_hi, 289 u32 offset_lo, u32 offset_hi,
290 u32 len_lo, u32 len_hi); 290 u32 len_lo, u32 len_hi);
291long compat_sys_llseek(unsigned int fd, unsigned int offset_high,
292 unsigned int offset_low, loff_t __user * result,
293 unsigned int origin);
291 294
292/* Assembly trampoline to avoid clobbering r0. */ 295/* Assembly trampoline to avoid clobbering r0. */
293long _compat_sys_rt_sigreturn(void); 296long _compat_sys_rt_sigreturn(void);
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 7f72401b4f45..6ea4cdb3c6a0 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -32,50 +32,65 @@
32 * adapt the usual convention. 32 * adapt the usual convention.
33 */ 33 */
34 34
35long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high) 35COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy,
36 u32, low, u32, high)
36{ 37{
37 return sys_truncate(filename, ((loff_t)high << 32) | low); 38 return sys_truncate(filename, ((loff_t)high << 32) | low);
38} 39}
39 40
40long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high) 41COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy,
42 u32, low, u32, high)
41{ 43{
42 return sys_ftruncate(fd, ((loff_t)high << 32) | low); 44 return sys_ftruncate(fd, ((loff_t)high << 32) | low);
43} 45}
44 46
45long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count, 47COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf,
46 u32 dummy, u32 low, u32 high) 48 size_t, count, u32, dummy, u32, low, u32, high)
47{ 49{
48 return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low); 50 return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
49} 51}
50 52
51long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count, 53COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
52 u32 dummy, u32 low, u32 high) 54 size_t, count, u32, dummy, u32, low, u32, high)
53{ 55{
54 return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low); 56 return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
55} 57}
56 58
57long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len) 59COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, low, u32, high,
60 char __user *, buf, size_t, len)
58{ 61{
59 return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len); 62 return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
60} 63}
61 64
62long compat_sys_sync_file_range2(int fd, unsigned int flags, 65COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
63 u32 offset_lo, u32 offset_hi, 66 u32, offset_lo, u32, offset_hi,
64 u32 nbytes_lo, u32 nbytes_hi) 67 u32, nbytes_lo, u32, nbytes_hi)
65{ 68{
66 return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo, 69 return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
67 ((loff_t)nbytes_hi << 32) | nbytes_lo, 70 ((loff_t)nbytes_hi << 32) | nbytes_lo,
68 flags); 71 flags);
69} 72}
70 73
71long compat_sys_fallocate(int fd, int mode, 74COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
72 u32 offset_lo, u32 offset_hi, 75 u32, offset_lo, u32, offset_hi,
73 u32 len_lo, u32 len_hi) 76 u32, len_lo, u32, len_hi)
74{ 77{
75 return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo, 78 return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
76 ((loff_t)len_hi << 32) | len_lo); 79 ((loff_t)len_hi << 32) | len_lo);
77} 80}
78 81
82/*
83 * Avoid bug in generic sys_llseek() that specifies offset_high and
84 * offset_low as "unsigned long", thus making it possible to pass
85 * a sign-extended high 32 bits in offset_low.
86 */
87COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
88 unsigned int, offset_low, loff_t __user *, result,
89 unsigned int, origin)
90{
91 return sys_llseek(fd, offset_high, offset_low, result, origin);
92}
93
79/* Provide the compat syscall number to call mapping. */ 94/* Provide the compat syscall number to call mapping. */
80#undef __SYSCALL 95#undef __SYSCALL
81#define __SYSCALL(nr, call) [nr] = (call), 96#define __SYSCALL(nr, call) [nr] = (call),
@@ -83,6 +98,7 @@ long compat_sys_fallocate(int fd, int mode,
83/* See comments in sys.c */ 98/* See comments in sys.c */
84#define compat_sys_fadvise64_64 sys32_fadvise64_64 99#define compat_sys_fadvise64_64 sys32_fadvise64_64
85#define compat_sys_readahead sys32_readahead 100#define compat_sys_readahead sys32_readahead
101#define sys_llseek compat_sys_llseek
86 102
87/* Call the assembly trampolines where necessary. */ 103/* Call the assembly trampolines where necessary. */
88#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn 104#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 5b5e9cb774b5..653668d140f9 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -14,13 +14,29 @@
14 * analysis of kexec-tools; if other broken bootloaders initialize a 14 * analysis of kexec-tools; if other broken bootloaders initialize a
15 * different set of fields we will need to figure out how to disambiguate. 15 * different set of fields we will need to figure out how to disambiguate.
16 * 16 *
17 * Note: efi_info is commonly left uninitialized, but that field has a
18 * private magic, so it is better to leave it unchanged.
17 */ 19 */
18static void sanitize_boot_params(struct boot_params *boot_params) 20static void sanitize_boot_params(struct boot_params *boot_params)
19{ 21{
22 /*
23 * IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear
24 * this field. The purpose of this field is to guarantee
25 * compliance with the x86 boot spec located in
26 * Documentation/x86/boot.txt . That spec says that the
27 * *whole* structure should be cleared, after which only the
28 * portion defined by struct setup_header (boot_params->hdr)
29 * should be copied in.
30 *
31 * If you're having an issue because the sentinel is set, you
32 * need to change the whole structure to be cleared, not this
33 * (or any other) individual field, or you will soon have
34 * problems again.
35 */
20 if (boot_params->sentinel) { 36 if (boot_params->sentinel) {
21 /*fields in boot_params are not valid, clear them */ 37 /* fields in boot_params are left uninitialized, clear them */
22 memset(&boot_params->olpc_ofw_header, 0, 38 memset(&boot_params->olpc_ofw_header, 0,
23 (char *)&boot_params->alt_mem_k - 39 (char *)&boot_params->efi_info -
24 (char *)&boot_params->olpc_ofw_header); 40 (char *)&boot_params->olpc_ofw_header);
25 memset(&boot_params->kbd_status, 0, 41 memset(&boot_params->kbd_status, 0,
26 (char *)&boot_params->hdr - 42 (char *)&boot_params->hdr -
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 84d32855f65c..90d8cc930f5e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -171,9 +171,15 @@ static struct resource bss_resource = {
171 171
172#ifdef CONFIG_X86_32 172#ifdef CONFIG_X86_32
173/* cpu data as detected by the assembly code in head.S */ 173/* cpu data as detected by the assembly code in head.S */
174struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1}; 174struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
175 .wp_works_ok = -1,
176 .fdiv_bug = -1,
177};
175/* common cpu data for all cpus */ 178/* common cpu data for all cpus */
176struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1}; 179struct cpuinfo_x86 boot_cpu_data __read_mostly = {
180 .wp_works_ok = -1,
181 .fdiv_bug = -1,
182};
177EXPORT_SYMBOL(boot_cpu_data); 183EXPORT_SYMBOL(boot_cpu_data);
178 184
179unsigned int def_to_bigsmp; 185unsigned int def_to_bigsmp;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a6ceaedc396a..9f190a2a00e9 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1365,9 +1365,8 @@ static inline void mwait_play_dead(void)
1365 unsigned int eax, ebx, ecx, edx; 1365 unsigned int eax, ebx, ecx, edx;
1366 unsigned int highest_cstate = 0; 1366 unsigned int highest_cstate = 0;
1367 unsigned int highest_subcstate = 0; 1367 unsigned int highest_subcstate = 0;
1368 int i;
1369 void *mwait_ptr; 1368 void *mwait_ptr;
1370 struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); 1369 int i;
1371 1370
1372 if (!this_cpu_has(X86_FEATURE_MWAIT)) 1371 if (!this_cpu_has(X86_FEATURE_MWAIT))
1373 return; 1372 return;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4903a03ae876..59b7fc453277 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -410,9 +410,8 @@ void __init init_mem_mapping(void)
410 /* the ISA range is always mapped regardless of memory holes */ 410 /* the ISA range is always mapped regardless of memory holes */
411 init_memory_mapping(0, ISA_END_ADDRESS); 411 init_memory_mapping(0, ISA_END_ADDRESS);
412 412
413 /* xen has big range in reserved near end of ram, skip it at first */ 413 /* xen has big range in reserved near end of ram, skip it at first.*/
414 addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, 414 addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
415 PAGE_SIZE);
416 real_end = addr + PMD_SIZE; 415 real_end = addr + PMD_SIZE;
417 416
418 /* step_size need to be small so pgt_buf from BRK could cover it */ 417 /* step_size need to be small so pgt_buf from BRK could cover it */
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2610bd93c896..657438858e83 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -563,6 +563,13 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
563 if (base > __pa(high_memory-1)) 563 if (base > __pa(high_memory-1))
564 return 0; 564 return 0;
565 565
566 /*
567 * some areas in the middle of the kernel identity range
568 * are not mapped, like the PCI space.
569 */
570 if (!page_is_ram(base >> PAGE_SHIFT))
571 return 0;
572
566 id_sz = (__pa(high_memory-1) <= base + size) ? 573 id_sz = (__pa(high_memory-1) <= base + size) ?
567 __pa(high_memory) - base : 574 __pa(high_memory) - base :
568 size; 575 size;