Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/arc/include/asm/elf.h | 3
-rw-r--r--  arch/arc/include/asm/entry.h | 2
-rw-r--r--  arch/arc/include/asm/kgdb.h | 6
-rw-r--r--  arch/arc/include/asm/ptrace.h | 2
-rw-r--r--  arch/arc/include/asm/syscalls.h | 2
-rw-r--r--  arch/arc/include/uapi/asm/ptrace.h | 4
-rw-r--r--  arch/arc/kernel/entry.S | 27
-rw-r--r--  arch/arc/kernel/kgdb.c | 1
-rw-r--r--  arch/arc/kernel/setup.c | 4
-rw-r--r--  arch/arc/kernel/sys.c | 2
-rw-r--r--  arch/arm/Kconfig | 11
-rw-r--r--  arch/arm/Kconfig.debug | 1
-rw-r--r--  arch/arm/boot/dts/at91sam9x5.dtsi | 28
-rw-r--r--  arch/arm/boot/dts/exynos4.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/exynos5440.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/tegra20.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/tegra30.dtsi | 2
-rw-r--r--  arch/arm/kernel/smp.c | 2
-rw-r--r--  arch/arm/lib/memset.S | 33
-rw-r--r--  arch/arm/mach-at91/include/mach/gpio.h | 8
-rw-r--r--  arch/arm/mach-at91/irq.c | 20
-rw-r--r--  arch/arm/mach-at91/pm.c | 10
-rw-r--r--  arch/arm/mach-davinci/dma.c | 3
-rw-r--r--  arch/arm/mach-footbridge/Kconfig | 1
-rw-r--r--  arch/arm/mach-imx/clk-imx35.c | 1
-rw-r--r--  arch/arm/mach-imx/imx25-dt.c | 5
-rw-r--r--  arch/arm/mach-mmp/gplugd.c | 1
-rw-r--r--  arch/arm/mach-mxs/mach-mxs.c | 24
-rw-r--r--  arch/arm/mach-s5pv210/clock.c | 36
-rw-r--r--  arch/arm/mach-s5pv210/mach-goni.c | 2
-rw-r--r--  arch/arm/mach-shmobile/board-marzen.c | 1
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 2
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/Kconfig.debug | 11
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  arch/arm64/include/asm/ucontext.h | 2
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c | 2
-rw-r--r--  arch/arm64/kernel/signal32.c | 1
-rw-r--r--  arch/arm64/mm/mmu.c | 2
-rw-r--r--  arch/ia64/kernel/process.c | 5
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 128
-rw-r--r--  arch/powerpc/kernel/cputable.c | 2
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c | 6
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 178
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 14
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 4
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 22
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c | 11
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 2
-rw-r--r--  arch/powerpc/mm/slb_low.S | 50
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 2
-rw-r--r--  arch/powerpc/perf/power7-pmu.c | 13
-rw-r--r--  arch/powerpc/platforms/85xx/sgy_cts1000.c | 6
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 6
-rw-r--r--  arch/s390/include/asm/eadm.h | 6
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 2
-rw-r--r--  arch/s390/kernel/entry.S | 3
-rw-r--r--  arch/s390/kernel/entry64.S | 5
-rw-r--r--  arch/s390/kernel/setup.c | 2
-rw-r--r--  arch/sparc/Kconfig | 8
-rw-r--r--  arch/sparc/include/asm/spitfire.h | 1
-rw-r--r--  arch/sparc/kernel/cpu.c | 6
-rw-r--r--  arch/sparc/kernel/head_64.S | 25
-rw-r--r--  arch/sparc/kernel/leon_pci_grpci2.c | 41
-rw-r--r--  arch/tile/configs/tilegx_defconfig | 1
-rw-r--r--  arch/tile/configs/tilepro_defconfig | 1
-rw-r--r--  arch/x86/include/asm/kprobes.h | 1
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 4
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h | 4
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 4
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 5
-rw-r--r--  arch/x86/kernel/microcode_intel_early.c | 30
-rw-r--r--  arch/x86/kvm/x86.c | 64
-rw-r--r--  arch/x86/lib/usercopy_64.c | 4
-rw-r--r--  arch/x86/xen/mmu.c | 3
79 files changed, 521 insertions(+), 434 deletions(-)
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 31f77aec0823..45b8e0cea176 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
126 int i; 126 int i;
127 127
128 for_each_sg(sg, s, nents, i) 128 for_each_sg(sg, s, nents, i)
129 sg->dma_address = dma_map_page(dev, sg_page(s), s->offset, 129 s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
130 s->length, dir); 130 s->length, dir);
131 131
132 return nents; 132 return nents;
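
The dma-mapping.h hunk above fixes a loop-variable mix-up: the scatterlist walk iterates with s, yet the mapping was stored through the list head sg, so only the first entry ever received a DMA address. A minimal sketch of the corrected helper follows; the function name is made up here, and it assumes the standard kernel scatterlist API (for_each_sg(), sg_page(), dma_map_page()), so it is illustrative rather than compilable outside a kernel tree.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch of the fixed ARC dma_map_sg(): store each mapping through the
 * iteration cursor 's', not through the list head 'sg'. */
static int dma_map_sg_sketch(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}
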
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index f4c8d36ebecb..a26282857683 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
72 */ 72 */
73#define ELF_PLATFORM (NULL) 73#define ELF_PLATFORM (NULL)
74 74
75#define SET_PERSONALITY(ex) \
76 set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
77
78#endif 75#endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 23daa326fc9b..eb2ae53187d9 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -415,7 +415,7 @@
415 *-------------------------------------------------------------*/ 415 *-------------------------------------------------------------*/
416.macro SAVE_ALL_EXCEPTION marker 416.macro SAVE_ALL_EXCEPTION marker
417 417
418 st \marker, [sp, 8] 418 st \marker, [sp, 8] /* orig_r8 */
419 st r0, [sp, 4] /* orig_r0, needed only for sys calls */ 419 st r0, [sp, 4] /* orig_r0, needed only for sys calls */
420 420
421 /* Restore r9 used to code the early prologue */ 421 /* Restore r9 used to code the early prologue */
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
index f3c4934f0ca9..4930957ca3d3 100644
--- a/arch/arc/include/asm/kgdb.h
+++ b/arch/arc/include/asm/kgdb.h
@@ -13,7 +13,7 @@
13 13
14#ifdef CONFIG_KGDB 14#ifdef CONFIG_KGDB
15 15
16#include <asm/user.h> 16#include <asm/ptrace.h>
17 17
18/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set 18/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
19 * register API yet */ 19 * register API yet */
@@ -53,9 +53,7 @@ enum arc700_linux_regnums {
53}; 53};
54 54
55#else 55#else
56static inline void kgdb_trap(struct pt_regs *regs, int param) 56#define kgdb_trap(regs, param)
57{
58}
59#endif 57#endif
60 58
61#endif /* __ARC_KGDB_H__ */ 59#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 8ae783d20a81..6179de7e07c2 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs)
123#define orig_r8_IS_SCALL 0x0001 123#define orig_r8_IS_SCALL 0x0001
124#define orig_r8_IS_SCALL_RESTARTED 0x0002 124#define orig_r8_IS_SCALL_RESTARTED 0x0002
125#define orig_r8_IS_BRKPT 0x0004 125#define orig_r8_IS_BRKPT 0x0004
126#define orig_r8_IS_EXCPN 0x0004 126#define orig_r8_IS_EXCPN 0x0008
127#define orig_r8_IS_IRQ1 0x0010 127#define orig_r8_IS_IRQ1 0x0010
128#define orig_r8_IS_IRQ2 0x0020 128#define orig_r8_IS_IRQ2 0x0020
129 129
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e53a5340ba4f..dd785befe7fd 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -16,8 +16,6 @@
16#include <linux/types.h> 16#include <linux/types.h>
17 17
18int sys_clone_wrapper(int, int, int, int, int); 18int sys_clone_wrapper(int, int, int, int, int);
19int sys_fork_wrapper(void);
20int sys_vfork_wrapper(void);
21int sys_cacheflush(uint32_t, uint32_t uint32_t); 19int sys_cacheflush(uint32_t, uint32_t uint32_t);
22int sys_arc_settls(void *); 20int sys_arc_settls(void *);
23int sys_arc_gettls(void); 21int sys_arc_gettls(void);
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 6afa4f702075..30333cec0fef 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -28,14 +28,14 @@
28*/ 28*/
29struct user_regs_struct { 29struct user_regs_struct {
30 30
31 struct scratch { 31 struct {
32 long pad; 32 long pad;
33 long bta, lp_start, lp_end, lp_count; 33 long bta, lp_start, lp_end, lp_count;
34 long status32, ret, blink, fp, gp; 34 long status32, ret, blink, fp, gp;
35 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; 35 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
36 long sp; 36 long sp;
37 } scratch; 37 } scratch;
38 struct callee { 38 struct {
39 long pad; 39 long pad;
40 long r25, r24, r23, r22, r21, r20; 40 long r25, r24, r23, r22, r21, r20;
41 long r19, r18, r17, r16, r15, r14, r13; 41 long r19, r18, r17, r16, r15, r14, r13;
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index ef6800ba2f03..91eeab81f52d 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -452,7 +452,7 @@ tracesys:
452 ; using ERET won't work since next-PC has already committed 452 ; using ERET won't work since next-PC has already committed
453 lr r12, [efa] 453 lr r12, [efa]
454 GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 454 GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
455 st r12, [r11, THREAD_FAULT_ADDR] 455 st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
456 456
457 ; PRE Sys Call Ptrace hook 457 ; PRE Sys Call Ptrace hook
458 mov r0, sp ; pt_regs needed 458 mov r0, sp ; pt_regs needed
@@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork
792 792
793;################### Special Sys Call Wrappers ########################## 793;################### Special Sys Call Wrappers ##########################
794 794
795; TBD: call do_fork directly from here
796ARC_ENTRY sys_fork_wrapper
797 SAVE_CALLEE_SAVED_USER
798 bl @sys_fork
799 DISCARD_CALLEE_SAVED_USER
800
801 GET_CURR_THR_INFO_FLAGS r10
802 btst r10, TIF_SYSCALL_TRACE
803 bnz tracesys_exit
804
805 b ret_from_system_call
806ARC_EXIT sys_fork_wrapper
807
808ARC_ENTRY sys_vfork_wrapper
809 SAVE_CALLEE_SAVED_USER
810 bl @sys_vfork
811 DISCARD_CALLEE_SAVED_USER
812
813 GET_CURR_THR_INFO_FLAGS r10
814 btst r10, TIF_SYSCALL_TRACE
815 bnz tracesys_exit
816
817 b ret_from_system_call
818ARC_EXIT sys_vfork_wrapper
819
820ARC_ENTRY sys_clone_wrapper 795ARC_ENTRY sys_clone_wrapper
821 SAVE_CALLEE_SAVED_USER 796 SAVE_CALLEE_SAVED_USER
822 bl @sys_clone 797 bl @sys_clone
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index 2888ba5be47e..52bdc83c1495 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/kgdb.h> 11#include <linux/kgdb.h>
12#include <linux/sched.h>
12#include <asm/disasm.h> 13#include <asm/disasm.h>
13#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
14 15
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index dc0f968dae0a..2d95ac07df7b 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
232 232
233 n += scnprintf(buf + n, len - n, "\n"); 233 n += scnprintf(buf + n, len - n, "\n");
234 234
235#ifdef _ASM_GENERIC_UNISTD_H
236 n += scnprintf(buf + n, len - n, 235 n += scnprintf(buf + n, len - n,
237 "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n"); 236 "OS ABI [v3]\t: no-legacy-syscalls\n");
238#endif
239 237
240 return buf; 238 return buf;
241} 239}
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
index f6bdd07583f3..9d6c1ca26af6 100644
--- a/arch/arc/kernel/sys.c
+++ b/arch/arc/kernel/sys.c
@@ -6,8 +6,6 @@
6#include <asm/syscalls.h> 6#include <asm/syscalls.h>
7 7
8#define sys_clone sys_clone_wrapper 8#define sys_clone sys_clone_wrapper
9#define sys_fork sys_fork_wrapper
10#define sys_vfork sys_vfork_wrapper
11 9
12#undef __SYSCALL 10#undef __SYSCALL
13#define __SYSCALL(nr, call) [nr] = (call), 11#define __SYSCALL(nr, call) [nr] = (call),
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2c3bdce15134..13b739469c51 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -49,7 +49,6 @@ config ARM
49 select HAVE_REGS_AND_STACK_ACCESS_API 49 select HAVE_REGS_AND_STACK_ACCESS_API
50 select HAVE_SYSCALL_TRACEPOINTS 50 select HAVE_SYSCALL_TRACEPOINTS
51 select HAVE_UID16 51 select HAVE_UID16
52 select VIRT_TO_BUS
53 select KTIME_SCALAR 52 select KTIME_SCALAR
54 select PERF_USE_VMALLOC 53 select PERF_USE_VMALLOC
55 select RTC_LIB 54 select RTC_LIB
@@ -743,6 +742,7 @@ config ARCH_RPC
743 select NEED_MACH_IO_H 742 select NEED_MACH_IO_H
744 select NEED_MACH_MEMORY_H 743 select NEED_MACH_MEMORY_H
745 select NO_IOPORT 744 select NO_IOPORT
745 select VIRT_TO_BUS
746 help 746 help
747 On the Acorn Risc-PC, Linux can support the internal IDE disk and 747 On the Acorn Risc-PC, Linux can support the internal IDE disk and
748 CD-ROM interface, serial and parallel port, and the floppy drive. 748 CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -878,6 +878,7 @@ config ARCH_SHARK
878 select ISA_DMA 878 select ISA_DMA
879 select NEED_MACH_MEMORY_H 879 select NEED_MACH_MEMORY_H
880 select PCI 880 select PCI
881 select VIRT_TO_BUS
881 select ZONE_DMA 882 select ZONE_DMA
882 help 883 help
883 Support for the StrongARM based Digital DNARD machine, also known 884 Support for the StrongARM based Digital DNARD machine, also known
@@ -1005,12 +1006,12 @@ config ARCH_MULTI_V4_V5
1005 bool 1006 bool
1006 1007
1007config ARCH_MULTI_V6 1008config ARCH_MULTI_V6
1008 bool "ARMv6 based platforms (ARM11, Scorpion, ...)" 1009 bool "ARMv6 based platforms (ARM11)"
1009 select ARCH_MULTI_V6_V7 1010 select ARCH_MULTI_V6_V7
1010 select CPU_V6 1011 select CPU_V6
1011 1012
1012config ARCH_MULTI_V7 1013config ARCH_MULTI_V7
1013 bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)" 1014 bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)"
1014 default y 1015 default y
1015 select ARCH_MULTI_V6_V7 1016 select ARCH_MULTI_V6_V7
1016 select ARCH_VEXPRESS 1017 select ARCH_VEXPRESS
@@ -1461,10 +1462,6 @@ config ISA_DMA
1461 bool 1462 bool
1462 select ISA_DMA_API 1463 select ISA_DMA_API
1463 1464
1464config ARCH_NO_VIRT_TO_BUS
1465 def_bool y
1466 depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK
1467
1468# Select ISA DMA interface 1465# Select ISA DMA interface
1469config ISA_DMA_API 1466config ISA_DMA_API
1470 bool 1467 bool
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index ecfcdba2d17c..9b31f4311ea2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT
495 DEBUG_IMX53_UART || \ 495 DEBUG_IMX53_UART || \
496 DEBUG_IMX6Q_UART 496 DEBUG_IMX6Q_UART
497 default 1 497 default 1
498 depends on ARCH_MXC
498 help 499 help
499 Choose UART port on which kernel low-level debug messages 500 Choose UART port on which kernel low-level debug messages
500 should be output. 501 should be output.
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index aa98e641931f..a98c0d50fbbe 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -238,8 +238,32 @@
238 nand { 238 nand {
239 pinctrl_nand: nand-0 { 239 pinctrl_nand: nand-0 {
240 atmel,pins = 240 atmel,pins =
241 <3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */ 241 <3 0 0x1 0x0 /* PD0 periph A Read Enable */
242 3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */ 242 3 1 0x1 0x0 /* PD1 periph A Write Enable */
243 3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */
244 3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */
245 3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */
246 3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */
247 3 6 0x1 0x0 /* PD6 periph A Data bit 0 */
248 3 7 0x1 0x0 /* PD7 periph A Data bit 1 */
249 3 8 0x1 0x0 /* PD8 periph A Data bit 2 */
250 3 9 0x1 0x0 /* PD9 periph A Data bit 3 */
251 3 10 0x1 0x0 /* PD10 periph A Data bit 4 */
252 3 11 0x1 0x0 /* PD11 periph A Data bit 5 */
253 3 12 0x1 0x0 /* PD12 periph A Data bit 6 */
254 3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */
255 };
256
257 pinctrl_nand_16bits: nand_16bits-0 {
258 atmel,pins =
259 <3 14 0x1 0x0 /* PD14 periph A Data bit 8 */
260 3 15 0x1 0x0 /* PD15 periph A Data bit 9 */
261 3 16 0x1 0x0 /* PD16 periph A Data bit 10 */
262 3 17 0x1 0x0 /* PD17 periph A Data bit 11 */
263 3 18 0x1 0x0 /* PD18 periph A Data bit 12 */
264 3 19 0x1 0x0 /* PD19 periph A Data bit 13 */
265 3 20 0x1 0x0 /* PD20 periph A Data bit 14 */
266 3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */
243 }; 267 };
244 }; 268 };
245 269
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index e1347fceb5bc..1a62bcf18aa3 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -275,18 +275,27 @@
275 compatible = "arm,pl330", "arm,primecell"; 275 compatible = "arm,pl330", "arm,primecell";
276 reg = <0x12680000 0x1000>; 276 reg = <0x12680000 0x1000>;
277 interrupts = <0 35 0>; 277 interrupts = <0 35 0>;
278 #dma-cells = <1>;
279 #dma-channels = <8>;
280 #dma-requests = <32>;
278 }; 281 };
279 282
280 pdma1: pdma@12690000 { 283 pdma1: pdma@12690000 {
281 compatible = "arm,pl330", "arm,primecell"; 284 compatible = "arm,pl330", "arm,primecell";
282 reg = <0x12690000 0x1000>; 285 reg = <0x12690000 0x1000>;
283 interrupts = <0 36 0>; 286 interrupts = <0 36 0>;
287 #dma-cells = <1>;
288 #dma-channels = <8>;
289 #dma-requests = <32>;
284 }; 290 };
285 291
286 mdma1: mdma@12850000 { 292 mdma1: mdma@12850000 {
287 compatible = "arm,pl330", "arm,primecell"; 293 compatible = "arm,pl330", "arm,primecell";
288 reg = <0x12850000 0x1000>; 294 reg = <0x12850000 0x1000>;
289 interrupts = <0 34 0>; 295 interrupts = <0 34 0>;
296 #dma-cells = <1>;
297 #dma-channels = <8>;
298 #dma-requests = <1>;
290 }; 299 };
291 }; 300 };
292}; 301};
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 5f3562ad6746..9a99755920c0 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -142,12 +142,18 @@
142 compatible = "arm,pl330", "arm,primecell"; 142 compatible = "arm,pl330", "arm,primecell";
143 reg = <0x120000 0x1000>; 143 reg = <0x120000 0x1000>;
144 interrupts = <0 34 0>; 144 interrupts = <0 34 0>;
145 #dma-cells = <1>;
146 #dma-channels = <8>;
147 #dma-requests = <32>;
145 }; 148 };
146 149
147 pdma1: pdma@121B0000 { 150 pdma1: pdma@121B0000 {
148 compatible = "arm,pl330", "arm,primecell"; 151 compatible = "arm,pl330", "arm,primecell";
149 reg = <0x121000 0x1000>; 152 reg = <0x121000 0x1000>;
150 interrupts = <0 35 0>; 153 interrupts = <0 35 0>;
154 #dma-cells = <1>;
155 #dma-channels = <8>;
156 #dma-requests = <32>;
151 }; 157 };
152 }; 158 };
153 159
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 48d00a099ce3..3d3f64d2111a 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -385,7 +385,7 @@
385 385
386 spi@7000d800 { 386 spi@7000d800 {
387 compatible = "nvidia,tegra20-slink"; 387 compatible = "nvidia,tegra20-slink";
388 reg = <0x7000d480 0x200>; 388 reg = <0x7000d800 0x200>;
389 interrupts = <0 83 0x04>; 389 interrupts = <0 83 0x04>;
390 nvidia,dma-request-selector = <&apbdma 17>; 390 nvidia,dma-request-selector = <&apbdma 17>;
391 #address-cells = <1>; 391 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 9d87a3ffe998..dbf46c272562 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -372,7 +372,7 @@
372 372
373 spi@7000d800 { 373 spi@7000d800 {
374 compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink"; 374 compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
375 reg = <0x7000d480 0x200>; 375 reg = <0x7000d800 0x200>;
376 interrupts = <0 83 0x04>; 376 interrupts = <0 83 0x04>;
377 nvidia,dma-request-selector = <&apbdma 17>; 377 nvidia,dma-request-selector = <&apbdma 17>;
378 #address-cells = <1>; 378 #address-cells = <1>;
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 31644f1978d5..79078edbb9bc 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -480,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
480 evt->features = CLOCK_EVT_FEAT_ONESHOT | 480 evt->features = CLOCK_EVT_FEAT_ONESHOT |
481 CLOCK_EVT_FEAT_PERIODIC | 481 CLOCK_EVT_FEAT_PERIODIC |
482 CLOCK_EVT_FEAT_DUMMY; 482 CLOCK_EVT_FEAT_DUMMY;
483 evt->rating = 400; 483 evt->rating = 100;
484 evt->mult = 1; 484 evt->mult = 1;
485 evt->set_mode = broadcast_timer_set_mode; 485 evt->set_mode = broadcast_timer_set_mode;
486 486
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index d912e7397ecc..94b0650ea98f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -14,31 +14,15 @@
14 14
15 .text 15 .text
16 .align 5 16 .align 5
17 .word 0
18
191: subs r2, r2, #4 @ 1 do we have enough
20 blt 5f @ 1 bytes to align with?
21 cmp r3, #2 @ 1
22 strltb r1, [ip], #1 @ 1
23 strleb r1, [ip], #1 @ 1
24 strb r1, [ip], #1 @ 1
25 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
26/*
27 * The pointer is now aligned and the length is adjusted. Try doing the
28 * memset again.
29 */
30 17
31ENTRY(memset) 18ENTRY(memset)
32/* 19 ands r3, r0, #3 @ 1 unaligned?
33 * Preserve the contents of r0 for the return value. 20 mov ip, r0 @ preserve r0 as return value
34 */ 21 bne 6f @ 1
35 mov ip, r0
36 ands r3, ip, #3 @ 1 unaligned?
37 bne 1b @ 1
38/* 22/*
39 * we know that the pointer in ip is aligned to a word boundary. 23 * we know that the pointer in ip is aligned to a word boundary.
40 */ 24 */
41 orr r1, r1, r1, lsl #8 251: orr r1, r1, r1, lsl #8
42 orr r1, r1, r1, lsl #16 26 orr r1, r1, r1, lsl #16
43 mov r3, r1 27 mov r3, r1
44 cmp r2, #16 28 cmp r2, #16
@@ -127,4 +111,13 @@ ENTRY(memset)
127 tst r2, #1 111 tst r2, #1
128 strneb r1, [ip], #1 112 strneb r1, [ip], #1
129 mov pc, lr 113 mov pc, lr
114
1156: subs r2, r2, #4 @ 1 do we have enough
116 blt 5b @ 1 bytes to align with?
117 cmp r3, #2 @ 1
118 strltb r1, [ip], #1 @ 1
119 strleb r1, [ip], #1 @ 1
120 strb r1, [ip], #1 @ 1
121 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
122 b 1b
130ENDPROC(memset) 123ENDPROC(memset)
diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h
index eed465ab0dd7..5fc23771c154 100644
--- a/arch/arm/mach-at91/include/mach/gpio.h
+++ b/arch/arm/mach-at91/include/mach/gpio.h
@@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin);
209extern void at91_gpio_suspend(void); 209extern void at91_gpio_suspend(void);
210extern void at91_gpio_resume(void); 210extern void at91_gpio_resume(void);
211 211
212#ifdef CONFIG_PINCTRL_AT91
213extern void at91_pinctrl_gpio_suspend(void);
214extern void at91_pinctrl_gpio_resume(void);
215#else
216static inline void at91_pinctrl_gpio_suspend(void) {}
217static inline void at91_pinctrl_gpio_resume(void) {}
218#endif
219
212#endif /* __ASSEMBLY__ */ 220#endif /* __ASSEMBLY__ */
213 221
214#endif 222#endif
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index 8e210262aeee..e0ca59171022 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value)
92 92
93void at91_irq_suspend(void) 93void at91_irq_suspend(void)
94{ 94{
95 int i = 0, bit; 95 int bit = -1;
96 96
97 if (has_aic5()) { 97 if (has_aic5()) {
98 /* disable enabled irqs */ 98 /* disable enabled irqs */
99 while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { 99 while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
100 at91_aic_write(AT91_AIC5_SSR, 100 at91_aic_write(AT91_AIC5_SSR,
101 bit & AT91_AIC5_INTSEL_MSK); 101 bit & AT91_AIC5_INTSEL_MSK);
102 at91_aic_write(AT91_AIC5_IDCR, 1); 102 at91_aic_write(AT91_AIC5_IDCR, 1);
103 i = bit;
104 } 103 }
105 /* enable wakeup irqs */ 104 /* enable wakeup irqs */
106 i = 0; 105 bit = -1;
107 while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { 106 while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
108 at91_aic_write(AT91_AIC5_SSR, 107 at91_aic_write(AT91_AIC5_SSR,
109 bit & AT91_AIC5_INTSEL_MSK); 108 bit & AT91_AIC5_INTSEL_MSK);
110 at91_aic_write(AT91_AIC5_IECR, 1); 109 at91_aic_write(AT91_AIC5_IECR, 1);
111 i = bit;
112 } 110 }
113 } else { 111 } else {
114 at91_aic_write(AT91_AIC_IDCR, *backups); 112 at91_aic_write(AT91_AIC_IDCR, *backups);
@@ -118,23 +116,21 @@ void at91_irq_suspend(void)
118 116
119void at91_irq_resume(void) 117void at91_irq_resume(void)
120{ 118{
121 int i = 0, bit; 119 int bit = -1;
122 120
123 if (has_aic5()) { 121 if (has_aic5()) {
124 /* disable wakeup irqs */ 122 /* disable wakeup irqs */
125 while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { 123 while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
126 at91_aic_write(AT91_AIC5_SSR, 124 at91_aic_write(AT91_AIC5_SSR,
127 bit & AT91_AIC5_INTSEL_MSK); 125 bit & AT91_AIC5_INTSEL_MSK);
128 at91_aic_write(AT91_AIC5_IDCR, 1); 126 at91_aic_write(AT91_AIC5_IDCR, 1);
129 i = bit;
130 } 127 }
131 /* enable irqs disabled for suspend */ 128 /* enable irqs disabled for suspend */
132 i = 0; 129 bit = -1;
133 while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { 130 while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
134 at91_aic_write(AT91_AIC5_SSR, 131 at91_aic_write(AT91_AIC5_SSR,
135 bit & AT91_AIC5_INTSEL_MSK); 132 bit & AT91_AIC5_INTSEL_MSK);
136 at91_aic_write(AT91_AIC5_IECR, 1); 133 at91_aic_write(AT91_AIC5_IECR, 1);
137 i = bit;
138 } 134 }
139 } else { 135 } else {
140 at91_aic_write(AT91_AIC_IDCR, *wakeups); 136 at91_aic_write(AT91_AIC_IDCR, *wakeups);
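
Both loops in the irq.c hunk above shared the same defect: the search was restarted at the bit just handled (i = bit), so find_next_bit() kept returning the first set bit and the loop never advanced. The fix resumes each search at bit + 1. The same traversal can also be written with the for_each_set_bit() iterator, which hides that detail; the sketch below assumes the file-local at91_aic_write() helper and register constants, and the function name is only illustrative.

#include <linux/bitops.h>

/* Sketch only: disable every IRQ recorded in 'backups' using
 * for_each_set_bit(), which advances past each handled bit
 * automatically (the detail the open-coded loops got wrong). */
static void aic5_disable_backups(unsigned long *backups, unsigned int n_irqs)
{
	unsigned int bit;

	for_each_set_bit(bit, backups, n_irqs) {
		at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK);
		at91_aic_write(AT91_AIC5_IDCR, 1);
	}
}
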
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index adb6db888a1f..73f1f250403a 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz;
201 201
202static int at91_pm_enter(suspend_state_t state) 202static int at91_pm_enter(suspend_state_t state)
203{ 203{
204 at91_gpio_suspend(); 204 if (of_have_populated_dt())
205 at91_pinctrl_gpio_suspend();
206 else
207 at91_gpio_suspend();
205 at91_irq_suspend(); 208 at91_irq_suspend();
206 209
207 pr_debug("AT91: PM - wake mask %08x, pm state %d\n", 210 pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
@@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state)
286error: 289error:
287 target_state = PM_SUSPEND_ON; 290 target_state = PM_SUSPEND_ON;
288 at91_irq_resume(); 291 at91_irq_resume();
289 at91_gpio_resume(); 292 if (of_have_populated_dt())
293 at91_pinctrl_gpio_resume();
294 else
295 at91_gpio_resume();
290 return 0; 296 return 0;
291} 297}
292 298
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index a685e9706b7b..45b7c71d9cc1 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel);
743 */ 743 */
744int edma_alloc_slot(unsigned ctlr, int slot) 744int edma_alloc_slot(unsigned ctlr, int slot)
745{ 745{
746 if (!edma_cc[ctlr])
747 return -EINVAL;
748
746 if (slot >= 0) 749 if (slot >= 0)
747 slot = EDMA_CHAN_SLOT(slot); 750 slot = EDMA_CHAN_SLOT(slot);
748 751
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index abda5a18a664..0f2111a11315 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -67,6 +67,7 @@ config ARCH_NETWINDER
67 select ISA 67 select ISA
68 select ISA_DMA 68 select ISA_DMA
69 select PCI 69 select PCI
70 select VIRT_TO_BUS
70 help 71 help
71 Say Y here if you intend to run this kernel on the Rebel.COM 72 Say Y here if you intend to run this kernel on the Rebel.COM
72 NetWinder. Information about this machine can be found at: 73 NetWinder. Information about this machine can be found at:
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index 74e3a34d78b8..e13a8fa5e62c 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -264,6 +264,7 @@ int __init mx35_clocks_init(void)
264 clk_prepare_enable(clk[gpio3_gate]); 264 clk_prepare_enable(clk[gpio3_gate]);
265 clk_prepare_enable(clk[iim_gate]); 265 clk_prepare_enable(clk[iim_gate]);
266 clk_prepare_enable(clk[emi_gate]); 266 clk_prepare_enable(clk[emi_gate]);
267 clk_prepare_enable(clk[max_gate]);
267 268
268 /* 269 /*
269 * SCC is needed to boot via mmc after a watchdog reset. The clock code 270 * SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/imx25-dt.c
index 03b65e5ea541..82348391582a 100644
--- a/arch/arm/mach-imx/imx25-dt.c
+++ b/arch/arm/mach-imx/imx25-dt.c
@@ -27,6 +27,11 @@ static const char * const imx25_dt_board_compat[] __initconst = {
27 NULL 27 NULL
28}; 28};
29 29
30static void __init imx25_timer_init(void)
31{
32 mx25_clocks_init_dt();
33}
34
30DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)") 35DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
31 .map_io = mx25_map_io, 36 .map_io = mx25_map_io,
32 .init_early = imx25_init_early, 37 .init_early = imx25_init_early,
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index d1e2d595e79c..f62b68d926f4 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/platform_device.h>
12#include <linux/gpio.h> 13#include <linux/gpio.h>
13 14
14#include <asm/mach/arch.h> 15#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 3218f1f2c0e0..e7b781d3788f 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
41 .lower_margin = 4, 41 .lower_margin = 4,
42 .hsync_len = 1, 42 .hsync_len = 1,
43 .vsync_len = 1, 43 .vsync_len = 1,
44 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
45 FB_SYNC_DOTCLK_FAILING_ACT,
46 }, 44 },
47}; 45};
48 46
@@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
59 .lower_margin = 10, 57 .lower_margin = 10,
60 .hsync_len = 10, 58 .hsync_len = 10,
61 .vsync_len = 10, 59 .vsync_len = 10,
62 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
63 FB_SYNC_DOTCLK_FAILING_ACT,
64 }, 60 },
65}; 61};
66 62
@@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
77 .lower_margin = 45, 73 .lower_margin = 45,
78 .hsync_len = 1, 74 .hsync_len = 1,
79 .vsync_len = 1, 75 .vsync_len = 1,
80 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT,
81 }, 76 },
82}; 77};
83 78
@@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
94 .lower_margin = 13, 89 .lower_margin = 13,
95 .hsync_len = 48, 90 .hsync_len = 48,
96 .vsync_len = 3, 91 .vsync_len = 3,
97 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | 92 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
98 FB_SYNC_DATA_ENABLE_HIGH_ACT |
99 FB_SYNC_DOTCLK_FAILING_ACT,
100 }, 93 },
101}; 94};
102 95
@@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
113 .lower_margin = 0x15, 106 .lower_margin = 0x15,
114 .hsync_len = 64, 107 .hsync_len = 64,
115 .vsync_len = 4, 108 .vsync_len = 4,
116 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | 109 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
117 FB_SYNC_DATA_ENABLE_HIGH_ACT |
118 FB_SYNC_DOTCLK_FAILING_ACT,
119 }, 110 },
120}; 111};
121 112
@@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
132 .lower_margin = 2, 123 .lower_margin = 2,
133 .hsync_len = 15, 124 .hsync_len = 15,
134 .vsync_len = 15, 125 .vsync_len = 15,
135 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT
136 }, 126 },
137}; 127};
138 128
@@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
259 mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes); 249 mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
260 mxsfb_pdata.default_bpp = 32; 250 mxsfb_pdata.default_bpp = 32;
261 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; 251 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
252 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
253 MXSFB_SYNC_DOTCLK_FAILING_ACT;
262} 254}
263 255
264static inline void enable_clk_enet_out(void) 256static inline void enable_clk_enet_out(void)
@@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
278 mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes); 270 mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
279 mxsfb_pdata.default_bpp = 32; 271 mxsfb_pdata.default_bpp = 32;
280 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; 272 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
273 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
274 MXSFB_SYNC_DOTCLK_FAILING_ACT;
281 275
282 mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); 276 mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
283} 277}
@@ -297,6 +291,7 @@ static void __init m28evk_init(void)
297 mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes); 291 mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
298 mxsfb_pdata.default_bpp = 16; 292 mxsfb_pdata.default_bpp = 16;
299 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; 293 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
294 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
300} 295}
301 296
302static void __init sc_sps1_init(void) 297static void __init sc_sps1_init(void)
@@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
322 mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes); 317 mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
323 mxsfb_pdata.default_bpp = 32; 318 mxsfb_pdata.default_bpp = 32;
324 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; 319 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
320 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
321 MXSFB_SYNC_DOTCLK_FAILING_ACT;
325} 322}
326 323
327#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0) 324#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0)
@@ -407,6 +404,7 @@ static void __init cfa10049_init(void)
407 mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); 404 mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
408 mxsfb_pdata.default_bpp = 32; 405 mxsfb_pdata.default_bpp = 32;
409 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; 406 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
407 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
410} 408}
411 409
412static void __init cfa10037_init(void) 410static void __init cfa10037_init(void)
@@ -423,6 +421,8 @@ static void __init apf28_init(void)
423 mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes); 421 mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
424 mxsfb_pdata.default_bpp = 16; 422 mxsfb_pdata.default_bpp = 16;
425 mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT; 423 mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
424 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
425 MXSFB_SYNC_DOTCLK_FAILING_ACT;
426} 426}
427 427
428static void __init mxs_machine_init(void) 428static void __init mxs_machine_init(void)
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index fcdf52dbcc49..f051f53e35b7 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = {
214 .name = "pcmcdclk", 214 .name = "pcmcdclk",
215}; 215};
216 216
217static struct clk dummy_apb_pclk = {
218 .name = "apb_pclk",
219 .id = -1,
220};
221
222static struct clk *clkset_vpllsrc_list[] = { 217static struct clk *clkset_vpllsrc_list[] = {
223 [0] = &clk_fin_vpll, 218 [0] = &clk_fin_vpll,
224 [1] = &clk_sclk_hdmi27m, 219 [1] = &clk_sclk_hdmi27m,
@@ -305,18 +300,6 @@ static struct clk_ops clk_fout_apll_ops = {
305 300
306static struct clk init_clocks_off[] = { 301static struct clk init_clocks_off[] = {
307 { 302 {
308 .name = "dma",
309 .devname = "dma-pl330.0",
310 .parent = &clk_hclk_psys.clk,
311 .enable = s5pv210_clk_ip0_ctrl,
312 .ctrlbit = (1 << 3),
313 }, {
314 .name = "dma",
315 .devname = "dma-pl330.1",
316 .parent = &clk_hclk_psys.clk,
317 .enable = s5pv210_clk_ip0_ctrl,
318 .ctrlbit = (1 << 4),
319 }, {
320 .name = "rot", 303 .name = "rot",
321 .parent = &clk_hclk_dsys.clk, 304 .parent = &clk_hclk_dsys.clk,
322 .enable = s5pv210_clk_ip0_ctrl, 305 .enable = s5pv210_clk_ip0_ctrl,
@@ -573,6 +556,20 @@ static struct clk clk_hsmmc3 = {
573 .ctrlbit = (1<<19), 556 .ctrlbit = (1<<19),
574}; 557};
575 558
559static struct clk clk_pdma0 = {
560 .name = "pdma0",
561 .parent = &clk_hclk_psys.clk,
562 .enable = s5pv210_clk_ip0_ctrl,
563 .ctrlbit = (1 << 3),
564};
565
566static struct clk clk_pdma1 = {
567 .name = "pdma1",
568 .parent = &clk_hclk_psys.clk,
569 .enable = s5pv210_clk_ip0_ctrl,
570 .ctrlbit = (1 << 4),
571};
572
576static struct clk *clkset_uart_list[] = { 573static struct clk *clkset_uart_list[] = {
577 [6] = &clk_mout_mpll.clk, 574 [6] = &clk_mout_mpll.clk,
578 [7] = &clk_mout_epll.clk, 575 [7] = &clk_mout_epll.clk,
@@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = {
1075 &clk_hsmmc1, 1072 &clk_hsmmc1,
1076 &clk_hsmmc2, 1073 &clk_hsmmc2,
1077 &clk_hsmmc3, 1074 &clk_hsmmc3,
1075 &clk_pdma0,
1076 &clk_pdma1,
1078}; 1077};
1079 1078
1080/* Clock initialisation code */ 1079/* Clock initialisation code */
@@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = {
1333 CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), 1332 CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
1334 CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), 1333 CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
1335 CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), 1334 CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
1335 CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0),
1336 CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1),
1336}; 1337};
1337 1338
1338void __init s5pv210_register_clocks(void) 1339void __init s5pv210_register_clocks(void)
@@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void)
1361 for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++) 1362 for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
1362 s3c_disable_clocks(clk_cdev[ptr], 1); 1363 s3c_disable_clocks(clk_cdev[ptr], 1);
1363 1364
1364 s3c24xx_register_clock(&dummy_apb_pclk);
1365 s3c_pwmclk_init(); 1365 s3c_pwmclk_init();
1366} 1366}
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 3a38f7b34b94..e373de44a8b6 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -845,7 +845,7 @@ static struct fimc_source_info goni_camera_sensors[] = {
845 .mux_id = 0, 845 .mux_id = 0,
846 .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING | 846 .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
847 V4L2_MBUS_VSYNC_ACTIVE_LOW, 847 V4L2_MBUS_VSYNC_ACTIVE_LOW,
848 .bus_type = FIMC_BUS_TYPE_ITU_601, 848 .fimc_bus_type = FIMC_BUS_TYPE_ITU_601,
849 .board_info = &noon010pc30_board_info, 849 .board_info = &noon010pc30_board_info,
850 .i2c_bus_num = 0, 850 .i2c_bus_num = 0,
851 .clk_frequency = 16000000UL, 851 .clk_frequency = 16000000UL,
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index cdcb799e802f..fec49ebc359a 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -32,6 +32,7 @@
32#include <linux/smsc911x.h> 32#include <linux/smsc911x.h>
33#include <linux/spi/spi.h> 33#include <linux/spi/spi.h>
34#include <linux/spi/sh_hspi.h> 34#include <linux/spi/sh_hspi.h>
35#include <linux/mmc/host.h>
35#include <linux/mmc/sh_mobile_sdhi.h> 36#include <linux/mmc/sh_mobile_sdhi.h>
36#include <linux/mfd/tmio.h> 37#include <linux/mfd/tmio.h>
37#include <linux/usb/otg.h> 38#include <linux/usb/otg.h>
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6828ef6ce80e..a0bd8a755bdf 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -576,7 +576,7 @@ load_ind:
576 /* x = ((*(frame + k)) & 0xf) << 2; */ 576 /* x = ((*(frame + k)) & 0xf) << 2; */
577 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; 577 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
578 /* the interpreter should deal with the negative K */ 578 /* the interpreter should deal with the negative K */
579 if (k < 0) 579 if ((int)k < 0)
580 return -1; 580 return -1;
581 /* offset in r1: we might have to take the slow path */ 581 /* offset in r1: we might have to take the slow path */
582 emit_mov_i(r_off, k, ctx); 582 emit_mov_i(r_off, k, ctx);
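
In the bpf_jit_32.c hunk above, k holds the BPF immediate as a u32, so the old test "k < 0" could never be true and negative ancillary offsets were not diverted to the interpreter. The added cast reinterprets the bits as signed before comparing. A small stand-alone illustration in plain userspace C, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t k = (uint32_t)-8;	/* a negative offset stored in an unsigned field */

	printf("k < 0          -> %d\n", k < 0);		/* 0: unsigned compare, never true */
	printf("(int32_t)k < 0 -> %d\n", (int32_t)k < 0);	/* 1: the intended signed test */
	return 0;
}
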
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fd70a68387eb..9b6d19f74078 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -9,7 +9,6 @@ config ARM64
9 select CLONE_BACKWARDS 9 select CLONE_BACKWARDS
10 select COMMON_CLK 10 select COMMON_CLK
11 select GENERIC_CLOCKEVENTS 11 select GENERIC_CLOCKEVENTS
12 select GENERIC_HARDIRQS_NO_DEPRECATED
13 select GENERIC_IOMAP 12 select GENERIC_IOMAP
14 select GENERIC_IRQ_PROBE 13 select GENERIC_IRQ_PROBE
15 select GENERIC_IRQ_SHOW 14 select GENERIC_IRQ_SHOW
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 51493430f142..1a6bfe954d49 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,17 +6,6 @@ config FRAME_POINTER
6 bool 6 bool
7 default y 7 default y
8 8
9config DEBUG_ERRORS
10 bool "Verbose kernel error messages"
11 depends on DEBUG_KERNEL
12 help
13 This option controls verbose debugging information which can be
14 printed when the kernel detects an internal error. This debugging
15 information is useful to kernel hackers when tracking down problems,
16 but mostly meaningless to other people. It's safe to say Y unless
17 you are concerned with the code size or don't want to see these
18 messages.
19
20config DEBUG_STACK_USAGE 9config DEBUG_STACK_USAGE
21 bool "Enable stack utilization instrumentation" 10 bool "Enable stack utilization instrumentation"
22 depends on DEBUG_KERNEL 11 depends on DEBUG_KERNEL
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 9212c7880da7..09bef29f3a09 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -82,4 +82,3 @@ CONFIG_DEBUG_KERNEL=y
82CONFIG_DEBUG_INFO=y 82CONFIG_DEBUG_INFO=y
83# CONFIG_FTRACE is not set 83# CONFIG_FTRACE is not set
84CONFIG_ATOMIC64_SELFTEST=y 84CONFIG_ATOMIC64_SELFTEST=y
85CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h
index bde960720892..42e04c877428 100644
--- a/arch/arm64/include/asm/ucontext.h
+++ b/arch/arm64/include/asm/ucontext.h
@@ -22,7 +22,7 @@ struct ucontext {
22 stack_t uc_stack; 22 stack_t uc_stack;
23 sigset_t uc_sigmask; 23 sigset_t uc_sigmask;
24 /* glibc uses a 1024-bit sigset_t */ 24 /* glibc uses a 1024-bit sigset_t */
25 __u8 __unused[(1024 - sizeof(sigset_t)) / 8]; 25 __u8 __unused[1024 / 8 - sizeof(sigset_t)];
26 /* last for future expansion */ 26 /* last for future expansion */
27 struct sigcontext uc_mcontext; 27 struct sigcontext uc_mcontext;
28}; 28};
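
The ucontext.h hunk above corrects a units error: glibc reserves 1024 bits (128 bytes) for the signal mask, and sizeof(sigset_t) is measured in bytes, so the padding must be 1024 / 8 - sizeof(sigset_t) bytes, whereas the old expression subtracted bytes from bits before dividing. A quick check, assuming an 8-byte kernel sigset_t (64 signals) purely for illustration:

#include <assert.h>

#define SIGSET_BYTES 8	/* assumption for this example: arm64 kernel sigset_t */

int main(void)
{
	unsigned old_pad = (1024 - SIGSET_BYTES) / 8;	/* 127: bits and bytes mixed */
	unsigned new_pad = 1024 / 8 - SIGSET_BYTES;	/* 120 */

	assert(SIGSET_BYTES + old_pad == 135);	/* overshoots glibc's 128-byte field */
	assert(SIGSET_BYTES + new_pad == 128);	/* pads the mask to exactly 128 bytes */
	return 0;
}
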
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index cef3925eaf60..aa3e948f7885 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -40,7 +40,9 @@ EXPORT_SYMBOL(__copy_to_user);
40EXPORT_SYMBOL(__clear_user); 40EXPORT_SYMBOL(__clear_user);
41 41
42 /* bitops */ 42 /* bitops */
43#ifdef CONFIG_SMP
43EXPORT_SYMBOL(__atomic_hash); 44EXPORT_SYMBOL(__atomic_hash);
45#endif
44 46
45 /* physical memory */ 47 /* physical memory */
46EXPORT_SYMBOL(memstart_addr); 48EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 7f4f3673f2bc..e393174fe859 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
549 sigset_t *set, struct pt_regs *regs) 549 sigset_t *set, struct pt_regs *regs)
550{ 550{
551 struct compat_rt_sigframe __user *frame; 551 struct compat_rt_sigframe __user *frame;
552 compat_stack_t stack;
553 int err = 0; 552 int err = 0;
554 553
555 frame = compat_get_sigframe(ka, regs, sizeof(*frame)); 554 frame = compat_get_sigframe(ka, regs, sizeof(*frame));
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 224b44ab534e..70b8cd4021c4 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
261void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) 261void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
262{ 262{
263 unsigned long size, mask; 263 unsigned long size, mask;
264 bool page64k = IS_ENABLED(ARM64_64K_PAGES); 264 bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
265 pgd_t *pgd; 265 pgd_t *pgd;
266 pud_t *pud; 266 pud_t *pud;
267 pmd_t *pmd; 267 pmd_t *pmd;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e34f565f595a..6f7dc8b7b35c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -291,7 +291,6 @@ cpu_idle (void)
291 } 291 }
292 292
293 if (!need_resched()) { 293 if (!need_resched()) {
294 void (*idle)(void);
295#ifdef CONFIG_SMP 294#ifdef CONFIG_SMP
296 min_xtp(); 295 min_xtp();
297#endif 296#endif
@@ -299,9 +298,7 @@ cpu_idle (void)
299 if (mark_idle) 298 if (mark_idle)
300 (*mark_idle)(1); 299 (*mark_idle)(1);
301 300
302 if (!idle) 301 default_idle();
303 idle = default_idle;
304 (*idle)();
305 if (mark_idle) 302 if (mark_idle)
306 (*mark_idle)(0); 303 (*mark_idle)(0);
307#ifdef CONFIG_SMP 304#ifdef CONFIG_SMP
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 80821512e9cc..ea5bb045983a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -90,6 +90,7 @@ config GENERIC_GPIO
90config PPC 90config PPC
91 bool 91 bool
92 default y 92 default y
93 select BINFMT_ELF
93 select OF 94 select OF
94 select OF_EARLY_FLATTREE 95 select OF_EARLY_FLATTREE
95 select HAVE_FTRACE_MCOUNT_RECORD 96 select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2fdb47a19efd..b59e06f507ea 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size);
343/* 343/*
344 * VSID allocation (256MB segment) 344 * VSID allocation (256MB segment)
345 * 345 *
346 * We first generate a 38-bit "proto-VSID". For kernel addresses this 346 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
347 * is equal to the ESID | 1 << 37, for user addresses it is: 347 * from mmu context id and effective segment id of the address.
348 * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
349 * 348 *
350 * This splits the proto-VSID into the below range 349 * For user processes max context id is limited to ((1ul << 19) - 5)
351 * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range 350 * for kernel space, we use the top 4 context ids to map address as below
352 * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range 351 * NOTE: each context only support 64TB now.
353 * 352 * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
354 * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1 353 * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
355 * That is, we assign half of the space to user processes and half 354 * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
356 * to the kernel. 355 * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
357 * 356 *
358 * The proto-VSIDs are then scrambled into real VSIDs with the 357 * The proto-VSIDs are then scrambled into real VSIDs with the
359 * multiplicative hash: 358 * multiplicative hash:
@@ -363,41 +362,49 @@ extern void slb_set_size(u16 size);
363 * VSID_MULTIPLIER is prime, so in particular it is 362 * VSID_MULTIPLIER is prime, so in particular it is
364 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. 363 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
365 * Because the modulus is 2^n-1 we can compute it efficiently without 364 * Because the modulus is 2^n-1 we can compute it efficiently without
366 * a divide or extra multiply (see below). 365 * a divide or extra multiply (see below). The scramble function gives
367 * 366 * robust scattering in the hash table (at least based on some initial
368 * This scheme has several advantages over older methods: 367 * results).
369 *
370 * - We have VSIDs allocated for every kernel address
371 * (i.e. everything above 0xC000000000000000), except the very top
372 * segment, which simplifies several things.
373 * 368 *
374 * - We allow for USER_ESID_BITS significant bits of ESID and 369 * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
375 * CONTEXT_BITS bits of context for user addresses. 370 * bad address. This enables us to consolidate bad address handling in
376 * i.e. 64T (46 bits) of address space for up to half a million contexts. 371 * hash_page.
377 * 372 *
378 * - The scramble function gives robust scattering in the hash 373 * We also need to avoid the last segment of the last context, because that
379 * table (at least based on some initial results). The previous 374 * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
380 * method was more susceptible to pathological cases giving excessive 375 * because of the modulo operation in vsid scramble. But the vmemmap
381 * hash collisions. 376 * (which is what uses region 0xf) will never be close to 64TB in size
377 * (it's 56 bytes per page of system memory).
382 */ 378 */
383 379
380#define CONTEXT_BITS 19
381#define ESID_BITS 18
382#define ESID_BITS_1T 6
383
384/*
385 * 256MB segment
386 * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
387 * available for user + kernel mapping. The top 4 contexts are used for
388 * kernel mapping. Each segment contains 2^28 bytes. Each
389 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
390 * (19 == 37 + 28 - 46).
391 */
392#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
393
384/* 394/*
385 * This should be computed such that protovosid * vsid_mulitplier 395 * This should be computed such that protovosid * vsid_mulitplier
386 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus 396 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
387 */ 397 */
388#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ 398#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
389#define VSID_BITS_256M 38 399#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
390#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) 400#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
391 401
392#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ 402#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
393#define VSID_BITS_1T 26 403#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
394#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) 404#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
395 405
396#define CONTEXT_BITS 19
397#define USER_ESID_BITS 18
398#define USER_ESID_BITS_1T 6
399 406
400#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) 407#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
401 408
402/* 409/*
403 * This macro generates asm code to compute the VSID scramble 410 * This macro generates asm code to compute the VSID scramble
@@ -421,7 +428,8 @@ extern void slb_set_size(u16 size);
421 srdi rx,rt,VSID_BITS_##size; \ 428 srdi rx,rt,VSID_BITS_##size; \
422 clrldi rt,rt,(64-VSID_BITS_##size); \ 429 clrldi rt,rt,(64-VSID_BITS_##size); \
423 add rt,rt,rx; /* add high and low bits */ \ 430 add rt,rt,rx; /* add high and low bits */ \
424 /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ 431 /* NOTE: explanation based on VSID_BITS_##size = 36 \
432 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
425 * 2^36-1+2^28-1. That in particular means that if r3 >= \ 433 * 2^36-1+2^28-1. That in particular means that if r3 >= \
426 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ 434 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
427 * the bit clear, r3 already has the answer we want, if it \ 435 * the bit clear, r3 already has the answer we want, if it \
@@ -513,34 +521,6 @@ typedef struct {
513 }) 521 })
514#endif /* 1 */ 522#endif /* 1 */
515 523
516/*
517 * This is only valid for addresses >= PAGE_OFFSET
518 * The proto-VSID space is divided into two class
519 * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
520 * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
521 *
522 * With KERNEL_START at 0xc000000000000000, the proto vsid for
523 * the kernel ends up with 0xc00000000 (36 bits). With 64TB
524 * support we need to have kernel proto-VSID in the
525 * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
526 */
527static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
528{
529 unsigned long proto_vsid;
530 /*
531 * We need to make sure proto_vsid for the kernel is
532 * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
533 */
534 if (ssize == MMU_SEGSIZE_256M) {
535 proto_vsid = ea >> SID_SHIFT;
536 proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
537 return vsid_scramble(proto_vsid, 256M);
538 }
539 proto_vsid = ea >> SID_SHIFT_1T;
540 proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
541 return vsid_scramble(proto_vsid, 1T);
542}
543
544/* Returns the segment size indicator for a user address */ 524/* Returns the segment size indicator for a user address */
545static inline int user_segment_size(unsigned long addr) 525static inline int user_segment_size(unsigned long addr)
546{ 526{
@@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr)
550 return MMU_SEGSIZE_256M; 530 return MMU_SEGSIZE_256M;
551} 531}
552 532
553/* This is only valid for user addresses (which are below 2^44) */
554static inline unsigned long get_vsid(unsigned long context, unsigned long ea, 533static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
555 int ssize) 534 int ssize)
556{ 535{
536 /*
537 * Bad address. We return VSID 0 for that
538 */
539 if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
540 return 0;
541
557 if (ssize == MMU_SEGSIZE_256M) 542 if (ssize == MMU_SEGSIZE_256M)
558 return vsid_scramble((context << USER_ESID_BITS) 543 return vsid_scramble((context << ESID_BITS)
559 | (ea >> SID_SHIFT), 256M); 544 | (ea >> SID_SHIFT), 256M);
560 return vsid_scramble((context << USER_ESID_BITS_1T) 545 return vsid_scramble((context << ESID_BITS_1T)
561 | (ea >> SID_SHIFT_1T), 1T); 546 | (ea >> SID_SHIFT_1T), 1T);
562} 547}
563 548
549/*
550 * This is only valid for addresses >= PAGE_OFFSET
551 *
552 * For kernel space, we use the top 4 context ids to map address as below
553 * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
554 * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
555 * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
556 * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
557 */
558static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
559{
560 unsigned long context;
561
562 /*
563 * kernel take the top 4 context from the available range
564 */
565 context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
566 return get_vsid(context, ea, ssize);
567}
564#endif /* __ASSEMBLY__ */ 568#endif /* __ASSEMBLY__ */
565 569
566#endif /* _ASM_POWERPC_MMU_HASH64_H_ */ 570#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
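
The new get_kernel_vsid() in the hunk above no longer sets a dedicated high proto-VSID bit; it reuses get_vsid() with one of the four context ids reserved above MAX_USER_CONTEXT, selected from the top nibble of the effective address. With MAX_USER_CONTEXT = (1 << 19) - 5 = 0x7fffb, the arithmetic lands exactly on the 0x7fffc..0x7ffff values listed in the header comment, as this small stand-alone check (illustration only, not part of the patch) shows:

#include <stdio.h>

#define CONTEXT_BITS		19
#define MAX_USER_CONTEXT	((1ul << CONTEXT_BITS) - 5)	/* 0x7fffb */

int main(void)
{
	unsigned long region;

	/* Kernel regions 0xc..0xf should map to context ids 0x7fffc..0x7ffff. */
	for (region = 0xc; region <= 0xf; region++)
		printf("region 0x%lx -> kernel context 0x%lx\n",
		       region, MAX_USER_CONTEXT + (region - 0xc) + 1);
	return 0;
}
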
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 75a3d71b895d..19599ef352bc 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
275 .cpu_features = CPU_FTRS_PPC970, 275 .cpu_features = CPU_FTRS_PPC970,
276 .cpu_user_features = COMMON_USER_POWER4 | 276 .cpu_user_features = COMMON_USER_POWER4 |
277 PPC_FEATURE_HAS_ALTIVEC_COMP, 277 PPC_FEATURE_HAS_ALTIVEC_COMP,
278 .mmu_features = MMU_FTR_HPTE_TABLE, 278 .mmu_features = MMU_FTRS_PPC970,
279 .icache_bsize = 128, 279 .icache_bsize = 128,
280 .dcache_bsize = 128, 280 .dcache_bsize = 128,
281 .num_pmcs = 8, 281 .num_pmcs = 8,
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index f3eab8594d9f..d44a571e45a7 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -23,8 +23,10 @@
23#include <asm/code-patching.h> 23#include <asm/code-patching.h>
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25 25
26#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
26extern void epapr_ev_idle(void); 27extern void epapr_ev_idle(void);
27extern u32 epapr_ev_idle_start[]; 28extern u32 epapr_ev_idle_start[];
29#endif
28 30
29bool epapr_paravirt_enabled; 31bool epapr_paravirt_enabled;
30 32
@@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
47 49
48 for (i = 0; i < (len / 4); i++) { 50 for (i = 0; i < (len / 4); i++) {
49 patch_instruction(epapr_hypercall_start + i, insts[i]); 51 patch_instruction(epapr_hypercall_start + i, insts[i]);
52#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
50 patch_instruction(epapr_ev_idle_start + i, insts[i]); 53 patch_instruction(epapr_ev_idle_start + i, insts[i]);
54#endif
51 } 55 }
52 56
57#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
53 if (of_get_property(hyper_node, "has-idle", NULL)) 58 if (of_get_property(hyper_node, "has-idle", NULL))
54 ppc_md.power_save = epapr_ev_idle; 59 ppc_md.power_save = epapr_ev_idle;
60#endif
55 61
56 epapr_paravirt_enabled = true; 62 epapr_paravirt_enabled = true;
57 63
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 87ef8f5ee5bc..56bd92362ce1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1066,78 +1066,6 @@ unrecov_user_slb:
1066#endif /* __DISABLED__ */ 1066#endif /* __DISABLED__ */
1067 1067
1068 1068
1069/*
1070 * r13 points to the PACA, r9 contains the saved CR,
1071 * r12 contain the saved SRR1, SRR0 is still ready for return
1072 * r3 has the faulting address
1073 * r9 - r13 are saved in paca->exslb.
1074 * r3 is saved in paca->slb_r3
1075 * We assume we aren't going to take any exceptions during this procedure.
1076 */
1077_GLOBAL(slb_miss_realmode)
1078 mflr r10
1079#ifdef CONFIG_RELOCATABLE
1080 mtctr r11
1081#endif
1082
1083 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1084 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1085
1086 bl .slb_allocate_realmode
1087
1088 /* All done -- return from exception. */
1089
1090 ld r10,PACA_EXSLB+EX_LR(r13)
1091 ld r3,PACA_EXSLB+EX_R3(r13)
1092 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1093
1094 mtlr r10
1095
1096 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1097 beq- 2f
1098
1099.machine push
1100.machine "power4"
1101 mtcrf 0x80,r9
1102 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1103.machine pop
1104
1105 RESTORE_PPR_PACA(PACA_EXSLB, r9)
1106 ld r9,PACA_EXSLB+EX_R9(r13)
1107 ld r10,PACA_EXSLB+EX_R10(r13)
1108 ld r11,PACA_EXSLB+EX_R11(r13)
1109 ld r12,PACA_EXSLB+EX_R12(r13)
1110 ld r13,PACA_EXSLB+EX_R13(r13)
1111 rfid
1112 b . /* prevent speculative execution */
1113
11142: mfspr r11,SPRN_SRR0
1115 ld r10,PACAKBASE(r13)
1116 LOAD_HANDLER(r10,unrecov_slb)
1117 mtspr SPRN_SRR0,r10
1118 ld r10,PACAKMSR(r13)
1119 mtspr SPRN_SRR1,r10
1120 rfid
1121 b .
1122
1123unrecov_slb:
1124 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1125 DISABLE_INTS
1126 bl .save_nvgprs
11271: addi r3,r1,STACK_FRAME_OVERHEAD
1128 bl .unrecoverable_exception
1129 b 1b
1130
1131
1132#ifdef CONFIG_PPC_970_NAP
1133power4_fixup_nap:
1134 andc r9,r9,r10
1135 std r9,TI_LOCAL_FLAGS(r11)
1136 ld r10,_LINK(r1) /* make idle task do the */
1137 std r10,_NIP(r1) /* equivalent of a blr */
1138 blr
1139#endif
1140
1141 .align 7 1069 .align 7
1142 .globl alignment_common 1070 .globl alignment_common
1143alignment_common: 1071alignment_common:
@@ -1336,6 +1264,78 @@ _GLOBAL(opal_mc_secondary_handler)
1336 1264
1337 1265
1338/* 1266/*
1267 * r13 points to the PACA, r9 contains the saved CR,
1268 * r12 contain the saved SRR1, SRR0 is still ready for return
1269 * r3 has the faulting address
1270 * r9 - r13 are saved in paca->exslb.
1271 * r3 is saved in paca->slb_r3
1272 * We assume we aren't going to take any exceptions during this procedure.
1273 */
1274_GLOBAL(slb_miss_realmode)
1275 mflr r10
1276#ifdef CONFIG_RELOCATABLE
1277 mtctr r11
1278#endif
1279
1280 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1281 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1282
1283 bl .slb_allocate_realmode
1284
1285 /* All done -- return from exception. */
1286
1287 ld r10,PACA_EXSLB+EX_LR(r13)
1288 ld r3,PACA_EXSLB+EX_R3(r13)
1289 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1290
1291 mtlr r10
1292
1293 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1294 beq- 2f
1295
1296.machine push
1297.machine "power4"
1298 mtcrf 0x80,r9
1299 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1300.machine pop
1301
1302 RESTORE_PPR_PACA(PACA_EXSLB, r9)
1303 ld r9,PACA_EXSLB+EX_R9(r13)
1304 ld r10,PACA_EXSLB+EX_R10(r13)
1305 ld r11,PACA_EXSLB+EX_R11(r13)
1306 ld r12,PACA_EXSLB+EX_R12(r13)
1307 ld r13,PACA_EXSLB+EX_R13(r13)
1308 rfid
1309 b . /* prevent speculative execution */
1310
13112: mfspr r11,SPRN_SRR0
1312 ld r10,PACAKBASE(r13)
1313 LOAD_HANDLER(r10,unrecov_slb)
1314 mtspr SPRN_SRR0,r10
1315 ld r10,PACAKMSR(r13)
1316 mtspr SPRN_SRR1,r10
1317 rfid
1318 b .
1319
1320unrecov_slb:
1321 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1322 DISABLE_INTS
1323 bl .save_nvgprs
13241: addi r3,r1,STACK_FRAME_OVERHEAD
1325 bl .unrecoverable_exception
1326 b 1b
1327
1328
1329#ifdef CONFIG_PPC_970_NAP
1330power4_fixup_nap:
1331 andc r9,r9,r10
1332 std r9,TI_LOCAL_FLAGS(r11)
1333 ld r10,_LINK(r1) /* make idle task do the */
1334 std r10,_NIP(r1) /* equivalent of a blr */
1335 blr
1336#endif
1337
1338/*
1339 * Hash table stuff 1339 * Hash table stuff
1340 */ 1340 */
1341 .align 7 1341 .align 7
@@ -1452,20 +1452,36 @@ do_ste_alloc:
1452_GLOBAL(do_stab_bolted) 1452_GLOBAL(do_stab_bolted)
1453 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1453 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1454 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ 1454 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1455 mfspr r11,SPRN_DAR /* ea */
1455 1456
1457 /*
1458 * check for bad kernel/user address
1459 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
1460 */
1461 rldicr. r9,r11,4,(63 - 46 - 4)
1462 li r9,0 /* VSID = 0 for bad address */
1463 bne- 0f
1464
1465 /*
1466 * Calculate VSID:
1467 * This is the kernel vsid, we take the top for context from
1468 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
1469 * Here we know that (ea >> 60) == 0xc
1470 */
1471 lis r9,(MAX_USER_CONTEXT + 1)@ha
1472 addi r9,r9,(MAX_USER_CONTEXT + 1)@l
1473
1474 srdi r10,r11,SID_SHIFT
1475 rldimi r10,r9,ESID_BITS,0 /* proto vsid */
1476 ASM_VSID_SCRAMBLE(r10, r9, 256M)
1477 rldic r9,r10,12,16 /* r9 = vsid << 12 */
1478
14790:
1456 /* Hash to the primary group */ 1480 /* Hash to the primary group */
1457 ld r10,PACASTABVIRT(r13) 1481 ld r10,PACASTABVIRT(r13)
1458 mfspr r11,SPRN_DAR 1482 srdi r11,r11,SID_SHIFT
1459 srdi r11,r11,28
1460 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1483 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1461 1484
1462 /* Calculate VSID */
1463 /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1464 li r9,0x1
1465 rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
1466 ASM_VSID_SCRAMBLE(r11, r9, 256M)
1467 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1468
1469 /* Search the primary group for a free entry */ 1485 /* Search the primary group for a free entry */
14701: ld r11,0(r10) /* Test valid bit of the current ste */ 14861: ld r11,0(r10) /* Test valid bit of the current ste */
1471 andi. r11,r11,0x80 1487 andi. r11,r11,0x80
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7f7fb7fd991b..13f8d168b3f1 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2832,11 +2832,13 @@ static void unreloc_toc(void)
2832{ 2832{
2833} 2833}
2834#else 2834#else
2835static void __reloc_toc(void *tocstart, unsigned long offset, 2835static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
2836 unsigned long nr_entries)
2837{ 2836{
2838 unsigned long i; 2837 unsigned long i;
2839 unsigned long *toc_entry = (unsigned long *)tocstart; 2838 unsigned long *toc_entry;
2839
2840 /* Get the start of the TOC by using r2 directly. */
2841 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
2840 2842
2841 for (i = 0; i < nr_entries; i++) { 2843 for (i = 0; i < nr_entries; i++) {
2842 *toc_entry = *toc_entry + offset; 2844 *toc_entry = *toc_entry + offset;
@@ -2850,8 +2852,7 @@ static void reloc_toc(void)
2850 unsigned long nr_entries = 2852 unsigned long nr_entries =
2851 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 2853 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2852 2854
2853 /* Need to add offset to get at __prom_init_toc_start */ 2855 __reloc_toc(offset, nr_entries);
2854 __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries);
2855 2856
2856 mb(); 2857 mb();
2857} 2858}
@@ -2864,8 +2865,7 @@ static void unreloc_toc(void)
2864 2865
2865 mb(); 2866 mb();
2866 2867
2867 /* __prom_init_toc_start has been relocated, no need to add offset */ 2868 __reloc_toc(-offset, nr_entries);
2868 __reloc_toc(__prom_init_toc_start, -offset, nr_entries);
2869} 2869}
2870#endif 2870#endif
2871#endif 2871#endif
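The prom_init change above drops the tocstart argument from __reloc_toc() and derives the TOC base from r2 at run time instead. Because r2 always points 0x8000 past the start of the current TOC, the same helper now works both before relocation (reloc_toc() adding the offset) and after it (unreloc_toc() subtracting it), without the caller having to know which address the TOC currently lives at. A condensed C sketch of the resulting helper, assuming the inline-asm constraint shown in the hunk and the usual walk over the TOC entries:

static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
{
	unsigned long i;
	unsigned long *toc_entry;

	/* r2 is the TOC pointer and sits 0x8000 past the TOC start, so
	 * subtracting 0x8000 recovers the current base regardless of
	 * whether the entries have already been relocated. */
	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));

	for (i = 0; i < nr_entries; i++) {
		*toc_entry += offset;	/* slide each entry by the offset */
		toc_entry++;
	}
}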
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 245c1b6a0858..f9b30c68ba47 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
1428 1428
1429 brk.address = bp_info->addr & ~7UL; 1429 brk.address = bp_info->addr & ~7UL;
1430 brk.type = HW_BRK_TYPE_TRANSLATE; 1430 brk.type = HW_BRK_TYPE_TRANSLATE;
1431 brk.len = 8;
1431 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) 1432 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1432 brk.type |= HW_BRK_TYPE_READ; 1433 brk.type |= HW_BRK_TYPE_READ;
1433 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) 1434 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index ead58e317294..5d7d29a313eb 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
326 vcpu3s->context_id[0] = err; 326 vcpu3s->context_id[0] = err;
327 327
328 vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) 328 vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
329 << USER_ESID_BITS) - 1; 329 << ESID_BITS) - 1;
330 vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; 330 vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
331 vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; 331 vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
332 332
333 kvmppc_mmu_hpte_init(vcpu); 333 kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1b6e1271719f..f410c3e12c1e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
195 unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); 195 unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
196 unsigned long tprot = prot; 196 unsigned long tprot = prot;
197 197
198 /*
199 * If we hit a bad address return error.
200 */
201 if (!vsid)
202 return -1;
198 /* Make kernel text executable */ 203 /* Make kernel text executable */
199 if (overlaps_kernel_text(vaddr, vaddr + step)) 204 if (overlaps_kernel_text(vaddr, vaddr + step))
200 tprot &= ~HPTE_R_N; 205 tprot &= ~HPTE_R_N;
@@ -759,6 +764,8 @@ void __init early_init_mmu(void)
759 /* Initialize stab / SLB management */ 764 /* Initialize stab / SLB management */
760 if (mmu_has_feature(MMU_FTR_SLB)) 765 if (mmu_has_feature(MMU_FTR_SLB))
761 slb_initialize(); 766 slb_initialize();
767 else
768 stab_initialize(get_paca()->stab_real);
762} 769}
763 770
764#ifdef CONFIG_SMP 771#ifdef CONFIG_SMP
@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
922 DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", 929 DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
923 ea, access, trap); 930 ea, access, trap);
924 931
925 if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
926 DBG_LOW(" out of pgtable range !\n");
927 return 1;
928 }
929
930 /* Get region & vsid */ 932 /* Get region & vsid */
931 switch (REGION_ID(ea)) { 933 switch (REGION_ID(ea)) {
932 case USER_REGION_ID: 934 case USER_REGION_ID:
@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
957 } 959 }
958 DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); 960 DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
959 961
962 /* Bad address. */
963 if (!vsid) {
964 DBG_LOW("Bad address!\n");
965 return 1;
966 }
960 /* Get pgdir */ 967 /* Get pgdir */
961 pgdir = mm->pgd; 968 pgdir = mm->pgd;
962 if (pgdir == NULL) 969 if (pgdir == NULL)
@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1126 /* Get VSID */ 1133 /* Get VSID */
1127 ssize = user_segment_size(ea); 1134 ssize = user_segment_size(ea);
1128 vsid = get_vsid(mm->context.id, ea, ssize); 1135 vsid = get_vsid(mm->context.id, ea, ssize);
1136 if (!vsid)
1137 return;
1129 1138
1130 /* Hash doesn't like irqs */ 1139 /* Hash doesn't like irqs */
1131 local_irq_save(flags); 1140 local_irq_save(flags);
@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
1233 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); 1242 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1234 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 1243 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
1235 1244
1245 /* Don't create HPTE entries for bad address */
1246 if (!vsid)
1247 return;
1236 ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), 1248 ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
1237 mode, HPTE_V_BOLTED, 1249 mode, HPTE_V_BOLTED,
1238 mmu_linear_psize, mmu_kernel_ssize); 1250 mmu_linear_psize, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40bc5b0ace54..d1d1b92c5b99 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -29,15 +29,6 @@
29static DEFINE_SPINLOCK(mmu_context_lock); 29static DEFINE_SPINLOCK(mmu_context_lock);
30static DEFINE_IDA(mmu_context_ida); 30static DEFINE_IDA(mmu_context_ida);
31 31
32/*
33 * 256MB segment
34 * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
35 * available for user mappings. Each segment contains 2^28 bytes. Each
36 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
37 * (19 == 37 + 28 - 46).
38 */
39#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
40
41int __init_new_context(void) 32int __init_new_context(void)
42{ 33{
43 int index; 34 int index;
@@ -56,7 +47,7 @@ again:
56 else if (err) 47 else if (err)
57 return err; 48 return err;
58 49
59 if (index > MAX_CONTEXT) { 50 if (index > MAX_USER_CONTEXT) {
60 spin_lock(&mmu_context_lock); 51 spin_lock(&mmu_context_lock);
61 ida_remove(&mmu_context_ida, index); 52 ida_remove(&mmu_context_ida, index);
62 spin_unlock(&mmu_context_lock); 53 spin_unlock(&mmu_context_lock);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e212a271c7a4..654258f165ae 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -61,7 +61,7 @@
61#endif 61#endif
62 62
63#ifdef CONFIG_PPC_STD_MMU_64 63#ifdef CONFIG_PPC_STD_MMU_64
64#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) 64#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
65#error TASK_SIZE_USER64 exceeds user VSID range 65#error TASK_SIZE_USER64 exceeds user VSID range
66#endif 66#endif
67#endif 67#endif
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1a16ca227757..17aa6dfceb34 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,10 +31,15 @@
31 * No other registers are examined or changed. 31 * No other registers are examined or changed.
32 */ 32 */
33_GLOBAL(slb_allocate_realmode) 33_GLOBAL(slb_allocate_realmode)
34 /* r3 = faulting address */ 34 /*
35 * check for bad kernel/user address
36 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
37 */
38 rldicr. r9,r3,4,(63 - 46 - 4)
39 bne- 8f
35 40
36 srdi r9,r3,60 /* get region */ 41 srdi r9,r3,60 /* get region */
37 srdi r10,r3,28 /* get esid */ 42 srdi r10,r3,SID_SHIFT /* get esid */
38 cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ 43 cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
39 44
40 /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ 45 /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
56 */ 61 */
57_GLOBAL(slb_miss_kernel_load_linear) 62_GLOBAL(slb_miss_kernel_load_linear)
58 li r11,0 63 li r11,0
59 li r9,0x1
60 /* 64 /*
61 * for 1T we shift 12 bits more. slb_finish_load_1T will do 65 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
62 * the necessary adjustment 66 * r9 = region id.
63 */ 67 */
64 rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 68 addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
69 addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
70
71
65BEGIN_FTR_SECTION 72BEGIN_FTR_SECTION
66 b slb_finish_load 73 b slb_finish_load
67END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 74END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
91 _GLOBAL(slb_miss_kernel_load_io) 98 _GLOBAL(slb_miss_kernel_load_io)
92 li r11,0 99 li r11,0
936: 1006:
94 li r9,0x1
95 /* 101 /*
96 * for 1T we shift 12 bits more. slb_finish_load_1T will do 102 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
97 * the necessary adjustment 103 * r9 = region id.
98 */ 104 */
99 rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 105 addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
106 addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
107
100BEGIN_FTR_SECTION 108BEGIN_FTR_SECTION
101 b slb_finish_load 109 b slb_finish_load
102END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 110END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
103 b slb_finish_load_1T 111 b slb_finish_load_1T
104 112
1050: /* user address: proto-VSID = context << 15 | ESID. First check 1130:
106 * if the address is within the boundaries of the user region
107 */
108 srdi. r9,r10,USER_ESID_BITS
109 bne- 8f /* invalid ea bits set */
110
111
112 /* when using slices, we extract the psize off the slice bitmaps 114 /* when using slices, we extract the psize off the slice bitmaps
113 * and then we need to get the sllp encoding off the mmu_psize_defs 115 * and then we need to get the sllp encoding off the mmu_psize_defs
114 * array. 116 * array.
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
164 ld r9,PACACONTEXTID(r13) 166 ld r9,PACACONTEXTID(r13)
165BEGIN_FTR_SECTION 167BEGIN_FTR_SECTION
166 cmpldi r10,0x1000 168 cmpldi r10,0x1000
167END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
168 rldimi r10,r9,USER_ESID_BITS,0
169BEGIN_FTR_SECTION
170 bge slb_finish_load_1T 169 bge slb_finish_load_1T
171END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 170END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
172 b slb_finish_load 171 b slb_finish_load
173 172
1748: /* invalid EA */ 1738: /* invalid EA */
175 li r10,0 /* BAD_VSID */ 174 li r10,0 /* BAD_VSID */
175 li r9,0 /* BAD_VSID */
176 li r11,SLB_VSID_USER /* flags don't much matter */ 176 li r11,SLB_VSID_USER /* flags don't much matter */
177 b slb_finish_load 177 b slb_finish_load
178 178
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
221 221
222 /* get context to calculate proto-VSID */ 222 /* get context to calculate proto-VSID */
223 ld r9,PACACONTEXTID(r13) 223 ld r9,PACACONTEXTID(r13)
224 rldimi r10,r9,USER_ESID_BITS,0
225
226 /* fall through slb_finish_load */ 224 /* fall through slb_finish_load */
227 225
228#endif /* __DISABLED__ */ 226#endif /* __DISABLED__ */
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
231/* 229/*
232 * Finish loading of an SLB entry and return 230 * Finish loading of an SLB entry and return
233 * 231 *
234 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET 232 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
235 */ 233 */
236slb_finish_load: 234slb_finish_load:
235 rldimi r10,r9,ESID_BITS,0
237 ASM_VSID_SCRAMBLE(r10,r9,256M) 236 ASM_VSID_SCRAMBLE(r10,r9,256M)
238 /* 237 /*
239 * bits above VSID_BITS_256M need to be ignored from r10 238 * bits above VSID_BITS_256M need to be ignored from r10
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
298/* 297/*
299 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. 298 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
300 * 299 *
301 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 300 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
302 */ 301 */
303slb_finish_load_1T: 302slb_finish_load_1T:
304 srdi r10,r10,40-28 /* get 1T ESID */ 303 srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
304 rldimi r10,r9,ESID_BITS_1T,0
305 ASM_VSID_SCRAMBLE(r10,r9,1T) 305 ASM_VSID_SCRAMBLE(r10,r9,1T)
306 /* 306 /*
307 * bits above VSID_BITS_1T need to be ignored from r10 307 * bits above VSID_BITS_1T need to be ignored from r10
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 0d82ef50dc3f..023ec8a13f38 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
82 if (!is_kernel_addr(addr)) { 82 if (!is_kernel_addr(addr)) {
83 ssize = user_segment_size(addr); 83 ssize = user_segment_size(addr);
84 vsid = get_vsid(mm->context.id, addr, ssize); 84 vsid = get_vsid(mm->context.id, addr, ssize);
85 WARN_ON(vsid == 0);
86 } else { 85 } else {
87 vsid = get_kernel_vsid(addr, mmu_kernel_ssize); 86 vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
88 ssize = mmu_kernel_ssize; 87 ssize = mmu_kernel_ssize;
89 } 88 }
89 WARN_ON(vsid == 0);
90 vpn = hpt_vpn(addr, vsid, ssize); 90 vpn = hpt_vpn(addr, vsid, ssize);
91 rpte = __real_pte(__pte(pte), ptep); 91 rpte = __real_pte(__pte(pte), ptep);
92 92
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index b554879bd31e..3c475d6267c7 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = {
420 .attrs = power7_events_attr, 420 .attrs = power7_events_attr,
421}; 421};
422 422
423PMU_FORMAT_ATTR(event, "config:0-19");
424
425static struct attribute *power7_pmu_format_attr[] = {
426 &format_attr_event.attr,
427 NULL,
428};
429
430struct attribute_group power7_pmu_format_group = {
431 .name = "format",
432 .attrs = power7_pmu_format_attr,
433};
434
423static const struct attribute_group *power7_pmu_attr_groups[] = { 435static const struct attribute_group *power7_pmu_attr_groups[] = {
436 &power7_pmu_format_group,
424 &power7_pmu_events_group, 437 &power7_pmu_events_group,
425 NULL, 438 NULL,
426}; 439};
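The power7-pmu hunk wires a "format" attribute group into the PMU's sysfs directory, so PMU_FORMAT_ATTR(event, "config:0-19") publishes a format/event file telling userspace that the raw event code occupies bits 0-19 of perf_event_attr.config. A hypothetical helper showing how a tool might apply that description; the function name and masking are illustrative, not kernel code:

/* Hypothetical userspace-side helper: pack a POWER7 event code into
 * attr.config according to the advertised "config:0-19" format. */
static unsigned long long power7_event_to_config(unsigned int event_code)
{
	return (unsigned long long)event_code & ((1u << 20) - 1);
}

With the format group in place, tools such as perf can resolve event=... terms from the PMU's format directory instead of hard-coding the encoding.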
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 611e92f291c4..7179726ba5c5 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data)
69 return IRQ_HANDLED; 69 return IRQ_HANDLED;
70}; 70};
71 71
72static int __devinit gpio_halt_probe(struct platform_device *pdev) 72static int gpio_halt_probe(struct platform_device *pdev)
73{ 73{
74 enum of_gpio_flags flags; 74 enum of_gpio_flags flags;
75 struct device_node *node = pdev->dev.of_node; 75 struct device_node *node = pdev->dev.of_node;
@@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev)
128 return 0; 128 return 0;
129} 129}
130 130
131static int __devexit gpio_halt_remove(struct platform_device *pdev) 131static int gpio_halt_remove(struct platform_device *pdev)
132{ 132{
133 if (halt_node) { 133 if (halt_node) {
134 int gpio = of_get_gpio(halt_node, 0); 134 int gpio = of_get_gpio(halt_node, 0);
@@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = {
165 .of_match_table = gpio_halt_match, 165 .of_match_table = gpio_halt_match,
166 }, 166 },
167 .probe = gpio_halt_probe, 167 .probe = gpio_halt_probe,
168 .remove = __devexit_p(gpio_halt_remove), 168 .remove = gpio_halt_remove,
169}; 169};
170 170
171module_platform_driver(gpio_halt_driver); 171module_platform_driver(gpio_halt_driver);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index cea2f09c4241..18e3b76c78d7 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -124,9 +124,8 @@ config 6xx
124 select PPC_HAVE_PMU_SUPPORT 124 select PPC_HAVE_PMU_SUPPORT
125 125
126config POWER3 126config POWER3
127 bool
128 depends on PPC64 && PPC_BOOK3S 127 depends on PPC64 && PPC_BOOK3S
129 default y if !POWER4_ONLY 128 def_bool y
130 129
131config POWER4 130config POWER4
132 depends on PPC64 && PPC_BOOK3S 131 depends on PPC64 && PPC_BOOK3S
@@ -145,8 +144,7 @@ config TUNE_CELL
145 but somewhat slower on other machines. This option only changes 144 but somewhat slower on other machines. This option only changes
146 the scheduling of instructions, not the selection of instructions 145 the scheduling of instructions, not the selection of instructions
147 itself, so the resulting kernel will keep running on all other 146 itself, so the resulting kernel will keep running on all other
148 machines. When building a kernel that is supposed to run only 147 machines.
149 on Cell, you should also select the POWER4_ONLY option.
150 148
151# this is temp to handle compat with arch=ppc 149# this is temp to handle compat with arch=ppc
152config 8xx 150config 8xx
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 8d4847191ecc..dc9200ca32ed 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -34,6 +34,8 @@ struct arsb {
34 u32 reserved[4]; 34 u32 reserved[4];
35} __packed; 35} __packed;
36 36
37#define EQC_WR_PROHIBIT 22
38
37struct msb { 39struct msb {
38 u8 fmt:4; 40 u8 fmt:4;
39 u8 oc:4; 41 u8 oc:4;
@@ -96,11 +98,13 @@ struct scm_device {
96#define OP_STATE_TEMP_ERR 2 98#define OP_STATE_TEMP_ERR 2
97#define OP_STATE_PERM_ERR 3 99#define OP_STATE_PERM_ERR 3
98 100
101enum scm_event {SCM_CHANGE, SCM_AVAIL};
102
99struct scm_driver { 103struct scm_driver {
100 struct device_driver drv; 104 struct device_driver drv;
101 int (*probe) (struct scm_device *scmdev); 105 int (*probe) (struct scm_device *scmdev);
102 int (*remove) (struct scm_device *scmdev); 106 int (*remove) (struct scm_device *scmdev);
103 void (*notify) (struct scm_device *scmdev); 107 void (*notify) (struct scm_device *scmdev, enum scm_event event);
104 void (*handler) (struct scm_device *scmdev, void *data, int error); 108 void (*handler) (struct scm_device *scmdev, void *data, int error);
105}; 109};
106 110
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 1d8fe2b17ef6..6b32af30878c 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
74 74
75static inline void __tlb_flush_mm(struct mm_struct * mm) 75static inline void __tlb_flush_mm(struct mm_struct * mm)
76{ 76{
77 if (unlikely(cpumask_empty(mm_cpumask(mm))))
78 return;
79 /* 77 /*
80 * If the machine has IDTE we prefer to do a per mm flush 78 * If the machine has IDTE we prefer to do a per mm flush
81 * on all cpus instead of doing a local flush if the mm 79 * on all cpus instead of doing a local flush if the mm
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 550228523267..94feff7d6132 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -636,7 +636,8 @@ ENTRY(mcck_int_handler)
636 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER 636 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
637mcck_skip: 637mcck_skip:
638 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT 638 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
639 mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA 639 stm %r0,%r7,__PT_R0(%r11)
640 mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
640 stm %r8,%r9,__PT_PSW(%r11) 641 stm %r8,%r9,__PT_PSW(%r11)
641 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 642 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
642 l %r1,BASED(.Ldo_machine_check) 643 l %r1,BASED(.Ldo_machine_check)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9c837c101297..2e6d60c55f90 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -678,8 +678,9 @@ ENTRY(mcck_int_handler)
678 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER 678 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
679 LAST_BREAK %r14 679 LAST_BREAK %r14
680mcck_skip: 680mcck_skip:
681 lghi %r14,__LC_GPREGS_SAVE_AREA 681 lghi %r14,__LC_GPREGS_SAVE_AREA+64
682 mvc __PT_R0(128,%r11),0(%r14) 682 stmg %r0,%r7,__PT_R0(%r11)
683 mvc __PT_R8(64,%r11),0(%r14)
683 stmg %r8,%r9,__PT_PSW(%r11) 684 stmg %r8,%r9,__PT_PSW(%r11)
684 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 685 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
685 lgr %r2,%r11 # pass pointer to pt_regs 686 lgr %r2,%r11 # pass pointer to pt_regs
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5360de85ec7..29268859d8ee 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -571,6 +571,8 @@ static void __init setup_memory_end(void)
571 571
572 /* Split remaining virtual space between 1:1 mapping & vmemmap array */ 572 /* Split remaining virtual space between 1:1 mapping & vmemmap array */
573 tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); 573 tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
574 /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
575 tmp = SECTION_ALIGN_UP(tmp);
574 tmp = VMALLOC_START - tmp * sizeof(struct page); 576 tmp = VMALLOC_START - tmp * sizeof(struct page);
575 tmp &= ~((vmax >> 11) - 1); /* align to page table level */ 577 tmp &= ~((vmax >> 11) - 1); /* align to page table level */
576 tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); 578 tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
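The setup_memory_end() tweak rounds the estimated vmemmap page count up to a whole number of sparsemem sections before splitting the remaining virtual space between the 1:1 mapping and the vmemmap array. A small sketch of the arithmetic; PAGES_PER_SECTION and the alignment macro below are illustrative stand-ins, not the exact s390 definitions:

/* Illustrative only: round the struct-page count up to a section
 * boundary so the vmemmap covers whole sparsemem sections. */
#define PAGES_PER_SECTION	(1UL << 18)	/* assumed section size */
#define SECTION_ALIGN_UP(x)	\
	(((x) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

static unsigned long split_point(unsigned long vmalloc_start,
				 unsigned long page_size,
				 unsigned long struct_page_size)
{
	/* pages that fit when each page also needs a struct page */
	unsigned long pages = vmalloc_start / (page_size + struct_page_size);

	pages = SECTION_ALIGN_UP(pages);	/* new: section-align the count */
	return vmalloc_start - pages * struct_page_size; /* end of 1:1 map */
}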
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 289127d5241c..3d361f236308 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -84,12 +84,6 @@ config ARCH_DEFCONFIG
84 default "arch/sparc/configs/sparc32_defconfig" if SPARC32 84 default "arch/sparc/configs/sparc32_defconfig" if SPARC32
85 default "arch/sparc/configs/sparc64_defconfig" if SPARC64 85 default "arch/sparc/configs/sparc64_defconfig" if SPARC64
86 86
87# CONFIG_BITS can be used at source level to get 32/64 bits
88config BITS
89 int
90 default 32 if SPARC32
91 default 64 if SPARC64
92
93config IOMMU_HELPER 87config IOMMU_HELPER
94 bool 88 bool
95 default y if SPARC64 89 default y if SPARC64
@@ -197,7 +191,7 @@ config RWSEM_XCHGADD_ALGORITHM
197 191
198config GENERIC_HWEIGHT 192config GENERIC_HWEIGHT
199 bool 193 bool
200 default y if !ULTRA_HAS_POPULATION_COUNT 194 default y
201 195
202config GENERIC_CALIBRATE_DELAY 196config GENERIC_CALIBRATE_DELAY
203 bool 197 bool
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index d06a26601753..6b67e50fb9b4 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -45,6 +45,7 @@
45#define SUN4V_CHIP_NIAGARA3 0x03 45#define SUN4V_CHIP_NIAGARA3 0x03
46#define SUN4V_CHIP_NIAGARA4 0x04 46#define SUN4V_CHIP_NIAGARA4 0x04
47#define SUN4V_CHIP_NIAGARA5 0x05 47#define SUN4V_CHIP_NIAGARA5 0x05
48#define SUN4V_CHIP_SPARC64X 0x8a
48#define SUN4V_CHIP_UNKNOWN 0xff 49#define SUN4V_CHIP_UNKNOWN 0xff
49 50
50#ifndef __ASSEMBLY__ 51#ifndef __ASSEMBLY__
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index a6c94a2bf9d4..5c5125895db8 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void)
493 sparc_pmu_type = "niagara5"; 493 sparc_pmu_type = "niagara5";
494 break; 494 break;
495 495
496 case SUN4V_CHIP_SPARC64X:
497 sparc_cpu_type = "SPARC64-X";
498 sparc_fpu_type = "SPARC64-X integrated FPU";
499 sparc_pmu_type = "sparc64-x";
500 break;
501
496 default: 502 default:
497 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", 503 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
498 prom_cpu_compatible); 504 prom_cpu_compatible);
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 2feb15c35d9e..26b706a1867d 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -134,6 +134,8 @@ prom_niagara_prefix:
134 .asciz "SUNW,UltraSPARC-T" 134 .asciz "SUNW,UltraSPARC-T"
135prom_sparc_prefix: 135prom_sparc_prefix:
136 .asciz "SPARC-" 136 .asciz "SPARC-"
137prom_sparc64x_prefix:
138 .asciz "SPARC64-X"
137 .align 4 139 .align 4
138prom_root_compatible: 140prom_root_compatible:
139 .skip 64 141 .skip 64
@@ -412,7 +414,7 @@ sun4v_chip_type:
412 cmp %g2, 'T' 414 cmp %g2, 'T'
413 be,pt %xcc, 70f 415 be,pt %xcc, 70f
414 cmp %g2, 'M' 416 cmp %g2, 'M'
415 bne,pn %xcc, 4f 417 bne,pn %xcc, 49f
416 nop 418 nop
417 419
41870: ldub [%g1 + 7], %g2 42070: ldub [%g1 + 7], %g2
@@ -425,7 +427,7 @@ sun4v_chip_type:
425 cmp %g2, '5' 427 cmp %g2, '5'
426 be,pt %xcc, 5f 428 be,pt %xcc, 5f
427 mov SUN4V_CHIP_NIAGARA5, %g4 429 mov SUN4V_CHIP_NIAGARA5, %g4
428 ba,pt %xcc, 4f 430 ba,pt %xcc, 49f
429 nop 431 nop
430 432
43191: sethi %hi(prom_cpu_compatible), %g1 43391: sethi %hi(prom_cpu_compatible), %g1
@@ -439,6 +441,25 @@ sun4v_chip_type:
439 mov SUN4V_CHIP_NIAGARA2, %g4 441 mov SUN4V_CHIP_NIAGARA2, %g4
440 442
4414: 4434:
444 /* Athena */
445 sethi %hi(prom_cpu_compatible), %g1
446 or %g1, %lo(prom_cpu_compatible), %g1
447 sethi %hi(prom_sparc64x_prefix), %g7
448 or %g7, %lo(prom_sparc64x_prefix), %g7
449 mov 9, %g3
45041: ldub [%g7], %g2
451 ldub [%g1], %g4
452 cmp %g2, %g4
453 bne,pn %icc, 49f
454 add %g7, 1, %g7
455 subcc %g3, 1, %g3
456 bne,pt %xcc, 41b
457 add %g1, 1, %g1
458 mov SUN4V_CHIP_SPARC64X, %g4
459 ba,pt %xcc, 5f
460 nop
461
46249:
442 mov SUN4V_CHIP_UNKNOWN, %g4 463 mov SUN4V_CHIP_UNKNOWN, %g4
4435: sethi %hi(sun4v_chip_type), %g2 4645: sethi %hi(sun4v_chip_type), %g2
444 or %g2, %lo(sun4v_chip_type), %g2 465 or %g2, %lo(sun4v_chip_type), %g2
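The head_64.S addition compares the first nine bytes of prom_cpu_compatible against the new prom_sparc64x_prefix string and, on a match, records SUN4V_CHIP_SPARC64X; the unmatched paths now fall through to the new 49: label, which still yields SUN4V_CHIP_UNKNOWN. Expressed as rough C, with the surrounding Niagara checks omitted and names mirroring the assembly symbols:

#include <string.h>

/* Rough C equivalent of the new Athena detection branch. */
static int detect_sparc64x(const char *prom_cpu_compatible)
{
	if (strncmp(prom_cpu_compatible, "SPARC64-X", 9) == 0)
		return 0x8a;	/* SUN4V_CHIP_SPARC64X */
	return 0xff;		/* SUN4V_CHIP_UNKNOWN (label 49: above) */
}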
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index fc4320886a3a..4d1487138d26 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -186,6 +186,8 @@ struct grpci2_cap_first {
186#define CAP9_IOMAP_OFS 0x20 186#define CAP9_IOMAP_OFS 0x20
187#define CAP9_BARSIZE_OFS 0x24 187#define CAP9_BARSIZE_OFS 0x24
188 188
189#define TGT 256
190
189struct grpci2_priv { 191struct grpci2_priv {
190 struct leon_pci_info info; /* must be on top of this structure */ 192 struct leon_pci_info info; /* must be on top of this structure */
191 struct grpci2_regs *regs; 193 struct grpci2_regs *regs;
@@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
237 if (where & 0x3) 239 if (where & 0x3)
238 return -EINVAL; 240 return -EINVAL;
239 241
240 if (bus == 0 && PCI_SLOT(devfn) != 0) 242 if (bus == 0) {
241 devfn += (0x8 * 6); 243 devfn += (0x8 * 6); /* start at AD16=Device0 */
244 } else if (bus == TGT) {
245 bus = 0;
246 devfn = 0; /* special case: bridge controller itself */
247 }
242 248
243 /* Select bus */ 249 /* Select bus */
244 spin_lock_irqsave(&grpci2_dev_lock, flags); 250 spin_lock_irqsave(&grpci2_dev_lock, flags);
@@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
303 if (where & 0x3) 309 if (where & 0x3)
304 return -EINVAL; 310 return -EINVAL;
305 311
306 if (bus == 0 && PCI_SLOT(devfn) != 0) 312 if (bus == 0) {
307 devfn += (0x8 * 6); 313 devfn += (0x8 * 6); /* start at AD16=Device0 */
314 } else if (bus == TGT) {
315 bus = 0;
316 devfn = 0; /* special case: bridge controller itself */
317 }
308 318
309 /* Select bus */ 319 /* Select bus */
310 spin_lock_irqsave(&grpci2_dev_lock, flags); 320 spin_lock_irqsave(&grpci2_dev_lock, flags);
@@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
368 unsigned int busno = bus->number; 378 unsigned int busno = bus->number;
369 int ret; 379 int ret;
370 380
371 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) { 381 if (PCI_SLOT(devfn) > 15 || busno > 255) {
372 *val = ~0; 382 *val = ~0;
373 return 0; 383 return 0;
374 } 384 }
@@ -406,7 +416,7 @@ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
406 struct grpci2_priv *priv = grpci2priv; 416 struct grpci2_priv *priv = grpci2priv;
407 unsigned int busno = bus->number; 417 unsigned int busno = bus->number;
408 418
409 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) 419 if (PCI_SLOT(devfn) > 15 || busno > 255)
410 return 0; 420 return 0;
411 421
412#ifdef GRPCI2_DEBUG_CFGACCESS 422#ifdef GRPCI2_DEBUG_CFGACCESS
@@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv *priv)
578 REGSTORE(regs->ahbmst_map[i], priv->pci_area); 588 REGSTORE(regs->ahbmst_map[i], priv->pci_area);
579 589
580 /* Get the GRPCI2 Host PCI ID */ 590 /* Get the GRPCI2 Host PCI ID */
581 grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); 591 grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid);
582 592
583 /* Get address to first (always defined) capability structure */ 593 /* Get address to first (always defined) capability structure */
584 grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); 594 grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr);
585 595
586 /* Enable/Disable Byte twisting */ 596 /* Enable/Disable Byte twisting */
587 grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); 597 grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map);
588 io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); 598 io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
589 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); 599 grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map);
590 600
591 /* Setup the Host's PCI Target BARs for other peripherals to access, 601 /* Setup the Host's PCI Target BARs for other peripherals to access,
592 * and do DMA to the host's memory. The target BARs can be sized and 602 * and do DMA to the host's memory. The target BARs can be sized and
@@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv *priv)
617 pciadr = 0; 627 pciadr = 0;
618 } 628 }
619 } 629 }
620 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); 630 grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4,
621 grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); 631 bar_sz);
622 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); 632 grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
633 grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
623 printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", 634 printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
624 i, pciadr, ahbadr); 635 i, pciadr, ahbadr);
625 } 636 }
626 637
627 /* set as bus master and enable pci memory responses */ 638 /* set as bus master and enable pci memory responses */
628 grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); 639 grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
629 data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); 640 data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
630 grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); 641 grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
631 642
632 /* Enable Error respone (CPU-TRAP) on illegal memory access. */ 643 /* Enable Error respone (CPU-TRAP) on illegal memory access. */
633 REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); 644 REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
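The GRPCI2 change introduces TGT (bus number 256, outside the valid 0-255 range) as a sentinel that routes a configuration access to the host bridge's own configuration space, while ordinary bus-0 accesses now always start at AD16/Device 0. A sketch of the dispatch, mirroring the logic added to grpci2_cfg_r32()/grpci2_cfg_w32(); the helper name is illustrative:

#define TGT 256		/* out-of-range bus number = "the bridge itself" */

/* Sketch of the target selection now done in grpci2_cfg_r32()/_w32(). */
static void grpci2_select_target(unsigned int *bus, unsigned int *devfn)
{
	if (*bus == 0) {
		*devfn += 0x8 * 6;	/* external bus 0: start at AD16 = Device 0 */
	} else if (*bus == TGT) {
		*bus = 0;
		*devfn = 0;		/* host controller's own config space */
	}
}

Callers such as grpci2_hw_init() then read the host's PCI ID with grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid), while the generic read/write config entry points reject busno > 255, so the sentinel is never reachable from the PCI core.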
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 8c5eff6d6df5..47684815e5c8 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m
330CONFIG_MD_RAID1=m 330CONFIG_MD_RAID1=m
331CONFIG_MD_RAID10=m 331CONFIG_MD_RAID10=m
332CONFIG_MD_RAID456=m 332CONFIG_MD_RAID456=m
333CONFIG_MULTICORE_RAID456=y
334CONFIG_MD_FAULTY=m 333CONFIG_MD_FAULTY=m
335CONFIG_BLK_DEV_DM=m 334CONFIG_BLK_DEV_DM=m
336CONFIG_DM_DEBUG=y 335CONFIG_DM_DEBUG=y
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index e7a3dfcbcda7..dd2b8f0c631f 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m
324CONFIG_MD_RAID1=m 324CONFIG_MD_RAID1=m
325CONFIG_MD_RAID10=m 325CONFIG_MD_RAID10=m
326CONFIG_MD_RAID456=m 326CONFIG_MD_RAID456=m
327CONFIG_MULTICORE_RAID456=y
328CONFIG_MD_FAULTY=m 327CONFIG_MD_FAULTY=m
329CONFIG_BLK_DEV_DM=m 328CONFIG_BLK_DEV_DM=m
330CONFIG_DM_DEBUG=y 329CONFIG_DM_DEBUG=y
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index d3ddd17405d0..5a6d2873f80e 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -77,6 +77,7 @@ struct arch_specific_insn {
77 * a post_handler or break_handler). 77 * a post_handler or break_handler).
78 */ 78 */
79 int boostable; 79 int boostable;
80 bool if_modifier;
80}; 81};
81 82
82struct arch_optimized_insn { 83struct arch_optimized_insn {
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 635a74d22409..4979778cc7fb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -414,8 +414,8 @@ struct kvm_vcpu_arch {
414 gpa_t time; 414 gpa_t time;
415 struct pvclock_vcpu_time_info hv_clock; 415 struct pvclock_vcpu_time_info hv_clock;
416 unsigned int hw_tsc_khz; 416 unsigned int hw_tsc_khz;
417 unsigned int time_offset; 417 struct gfn_to_hva_cache pv_time;
418 struct page *time_page; 418 bool pv_time_enabled;
419 /* set guest stopped flag in pvclock flags field */ 419 /* set guest stopped flag in pvclock flags field */
420 bool pvclock_set_guest_stopped_request; 420 bool pvclock_set_guest_stopped_request;
421 421
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index c20d1ce62dc6..e709884d0ef9 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
382 return _hypercall3(int, console_io, cmd, count, str); 382 return _hypercall3(int, console_io, cmd, count, str);
383} 383}
384 384
385extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); 385extern int __must_check xen_physdev_op_compat(int, void *);
386 386
387static inline int 387static inline int
388HYPERVISOR_physdev_op(int cmd, void *arg) 388HYPERVISOR_physdev_op(int cmd, void *arg)
389{ 389{
390 int rc = _hypercall2(int, physdev_op, cmd, arg); 390 int rc = _hypercall2(int, physdev_op, cmd, arg);
391 if (unlikely(rc == -ENOSYS)) 391 if (unlikely(rc == -ENOSYS))
392 rc = HYPERVISOR_physdev_op_compat(cmd, arg); 392 rc = xen_physdev_op_compat(cmd, arg);
393 return rc; 393 return rc;
394} 394}
395 395
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 892ce40a7470..7a060f4b411f 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -44,6 +44,7 @@
44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
46 46
47#define MSR_PLATFORM_INFO 0x000000ce
47#define MSR_MTRRcap 0x000000fe 48#define MSR_MTRRcap 0x000000fe
48#define MSR_IA32_BBL_CR_CTL 0x00000119 49#define MSR_IA32_BBL_CR_CTL 0x00000119
49#define MSR_IA32_BBL_CR_CTL3 0x0000011e 50#define MSR_IA32_BBL_CR_CTL3 0x0000011e
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 529c8931fc02..dab7580c47ae 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -101,6 +101,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
101 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 101 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
102 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 102 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
103 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 103 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
104 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
105 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
106 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
107 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
104 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ 108 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
105 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 109 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
106 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 110 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 3f06e6149981..7bfe318d3d8a 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
375 else 375 else
376 p->ainsn.boostable = -1; 376 p->ainsn.boostable = -1;
377 377
378 /* Check whether the instruction modifies Interrupt Flag or not */
379 p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
380
378 /* Also, displacement change doesn't affect the first byte */ 381 /* Also, displacement change doesn't affect the first byte */
379 p->opcode = p->ainsn.insn[0]; 382 p->opcode = p->ainsn.insn[0];
380} 383}
@@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
434 __this_cpu_write(current_kprobe, p); 437 __this_cpu_write(current_kprobe, p);
435 kcb->kprobe_saved_flags = kcb->kprobe_old_flags 438 kcb->kprobe_saved_flags = kcb->kprobe_old_flags
436 = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); 439 = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
437 if (is_IF_modifier(p->ainsn.insn)) 440 if (p->ainsn.if_modifier)
438 kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; 441 kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
439} 442}
440 443
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index 7890bc838952..d893e8ed8ac9 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
90 struct microcode_intel ***mc_saved; 90 struct microcode_intel ***mc_saved;
91 91
92 mc_saved = (struct microcode_intel ***) 92 mc_saved = (struct microcode_intel ***)
93 __pa_symbol(&mc_saved_data->mc_saved); 93 __pa_nodebug(&mc_saved_data->mc_saved);
94 for (i = 0; i < mc_saved_data->mc_saved_count; i++) { 94 for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
95 struct microcode_intel *p; 95 struct microcode_intel *p;
96 96
97 p = *(struct microcode_intel **) 97 p = *(struct microcode_intel **)
98 __pa(mc_saved_data->mc_saved + i); 98 __pa_nodebug(mc_saved_data->mc_saved + i);
99 mc_saved_tmp[i] = (struct microcode_intel *)__pa(p); 99 mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
100 } 100 }
101} 101}
102#endif 102#endif
@@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
562 struct cpio_data cd; 562 struct cpio_data cd;
563 long offset = 0; 563 long offset = 0;
564#ifdef CONFIG_X86_32 564#ifdef CONFIG_X86_32
565 char *p = (char *)__pa_symbol(ucode_name); 565 char *p = (char *)__pa_nodebug(ucode_name);
566#else 566#else
567 char *p = ucode_name; 567 char *p = ucode_name;
568#endif 568#endif
@@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
630 if (mc_intel == NULL) 630 if (mc_intel == NULL)
631 return; 631 return;
632 632
633 delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info); 633 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
634 current_mc_date_p = (int *)__pa_symbol(&current_mc_date); 634 current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
635 635
636 *delay_ucode_info_p = 1; 636 *delay_ucode_info_p = 1;
637 *current_mc_date_p = mc_intel->hdr.date; 637 *current_mc_date_p = mc_intel->hdr.date;
@@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
659} 659}
660#endif 660#endif
661 661
662static int apply_microcode_early(struct mc_saved_data *mc_saved_data, 662static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
663 struct ucode_cpu_info *uci) 663 struct ucode_cpu_info *uci)
664{ 664{
665 struct microcode_intel *mc_intel; 665 struct microcode_intel *mc_intel;
666 unsigned int val[2]; 666 unsigned int val[2];
@@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
741#ifdef CONFIG_X86_32 741#ifdef CONFIG_X86_32
742 struct boot_params *boot_params_p; 742 struct boot_params *boot_params_p;
743 743
744 boot_params_p = (struct boot_params *)__pa_symbol(&boot_params); 744 boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
745 ramdisk_image = boot_params_p->hdr.ramdisk_image; 745 ramdisk_image = boot_params_p->hdr.ramdisk_image;
746 ramdisk_size = boot_params_p->hdr.ramdisk_size; 746 ramdisk_size = boot_params_p->hdr.ramdisk_size;
747 initrd_start_early = ramdisk_image; 747 initrd_start_early = ramdisk_image;
748 initrd_end_early = initrd_start_early + ramdisk_size; 748 initrd_end_early = initrd_start_early + ramdisk_size;
749 749
750 _load_ucode_intel_bsp( 750 _load_ucode_intel_bsp(
751 (struct mc_saved_data *)__pa_symbol(&mc_saved_data), 751 (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
752 (unsigned long *)__pa_symbol(&mc_saved_in_initrd), 752 (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
753 initrd_start_early, initrd_end_early, &uci); 753 initrd_start_early, initrd_end_early, &uci);
754#else 754#else
755 ramdisk_image = boot_params.hdr.ramdisk_image; 755 ramdisk_image = boot_params.hdr.ramdisk_image;
@@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
772 unsigned long *initrd_start_p; 772 unsigned long *initrd_start_p;
773 773
774 mc_saved_in_initrd_p = 774 mc_saved_in_initrd_p =
775 (unsigned long *)__pa_symbol(mc_saved_in_initrd); 775 (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
776 mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data); 776 mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
777 initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start); 777 initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
778 initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p); 778 initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
779#else 779#else
780 mc_saved_data_p = &mc_saved_data; 780 mc_saved_data_p = &mc_saved_data;
781 mc_saved_in_initrd_p = mc_saved_in_initrd; 781 mc_saved_in_initrd_p = mc_saved_in_initrd;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f71500af1f81..f19ac0aca60d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1406 unsigned long flags, this_tsc_khz; 1406 unsigned long flags, this_tsc_khz;
1407 struct kvm_vcpu_arch *vcpu = &v->arch; 1407 struct kvm_vcpu_arch *vcpu = &v->arch;
1408 struct kvm_arch *ka = &v->kvm->arch; 1408 struct kvm_arch *ka = &v->kvm->arch;
1409 void *shared_kaddr;
1410 s64 kernel_ns, max_kernel_ns; 1409 s64 kernel_ns, max_kernel_ns;
1411 u64 tsc_timestamp, host_tsc; 1410 u64 tsc_timestamp, host_tsc;
1412 struct pvclock_vcpu_time_info *guest_hv_clock; 1411 struct pvclock_vcpu_time_info guest_hv_clock;
1413 u8 pvclock_flags; 1412 u8 pvclock_flags;
1414 bool use_master_clock; 1413 bool use_master_clock;
1415 1414
1416 kernel_ns = 0; 1415 kernel_ns = 0;
1417 host_tsc = 0; 1416 host_tsc = 0;
1418 1417
1419 /* Keep irq disabled to prevent changes to the clock */
1420 local_irq_save(flags);
1421 this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
1422 if (unlikely(this_tsc_khz == 0)) {
1423 local_irq_restore(flags);
1424 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1425 return 1;
1426 }
1427
1428 /* 1418 /*
1429 * If the host uses TSC clock, then passthrough TSC as stable 1419 * If the host uses TSC clock, then passthrough TSC as stable
1430 * to the guest. 1420 * to the guest.
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1436 kernel_ns = ka->master_kernel_ns; 1426 kernel_ns = ka->master_kernel_ns;
1437 } 1427 }
1438 spin_unlock(&ka->pvclock_gtod_sync_lock); 1428 spin_unlock(&ka->pvclock_gtod_sync_lock);
1429
1430 /* Keep irq disabled to prevent changes to the clock */
1431 local_irq_save(flags);
1432 this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
1433 if (unlikely(this_tsc_khz == 0)) {
1434 local_irq_restore(flags);
1435 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1436 return 1;
1437 }
1439 if (!use_master_clock) { 1438 if (!use_master_clock) {
1440 host_tsc = native_read_tsc(); 1439 host_tsc = native_read_tsc();
1441 kernel_ns = get_kernel_ns(); 1440 kernel_ns = get_kernel_ns();
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1463 1462
1464 local_irq_restore(flags); 1463 local_irq_restore(flags);
1465 1464
1466 if (!vcpu->time_page) 1465 if (!vcpu->pv_time_enabled)
1467 return 0; 1466 return 0;
1468 1467
1469 /* 1468 /*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1525 */ 1524 */
1526 vcpu->hv_clock.version += 2; 1525 vcpu->hv_clock.version += 2;
1527 1526
1528 shared_kaddr = kmap_atomic(vcpu->time_page); 1527 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
1529 1528 &guest_hv_clock, sizeof(guest_hv_clock))))
1530 guest_hv_clock = shared_kaddr + vcpu->time_offset; 1529 return 0;
1531 1530
1532 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 1531 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1533 pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); 1532 pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
1534 1533
1535 if (vcpu->pvclock_set_guest_stopped_request) { 1534 if (vcpu->pvclock_set_guest_stopped_request) {
1536 pvclock_flags |= PVCLOCK_GUEST_STOPPED; 1535 pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1543 1542
1544 vcpu->hv_clock.flags = pvclock_flags; 1543 vcpu->hv_clock.flags = pvclock_flags;
1545 1544
1546 memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, 1545 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1547 sizeof(vcpu->hv_clock)); 1546 &vcpu->hv_clock,
1548 1547 sizeof(vcpu->hv_clock));
1549 kunmap_atomic(shared_kaddr);
1550
1551 mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
1552 return 0; 1548 return 0;
1553} 1549}
1554 1550
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1837 1833
1838static void kvmclock_reset(struct kvm_vcpu *vcpu) 1834static void kvmclock_reset(struct kvm_vcpu *vcpu)
1839{ 1835{
1840 if (vcpu->arch.time_page) { 1836 vcpu->arch.pv_time_enabled = false;
1841 kvm_release_page_dirty(vcpu->arch.time_page);
1842 vcpu->arch.time_page = NULL;
1843 }
1844} 1837}
1845 1838
1846static void accumulate_steal_time(struct kvm_vcpu *vcpu) 1839static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1947 break; 1940 break;
1948 case MSR_KVM_SYSTEM_TIME_NEW: 1941 case MSR_KVM_SYSTEM_TIME_NEW:
1949 case MSR_KVM_SYSTEM_TIME: { 1942 case MSR_KVM_SYSTEM_TIME: {
1943 u64 gpa_offset;
1950 kvmclock_reset(vcpu); 1944 kvmclock_reset(vcpu);
1951 1945
1952 vcpu->arch.time = data; 1946 vcpu->arch.time = data;
@@ -1956,14 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1956 if (!(data & 1)) 1950 if (!(data & 1))
1957 break; 1951 break;
1958 1952
1959 /* ...but clean it before doing the actual write */ 1953 gpa_offset = data & ~(PAGE_MASK | 1);
1960 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
1961 1954
1962 vcpu->arch.time_page = 1955 /* Check that the address is 32-byte aligned. */
1963 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); 1956 if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
1957 break;
1964 1958
1965 if (is_error_page(vcpu->arch.time_page)) 1959 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
1966 vcpu->arch.time_page = NULL; 1960 &vcpu->arch.pv_time, data & ~1ULL))
1961 vcpu->arch.pv_time_enabled = false;
1962 else
1963 vcpu->arch.pv_time_enabled = true;
1967 1964
1968 break; 1965 break;
1969 } 1966 }
@@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2967 */ 2964 */
2968static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 2965static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
2969{ 2966{
2970 if (!vcpu->arch.time_page) 2967 if (!vcpu->arch.pv_time_enabled)
2971 return -EINVAL; 2968 return -EINVAL;
2972 vcpu->arch.pvclock_set_guest_stopped_request = true; 2969 vcpu->arch.pvclock_set_guest_stopped_request = true;
2973 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2970 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
6718 goto fail_free_wbinvd_dirty_mask; 6715 goto fail_free_wbinvd_dirty_mask;
6719 6716
6720 vcpu->arch.ia32_tsc_adjust_msr = 0x0; 6717 vcpu->arch.ia32_tsc_adjust_msr = 0x0;
6718 vcpu->arch.pv_time_enabled = false;
6721 kvm_async_pf_hash_reset(vcpu); 6719 kvm_async_pf_hash_reset(vcpu);
6722 kvm_pmu_init(vcpu); 6720 kvm_pmu_init(vcpu);
6723 6721
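The kvm/x86.c hunks replace the pinned time_page/kmap_atomic() scheme with a gfn_to_hva_cache: the MSR_KVM_SYSTEM_TIME write initialises vcpu->arch.pv_time with kvm_gfn_to_hva_cache_init(), and kvm_guest_time_update() then reads and writes the guest's pvclock area through kvm_read_guest_cached()/kvm_write_guest_cached(). A condensed sketch of the new flow; the calls are the ones visible in the diff, the clock math and surrounding context are elided:

/* (1) MSR_KVM_SYSTEM_TIME handler: remember the guest's pvclock GPA. */
vcpu->arch.time = data;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_time, data & ~1ULL))
	vcpu->arch.pv_time_enabled = false;
else
	vcpu->arch.pv_time_enabled = true;

/* (2) kvm_guest_time_update(): copy through the cache, no pinning/kmap. */
if (!vcpu->arch.pv_time_enabled)
	return 0;
if (kvm_read_guest_cached(kvm, &vcpu->arch.pv_time,
			  &guest_hv_clock, sizeof(guest_hv_clock)))
	return 0;
/* ...recompute hv_clock, preserving PVCLOCK_GUEST_STOPPED... */
kvm_write_guest_cached(kvm, &vcpu->arch.pv_time,
		       &vcpu->arch.hv_clock, sizeof(vcpu->arch.hv_clock));

Dropping the pinned page also removes the explicit mark_page_dirty() call, which the cached write path is expected to take care of.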
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 05928aae911e..906fea315791 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
74 char c; 74 char c;
75 unsigned zero_len; 75 unsigned zero_len;
76 76
77 for (; len; --len) { 77 for (; len; --len, to++) {
78 if (__get_user_nocheck(c, from++, sizeof(char))) 78 if (__get_user_nocheck(c, from++, sizeof(char)))
79 break; 79 break;
80 if (__put_user_nocheck(c, to++, sizeof(char))) 80 if (__put_user_nocheck(c, to, sizeof(char)))
81 break; 81 break;
82 } 82 }
83 83
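The usercopy fix moves the destination increment out of the faulting __put_user_nocheck() call and into the loop update, so `to` advances only once an iteration has completed. When either access faults, `to` is therefore left pointing at the first byte that was not successfully stored, presumably so that the remainder handling after the loop starts zeroing at the right place. The fixed loop, for reference:

/* Fixed tail loop: `to` advances only when an iteration completes, so a
 * faulting store no longer steps past the byte it failed to write. */
for (; len; --len, to++) {
	if (__get_user_nocheck(c, from++, sizeof(char)))
		break;
	if (__put_user_nocheck(c, to, sizeof(char)))
		break;
}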
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e8e34938c57d..6afbb2ca9a0a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3)
1467 __xen_write_cr3(true, cr3); 1467 __xen_write_cr3(true, cr3);
1468 1468
1469 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ 1469 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1470
1471 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1472} 1470}
1473#endif 1471#endif
1474 1472
@@ -2122,6 +2120,7 @@ static void __init xen_post_allocator_init(void)
2122#endif 2120#endif
2123 2121
2124#ifdef CONFIG_X86_64 2122#ifdef CONFIG_X86_64
2123 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2125 SetPagePinned(virt_to_page(level3_user_vsyscall)); 2124 SetPagePinned(virt_to_page(level3_user_vsyscall));
2126#endif 2125#endif
2127 xen_mark_init_mm_pinned(); 2126 xen_mark_init_mm_pinned();