author     David S. Miller <davem@davemloft.net>    2012-06-15 18:51:55 -0400
committer  David S. Miller <davem@davemloft.net>    2012-06-15 18:51:55 -0400
commit     7e52b33bd50faa866bc3e6e97e68438bc5e52251 (patch)
tree       46e68adf23f4f170a0eb5045c33a76234de6cf92 /arch
parent     91c8028c95a468da9c0aafd2d91cf24e27784206 (diff)
parent     2a0c451ade8e1783c5d453948289e4a978d417c9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/ipv6/route.c

This deals with a merge conflict between the net-next addition of the
inetpeer network namespace ops, and Thomas Graf's bug fix in
2a0c451ade8e1783c5d453948289e4a978d417c9 which makes sure we don't
register /proc/net/ipv6_route before it is actually safe to do so.

Signed-off-by: David S. Miller <davem@davemloft.net>
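The ordering rule behind Thomas Graf's fix is simply that a reader-visible entry point (here the /proc file) must not be published before the state it exposes is fully set up. A loose userspace illustration of that pattern follows; every name in it is made up for the example and is not the kernel's API.

#include <stdio.h>
#include <stdlib.h>

struct route_table {
	int nentries;
	int *entries;
};

/* Readers only ever see the table through this pointer (the "/proc" analogue). */
static struct route_table *published_table;

static struct route_table *table_init(int n)
{
	struct route_table *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	t->entries = calloc(n, sizeof(*t->entries));
	if (!t->entries) {
		free(t);
		return NULL;
	}
	t->nentries = n;
	return t;
}

int main(void)
{
	struct route_table *t = table_init(8);

	if (!t)
		return 1;

	/* Publish only once every field is valid; doing it earlier is the bug. */
	published_table = t;
	printf("table with %d entries is now visible to readers\n",
	       published_table->nentries);

	free(t->entries);
	free(t);
	return 0;
}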
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/mach-omap2/display.c | 4
-rw-r--r--  arch/arm/mach-shmobile/Kconfig | 6
-rw-r--r--  arch/arm/mm/dma-mapping.c | 10
-rw-r--r--  arch/avr32/kernel/signal.c | 2
-rw-r--r--  arch/blackfin/kernel/process.c | 2
-rw-r--r--  arch/m68k/Kconfig | 2
-rw-r--r--  arch/m68k/include/asm/Kbuild | 2
-rw-r--r--  arch/m68k/include/asm/m528xsim.h | 2
-rw-r--r--  arch/m68k/include/asm/uaccess_mm.h | 11
-rw-r--r--  arch/m68k/kernel/ptrace.c | 2
-rw-r--r--  arch/m68k/kernel/time.c | 4
-rw-r--r--  arch/m68k/lib/uaccess.c | 74
-rw-r--r--  arch/m68k/platform/68328/timers.c | 6
-rw-r--r--  arch/m68k/platform/68360/config.c | 7
-rw-r--r--  arch/parisc/Makefile | 3
-rw-r--r--  arch/parisc/include/asm/Kbuild | 1
-rw-r--r--  arch/parisc/include/asm/bug.h | 2
-rw-r--r--  arch/powerpc/kernel/module_32.c | 11
-rw-r--r--  arch/powerpc/kernel/time.c | 14
-rw-r--r--  arch/sh/Kconfig | 2
-rw-r--r--  arch/sh/Makefile | 16
-rw-r--r--  arch/sh/include/asm/Kbuild | 34
-rw-r--r--  arch/sh/include/asm/bitsperlong.h | 1
-rw-r--r--  arch/sh/include/asm/cputime.h | 6
-rw-r--r--  arch/sh/include/asm/current.h | 1
-rw-r--r--  arch/sh/include/asm/delay.h | 1
-rw-r--r--  arch/sh/include/asm/div64.h | 1
-rw-r--r--  arch/sh/include/asm/emergency-restart.h | 6
-rw-r--r--  arch/sh/include/asm/errno.h | 6
-rw-r--r--  arch/sh/include/asm/fcntl.h | 1
-rw-r--r--  arch/sh/include/asm/ioctl.h | 1
-rw-r--r--  arch/sh/include/asm/ipcbuf.h | 1
-rw-r--r--  arch/sh/include/asm/irq_regs.h | 1
-rw-r--r--  arch/sh/include/asm/kvm_para.h | 1
-rw-r--r--  arch/sh/include/asm/local.h | 7
-rw-r--r--  arch/sh/include/asm/local64.h | 1
-rw-r--r--  arch/sh/include/asm/mman.h | 1
-rw-r--r--  arch/sh/include/asm/msgbuf.h | 1
-rw-r--r--  arch/sh/include/asm/param.h | 1
-rw-r--r--  arch/sh/include/asm/parport.h | 1
-rw-r--r--  arch/sh/include/asm/percpu.h | 6
-rw-r--r--  arch/sh/include/asm/poll.h | 1
-rw-r--r--  arch/sh/include/asm/resource.h | 6
-rw-r--r--  arch/sh/include/asm/scatterlist.h | 6
-rw-r--r--  arch/sh/include/asm/sembuf.h | 1
-rw-r--r--  arch/sh/include/asm/serial.h | 1
-rw-r--r--  arch/sh/include/asm/shmbuf.h | 1
-rw-r--r--  arch/sh/include/asm/siginfo.h | 6
-rw-r--r--  arch/sh/include/asm/sizes.h | 1
-rw-r--r--  arch/sh/include/asm/socket.h | 1
-rw-r--r--  arch/sh/include/asm/statfs.h | 6
-rw-r--r--  arch/sh/include/asm/termbits.h | 1
-rw-r--r--  arch/sh/include/asm/termios.h | 1
-rw-r--r--  arch/sh/include/asm/uaccess.h | 75
-rw-r--r--  arch/sh/include/asm/uaccess_32.h | 75
-rw-r--r--  arch/sh/include/asm/uaccess_64.h | 4
-rw-r--r--  arch/sh/include/asm/ucontext.h | 1
-rw-r--r--  arch/sh/include/asm/word-at-a-time.h | 53
-rw-r--r--  arch/sh/include/asm/xor.h | 1
-rw-r--r--  arch/sh/include/cpu-sh2a/cpu/ubc.h | 28
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S | 82
-rw-r--r--  arch/sh/kernel/process.c | 1
-rw-r--r--  arch/sh/kernel/process_64.c | 1
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c | 2
-rw-r--r--  arch/tile/include/asm/thread_info.h | 5
-rw-r--r--  arch/tile/kernel/entry.S | 14
-rw-r--r--  arch/tile/kernel/setup.c | 1
-rw-r--r--  arch/x86/boot/header.S | 42
-rw-r--r--  arch/x86/boot/tools/build.c | 172
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S | 6
-rw-r--r--  arch/x86/include/asm/nmi.h | 14
-rw-r--r--  arch/x86/include/asm/uaccess.h | 12
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 1
-rw-r--r--  arch/x86/kernel/aperture_64.c | 6
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 11
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 145
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 9
-rw-r--r--  arch/x86/kernel/kvmclock.c | 5
-rw-r--r--  arch/x86/kernel/nmi_selftest.c | 4
-rw-r--r--  arch/x86/kernel/reboot.c | 6
-rw-r--r--  arch/x86/kernel/smpboot.c | 19
-rw-r--r--  arch/x86/lib/usercopy.c | 4
-rw-r--r--  arch/x86/lib/x86-opcode-map.txt | 8
-rw-r--r--  arch/x86/mm/init.c | 3
-rw-r--r--  arch/x86/mm/srat.c | 2
-rw-r--r--  arch/x86/platform/mrst/mrst.c | 2
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 1
-rw-r--r--  arch/x86/tools/gen-insn-attr-x86.awk | 14
-rw-r--r--  arch/xtensa/include/asm/syscall.h | 4
-rw-r--r--  arch/xtensa/kernel/signal.c | 2
94 files changed, 495 insertions, 645 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b649c5904a4f..84449dd8f031 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,7 +7,6 @@ config ARM
7 select HAVE_IDE if PCI || ISA || PCMCIA 7 select HAVE_IDE if PCI || ISA || PCMCIA
8 select HAVE_DMA_ATTRS 8 select HAVE_DMA_ATTRS
9 select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) 9 select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
10 select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
11 select HAVE_MEMBLOCK 10 select HAVE_MEMBLOCK
12 select RTC_LIB 11 select RTC_LIB
13 select SYS_SUPPORTS_APM_EMULATION 12 select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 54d49ddb9b81..5fb47a14f4ba 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -271,9 +271,9 @@ static struct platform_device *create_simple_dss_pdev(const char *pdev_name,
271 goto err; 271 goto err;
272 } 272 }
273 273
274 r = omap_device_register(pdev); 274 r = platform_device_add(pdev);
275 if (r) { 275 if (r) {
276 pr_err("Could not register omap_device for %s\n", pdev_name); 276 pr_err("Could not register platform_device for %s\n", pdev_name);
277 goto err; 277 goto err;
278 } 278 }
279 279
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index f31383c32f9c..df33909205e2 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -186,6 +186,12 @@ config SH_TIMER_TMU
186 help 186 help
187 This enables build of the TMU timer driver. 187 This enables build of the TMU timer driver.
188 188
189config EM_TIMER_STI
190 bool "STI timer driver"
191 default y
192 help
193 This enables build of the STI timer driver.
194
189endmenu 195endmenu
190 196
191config SH_CLK_CPG 197config SH_CLK_CPG
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ea6b43154090..106c4c0ebccd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -268,10 +268,8 @@ static int __init consistent_init(void)
268 unsigned long base = consistent_base; 268 unsigned long base = consistent_base;
269 unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; 269 unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
270 270
271#ifndef CONFIG_ARM_DMA_USE_IOMMU 271 if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
272 if (cpu_architecture() >= CPU_ARCH_ARMv6)
273 return 0; 272 return 0;
274#endif
275 273
276 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); 274 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
277 if (!consistent_pte) { 275 if (!consistent_pte) {
@@ -342,7 +340,7 @@ static int __init coherent_init(void)
342 struct page *page; 340 struct page *page;
343 void *ptr; 341 void *ptr;
344 342
345 if (cpu_architecture() < CPU_ARCH_ARMv6) 343 if (!IS_ENABLED(CONFIG_CMA))
346 return 0; 344 return 0;
347 345
348 ptr = __alloc_from_contiguous(NULL, size, prot, &page); 346 ptr = __alloc_from_contiguous(NULL, size, prot, &page);
@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
704 702
705 if (arch_is_coherent() || nommu()) 703 if (arch_is_coherent() || nommu())
706 addr = __alloc_simple_buffer(dev, size, gfp, &page); 704 addr = __alloc_simple_buffer(dev, size, gfp, &page);
707 else if (cpu_architecture() < CPU_ARCH_ARMv6) 705 else if (!IS_ENABLED(CONFIG_CMA))
708 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); 706 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
709 else if (gfp & GFP_ATOMIC) 707 else if (gfp & GFP_ATOMIC)
710 addr = __alloc_from_pool(dev, size, &page, caller); 708 addr = __alloc_from_pool(dev, size, &page, caller);
@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
773 771
774 if (arch_is_coherent() || nommu()) { 772 if (arch_is_coherent() || nommu()) {
775 __dma_free_buffer(page, size); 773 __dma_free_buffer(page, size);
776 } else if (cpu_architecture() < CPU_ARCH_ARMv6) { 774 } else if (!IS_ENABLED(CONFIG_CMA)) {
777 __dma_free_remap(cpu_addr, size); 775 __dma_free_remap(cpu_addr, size);
778 __dma_free_buffer(page, size); 776 __dma_free_buffer(page, size);
779 } else { 777 } else {
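The dma-mapping.c hunks above replace #ifdef guards and runtime cpu_architecture() checks with IS_ENABLED(CONFIG_CMA) tests: the option check becomes an ordinary C expression, so both branches stay visible to the compiler but the dead one is discarded. The macro in the sketch below is a reduced stand-in, not the real <linux/kconfig.h> implementation.

#include <stdio.h>

#define CONFIG_CMA_ENABLED 0   /* pretend CONFIG_CMA is not set */

/* Simplified stand-in for IS_ENABLED(): expands to a compile-time 0 or 1. */
#define MY_IS_ENABLED(opt) (opt##_ENABLED)

static int alloc_from_contiguous(void) { return 1; }
static int alloc_remap_buffer(void)    { return 2; }

int main(void)
{
	int r;

	/* Mirrors: else if (!IS_ENABLED(CONFIG_CMA)) ... else ... */
	if (!MY_IS_ENABLED(CONFIG_CMA))
		r = alloc_remap_buffer();      /* dead-code-eliminated when CMA=y */
	else
		r = alloc_from_contiguous();

	printf("allocator path taken: %d\n", r);
	return 0;
}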
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index c140f9b41dce..d552a854dacc 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -300,7 +300,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
300 if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR) 300 if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
301 syscall = 1; 301 syscall = 1;
302 302
303 if (ti->flags & _TIF_SIGPENDING)) 303 if (ti->flags & _TIF_SIGPENDING)
304 do_signal(regs, syscall); 304 do_signal(regs, syscall);
305 305
306 if (ti->flags & _TIF_NOTIFY_RESUME) { 306 if (ti->flags & _TIF_NOTIFY_RESUME) {
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 2e3994b20169..62bcea7dcc6d 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
173 unsigned long newsp; 173 unsigned long newsp;
174 174
175#ifdef __ARCH_SYNC_CORE_DCACHE 175#ifdef __ARCH_SYNC_CORE_DCACHE
176 if (current->rt.nr_cpus_allowed == num_possible_cpus()) 176 if (current->nr_cpus_allowed == num_possible_cpus())
177 set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id())); 177 set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
178#endif 178#endif
179 179
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index cac5b6be572a..147120128260 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -7,6 +7,8 @@ config M68K
7 select GENERIC_IRQ_SHOW 7 select GENERIC_IRQ_SHOW
8 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS 8 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
9 select GENERIC_CPU_DEVICES 9 select GENERIC_CPU_DEVICES
10 select GENERIC_STRNCPY_FROM_USER if MMU
11 select GENERIC_STRNLEN_USER if MMU
10 select FPU if MMU 12 select FPU if MMU
11 select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE 13 select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
12 14
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 1a922fad76f7..eafa2539a8ee 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,2 +1,4 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2header-y += cachectl.h 2header-y += cachectl.h
3
4generic-y += word-at-a-time.h
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index d63b99ff7ff7..497c31c803ff 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -86,7 +86,7 @@
86/* 86/*
87 * QSPI module. 87 * QSPI module.
88 */ 88 */
89#define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340) 89#define MCFQSPI_BASE (MCF_IPSBAR + 0x340)
90#define MCFQSPI_SIZE 0x40 90#define MCFQSPI_SIZE 0x40
91 91
92#define MCFQSPI_CS0 147 92#define MCFQSPI_CS0 147
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 9c80cd515b20..472c891a4aee 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
379#define copy_from_user(to, from, n) __copy_from_user(to, from, n) 379#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
380#define copy_to_user(to, from, n) __copy_to_user(to, from, n) 380#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
381 381
382long strncpy_from_user(char *dst, const char __user *src, long count); 382#define user_addr_max() \
383long strnlen_user(const char __user *src, long n); 383 (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
384
385extern long strncpy_from_user(char *dst, const char __user *src, long count);
386extern __must_check long strlen_user(const char __user *str);
387extern __must_check long strnlen_user(const char __user *str, long n);
388
384unsigned long __clear_user(void __user *to, unsigned long n); 389unsigned long __clear_user(void __user *to, unsigned long n);
385 390
386#define clear_user __clear_user 391#define clear_user __clear_user
387 392
388#define strlen_user(str) strnlen_user(str, 32767)
389
390#endif /* _M68K_UACCESS_H */ 393#endif /* _M68K_UACCESS_H */
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 8b4a2222e658..1bc10e62b9af 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void)
286 } 286 }
287} 287}
288 288
289#ifdef CONFIG_COLDFIRE 289#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
290asmlinkage int syscall_trace_enter(void) 290asmlinkage int syscall_trace_enter(void)
291{ 291{
292 int ret = 0; 292 int ret = 0;
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index d7deb7fc7eb5..707f0573ec6b 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -85,7 +85,7 @@ void __init time_init(void)
85 mach_sched_init(timer_interrupt); 85 mach_sched_init(timer_interrupt);
86} 86}
87 87
88#ifdef CONFIG_M68KCLASSIC 88#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
89 89
90u32 arch_gettimeoffset(void) 90u32 arch_gettimeoffset(void)
91{ 91{
@@ -108,4 +108,4 @@ static int __init rtc_init(void)
108 108
109module_init(rtc_init); 109module_init(rtc_init);
110 110
111#endif /* CONFIG_M68KCLASSIC */ 111#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
index 5664386338da..5e97f2ee7c11 100644
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -104,80 +104,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
104EXPORT_SYMBOL(__generic_copy_to_user); 104EXPORT_SYMBOL(__generic_copy_to_user);
105 105
106/* 106/*
107 * Copy a null terminated string from userspace.
108 */
109long strncpy_from_user(char *dst, const char __user *src, long count)
110{
111 long res;
112 char c;
113
114 if (count <= 0)
115 return count;
116
117 asm volatile ("\n"
118 "1: "MOVES".b (%2)+,%4\n"
119 " move.b %4,(%1)+\n"
120 " jeq 2f\n"
121 " subq.l #1,%3\n"
122 " jne 1b\n"
123 "2: sub.l %3,%0\n"
124 "3:\n"
125 " .section .fixup,\"ax\"\n"
126 " .even\n"
127 "10: move.l %5,%0\n"
128 " jra 3b\n"
129 " .previous\n"
130 "\n"
131 " .section __ex_table,\"a\"\n"
132 " .align 4\n"
133 " .long 1b,10b\n"
134 " .previous"
135 : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c)
136 : "i" (-EFAULT), "0" (count));
137
138 return res;
139}
140EXPORT_SYMBOL(strncpy_from_user);
141
142/*
143 * Return the size of a string (including the ending 0)
144 *
145 * Return 0 on exception, a value greater than N if too long
146 */
147long strnlen_user(const char __user *src, long n)
148{
149 char c;
150 long res;
151
152 asm volatile ("\n"
153 "1: subq.l #1,%1\n"
154 " jmi 3f\n"
155 "2: "MOVES".b (%0)+,%2\n"
156 " tst.b %2\n"
157 " jne 1b\n"
158 " jra 4f\n"
159 "\n"
160 "3: addq.l #1,%0\n"
161 "4: sub.l %4,%0\n"
162 "5:\n"
163 " .section .fixup,\"ax\"\n"
164 " .even\n"
165 "20: sub.l %0,%0\n"
166 " jra 5b\n"
167 " .previous\n"
168 "\n"
169 " .section __ex_table,\"a\"\n"
170 " .align 4\n"
171 " .long 2b,20b\n"
172 " .previous\n"
173 : "=&a" (res), "+d" (n), "=&d" (c)
174 : "0" (src), "r" (src));
175
176 return res;
177}
178EXPORT_SYMBOL(strnlen_user);
179
180/*
181 * Zero Userspace 107 * Zero Userspace
182 */ 108 */
183 109
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c
index c801c172b822..f4dc9b295609 100644
--- a/arch/m68k/platform/68328/timers.c
+++ b/arch/m68k/platform/68328/timers.c
@@ -53,6 +53,7 @@
53#endif 53#endif
54 54
55static u32 m68328_tick_cnt; 55static u32 m68328_tick_cnt;
56static irq_handler_t timer_interrupt;
56 57
57/***************************************************************************/ 58/***************************************************************************/
58 59
@@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
62 TSTAT &= 0; 63 TSTAT &= 0;
63 64
64 m68328_tick_cnt += TICKS_PER_JIFFY; 65 m68328_tick_cnt += TICKS_PER_JIFFY;
65 return arch_timer_interrupt(irq, dummy); 66 return timer_interrupt(irq, dummy);
66} 67}
67 68
68/***************************************************************************/ 69/***************************************************************************/
@@ -99,7 +100,7 @@ static struct clocksource m68328_clk = {
99 100
100/***************************************************************************/ 101/***************************************************************************/
101 102
102void hw_timer_init(void) 103void hw_timer_init(irq_handler_t handler)
103{ 104{
104 /* disable timer 1 */ 105 /* disable timer 1 */
105 TCTL = 0; 106 TCTL = 0;
@@ -115,6 +116,7 @@ void hw_timer_init(void)
115 /* Enable timer 1 */ 116 /* Enable timer 1 */
116 TCTL |= TCTL_TEN; 117 TCTL |= TCTL_TEN;
117 clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); 118 clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
119 timer_interrupt = handler;
118} 120}
119 121
120/***************************************************************************/ 122/***************************************************************************/
diff --git a/arch/m68k/platform/68360/config.c b/arch/m68k/platform/68360/config.c
index 255fc03913e9..9877cefad1e7 100644
--- a/arch/m68k/platform/68360/config.c
+++ b/arch/m68k/platform/68360/config.c
@@ -35,6 +35,7 @@ extern void m360_cpm_reset(void);
35#define OSCILLATOR (unsigned long int)33000000 35#define OSCILLATOR (unsigned long int)33000000
36#endif 36#endif
37 37
38static irq_handler_t timer_interrupt;
38unsigned long int system_clock; 39unsigned long int system_clock;
39 40
40extern QUICC *pquicc; 41extern QUICC *pquicc;
@@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
52 53
53 pquicc->timer_ter1 = 0x0002; /* clear timer event */ 54 pquicc->timer_ter1 = 0x0002; /* clear timer event */
54 55
55 return arch_timer_interrupt(irq, dummy); 56 return timer_interrupt(irq, dummy);
56} 57}
57 58
58static struct irqaction m68360_timer_irq = { 59static struct irqaction m68360_timer_irq = {
@@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = {
61 .handler = hw_tick, 62 .handler = hw_tick,
62}; 63};
63 64
64void hw_timer_init(void) 65void hw_timer_init(irq_handler_t handler)
65{ 66{
66 unsigned char prescaler; 67 unsigned char prescaler;
67 unsigned short tgcr_save; 68 unsigned short tgcr_save;
@@ -94,6 +95,8 @@ void hw_timer_init(void)
94 95
95 pquicc->timer_ter1 = 0x0003; /* clear timer events */ 96 pquicc->timer_ter1 = 0x0003; /* clear timer events */
96 97
98 timer_interrupt = handler;
99
97 /* enable timer 1 interrupt in CIMR */ 100 /* enable timer 1 interrupt in CIMR */
98 setup_irq(CPMVEC_TIMER1, &m68360_timer_irq); 101 setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);
99 102
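Both m68k timer hunks above adopt the same pattern: hw_timer_init() now receives the generic tick handler and stashes it in a static function pointer that the hardware tick ISR calls, instead of hard-wiring arch_timer_interrupt(). A minimal userspace sketch of that callback-registration pattern follows; the typedef and the fake "ISR" are illustrative only (the kernel's irq_handler_t returns irqreturn_t).

#include <stdio.h>

typedef int (*irq_handler_t)(int irq, void *dev_id);

static irq_handler_t timer_interrupt;      /* set once at init time */

static int hw_tick(int irq, void *dummy)
{
	/* acknowledge the hardware here, then hand off to the generic handler */
	return timer_interrupt(irq, dummy);
}

static void hw_timer_init(irq_handler_t handler)
{
	timer_interrupt = handler;          /* must be set before ticks can fire */
}

static int generic_timer_tick(int irq, void *dev_id)
{
	printf("tick on irq %d\n", irq);
	return 1;
}

int main(void)
{
	hw_timer_init(generic_timer_tick);
	return hw_tick(42, NULL) ? 0 : 1;
}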
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index dbc3850b1d0d..5707f1a62341 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -21,6 +21,7 @@ KBUILD_DEFCONFIG := default_defconfig
21 21
22NM = sh $(srctree)/arch/parisc/nm 22NM = sh $(srctree)/arch/parisc/nm
23CHECKFLAGS += -D__hppa__=1 23CHECKFLAGS += -D__hppa__=1
24LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
24 25
25MACHINE := $(shell uname -m) 26MACHINE := $(shell uname -m)
26ifeq ($(MACHINE),parisc*) 27ifeq ($(MACHINE),parisc*)
@@ -79,7 +80,7 @@ kernel-y := mm/ kernel/ math-emu/
79kernel-$(CONFIG_HPUX) += hpux/ 80kernel-$(CONFIG_HPUX) += hpux/
80 81
81core-y += $(addprefix arch/parisc/, $(kernel-y)) 82core-y += $(addprefix arch/parisc/, $(kernel-y))
82libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name` 83libs-y += arch/parisc/lib/ $(LIBGCC)
83 84
84drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ 85drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
85 86
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 19a434f55059..4383707d9801 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,3 +1,4 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3header-y += pdc.h 3header-y += pdc.h
4generic-y += word-at-a-time.h
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 72cfdb0cfdd1..62a33338549c 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -1,6 +1,8 @@
1#ifndef _PARISC_BUG_H 1#ifndef _PARISC_BUG_H
2#define _PARISC_BUG_H 2#define _PARISC_BUG_H
3 3
4#include <linux/kernel.h> /* for BUGFLAG_TAINT */
5
4/* 6/*
5 * Tell the user there is some problem. 7 * Tell the user there is some problem.
6 * The offending file and line are encoded in the __bug_table section. 8 * The offending file and line are encoded in the __bug_table section.
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 0b6d79617d7b..2e3200ca485f 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
176 176
177static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) 177static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
178{ 178{
179 if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) 179 if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
180 && entry->jump[1] == 0x396b0000 + (val & 0xffff)) 180 && entry->jump[1] == 0x398c0000 + (val & 0xffff))
181 return 1; 181 return 1;
182 return 0; 182 return 0;
183} 183}
@@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location,
204 entry++; 204 entry++;
205 } 205 }
206 206
207 /* Stolen from Paul Mackerras as well... */ 207 entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
208 entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */ 208 entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/
209 entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/ 209 entry->jump[2] = 0x7d8903a6; /* mtctr r12 */
210 entry->jump[2] = 0x7d6903a6; /* mtctr r11 */
211 entry->jump[3] = 0x4e800420; /* bctr */ 210 entry->jump[3] = 0x4e800420; /* bctr */
212 211
213 DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); 212 DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 99a995c2a3f2..be171ee73bf8 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -475,6 +475,7 @@ void timer_interrupt(struct pt_regs * regs)
475 struct pt_regs *old_regs; 475 struct pt_regs *old_regs;
476 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 476 u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
477 struct clock_event_device *evt = &__get_cpu_var(decrementers); 477 struct clock_event_device *evt = &__get_cpu_var(decrementers);
478 u64 now;
478 479
479 /* Ensure a positive value is written to the decrementer, or else 480 /* Ensure a positive value is written to the decrementer, or else
480 * some CPUs will continue to take decrementer exceptions. 481 * some CPUs will continue to take decrementer exceptions.
@@ -509,9 +510,16 @@ void timer_interrupt(struct pt_regs * regs)
509 irq_work_run(); 510 irq_work_run();
510 } 511 }
511 512
512 *next_tb = ~(u64)0; 513 now = get_tb_or_rtc();
513 if (evt->event_handler) 514 if (now >= *next_tb) {
514 evt->event_handler(evt); 515 *next_tb = ~(u64)0;
516 if (evt->event_handler)
517 evt->event_handler(evt);
518 } else {
519 now = *next_tb - now;
520 if (now <= DECREMENTER_MAX)
521 set_dec((int)now);
522 }
515 523
516#ifdef CONFIG_PPC64 524#ifdef CONFIG_PPC64
517 /* collect purr register values often, for accurate calculations */ 525 /* collect purr register values often, for accurate calculations */
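The timer_interrupt() hunk above only runs the clockevent handler once the timebase has actually reached the programmed deadline; otherwise it re-arms the decrementer with the remaining delta, clamped to DECREMENTER_MAX. A small userspace sketch of that decision follows; the types and the constant value are stand-ins, not the kernel's.

#include <stdio.h>
#include <stdint.h>

#define DECREMENTER_MAX 0x7fffffff   /* stand-in value for the example */

static uint64_t next_tb;
static int handler_runs;

static void set_dec(int32_t val) { printf("re-armed decrementer: %d\n", (int)val); }
static void event_handler(void)  { handler_runs++; }

static void timer_interrupt(uint64_t now)
{
	if (now >= next_tb) {
		/* deadline reached: park the deadline and run the handler */
		next_tb = ~(uint64_t)0;
		event_handler();
	} else {
		/* spurious/early: re-arm with the remaining delta, clamped */
		uint64_t delta = next_tb - now;

		if (delta <= DECREMENTER_MAX)
			set_dec((int32_t)delta);
	}
}

int main(void)
{
	next_tb = 1000;
	timer_interrupt(500);    /* early: decrementer is simply re-armed */
	timer_interrupt(1000);   /* deadline reached: handler runs */
	printf("handler ran %d time(s)\n", handler_runs);
	return 0;
}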
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 99bcd0ee838d..31d9db7913e4 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -32,6 +32,8 @@ config SUPERH
32 select GENERIC_SMP_IDLE_THREAD 32 select GENERIC_SMP_IDLE_THREAD
33 select GENERIC_CLOCKEVENTS 33 select GENERIC_CLOCKEVENTS
34 select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST 34 select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
35 select GENERIC_STRNCPY_FROM_USER
36 select GENERIC_STRNLEN_USER
35 help 37 help
36 The SuperH is a RISC processor targeted for use in embedded systems 38 The SuperH is a RISC processor targeted for use in embedded systems
37 and consumer electronics; it was also used in the Sega Dreamcast 39 and consumer electronics; it was also used in the Sega Dreamcast
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 46edf070da1c..aed701c7b11b 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -9,6 +9,12 @@
9# License. See the file "COPYING" in the main directory of this archive 9# License. See the file "COPYING" in the main directory of this archive
10# for more details. 10# for more details.
11# 11#
12ifneq ($(SUBARCH),$(ARCH))
13 ifeq ($(CROSS_COMPILE),)
14 CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
15 endif
16endif
17
12isa-y := any 18isa-y := any
13isa-$(CONFIG_SH_DSP) := sh 19isa-$(CONFIG_SH_DSP) := sh
14isa-$(CONFIG_CPU_SH2) := sh2 20isa-$(CONFIG_CPU_SH2) := sh2
@@ -106,19 +112,13 @@ LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
106KBUILD_DEFCONFIG := cayman_defconfig 112KBUILD_DEFCONFIG := cayman_defconfig
107endif 113endif
108 114
109ifneq ($(SUBARCH),$(ARCH))
110 ifeq ($(CROSS_COMPILE),)
111 CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
112 endif
113endif
114
115ifdef CONFIG_CPU_LITTLE_ENDIAN 115ifdef CONFIG_CPU_LITTLE_ENDIAN
116ld-bfd := elf32-$(UTS_MACHINE)-linux 116ld-bfd := elf32-$(UTS_MACHINE)-linux
117LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd) 117LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
118LDFLAGS += -EL 118LDFLAGS += -EL
119else 119else
120ld-bfd := elf32-$(UTS_MACHINE)big-linux 120ld-bfd := elf32-$(UTS_MACHINE)big-linux
121LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd) 121LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
122LDFLAGS += -EB 122LDFLAGS += -EB
123endif 123endif
124 124
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 7beb42322f60..7b673ddcd555 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,5 +1,39 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3generic-y += bitsperlong.h
4generic-y += cputime.h
5generic-y += current.h
6generic-y += delay.h
7generic-y += div64.h
8generic-y += emergency-restart.h
9generic-y += errno.h
10generic-y += fcntl.h
11generic-y += ioctl.h
12generic-y += ipcbuf.h
13generic-y += irq_regs.h
14generic-y += kvm_para.h
15generic-y += local.h
16generic-y += local64.h
17generic-y += param.h
18generic-y += parport.h
19generic-y += percpu.h
20generic-y += poll.h
21generic-y += mman.h
22generic-y += msgbuf.h
23generic-y += resource.h
24generic-y += scatterlist.h
25generic-y += sembuf.h
26generic-y += serial.h
27generic-y += shmbuf.h
28generic-y += siginfo.h
29generic-y += sizes.h
30generic-y += socket.h
31generic-y += statfs.h
32generic-y += termbits.h
33generic-y += termios.h
34generic-y += ucontext.h
35generic-y += xor.h
36
3header-y += cachectl.h 37header-y += cachectl.h
4header-y += cpu-features.h 38header-y += cpu-features.h
5header-y += hw_breakpoint.h 39header-y += hw_breakpoint.h
diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/sh/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/bitsperlong.h>
diff --git a/arch/sh/include/asm/cputime.h b/arch/sh/include/asm/cputime.h
deleted file mode 100644
index 6ca395d1393e..000000000000
--- a/arch/sh/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __SH_CPUTIME_H
2#define __SH_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __SH_CPUTIME_H */
diff --git a/arch/sh/include/asm/current.h b/arch/sh/include/asm/current.h
deleted file mode 100644
index 4c51401b5537..000000000000
--- a/arch/sh/include/asm/current.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/current.h>
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h
deleted file mode 100644
index 9670e127b7b2..000000000000
--- a/arch/sh/include/asm/delay.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/delay.h>
diff --git a/arch/sh/include/asm/div64.h b/arch/sh/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/sh/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/div64.h>
diff --git a/arch/sh/include/asm/emergency-restart.h b/arch/sh/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/arch/sh/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H
3
4#include <asm-generic/emergency-restart.h>
5
6#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sh/include/asm/errno.h b/arch/sh/include/asm/errno.h
deleted file mode 100644
index 51cf6f9cebb8..000000000000
--- a/arch/sh/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_ERRNO_H
2#define __ASM_SH_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif /* __ASM_SH_ERRNO_H */
diff --git a/arch/sh/include/asm/fcntl.h b/arch/sh/include/asm/fcntl.h
deleted file mode 100644
index 46ab12db5739..000000000000
--- a/arch/sh/include/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/fcntl.h>
diff --git a/arch/sh/include/asm/ioctl.h b/arch/sh/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/sh/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctl.h>
diff --git a/arch/sh/include/asm/ipcbuf.h b/arch/sh/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/sh/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ipcbuf.h>
diff --git a/arch/sh/include/asm/irq_regs.h b/arch/sh/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/sh/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/sh/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/kvm_para.h>
diff --git a/arch/sh/include/asm/local.h b/arch/sh/include/asm/local.h
deleted file mode 100644
index 9ed9b9cb459a..000000000000
--- a/arch/sh/include/asm/local.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef __ASM_SH_LOCAL_H
2#define __ASM_SH_LOCAL_H
3
4#include <asm-generic/local.h>
5
6#endif /* __ASM_SH_LOCAL_H */
7
diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/sh/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/local64.h>
diff --git a/arch/sh/include/asm/mman.h b/arch/sh/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/sh/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/mman.h>
diff --git a/arch/sh/include/asm/msgbuf.h b/arch/sh/include/asm/msgbuf.h
deleted file mode 100644
index 809134c644a6..000000000000
--- a/arch/sh/include/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/msgbuf.h>
diff --git a/arch/sh/include/asm/param.h b/arch/sh/include/asm/param.h
deleted file mode 100644
index 965d45427975..000000000000
--- a/arch/sh/include/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/param.h>
diff --git a/arch/sh/include/asm/parport.h b/arch/sh/include/asm/parport.h
deleted file mode 100644
index cf252af64590..000000000000
--- a/arch/sh/include/asm/parport.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/parport.h>
diff --git a/arch/sh/include/asm/percpu.h b/arch/sh/include/asm/percpu.h
deleted file mode 100644
index 4db4b39a4399..000000000000
--- a/arch/sh/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ARCH_SH_PERCPU
2#define __ARCH_SH_PERCPU
3
4#include <asm-generic/percpu.h>
5
6#endif /* __ARCH_SH_PERCPU */
diff --git a/arch/sh/include/asm/poll.h b/arch/sh/include/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/sh/include/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/poll.h>
diff --git a/arch/sh/include/asm/resource.h b/arch/sh/include/asm/resource.h
deleted file mode 100644
index 9c2499a86ec0..000000000000
--- a/arch/sh/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_RESOURCE_H
2#define __ASM_SH_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif /* __ASM_SH_RESOURCE_H */
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
deleted file mode 100644
index 98dfc3510f10..000000000000
--- a/arch/sh/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_SCATTERLIST_H
2#define __ASM_SH_SCATTERLIST_H
3
4#include <asm-generic/scatterlist.h>
5
6#endif /* __ASM_SH_SCATTERLIST_H */
diff --git a/arch/sh/include/asm/sembuf.h b/arch/sh/include/asm/sembuf.h
deleted file mode 100644
index 7673b83cfef7..000000000000
--- a/arch/sh/include/asm/sembuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/sembuf.h>
diff --git a/arch/sh/include/asm/serial.h b/arch/sh/include/asm/serial.h
deleted file mode 100644
index a0cb0caff152..000000000000
--- a/arch/sh/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/serial.h>
diff --git a/arch/sh/include/asm/shmbuf.h b/arch/sh/include/asm/shmbuf.h
deleted file mode 100644
index 83c05fc2de38..000000000000
--- a/arch/sh/include/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/shmbuf.h>
diff --git a/arch/sh/include/asm/siginfo.h b/arch/sh/include/asm/siginfo.h
deleted file mode 100644
index 813040ed68a9..000000000000
--- a/arch/sh/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_SIGINFO_H
2#define __ASM_SH_SIGINFO_H
3
4#include <asm-generic/siginfo.h>
5
6#endif /* __ASM_SH_SIGINFO_H */
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h
deleted file mode 100644
index dd248c2e1085..000000000000
--- a/arch/sh/include/asm/sizes.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/sizes.h>
diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h
deleted file mode 100644
index 6b71384b9d8b..000000000000
--- a/arch/sh/include/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/socket.h>
diff --git a/arch/sh/include/asm/statfs.h b/arch/sh/include/asm/statfs.h
deleted file mode 100644
index 9202a023328f..000000000000
--- a/arch/sh/include/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_STATFS_H
2#define __ASM_SH_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif /* __ASM_SH_STATFS_H */
diff --git a/arch/sh/include/asm/termbits.h b/arch/sh/include/asm/termbits.h
deleted file mode 100644
index 3935b106de79..000000000000
--- a/arch/sh/include/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/termbits.h>
diff --git a/arch/sh/include/asm/termios.h b/arch/sh/include/asm/termios.h
deleted file mode 100644
index 280d78a9d966..000000000000
--- a/arch/sh/include/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/termios.h>
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 050f221fa898..8698a80ed00c 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -25,6 +25,8 @@
25 (__chk_user_ptr(addr), \ 25 (__chk_user_ptr(addr), \
26 __access_ok((unsigned long __force)(addr), (size))) 26 __access_ok((unsigned long __force)(addr), (size)))
27 27
28#define user_addr_max() (current_thread_info()->addr_limit.seg)
29
28/* 30/*
29 * Uh, these should become the main single-value transfer routines ... 31 * Uh, these should become the main single-value transfer routines ...
30 * They automatically use the right size if we just have the right 32 * They automatically use the right size if we just have the right
@@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; };
100# include "uaccess_64.h" 102# include "uaccess_64.h"
101#endif 103#endif
102 104
105extern long strncpy_from_user(char *dest, const char __user *src, long count);
106
107extern __must_check long strlen_user(const char __user *str);
108extern __must_check long strnlen_user(const char __user *str, long n);
109
103/* Generic arbitrary sized copy. */ 110/* Generic arbitrary sized copy. */
104/* Return the number of bytes NOT copied */ 111/* Return the number of bytes NOT copied */
105__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); 112__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
@@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
137 __cl_size; \ 144 __cl_size; \
138}) 145})
139 146
140/**
141 * strncpy_from_user: - Copy a NUL terminated string from userspace.
142 * @dst: Destination address, in kernel space. This buffer must be at
143 * least @count bytes long.
144 * @src: Source address, in user space.
145 * @count: Maximum number of bytes to copy, including the trailing NUL.
146 *
147 * Copies a NUL-terminated string from userspace to kernel space.
148 *
149 * On success, returns the length of the string (not including the trailing
150 * NUL).
151 *
152 * If access to userspace fails, returns -EFAULT (some data may have been
153 * copied).
154 *
155 * If @count is smaller than the length of the string, copies @count bytes
156 * and returns @count.
157 */
158#define strncpy_from_user(dest,src,count) \
159({ \
160 unsigned long __sfu_src = (unsigned long)(src); \
161 int __sfu_count = (int)(count); \
162 long __sfu_res = -EFAULT; \
163 \
164 if (__access_ok(__sfu_src, __sfu_count)) \
165 __sfu_res = __strncpy_from_user((unsigned long)(dest), \
166 __sfu_src, __sfu_count); \
167 \
168 __sfu_res; \
169})
170
171static inline unsigned long 147static inline unsigned long
172copy_from_user(void *to, const void __user *from, unsigned long n) 148copy_from_user(void *to, const void __user *from, unsigned long n)
173{ 149{
@@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
192 return __copy_size; 168 return __copy_size;
193} 169}
194 170
195/**
196 * strnlen_user: - Get the size of a string in user space.
197 * @s: The string to measure.
198 * @n: The maximum valid length
199 *
200 * Context: User context only. This function may sleep.
201 *
202 * Get the size of a NUL-terminated string in user space.
203 *
204 * Returns the size of the string INCLUDING the terminating NUL.
205 * On exception, returns 0.
206 * If the string is too long, returns a value greater than @n.
207 */
208static inline long strnlen_user(const char __user *s, long n)
209{
210 if (!__addr_ok(s))
211 return 0;
212 else
213 return __strnlen_user(s, n);
214}
215
216/**
217 * strlen_user: - Get the size of a string in user space.
218 * @str: The string to measure.
219 *
220 * Context: User context only. This function may sleep.
221 *
222 * Get the size of a NUL-terminated string in user space.
223 *
224 * Returns the size of the string INCLUDING the terminating NUL.
225 * On exception, returns 0.
226 *
227 * If there is a limit on the length of a valid string, you may wish to
228 * consider using strnlen_user() instead.
229 */
230#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
231
232/* 171/*
233 * The exception table consists of pairs of addresses: the first is the 172 * The exception table consists of pairs of addresses: the first is the
234 * address of an instruction that is allowed to fault, and the second is 173 * address of an instruction that is allowed to fault, and the second is
diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h
index ae0d24f6653f..c0de7ee35ab7 100644
--- a/arch/sh/include/asm/uaccess_32.h
+++ b/arch/sh/include/asm/uaccess_32.h
@@ -170,79 +170,4 @@ __asm__ __volatile__( \
170 170
171extern void __put_user_unknown(void); 171extern void __put_user_unknown(void);
172 172
173static inline int
174__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
175{
176 __kernel_size_t res;
177 unsigned long __dummy, _d, _s, _c;
178
179 __asm__ __volatile__(
180 "9:\n"
181 "mov.b @%2+, %1\n\t"
182 "cmp/eq #0, %1\n\t"
183 "bt/s 2f\n"
184 "1:\n"
185 "mov.b %1, @%3\n\t"
186 "dt %4\n\t"
187 "bf/s 9b\n\t"
188 " add #1, %3\n\t"
189 "2:\n\t"
190 "sub %4, %0\n"
191 "3:\n"
192 ".section .fixup,\"ax\"\n"
193 "4:\n\t"
194 "mov.l 5f, %1\n\t"
195 "jmp @%1\n\t"
196 " mov %9, %0\n\t"
197 ".balign 4\n"
198 "5: .long 3b\n"
199 ".previous\n"
200 ".section __ex_table,\"a\"\n"
201 " .balign 4\n"
202 " .long 9b,4b\n"
203 ".previous"
204 : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
205 : "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
206 "i" (-EFAULT)
207 : "memory", "t");
208
209 return res;
210}
211
212/*
213 * Return the size of a string (including the ending 0 even when we have
214 * exceeded the maximum string length).
215 */
216static inline long __strnlen_user(const char __user *__s, long __n)
217{
218 unsigned long res;
219 unsigned long __dummy;
220
221 __asm__ __volatile__(
222 "1:\t"
223 "mov.b @(%0,%3), %1\n\t"
224 "cmp/eq %4, %0\n\t"
225 "bt/s 2f\n\t"
226 " add #1, %0\n\t"
227 "tst %1, %1\n\t"
228 "bf 1b\n\t"
229 "2:\n"
230 ".section .fixup,\"ax\"\n"
231 "3:\n\t"
232 "mov.l 4f, %1\n\t"
233 "jmp @%1\n\t"
234 " mov #0, %0\n"
235 ".balign 4\n"
236 "4: .long 2b\n"
237 ".previous\n"
238 ".section __ex_table,\"a\"\n"
239 " .balign 4\n"
240 " .long 1b,3b\n"
241 ".previous"
242 : "=z" (res), "=&r" (__dummy)
243 : "0" (0), "r" (__s), "r" (__n)
244 : "t");
245 return res;
246}
247
248#endif /* __ASM_SH_UACCESS_32_H */ 173#endif /* __ASM_SH_UACCESS_32_H */
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
index 56fd20b8cdcc..2e07e0f40c6a 100644
--- a/arch/sh/include/asm/uaccess_64.h
+++ b/arch/sh/include/asm/uaccess_64.h
@@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long);
84extern long __put_user_asm_q(void *, long); 84extern long __put_user_asm_q(void *, long);
85extern void __put_user_unknown(void); 85extern void __put_user_unknown(void);
86 86
87extern long __strnlen_user(const char *__s, long __n);
88extern int __strncpy_from_user(unsigned long __dest,
89 unsigned long __user __src, int __count);
90
91#endif /* __ASM_SH_UACCESS_64_H */ 87#endif /* __ASM_SH_UACCESS_64_H */
diff --git a/arch/sh/include/asm/ucontext.h b/arch/sh/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9f30fb..000000000000
--- a/arch/sh/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ucontext.h>
diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..6e38953ff7fd
--- /dev/null
+++ b/arch/sh/include/asm/word-at-a-time.h
@@ -0,0 +1,53 @@
1#ifndef __ASM_SH_WORD_AT_A_TIME_H
2#define __ASM_SH_WORD_AT_A_TIME_H
3
4#ifdef CONFIG_CPU_BIG_ENDIAN
5# include <asm-generic/word-at-a-time.h>
6#else
7/*
8 * Little-endian version cribbed from x86.
9 */
10struct word_at_a_time {
11 const unsigned long one_bits, high_bits;
12};
13
14#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
15
16/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
17static inline long count_masked_bytes(long mask)
18{
19 /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
20 long a = (0x0ff0001+mask) >> 23;
21 /* Fix the 1 for 00 case */
22 return a & mask;
23}
24
25/* Return nonzero if it has a zero */
26static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
27{
28 unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
29 *bits = mask;
30 return mask;
31}
32
33static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
34{
35 return bits;
36}
37
38static inline unsigned long create_zero_mask(unsigned long bits)
39{
40 bits = (bits - 1) & ~bits;
41 return bits >> 7;
42}
43
44/* The mask we created is directly usable as a bytemask */
45#define zero_bytemask(mask) (mask)
46
47static inline unsigned long find_zero(unsigned long mask)
48{
49 return count_masked_bytes(mask);
50}
51#endif
52
53#endif
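A small userspace sketch of the little-endian helpers added in the header above, exercised on a single 32-bit word: has_zero() marks the high bit of every zero byte, create_zero_mask() turns that into a byte mask, and find_zero() converts it to the byte index. REPEAT_BYTE lives in <linux/kernel.h> in-tree and is re-defined here only so the example is self-contained; like the header's non-generic branch, the sketch assumes a little-endian host.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define REPEAT_BYTE(x) ((~(uint32_t)0 / 0xff) * (x))

struct word_at_a_time {
	const uint32_t one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }

/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ), as in the header above */
static uint32_t count_masked_bytes(uint32_t mask)
{
	uint32_t a = (0x0ff0001 + mask) >> 23;
	return a & mask;
}

/* Nonzero iff the word contains a zero byte; remembers the marker bits. */
static uint32_t has_zero(uint32_t a, uint32_t *bits,
			 const struct word_at_a_time *c)
{
	uint32_t mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}

/* Turn the 0x80 marker bits into a byte mask covering the leading bytes. */
static uint32_t create_zero_mask(uint32_t bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

static uint32_t find_zero(uint32_t mask)
{
	return count_masked_bytes(mask);
}

int main(void)
{
	const char s[4] = { 'a', 'b', '\0', 'd' };  /* NUL sits in byte 2 */
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	uint32_t word, bits;

	memcpy(&word, s, sizeof(word));
	if (has_zero(word, &bits, &constants))
		printf("first zero byte is at index %u\n",
		       (unsigned int)find_zero(create_zero_mask(bits)));
	return 0;
}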
diff --git a/arch/sh/include/asm/xor.h b/arch/sh/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/arch/sh/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/xor.h>
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h
deleted file mode 100644
index 1192e1c761a7..000000000000
--- a/arch/sh/include/cpu-sh2a/cpu/ubc.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * SH-2A UBC definitions
3 *
4 * Copyright (C) 2008 Kieran Bingham
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef __ASM_CPU_SH2A_UBC_H
12#define __ASM_CPU_SH2A_UBC_H
13
14#define UBC_BARA 0xfffc0400
15#define UBC_BAMRA 0xfffc0404
16#define UBC_BBRA 0xfffc04a0 /* 16 bit access */
17#define UBC_BDRA 0xfffc0408
18#define UBC_BDMRA 0xfffc040c
19
20#define UBC_BARB 0xfffc0410
21#define UBC_BAMRB 0xfffc0414
22#define UBC_BBRB 0xfffc04b0 /* 16 bit access */
23#define UBC_BDRB 0xfffc0418
24#define UBC_BDMRB 0xfffc041c
25
26#define UBC_BRCR 0xfffc04c0
27
28#endif /* __ASM_CPU_SH2A_UBC_H */
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index ff1f0e6e9bec..b7cf6a547f11 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -1569,86 +1569,6 @@ ___clear_user_exit:
1569#endif /* CONFIG_MMU */ 1569#endif /* CONFIG_MMU */
1570 1570
1571/* 1571/*
1572 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1573 * int __count)
1574 *
1575 * Inputs:
1576 * (r2) target address
1577 * (r3) source address
1578 * (r4) maximum size in bytes
1579 *
1580 * Ouputs:
1581 * (*r2) copied data
1582 * (r2) -EFAULT (in case of faulting)
1583 * copied data (otherwise)
1584 */
1585 .global __strncpy_from_user
1586__strncpy_from_user:
1587 pta ___strncpy_from_user1, tr0
1588 pta ___strncpy_from_user_done, tr1
1589 or r4, ZERO, r5 /* r5 = original count */
1590 beq/u r4, r63, tr1 /* early exit if r4==0 */
1591 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1592 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1593
1594___strncpy_from_user1:
1595 ld.b r3, 0, r7 /* Fault address: only in reading */
1596 st.b r2, 0, r7
1597 addi r2, 1, r2
1598 addi r3, 1, r3
1599 beq/u ZERO, r7, tr1
1600 addi r4, -1, r4 /* return real number of copied bytes */
1601 bne/l ZERO, r4, tr0
1602
1603___strncpy_from_user_done:
1604 sub r5, r4, r6 /* If done, return copied */
1605
1606___strncpy_from_user_exit:
1607 or r6, ZERO, r2
1608 ptabs LINK, tr0
1609 blink tr0, ZERO
1610
1611/*
1612 * extern long __strnlen_user(const char *__s, long __n)
1613 *
1614 * Inputs:
1615 * (r2) source address
1616 * (r3) source size in bytes
1617 *
1618 * Ouputs:
1619 * (r2) -EFAULT (in case of faulting)
1620 * string length (otherwise)
1621 */
1622 .global __strnlen_user
1623__strnlen_user:
1624 pta ___strnlen_user_set_reply, tr0
1625 pta ___strnlen_user1, tr1
1626 or ZERO, ZERO, r5 /* r5 = counter */
1627 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1628 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1629 beq r3, ZERO, tr0
1630
1631___strnlen_user1:
1632 ldx.b r2, r5, r7 /* Fault address: only in reading */
1633 addi r3, -1, r3 /* No real fixup */
1634 addi r5, 1, r5
1635 beq r3, ZERO, tr0
1636 bne r7, ZERO, tr1
1637! The line below used to be active. This meant led to a junk byte lying between each pair
1638! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1639! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1640! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1641! addi r5, 1, r5 /* Include '\0' */
1642
1643___strnlen_user_set_reply:
1644 or r5, ZERO, r6 /* If done, return counter */
1645
1646___strnlen_user_exit:
1647 or r6, ZERO, r2
1648 ptabs LINK, tr0
1649 blink tr0, ZERO
1650
1651/*
1652 * extern long __get_user_asm_?(void *val, long addr) 1572 * extern long __get_user_asm_?(void *val, long addr)
1653 * 1573 *
1654 * Inputs: 1574 * Inputs:
@@ -1982,8 +1902,6 @@ asm_uaccess_start:
1982 .long ___copy_user2, ___copy_user_exit 1902 .long ___copy_user2, ___copy_user_exit
1983 .long ___clear_user1, ___clear_user_exit 1903 .long ___clear_user1, ___clear_user_exit
1984#endif 1904#endif
1985 .long ___strncpy_from_user1, ___strncpy_from_user_exit
1986 .long ___strnlen_user1, ___strnlen_user_exit
1987 .long ___get_user_asm_b1, ___get_user_asm_b_exit 1905 .long ___get_user_asm_b1, ___get_user_asm_b_exit
1988 .long ___get_user_asm_w1, ___get_user_asm_w_exit 1906 .long ___get_user_asm_w1, ___get_user_asm_w_exit
1989 .long ___get_user_asm_l1, ___get_user_asm_l_exit 1907 .long ___get_user_asm_l1, ___get_user_asm_l_exit
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 9b7a459a4613..055d91b70305 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -4,6 +4,7 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/export.h> 5#include <linux/export.h>
6#include <linux/stackprotector.h> 6#include <linux/stackprotector.h>
7#include <asm/fpu.h>
7 8
8struct kmem_cache *task_xstate_cachep = NULL; 9struct kmem_cache *task_xstate_cachep = NULL;
9unsigned int xstate_size; 10unsigned int xstate_size;
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 4264583eabac..602545b12a86 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -33,6 +33,7 @@
33#include <asm/switch_to.h> 33#include <asm/switch_to.h>
34 34
35struct task_struct *last_task_used_math = NULL; 35struct task_struct *last_task_used_math = NULL;
36struct pt_regs fake_swapper_regs = { 0, };
36 37
37void show_regs(struct pt_regs *regs) 38void show_regs(struct pt_regs *regs)
38{ 39{
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index 45afa5c51f67..26a0774f5272 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b);
32EXPORT_SYMBOL(__get_user_asm_w); 32EXPORT_SYMBOL(__get_user_asm_w);
33EXPORT_SYMBOL(__get_user_asm_l); 33EXPORT_SYMBOL(__get_user_asm_l);
34EXPORT_SYMBOL(__get_user_asm_q); 34EXPORT_SYMBOL(__get_user_asm_q);
35EXPORT_SYMBOL(__strnlen_user);
36EXPORT_SYMBOL(__strncpy_from_user);
37EXPORT_SYMBOL(__clear_user); 35EXPORT_SYMBOL(__clear_user);
38EXPORT_SYMBOL(copy_page); 36EXPORT_SYMBOL(copy_page);
39EXPORT_SYMBOL(__copy_user); 37EXPORT_SYMBOL(__copy_user);
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 7e1fef36bde6..e9c670d7a7fe 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -91,11 +91,6 @@ extern void smp_nap(void);
91/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ 91/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
92extern void _cpu_idle(void); 92extern void _cpu_idle(void);
93 93
94/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
95extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
96 unsigned long new_sp,
97 unsigned long new_ss10);
98
99#else /* __ASSEMBLY__ */ 94#else /* __ASSEMBLY__ */
100 95
101/* 96/*
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 133c4b56a99e..c31637baff28 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -68,20 +68,6 @@ STD_ENTRY(KBacktraceIterator_init_current)
68 jrp lr /* keep backtracer happy */ 68 jrp lr /* keep backtracer happy */
69 STD_ENDPROC(KBacktraceIterator_init_current) 69 STD_ENDPROC(KBacktraceIterator_init_current)
70 70
71/*
72 * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
73 * free the old stack (passed in r0) and re-invoke cpu_idle().
74 * We update sp and ksp0 simultaneously to avoid backtracer warnings.
75 */
76STD_ENTRY(cpu_idle_on_new_stack)
77 {
78 move sp, r1
79 mtspr SPR_SYSTEM_SAVE_K_0, r2
80 }
81 jal free_thread_info
82 j cpu_idle
83 STD_ENDPROC(cpu_idle_on_new_stack)
84
85/* Loop forever on a nap during SMP boot. */ 71/* Loop forever on a nap during SMP boot. */
86STD_ENTRY(smp_nap) 72STD_ENTRY(smp_nap)
87 nap 73 nap
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6098ccc59be2..dd87f3420390 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -29,6 +29,7 @@
29#include <linux/smp.h> 29#include <linux/smp.h>
30#include <linux/timex.h> 30#include <linux/timex.h>
31#include <linux/hugetlb.h> 31#include <linux/hugetlb.h>
32#include <linux/start_kernel.h>
32#include <asm/setup.h> 33#include <asm/setup.h>
33#include <asm/sections.h> 34#include <asm/sections.h>
34#include <asm/cacheflush.h> 35#include <asm/cacheflush.h>
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 8bbea6aa40d9..efe5acfc79c3 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -94,10 +94,10 @@ bs_die:
94 94
95 .section ".bsdata", "a" 95 .section ".bsdata", "a"
96bugger_off_msg: 96bugger_off_msg:
97 .ascii "Direct booting from floppy is no longer supported.\r\n" 97 .ascii "Direct floppy boot is not supported. "
98 .ascii "Please use a boot loader program instead.\r\n" 98 .ascii "Use a boot loader program instead.\r\n"
99 .ascii "\n" 99 .ascii "\n"
100 .ascii "Remove disk and press any key to reboot . . .\r\n" 100 .ascii "Remove disk and press any key to reboot ...\r\n"
101 .byte 0 101 .byte 0
102 102
103#ifdef CONFIG_EFI_STUB 103#ifdef CONFIG_EFI_STUB
@@ -111,7 +111,7 @@ coff_header:
111#else 111#else
112 .word 0x8664 # x86-64 112 .word 0x8664 # x86-64
113#endif 113#endif
114 .word 2 # nr_sections 114 .word 3 # nr_sections
115 .long 0 # TimeDateStamp 115 .long 0 # TimeDateStamp
116 .long 0 # PointerToSymbolTable 116 .long 0 # PointerToSymbolTable
117 .long 1 # NumberOfSymbols 117 .long 1 # NumberOfSymbols
@@ -158,8 +158,8 @@ extra_header_fields:
158#else 158#else
159 .quad 0 # ImageBase 159 .quad 0 # ImageBase
160#endif 160#endif
161 .long 0x1000 # SectionAlignment 161 .long 0x20 # SectionAlignment
162 .long 0x200 # FileAlignment 162 .long 0x20 # FileAlignment
163 .word 0 # MajorOperatingSystemVersion 163 .word 0 # MajorOperatingSystemVersion
164 .word 0 # MinorOperatingSystemVersion 164 .word 0 # MinorOperatingSystemVersion
165 .word 0 # MajorImageVersion 165 .word 0 # MajorImageVersion
@@ -200,8 +200,10 @@ extra_header_fields:
200 200
201 # Section table 201 # Section table
202section_table: 202section_table:
203 .ascii ".text" 203 #
204 .byte 0 204 # The offset & size fields are filled in by build.c.
205 #
206 .ascii ".setup"
205 .byte 0 207 .byte 0
206 .byte 0 208 .byte 0
207 .long 0 209 .long 0
@@ -217,9 +219,8 @@ section_table:
217 219
218 # 220 #
219 # The EFI application loader requires a relocation section 221 # The EFI application loader requires a relocation section
220 # because EFI applications must be relocatable. But since 222 # because EFI applications must be relocatable. The .reloc
221 # we don't need the loader to fixup any relocs for us, we 223 # offset & size fields are filled in by build.c.
222 # just create an empty (zero-length) .reloc section header.
223 # 224 #
224 .ascii ".reloc" 225 .ascii ".reloc"
225 .byte 0 226 .byte 0
@@ -233,6 +234,25 @@ section_table:
233 .word 0 # NumberOfRelocations 234 .word 0 # NumberOfRelocations
234 .word 0 # NumberOfLineNumbers 235 .word 0 # NumberOfLineNumbers
235 .long 0x42100040 # Characteristics (section flags) 236 .long 0x42100040 # Characteristics (section flags)
237
238 #
239 # The offset & size fields are filled in by build.c.
240 #
241 .ascii ".text"
242 .byte 0
243 .byte 0
244 .byte 0
245 .long 0
246 .long 0x0 # startup_{32,64}
247 .long 0 # Size of initialized data
248 # on disk
249 .long 0x0 # startup_{32,64}
250 .long 0 # PointerToRelocations
251 .long 0 # PointerToLineNumbers
252 .word 0 # NumberOfRelocations
253 .word 0 # NumberOfLineNumbers
254 .long 0x60500020 # Characteristics (section flags)
255
236#endif /* CONFIG_EFI_STUB */ 256#endif /* CONFIG_EFI_STUB */
237 257
238 # Kernel attributes; used by setup. This is part 1 of the 258 # Kernel attributes; used by setup. This is part 1 of the
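
The hand-written section table above (.setup, .reloc and the relocated .text entry) leaves its offset and size fields zeroed so that build.c can patch them once the final image layout is known. As a reading aid, here is a minimal C sketch, assuming the standard PE/COFF section-header layout, of the 0x28-byte record each of those blocks encodes by hand; it is an illustration, not kernel code.

#include <stdint.h>

/* One 0x28-byte PE/COFF section header, as laid out by hand above. */
struct pe_section_header {
	char     name[8];                /* ".setup", ".reloc", ".text"      */
	uint32_t virtual_size;           /* +0x08, patched by build.c        */
	uint32_t virtual_address;        /* +0x0c, patched by build.c        */
	uint32_t size_of_raw_data;       /* +0x10, patched by build.c        */
	uint32_t pointer_to_raw_data;    /* +0x14, patched by build.c        */
	uint32_t pointer_to_relocations; /* +0x18, unused here               */
	uint32_t pointer_to_line_nums;   /* +0x1c, unused here               */
	uint16_t number_of_relocations;  /* +0x20                            */
	uint16_t number_of_line_nums;    /* +0x22                            */
	uint32_t characteristics;        /* +0x24, e.g. 0x60500020 for .text */
};	/* 0x28 bytes per entry: the stride build.c steps by */
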
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 3f61f6e2b46f..4b8e165ee572 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -50,6 +50,8 @@ typedef unsigned int u32;
50u8 buf[SETUP_SECT_MAX*512]; 50u8 buf[SETUP_SECT_MAX*512];
51int is_big_kernel; 51int is_big_kernel;
52 52
53#define PECOFF_RELOC_RESERVE 0x20
54
53/*----------------------------------------------------------------------*/ 55/*----------------------------------------------------------------------*/
54 56
55static const u32 crctab32[] = { 57static const u32 crctab32[] = {
@@ -133,11 +135,103 @@ static void usage(void)
133 die("Usage: build setup system [> image]"); 135 die("Usage: build setup system [> image]");
134} 136}
135 137
136int main(int argc, char ** argv)
137{
138#ifdef CONFIG_EFI_STUB 138#ifdef CONFIG_EFI_STUB
139 unsigned int file_sz, pe_header; 139
140static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
141{
142 unsigned int pe_header;
143 unsigned short num_sections;
144 u8 *section;
145
146 pe_header = get_unaligned_le32(&buf[0x3c]);
147 num_sections = get_unaligned_le16(&buf[pe_header + 6]);
148
149#ifdef CONFIG_X86_32
150 section = &buf[pe_header + 0xa8];
151#else
152 section = &buf[pe_header + 0xb8];
140#endif 153#endif
154
155 while (num_sections > 0) {
156 if (strncmp((char*)section, section_name, 8) == 0) {
157 /* section header size field */
158 put_unaligned_le32(size, section + 0x8);
159
160 /* section header vma field */
161 put_unaligned_le32(offset, section + 0xc);
162
163 /* section header 'size of initialised data' field */
164 put_unaligned_le32(size, section + 0x10);
165
166 /* section header 'file offset' field */
167 put_unaligned_le32(offset, section + 0x14);
168
169 break;
170 }
171 section += 0x28;
172 num_sections--;
173 }
174}
175
176static void update_pecoff_setup_and_reloc(unsigned int size)
177{
178 u32 setup_offset = 0x200;
179 u32 reloc_offset = size - PECOFF_RELOC_RESERVE;
180 u32 setup_size = reloc_offset - setup_offset;
181
182 update_pecoff_section_header(".setup", setup_offset, setup_size);
183 update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
184
185 /*
186 * Modify .reloc section contents with a single entry. The
187 * relocation is applied to offset 10 of the relocation section.
188 */
189 put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
190 put_unaligned_le32(10, &buf[reloc_offset + 4]);
191}
192
193static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
194{
195 unsigned int pe_header;
196 unsigned int text_sz = file_sz - text_start;
197
198 pe_header = get_unaligned_le32(&buf[0x3c]);
199
200 /* Size of image */
201 put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
202
203 /*
204 * Size of code: Subtract the size of the first sector (512 bytes)
205 * which includes the header.
206 */
207 put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
208
209#ifdef CONFIG_X86_32
210 /*
211 * Address of entry point.
212 *
213 * The EFI stub entry point is +16 bytes from the start of
214 * the .text section.
215 */
216 put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
217#else
218 /*
219 * Address of entry point. startup_32 is at the beginning and
220 * the 64-bit entry point (startup_64) is always 512 bytes
221 * after. The EFI stub entry point is 16 bytes after that, as
222 * the first instruction allows legacy loaders to jump over
223 * the EFI stub initialisation
224 */
225 put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
226#endif /* CONFIG_X86_32 */
227
228 update_pecoff_section_header(".text", text_start, text_sz);
229}
230
231#endif /* CONFIG_EFI_STUB */
232
233int main(int argc, char ** argv)
234{
141 unsigned int i, sz, setup_sectors; 235 unsigned int i, sz, setup_sectors;
142 int c; 236 int c;
143 u32 sys_size; 237 u32 sys_size;
@@ -163,6 +257,12 @@ int main(int argc, char ** argv)
163 die("Boot block hasn't got boot flag (0xAA55)"); 257 die("Boot block hasn't got boot flag (0xAA55)");
164 fclose(file); 258 fclose(file);
165 259
260#ifdef CONFIG_EFI_STUB
261 /* Reserve 0x20 bytes for .reloc section */
262 memset(buf+c, 0, PECOFF_RELOC_RESERVE);
263 c += PECOFF_RELOC_RESERVE;
264#endif
265
166 /* Pad unused space with zeros */ 266 /* Pad unused space with zeros */
167 setup_sectors = (c + 511) / 512; 267 setup_sectors = (c + 511) / 512;
168 if (setup_sectors < SETUP_SECT_MIN) 268 if (setup_sectors < SETUP_SECT_MIN)
@@ -170,6 +270,10 @@ int main(int argc, char ** argv)
170 i = setup_sectors*512; 270 i = setup_sectors*512;
171 memset(buf+c, 0, i-c); 271 memset(buf+c, 0, i-c);
172 272
273#ifdef CONFIG_EFI_STUB
274 update_pecoff_setup_and_reloc(i);
275#endif
276
173 /* Set the default root device */ 277 /* Set the default root device */
174 put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]); 278 put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
175 279
@@ -194,66 +298,8 @@ int main(int argc, char ** argv)
194 put_unaligned_le32(sys_size, &buf[0x1f4]); 298 put_unaligned_le32(sys_size, &buf[0x1f4]);
195 299
196#ifdef CONFIG_EFI_STUB 300#ifdef CONFIG_EFI_STUB
197 file_sz = sz + i + ((sys_size * 16) - sz); 301 update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
198 302#endif
199 pe_header = get_unaligned_le32(&buf[0x3c]);
200
201 /* Size of image */
202 put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
203
204 /*
205 * Subtract the size of the first section (512 bytes) which
206 * includes the header and .reloc section. The remaining size
207 * is that of the .text section.
208 */
209 file_sz -= 512;
210
211 /* Size of code */
212 put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]);
213
214#ifdef CONFIG_X86_32
215 /*
216 * Address of entry point.
217 *
218 * The EFI stub entry point is +16 bytes from the start of
219 * the .text section.
220 */
221 put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
222
223 /* .text size */
224 put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
225
226 /* .text vma */
227 put_unaligned_le32(0x200, &buf[pe_header + 0xb4]);
228
229 /* .text size of initialised data */
230 put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]);
231
232 /* .text file offset */
233 put_unaligned_le32(0x200, &buf[pe_header + 0xbc]);
234#else
235 /*
236 * Address of entry point. startup_32 is at the beginning and
237 * the 64-bit entry point (startup_64) is always 512 bytes
238 * after. The EFI stub entry point is 16 bytes after that, as
239 * the first instruction allows legacy loaders to jump over
240 * the EFI stub initialisation
241 */
242 put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
243
244 /* .text size */
245 put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
246
247 /* .text vma */
248 put_unaligned_le32(0x200, &buf[pe_header + 0xc4]);
249
250 /* .text size of initialised data */
251 put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]);
252
253 /* .text file offset */
254 put_unaligned_le32(0x200, &buf[pe_header + 0xcc]);
255#endif /* CONFIG_X86_32 */
256#endif /* CONFIG_EFI_STUB */
257 303
258 crc = partial_crc32(buf, i, crc); 304 crc = partial_crc32(buf, i, crc);
259 if (fwrite(buf, 1, i, stdout) != i) 305 if (fwrite(buf, 1, i, stdout) != i)
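
update_pecoff_setup_and_reloc() above reserves 0x20 bytes (PECOFF_RELOC_RESERVE) at the end of the setup sectors and writes two little-endian words into them. Assuming the standard PE base-relocation block format, that is exactly one block header plus a single zeroed entry (type 0, absolute, effectively a no-op), presumably enough to satisfy loaders that expect some relocation data. A hedged sketch of that layout, not kernel code:

#include <stdint.h>

/*
 * Sketch of the area reserved by PECOFF_RELOC_RESERVE. The two
 * put_unaligned_le32() calls above fill in page_rva and block_size;
 * everything else stays zero from the earlier memset().
 */
struct pe_reloc_block {
	uint32_t page_rva;    /* written as reloc_offset + 10              */
	uint32_t block_size;  /* written as 10: 8-byte header + one entry  */
	uint16_t entries[1];  /* left zero: type 0 = absolute, a no-op     */
};
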
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index be6d9e365a80..3470624d7835 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec)
2460 pxor IN3, STATE4 2460 pxor IN3, STATE4
2461 movaps IN4, IV 2461 movaps IN4, IV
2462#else 2462#else
2463 pxor (INP), STATE2
2464 pxor 0x10(INP), STATE3
2465 pxor IN1, STATE4 2463 pxor IN1, STATE4
2466 movaps IN2, IV 2464 movaps IN2, IV
2465 movups (INP), IN1
2466 pxor IN1, STATE2
2467 movups 0x10(INP), IN2
2468 pxor IN2, STATE3
2467#endif 2469#endif
2468 movups STATE1, (OUTP) 2470 movups STATE1, (OUTP)
2469 movups STATE2, 0x10(OUTP) 2471 movups STATE2, 0x10(OUTP)
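
The reordered CBC-decrypt tail above stops using pxor with a memory operand on (INP)/0x10(INP) and instead loads those blocks with movups before XOR-ing register to register. The likely point, reading the hunk, is alignment: pxor with a memory source requires a 16-byte-aligned address, while movups tolerates unaligned input. A minimal intrinsics sketch of the same idea (an illustration, not the kernel routine):

#include <emmintrin.h>

/*
 * XOR a 16-byte block from a possibly unaligned address into 'state'.
 * _mm_loadu_si128 maps to an unaligned load (movups/movdqu); the XOR
 * then happens register-to-register, so no aligned memory operand is
 * needed.
 */
static inline __m128i xor_block_unaligned(__m128i state, const void *in)
{
	__m128i block = _mm_loadu_si128((const __m128i *)in);

	return _mm_xor_si128(state, block);
}
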
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 0e3793b821ef..dc580c42851c 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -54,6 +54,20 @@ struct nmiaction {
54 __register_nmi_handler((t), &fn##_na); \ 54 __register_nmi_handler((t), &fn##_na); \
55}) 55})
56 56
57/*
58 * For special handlers that register/unregister in the
59 * init section only. This should be considered rare.
60 */
61#define register_nmi_handler_initonly(t, fn, fg, n) \
62({ \
63 static struct nmiaction fn##_na __initdata = { \
64 .handler = (fn), \
65 .name = (n), \
66 .flags = (fg), \
67 }; \
68 __register_nmi_handler((t), &fn##_na); \
69})
70
57int __register_nmi_handler(unsigned int, struct nmiaction *); 71int __register_nmi_handler(unsigned int, struct nmiaction *);
58 72
59void unregister_nmi_handler(unsigned int, const char *); 73void unregister_nmi_handler(unsigned int, const char *);
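
register_nmi_handler_initonly() differs from register_nmi_handler() only in placing the static struct nmiaction in __initdata, so the descriptor is discarded along with the init sections once boot-time users such as the NMI self-test are done. A hedged usage sketch in kernel context follows; the handler and function names are hypothetical, only the macros come from the header above.

#include <linux/init.h>
#include <asm/nmi.h>

/* Hypothetical init-only handler; signature as used by the NMI core. */
static int __init my_selftest_nmi(unsigned int type, struct pt_regs *regs)
{
	return NMI_HANDLED;
}

static void __init my_nmi_selftest(void)
{
	/* descriptor lives in __initdata, freed after boot */
	register_nmi_handler_initonly(NMI_LOCAL, my_selftest_nmi, 0,
				      "my_selftest");

	/* ... raise an NMI and check that it arrived ... */

	unregister_nmi_handler(NMI_LOCAL, "my_selftest");
}
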
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 04cd6882308e..e1f3a17034fc 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -33,9 +33,8 @@
33#define segment_eq(a, b) ((a).seg == (b).seg) 33#define segment_eq(a, b) ((a).seg == (b).seg)
34 34
35#define user_addr_max() (current_thread_info()->addr_limit.seg) 35#define user_addr_max() (current_thread_info()->addr_limit.seg)
36#define __addr_ok(addr) \ 36#define __addr_ok(addr) \
37 ((unsigned long __force)(addr) < \ 37 ((unsigned long __force)(addr) < user_addr_max())
38 (current_thread_info()->addr_limit.seg))
39 38
40/* 39/*
41 * Test whether a block of memory is a valid user space address. 40 * Test whether a block of memory is a valid user space address.
@@ -47,14 +46,14 @@
47 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... 46 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
48 */ 47 */
49 48
50#define __range_not_ok(addr, size) \ 49#define __range_not_ok(addr, size, limit) \
51({ \ 50({ \
52 unsigned long flag, roksum; \ 51 unsigned long flag, roksum; \
53 __chk_user_ptr(addr); \ 52 __chk_user_ptr(addr); \
54 asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ 53 asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
55 : "=&r" (flag), "=r" (roksum) \ 54 : "=&r" (flag), "=r" (roksum) \
56 : "1" (addr), "g" ((long)(size)), \ 55 : "1" (addr), "g" ((long)(size)), \
57 "rm" (current_thread_info()->addr_limit.seg)); \ 56 "rm" (limit)); \
58 flag; \ 57 flag; \
59}) 58})
60 59
@@ -77,7 +76,8 @@
77 * checks that the pointer is in the user space range - after calling 76 * checks that the pointer is in the user space range - after calling
78 * this function, memory access functions may still return -EFAULT. 77 * this function, memory access functions may still return -EFAULT.
79 */ 78 */
80#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) 79#define access_ok(type, addr, size) \
80 (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
81 81
82/* 82/*
83 * The exception table consists of pairs of addresses relative to the 83 * The exception table consists of pairs of addresses relative to the
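
__range_not_ok() now takes the limit as a parameter, so callers such as access_ok() (which passes user_addr_max()) and NMI-safe user copies (which pass TASK_SIZE) can share it. The inline asm computes "addr + size exceeds limit, counting a carry out of the addition as a failure". A plain-C sketch of the same predicate, for illustration only:

/* Non-zero when [addr, addr + size) does not fit below 'limit'. */
static inline unsigned long range_not_ok(unsigned long addr,
					 unsigned long size,
					 unsigned long limit)
{
	unsigned long end = addr + size;

	if (end < addr)		/* overflow: the "33-bit" carry case */
		return 1;

	return end > limit;
}
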
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index becf47b81735..6149b476d9df 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -149,7 +149,6 @@
149/* 4 bits of software ack period */ 149/* 4 bits of software ack period */
150#define UV2_ACK_MASK 0x7UL 150#define UV2_ACK_MASK 0x7UL
151#define UV2_ACK_UNITS_SHFT 3 151#define UV2_ACK_UNITS_SHFT 3
152#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
153#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 152#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
154 153
155/* 154/*
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 6e76c191a835..d5fd66f0d4cd 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -20,7 +20,6 @@
20#include <linux/bitops.h> 20#include <linux/bitops.h>
21#include <linux/ioport.h> 21#include <linux/ioport.h>
22#include <linux/suspend.h> 22#include <linux/suspend.h>
23#include <linux/kmemleak.h>
24#include <asm/e820.h> 23#include <asm/e820.h>
25#include <asm/io.h> 24#include <asm/io.h>
26#include <asm/iommu.h> 25#include <asm/iommu.h>
@@ -95,11 +94,6 @@ static u32 __init allocate_aperture(void)
95 return 0; 94 return 0;
96 } 95 }
97 memblock_reserve(addr, aper_size); 96 memblock_reserve(addr, aper_size);
98 /*
99 * Kmemleak should not scan this block as it may not be mapped via the
100 * kernel direct mapping.
101 */
102 kmemleak_ignore(phys_to_virt(addr));
103 printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", 97 printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
104 aper_size >> 10, addr); 98 aper_size >> 10, addr);
105 insert_aperture_resource((u32)addr, aper_size); 99 insert_aperture_resource((u32)addr, aper_size);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ac96561d1a99..5f0ff597437c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1195 BUG_ON(!cfg->vector); 1195 BUG_ON(!cfg->vector);
1196 1196
1197 vector = cfg->vector; 1197 vector = cfg->vector;
1198 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) 1198 for_each_cpu(cpu, cfg->domain)
1199 per_cpu(vector_irq, cpu)[vector] = -1; 1199 per_cpu(vector_irq, cpu)[vector] = -1;
1200 1200
1201 cfg->vector = 0; 1201 cfg->vector = 0;
@@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1203 1203
1204 if (likely(!cfg->move_in_progress)) 1204 if (likely(!cfg->move_in_progress))
1205 return; 1205 return;
1206 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { 1206 for_each_cpu(cpu, cfg->old_domain) {
1207 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1207 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1208 vector++) { 1208 vector++) {
1209 if (per_cpu(vector_irq, cpu)[vector] != irq) 1209 if (per_cpu(vector_irq, cpu)[vector] != irq)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 0a687fd185e6..da27c5d2168a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1274,7 +1274,7 @@ static void mce_timer_fn(unsigned long data)
1274 */ 1274 */
1275 iv = __this_cpu_read(mce_next_interval); 1275 iv = __this_cpu_read(mce_next_interval);
1276 if (mce_notify_irq()) 1276 if (mce_notify_irq())
1277 iv = max(iv, (unsigned long) HZ/100); 1277 iv = max(iv / 2, (unsigned long) HZ/100);
1278 else 1278 else
1279 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); 1279 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1280 __this_cpu_write(mce_next_interval, iv); 1280 __this_cpu_write(mce_next_interval, iv);
@@ -1557,7 +1557,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1557static void __mcheck_cpu_init_timer(void) 1557static void __mcheck_cpu_init_timer(void)
1558{ 1558{
1559 struct timer_list *t = &__get_cpu_var(mce_timer); 1559 struct timer_list *t = &__get_cpu_var(mce_timer);
1560 unsigned long iv = __this_cpu_read(mce_next_interval); 1560 unsigned long iv = check_interval * HZ;
1561 1561
1562 setup_timer(t, mce_timer_fn, smp_processor_id()); 1562 setup_timer(t, mce_timer_fn, smp_processor_id());
1563 1563
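
The two mce.c hunks above turn the polling timer into a simple adaptive scheme: the interval starts at check_interval * HZ, is halved (but never below HZ/100) whenever mce_notify_irq() reports pending work, and is doubled otherwise (capped at roughly check_interval * HZ via round_jiffies_relative()). A small standalone sketch of that policy; the function name is illustrative:

/* Next polling interval, in jiffies, given whether work was found. */
static unsigned long next_mce_interval(unsigned long iv, int had_work,
				       unsigned long hz, unsigned long max_iv)
{
	if (had_work) {
		iv /= 2;
		return iv < hz / 100 ? hz / 100 : iv;	/* floor: HZ/100 */
	}

	iv *= 2;
	return iv > max_iv ? max_iv : iv;	/* ceiling: ~check_interval*HZ */
}
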
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e049d6da0183..c4706cf9c011 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
1496 if (!cpuc->shared_regs) 1496 if (!cpuc->shared_regs)
1497 goto error; 1497 goto error;
1498 } 1498 }
1499 cpuc->is_fake = 1;
1499 return cpuc; 1500 return cpuc;
1500error: 1501error:
1501 free_fake_cpuc(cpuc); 1502 free_fake_cpuc(cpuc);
@@ -1756,6 +1757,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1756 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); 1757 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
1757} 1758}
1758 1759
1760static inline int
1761valid_user_frame(const void __user *fp, unsigned long size)
1762{
1763 return (__range_not_ok(fp, size, TASK_SIZE) == 0);
1764}
1765
1759#ifdef CONFIG_COMPAT 1766#ifdef CONFIG_COMPAT
1760 1767
1761#include <asm/compat.h> 1768#include <asm/compat.h>
@@ -1780,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1780 if (bytes != sizeof(frame)) 1787 if (bytes != sizeof(frame))
1781 break; 1788 break;
1782 1789
1783 if (fp < compat_ptr(regs->sp)) 1790 if (!valid_user_frame(fp, sizeof(frame)))
1784 break; 1791 break;
1785 1792
1786 perf_callchain_store(entry, frame.return_address); 1793 perf_callchain_store(entry, frame.return_address);
@@ -1826,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1826 if (bytes != sizeof(frame)) 1833 if (bytes != sizeof(frame))
1827 break; 1834 break;
1828 1835
1829 if ((unsigned long)fp < regs->sp) 1836 if (!valid_user_frame(fp, sizeof(frame)))
1830 break; 1837 break;
1831 1838
1832 perf_callchain_store(entry, frame.return_address); 1839 perf_callchain_store(entry, frame.return_address);
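
Both callchain loops above replace the old "fp below regs->sp" heuristic with valid_user_frame(), which only checks that the candidate frame lies entirely inside the user address range (__range_not_ok(fp, size, TASK_SIZE) == 0). A hedged sketch of the walk those hunks sit in; read_user_frame() and record() stand in for copy_from_user_nmi() and perf_callchain_store() and are hypothetical names, as is the depth bound.

struct stack_frame {
	const void *next_frame;
	unsigned long return_address;
};

/* Hypothetical helpers standing in for the real kernel primitives. */
extern unsigned long read_user_frame(void *dst, const void *src,
				     unsigned long n);
extern void record(unsigned long ip);

static void walk_user_stack(const void *fp, unsigned long task_size)
{
	struct stack_frame frame;
	int depth = 0;

	while (fp && depth++ < 127) {		/* illustrative depth cap */
		unsigned long addr = (unsigned long)fp;

		/* reject anything not fully inside user space */
		if (addr + sizeof(frame) < addr ||
		    addr + sizeof(frame) > task_size)
			break;

		if (read_user_frame(&frame, fp, sizeof(frame)) != sizeof(frame))
			break;

		record(frame.return_address);
		fp = frame.next_frame;
	}
}
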
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 6638aaf54493..7241e2fc3c17 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -117,6 +117,7 @@ struct cpu_hw_events {
117 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ 117 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
118 118
119 unsigned int group_flag; 119 unsigned int group_flag;
120 int is_fake;
120 121
121 /* 122 /*
122 * Intel DebugStore bits 123 * Intel DebugStore bits
@@ -364,6 +365,7 @@ struct x86_pmu {
364 int pebs_record_size; 365 int pebs_record_size;
365 void (*drain_pebs)(struct pt_regs *regs); 366 void (*drain_pebs)(struct pt_regs *regs);
366 struct event_constraint *pebs_constraints; 367 struct event_constraint *pebs_constraints;
368 void (*pebs_aliases)(struct perf_event *event);
367 369
368 /* 370 /*
369 * Intel LBR 371 * Intel LBR
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 166546ec6aef..187c294bc658 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event)
1119 return NULL; 1119 return NULL;
1120} 1120}
1121 1121
1122static bool intel_try_alt_er(struct perf_event *event, int orig_idx) 1122static int intel_alt_er(int idx)
1123{ 1123{
1124 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) 1124 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
1125 return false; 1125 return idx;
1126 1126
1127 if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { 1127 if (idx == EXTRA_REG_RSP_0)
1128 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 1128 return EXTRA_REG_RSP_1;
1129 event->hw.config |= 0x01bb; 1129
1130 event->hw.extra_reg.idx = EXTRA_REG_RSP_1; 1130 if (idx == EXTRA_REG_RSP_1)
1131 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; 1131 return EXTRA_REG_RSP_0;
1132 } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) { 1132
1133 return idx;
1134}
1135
1136static void intel_fixup_er(struct perf_event *event, int idx)
1137{
1138 event->hw.extra_reg.idx = idx;
1139
1140 if (idx == EXTRA_REG_RSP_0) {
1133 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 1141 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1134 event->hw.config |= 0x01b7; 1142 event->hw.config |= 0x01b7;
1135 event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
1136 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; 1143 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1144 } else if (idx == EXTRA_REG_RSP_1) {
1145 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1146 event->hw.config |= 0x01bb;
1147 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1137 } 1148 }
1138
1139 if (event->hw.extra_reg.idx == orig_idx)
1140 return false;
1141
1142 return true;
1143} 1149}
1144 1150
1145/* 1151/*
@@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1157 struct event_constraint *c = &emptyconstraint; 1163 struct event_constraint *c = &emptyconstraint;
1158 struct er_account *era; 1164 struct er_account *era;
1159 unsigned long flags; 1165 unsigned long flags;
1160 int orig_idx = reg->idx; 1166 int idx = reg->idx;
1161 1167
1162 /* already allocated shared msr */ 1168 /*
1163 if (reg->alloc) 1169 * reg->alloc can be set due to existing state, so for fake cpuc we
1170 * need to ignore this, otherwise we might fail to allocate proper fake
1171 * state for this extra reg constraint. Also see the comment below.
1172 */
1173 if (reg->alloc && !cpuc->is_fake)
1164 return NULL; /* call x86_get_event_constraint() */ 1174 return NULL; /* call x86_get_event_constraint() */
1165 1175
1166again: 1176again:
1167 era = &cpuc->shared_regs->regs[reg->idx]; 1177 era = &cpuc->shared_regs->regs[idx];
1168 /* 1178 /*
1169 * we use spin_lock_irqsave() to avoid lockdep issues when 1179 * we use spin_lock_irqsave() to avoid lockdep issues when
1170 * passing a fake cpuc 1180 * passing a fake cpuc
@@ -1173,6 +1183,29 @@ again:
1173 1183
1174 if (!atomic_read(&era->ref) || era->config == reg->config) { 1184 if (!atomic_read(&era->ref) || era->config == reg->config) {
1175 1185
1186 /*
1187 * If its a fake cpuc -- as per validate_{group,event}() we
1188 * shouldn't touch event state and we can avoid doing so
1189 * since both will only call get_event_constraints() once
1190 * on each event, this avoids the need for reg->alloc.
1191 *
1192 * Not doing the ER fixup will only result in era->reg being
1193 * wrong, but since we won't actually try and program hardware
1194 * this isn't a problem either.
1195 */
1196 if (!cpuc->is_fake) {
1197 if (idx != reg->idx)
1198 intel_fixup_er(event, idx);
1199
1200 /*
1201 * x86_schedule_events() can call get_event_constraints()
1202 * multiple times on events in the case of incremental
1203 * scheduling(). reg->alloc ensures we only do the ER
1204 * allocation once.
1205 */
1206 reg->alloc = 1;
1207 }
1208
1176 /* lock in msr value */ 1209 /* lock in msr value */
1177 era->config = reg->config; 1210 era->config = reg->config;
1178 era->reg = reg->reg; 1211 era->reg = reg->reg;
@@ -1180,17 +1213,17 @@ again:
1180 /* one more user */ 1213 /* one more user */
1181 atomic_inc(&era->ref); 1214 atomic_inc(&era->ref);
1182 1215
1183 /* no need to reallocate during incremental event scheduling */
1184 reg->alloc = 1;
1185
1186 /* 1216 /*
1187 * need to call x86_get_event_constraint() 1217 * need to call x86_get_event_constraint()
1188 * to check if associated event has constraints 1218 * to check if associated event has constraints
1189 */ 1219 */
1190 c = NULL; 1220 c = NULL;
1191 } else if (intel_try_alt_er(event, orig_idx)) { 1221 } else {
1192 raw_spin_unlock_irqrestore(&era->lock, flags); 1222 idx = intel_alt_er(idx);
1193 goto again; 1223 if (idx != reg->idx) {
1224 raw_spin_unlock_irqrestore(&era->lock, flags);
1225 goto again;
1226 }
1194 } 1227 }
1195 raw_spin_unlock_irqrestore(&era->lock, flags); 1228 raw_spin_unlock_irqrestore(&era->lock, flags);
1196 1229
@@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1204 struct er_account *era; 1237 struct er_account *era;
1205 1238
1206 /* 1239 /*
1207 * only put constraint if extra reg was actually 1240 * Only put constraint if extra reg was actually allocated. Also takes
1208 * allocated. Also takes care of event which do 1241 * care of event which do not use an extra shared reg.
1209 * not use an extra shared reg 1242 *
1243 * Also, if this is a fake cpuc we shouldn't touch any event state
1244 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1245 * either since it'll be thrown out.
1210 */ 1246 */
1211 if (!reg->alloc) 1247 if (!reg->alloc || cpuc->is_fake)
1212 return; 1248 return;
1213 1249
1214 era = &cpuc->shared_regs->regs[reg->idx]; 1250 era = &cpuc->shared_regs->regs[reg->idx];
@@ -1300,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1300 intel_put_shared_regs_event_constraints(cpuc, event); 1336 intel_put_shared_regs_event_constraints(cpuc, event);
1301} 1337}
1302 1338
1303static int intel_pmu_hw_config(struct perf_event *event) 1339static void intel_pebs_aliases_core2(struct perf_event *event)
1304{ 1340{
1305 int ret = x86_pmu_hw_config(event); 1341 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1306
1307 if (ret)
1308 return ret;
1309
1310 if (event->attr.precise_ip &&
1311 (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1312 /* 1342 /*
1313 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 1343 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1314 * (0x003c) so that we can use it with PEBS. 1344 * (0x003c) so that we can use it with PEBS.
@@ -1329,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event)
1329 */ 1359 */
1330 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); 1360 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1331 1361
1362 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1363 event->hw.config = alt_config;
1364 }
1365}
1366
1367static void intel_pebs_aliases_snb(struct perf_event *event)
1368{
1369 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1370 /*
1371 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1372 * (0x003c) so that we can use it with PEBS.
1373 *
1374 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1375 * PEBS capable. However we can use UOPS_RETIRED.ALL
1376 * (0x01c2), which is a PEBS capable event, to get the same
1377 * count.
1378 *
1379 * UOPS_RETIRED.ALL counts the number of cycles that retires
1380 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
1381 * larger than the maximum number of micro-ops that can be
1382 * retired per cycle (4) and then inverting the condition, we
1383 * count all cycles that retire 16 or less micro-ops, which
1384 * is every cycle.
1385 *
1386 * Thereby we gain a PEBS capable cycle counter.
1387 */
1388 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
1332 1389
1333 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 1390 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1334 event->hw.config = alt_config; 1391 event->hw.config = alt_config;
1335 } 1392 }
1393}
1394
1395static int intel_pmu_hw_config(struct perf_event *event)
1396{
1397 int ret = x86_pmu_hw_config(event);
1398
1399 if (ret)
1400 return ret;
1401
1402 if (event->attr.precise_ip && x86_pmu.pebs_aliases)
1403 x86_pmu.pebs_aliases(event);
1336 1404
1337 if (intel_pmu_needs_lbr_smpl(event)) { 1405 if (intel_pmu_needs_lbr_smpl(event)) {
1338 ret = intel_pmu_setup_lbr_filter(event); 1406 ret = intel_pmu_setup_lbr_filter(event);
@@ -1607,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = {
1607 .max_period = (1ULL << 31) - 1, 1675 .max_period = (1ULL << 31) - 1,
1608 .get_event_constraints = intel_get_event_constraints, 1676 .get_event_constraints = intel_get_event_constraints,
1609 .put_event_constraints = intel_put_event_constraints, 1677 .put_event_constraints = intel_put_event_constraints,
1678 .pebs_aliases = intel_pebs_aliases_core2,
1610 1679
1611 .format_attrs = intel_arch3_formats_attr, 1680 .format_attrs = intel_arch3_formats_attr,
1612 1681
@@ -1840,8 +1909,9 @@ __init int intel_pmu_init(void)
1840 break; 1909 break;
1841 1910
1842 case 42: /* SandyBridge */ 1911 case 42: /* SandyBridge */
1843 x86_add_quirk(intel_sandybridge_quirk);
1844 case 45: /* SandyBridge, "Romely-EP" */ 1912 case 45: /* SandyBridge, "Romely-EP" */
1913 x86_add_quirk(intel_sandybridge_quirk);
1914 case 58: /* IvyBridge */
1845 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 1915 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
1846 sizeof(hw_cache_event_ids)); 1916 sizeof(hw_cache_event_ids));
1847 1917
@@ -1849,6 +1919,7 @@ __init int intel_pmu_init(void)
1849 1919
1850 x86_pmu.event_constraints = intel_snb_event_constraints; 1920 x86_pmu.event_constraints = intel_snb_event_constraints;
1851 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; 1921 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
1922 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
1852 x86_pmu.extra_regs = intel_snb_extra_regs; 1923 x86_pmu.extra_regs = intel_snb_extra_regs;
1853 /* all extra regs are per-cpu when HT is on */ 1924 /* all extra regs are per-cpu when HT is on */
1854 x86_pmu.er_flags |= ERF_HAS_RSP_1; 1925 x86_pmu.er_flags |= ERF_HAS_RSP_1;
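
The refactor above turns the PEBS "cycles" workaround into a per-PMU callback: both intel_pebs_aliases_core2() and intel_pebs_aliases_snb() rewrite a precise 0x003c (CPU_CLK_UNHALTED.THREAD_P) request into a PEBS-capable event programmed with cmask=16 and inv=1, so the counted condition "retired at most 15 micro-ops this cycle" holds on every cycle and the event effectively counts cycles. A hedged sketch of the raw PERFEVTSEL encoding involved, assuming the architectural bit layout (event select bits 0-7, umask bits 8-15, INV bit 23, CMASK bits 24-31); illustration only, not the kernel's X86_CONFIG():

#include <stdint.h>
#include <stdio.h>

/* Build an "inverted cmask" cycles alias for a PEBS-capable event. */
static uint64_t pebs_cycles_alias(uint8_t event, uint8_t umask)
{
	uint64_t cfg = 0;

	cfg |= (uint64_t)event;		/* event select, bits 0-7      */
	cfg |= (uint64_t)umask << 8;	/* unit mask,   bits 8-15      */
	cfg |= 1ULL << 23;		/* INV                         */
	cfg |= 16ULL << 24;		/* CMASK = 16 > 4 uops/cycle   */

	return cfg;
}

int main(void)
{
	/* core2-style alias (event 0xc0) and SNB alias (event 0xc2, umask 0x01) */
	printf("%#llx\n", (unsigned long long)pebs_cycles_alias(0xc0, 0x00));
	printf("%#llx\n", (unsigned long long)pebs_cycles_alias(0xc2, 0x01));
	return 0;
}
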
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 5a3edc27f6e5..35e2192df9f4 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -400,14 +400,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
400 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ 400 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
401 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ 401 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
402 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ 402 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
403 INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ 403 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
404 INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
405 INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
406 INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
407 INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
408 INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
409 INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
410 INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
411 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 404 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
412 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 405 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
413 INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ 406 INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 086eb58c6e80..f1b42b3a186c 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void)
120 bool ret = false; 120 bool ret = false;
121 struct pvclock_vcpu_time_info *src; 121 struct pvclock_vcpu_time_info *src;
122 122
123 /*
124 * per_cpu() is safe here because this function is only called from
125 * timer functions where preemption is already disabled.
126 */
127 WARN_ON(!in_atomic());
128 src = &__get_cpu_var(hv_clock); 123 src = &__get_cpu_var(hv_clock);
129 if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { 124 if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
130 __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED); 125 __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index e31bf8d5c4d2..149b8d9c6ad4 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs)
42static void __init init_nmi_testsuite(void) 42static void __init init_nmi_testsuite(void)
43{ 43{
44 /* trap all the unknown NMIs we may generate */ 44 /* trap all the unknown NMIs we may generate */
45 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); 45 register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
46} 46}
47 47
48static void __init cleanup_nmi_testsuite(void) 48static void __init cleanup_nmi_testsuite(void)
@@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
64{ 64{
65 unsigned long timeout; 65 unsigned long timeout;
66 66
67 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, 67 if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback,
68 NMI_FLAG_FIRST, "nmi_selftest")) { 68 NMI_FLAG_FIRST, "nmi_selftest")) {
69 nmi_fail = FAILURE; 69 nmi_fail = FAILURE;
70 return; 70 return;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 79c45af81604..25b48edb847c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -639,9 +639,11 @@ void native_machine_shutdown(void)
639 set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); 639 set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
640 640
641 /* 641 /*
642 * O.K Now that I'm on the appropriate processor, 642 * O.K Now that I'm on the appropriate processor, stop all of the
643 * stop all of the others. 643 * others. Also disable the local irq to not receive the per-cpu
644 * timer interrupt which may trigger scheduler's load balance.
644 */ 645 */
646 local_irq_disable();
645 stop_other_cpus(); 647 stop_other_cpus();
646#endif 648#endif
647 649
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f56f96da77f5..3fab55bea29b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -382,6 +382,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
382 if ((i == cpu) || (has_mc && match_llc(c, o))) 382 if ((i == cpu) || (has_mc && match_llc(c, o)))
383 link_mask(llc_shared, cpu, i); 383 link_mask(llc_shared, cpu, i);
384 384
385 }
386
387 /*
388 * This needs a separate iteration over the cpus because we rely on all
389 * cpu_sibling_mask links to be set-up.
390 */
391 for_each_cpu(i, cpu_sibling_setup_mask) {
392 o = &cpu_data(i);
393
385 if ((i == cpu) || (has_mc && match_mc(c, o))) { 394 if ((i == cpu) || (has_mc && match_mc(c, o))) {
386 link_mask(core, cpu, i); 395 link_mask(core, cpu, i);
387 396
@@ -410,15 +419,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
410/* maps the cpu to the sched domain representing multi-core */ 419/* maps the cpu to the sched domain representing multi-core */
411const struct cpumask *cpu_coregroup_mask(int cpu) 420const struct cpumask *cpu_coregroup_mask(int cpu)
412{ 421{
413 struct cpuinfo_x86 *c = &cpu_data(cpu); 422 return cpu_llc_shared_mask(cpu);
414 /*
415 * For perf, we return last level cache shared map.
416 * And for power savings, we return cpu_core_map
417 */
418 if (!(cpu_has(c, X86_FEATURE_AMD_DCM)))
419 return cpu_core_mask(cpu);
420 else
421 return cpu_llc_shared_mask(cpu);
422} 423}
423 424
424static void impress_friends(void) 425static void impress_friends(void)
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index f61ee67ec00f..677b1ed184c9 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -8,6 +8,7 @@
8#include <linux/module.h> 8#include <linux/module.h>
9 9
10#include <asm/word-at-a-time.h> 10#include <asm/word-at-a-time.h>
11#include <linux/sched.h>
11 12
12/* 13/*
13 * best effort, GUP based copy_from_user() that is NMI-safe 14 * best effort, GUP based copy_from_user() that is NMI-safe
@@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
21 void *map; 22 void *map;
22 int ret; 23 int ret;
23 24
25 if (__range_not_ok(from, n, TASK_SIZE) == 0)
26 return len;
27
24 do { 28 do {
25 ret = __get_user_pages_fast(addr, 1, 0, &page); 29 ret = __get_user_pages_fast(addr, 1, 0, &page);
26 if (!ret) 30 if (!ret)
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 819137904428..5d7e51f3fd28 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -28,7 +28,7 @@
28# - (66): the last prefix is 0x66 28# - (66): the last prefix is 0x66
29# - (F3): the last prefix is 0xF3 29# - (F3): the last prefix is 0xF3
30# - (F2): the last prefix is 0xF2 30# - (F2): the last prefix is 0xF2
31# 31# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
32 32
33Table: one byte opcode 33Table: one byte opcode
34Referrer: 34Referrer:
@@ -515,12 +515,12 @@ b4: LFS Gv,Mp
515b5: LGS Gv,Mp 515b5: LGS Gv,Mp
516b6: MOVZX Gv,Eb 516b6: MOVZX Gv,Eb
517b7: MOVZX Gv,Ew 517b7: MOVZX Gv,Ew
518b8: JMPE | POPCNT Gv,Ev (F3) 518b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
519b9: Grp10 (1A) 519b9: Grp10 (1A)
520ba: Grp8 Ev,Ib (1A) 520ba: Grp8 Ev,Ib (1A)
521bb: BTC Ev,Gv 521bb: BTC Ev,Gv
522bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) 522bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
523bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) 523bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
524be: MOVSX Gv,Eb 524be: MOVSX Gv,Eb
525bf: MOVSX Gv,Ew 525bf: MOVSX Gv,Ew
526# 0x0f 0xc0-0xcf 526# 0x0f 0xc0-0xcf
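
The (!F3) annotations above make it explicit that these rows describe two encodings distinguished only by the last legacy prefix: 0x0f 0xbc decodes as BSF when the last prefix is not 0xF3 and as TZCNT when it is, and likewise BSR/LZCNT and JMPE/POPCNT. A trivial sketch of that selection rule; the function is illustrative, the real tables are generated by gen-insn-attr-x86.awk:

/* Pick the mnemonic for opcode 0x0f 0xbc from the last legacy prefix. */
static const char *decode_0f_bc(unsigned char last_prefix)
{
	return last_prefix == 0xF3 ? "TZCNT" : "BSF";
}
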
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 97141c26a13a..bc4e9d84157f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -62,7 +62,8 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
62 extra += PMD_SIZE; 62 extra += PMD_SIZE;
63#endif 63#endif
64 /* The first 2/4M doesn't use large pages. */ 64 /* The first 2/4M doesn't use large pages. */
65 extra += mr->end - mr->start; 65 if (mr->start < PMD_SIZE)
66 extra += mr->end - mr->start;
66 67
67 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; 68 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
68 } else 69 } else
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 732af3a96183..4599c3e8bcb6 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -176,6 +176,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
176 return; 176 return;
177 } 177 }
178 178
179 node_set(node, numa_nodes_parsed);
180
179 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", 181 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
180 node, pxm, 182 node, pxm,
181 (unsigned long long) start, (unsigned long long) end - 1); 183 (unsigned long long) start, (unsigned long long) end - 1);
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index e31bcd8f2eee..fd41a9262d65 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -782,7 +782,7 @@ BLOCKING_NOTIFIER_HEAD(intel_scu_notifier);
782EXPORT_SYMBOL_GPL(intel_scu_notifier); 782EXPORT_SYMBOL_GPL(intel_scu_notifier);
783 783
784/* Called by IPC driver */ 784/* Called by IPC driver */
785void intel_scu_devices_create(void) 785void __devinit intel_scu_devices_create(void)
786{ 786{
787 int i; 787 int i;
788 788
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 3ae0e61abd23..59880afa851f 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1295,7 +1295,6 @@ static void __init enable_timeouts(void)
1295 */ 1295 */
1296 mmr_image |= (1L << SOFTACK_MSHIFT); 1296 mmr_image |= (1L << SOFTACK_MSHIFT);
1297 if (is_uv2_hub()) { 1297 if (is_uv2_hub()) {
1298 mmr_image &= ~(1L << UV2_LEG_SHFT);
1299 mmr_image |= (1L << UV2_EXT_SHFT); 1298 mmr_image |= (1L << UV2_EXT_SHFT);
1300 } 1299 }
1301 write_mmr_misc_control(pnode, mmr_image); 1300 write_mmr_misc_control(pnode, mmr_image);
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index 5f6a5b6c3a15..ddcf39b1a18d 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -66,9 +66,10 @@ BEGIN {
66 rex_expr = "^REX(\\.[XRWB]+)*" 66 rex_expr = "^REX(\\.[XRWB]+)*"
67 fpu_expr = "^ESC" # TODO 67 fpu_expr = "^ESC" # TODO
68 68
69 lprefix1_expr = "\\(66\\)" 69 lprefix1_expr = "\\((66|!F3)\\)"
70 lprefix2_expr = "\\(F3\\)" 70 lprefix2_expr = "\\(F3\\)"
71 lprefix3_expr = "\\(F2\\)" 71 lprefix3_expr = "\\((F2|!F3)\\)"
72 lprefix_expr = "\\((66|F2|F3)\\)"
72 max_lprefix = 4 73 max_lprefix = 4
73 74
74 # All opcodes starting with lower-case 'v' or with (v1) superscript 75 # All opcodes starting with lower-case 'v' or with (v1) superscript
@@ -333,13 +334,16 @@ function convert_operands(count,opnd, i,j,imm,mod)
333 if (match(ext, lprefix1_expr)) { 334 if (match(ext, lprefix1_expr)) {
334 lptable1[idx] = add_flags(lptable1[idx],flags) 335 lptable1[idx] = add_flags(lptable1[idx],flags)
335 variant = "INAT_VARIANT" 336 variant = "INAT_VARIANT"
336 } else if (match(ext, lprefix2_expr)) { 337 }
338 if (match(ext, lprefix2_expr)) {
337 lptable2[idx] = add_flags(lptable2[idx],flags) 339 lptable2[idx] = add_flags(lptable2[idx],flags)
338 variant = "INAT_VARIANT" 340 variant = "INAT_VARIANT"
339 } else if (match(ext, lprefix3_expr)) { 341 }
342 if (match(ext, lprefix3_expr)) {
340 lptable3[idx] = add_flags(lptable3[idx],flags) 343 lptable3[idx] = add_flags(lptable3[idx],flags)
341 variant = "INAT_VARIANT" 344 variant = "INAT_VARIANT"
342 } else { 345 }
346 if (!match(ext, lprefix_expr)){
343 table[idx] = add_flags(table[idx],flags) 347 table[idx] = add_flags(table[idx],flags)
344 } 348 }
345 } 349 }
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 0b9f2e13c781..c1dacca312f3 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -31,5 +31,5 @@ asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
31asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, 31asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
32 struct timespec __user *tsp, const sigset_t __user *sigmask, 32 struct timespec __user *tsp, const sigset_t __user *sigmask,
33 size_t sigsetsize); 33 size_t sigsetsize);
34 34asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
35 35 size_t sigsetsize);
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index b9f8e5850d3a..efe4e854b3cd 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -493,7 +493,7 @@ static void do_signal(struct pt_regs *regs)
493 if (ret) 493 if (ret)
494 return; 494 return;
495 495
496 signal_delivered(signr, info, ka, regs, 0); 496 signal_delivered(signr, &info, &ka, regs, 0);
497 if (current->ptrace & PT_SINGLESTEP) 497 if (current->ptrace & PT_SINGLESTEP)
498 task_pt_regs(current)->icountlevel = 1; 498 task_pt_regs(current)->icountlevel = 1;
499 499