Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                        2
-rw-r--r--  arch/x86/boot/compressed/.gitignore     2
-rw-r--r--  arch/x86/include/asm/dma-mapping.h      4
-rw-r--r--  arch/x86/include/asm/ftrace.h           4
-rw-r--r--  arch/x86/include/asm/iommu.h            1
-rw-r--r--  arch/x86/include/asm/kvm_host.h         3
-rw-r--r--  arch/x86/kernel/Makefile                3
-rw-r--r--  arch/x86/kernel/entry_32.S              4
-rw-r--r--  arch/x86/kernel/entry_64.S              4
-rw-r--r--  arch/x86/kernel/ftrace.c                50
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c        7
-rw-r--r--  arch/x86/kernel/i386_ksyms_32.c         2
-rw-r--r--  arch/x86/kernel/pci-dma.c               16
-rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c        14
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c        2
-rw-r--r--  arch/x86/kvm/i8254.c                    11
-rw-r--r--  arch/x86/kvm/i8254.h                    1
-rw-r--r--  arch/x86/kvm/mmu.c                      1
-rw-r--r--  arch/x86/kvm/x86.c                      6
-rw-r--r--  arch/x86/mm/init_64.c                   14
-rw-r--r--  arch/x86/xen/Makefile                   2
-rw-r--r--  arch/x86/xen/mmu.c                      18
22 files changed, 113 insertions, 58 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 350bee1d54dc..d11d7b513191 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -28,7 +28,7 @@ config X86
 	select HAVE_KRETPROBES
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FTRACE
+	select HAVE_FUNCTION_TRACER
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/x86/boot/compressed/.gitignore b/arch/x86/boot/compressed/.gitignore
index be0ed065249b..63eff3b04d01 100644
--- a/arch/x86/boot/compressed/.gitignore
+++ b/arch/x86/boot/compressed/.gitignore
@@ -1 +1,3 @@
 relocs
+vmlinux.bin.all
+vmlinux.relocs
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4a5397bfce27..7f225a4b2a26 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -255,9 +255,11 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 
 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
-#ifdef CONFIG_X86_64
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 
+	if (dma_mask <= DMA_24BIT_MASK)
+		gfp |= GFP_DMA;
+#ifdef CONFIG_X86_64
 	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 47f7e65e6c1d..9e8bc29b8b17 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_FTRACE_H
 #define _ASM_X86_FTRACE_H
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
@@ -19,6 +19,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 }
 #endif
 
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 98e28ea8cd16..e4a552d44465 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -7,7 +7,6 @@ extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int dmar_disabled;
-extern int forbid_dac;
 
 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 65679d006337..8346be87cfa1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -364,6 +364,9 @@ struct kvm_arch{
 
 	struct page *ept_identity_pagetable;
 	bool ept_identity_pagetable_done;
+
+	unsigned long irq_sources_bitmap;
+	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
 };
 
 struct kvm_vm_stat {
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d7e5a58ee22f..e489ff9cb3e2 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -6,11 +6,12 @@ extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinu
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
 #
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dd65143941a8..28b597ef9ca1 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1149,7 +1149,7 @@ ENDPROC(xen_failsafe_callback)
 
 #endif	/* CONFIG_XEN */
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 ENTRY(mcount)
@@ -1204,7 +1204,7 @@ trace:
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 .section .rodata,"a"
 #include "syscall_table_32.S"
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 09e7145484c5..b86f332c96a6 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -61,7 +61,7 @@
 
 	.code64
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
 	retq
@@ -138,7 +138,7 @@ trace:
 	jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d073d981a730..50ea0ac8c9bf 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -21,8 +21,7 @@
 #include <asm/nops.h>
 
 
-/* Long is fine, even if it is only 4 bytes ;-) */
-static unsigned long *ftrace_nop;
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
 
 union ftrace_code_union {
 	char code[MCOUNT_INSN_SIZE];
@@ -33,17 +32,17 @@ union ftrace_code_union {
 };
 
 
-static int notrace ftrace_calc_offset(long ip, long addr)
+static int ftrace_calc_offset(long ip, long addr)
 {
 	return (int)(addr - ip);
 }
 
-notrace unsigned char *ftrace_nop_replace(void)
+unsigned char *ftrace_nop_replace(void)
 {
-	return (char *)ftrace_nop;
+	return ftrace_nop;
 }
 
-notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
 
@@ -57,7 +56,7 @@ notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return calc.code;
 }
 
-notrace int
+int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		   unsigned char *new_code)
 {
@@ -66,26 +65,31 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 	/*
 	 * Note: Due to modules and __init, code can
 	 * disappear and change, we need to protect against faulting
-	 * as well as code changing.
+	 * as well as code changing. We do this by using the
+	 * probe_kernel_* functions.
 	 *
 	 * No real locking needed, this code is run through
 	 * kstop_machine, or before SMP starts.
 	 */
-	if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE))
-		return 1;
 
+	/* read the text we want to modify */
+	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/* Make sure it is what we expect it to be */
 	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
-		return 2;
+		return -EINVAL;
 
-	WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code,
-			MCOUNT_INSN_SIZE));
+	/* replace the text with the new text */
+	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+		return -EPERM;
 
 	sync_core();
 
 	return 0;
 }
 
-notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
 	unsigned char old[MCOUNT_INSN_SIZE], *new;
@@ -98,13 +102,6 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func)
 	return ret;
 }
 
-notrace int ftrace_mcount_set(unsigned long *data)
-{
-	/* mcount is initialized as a nop */
-	*data = 0;
-	return 0;
-}
-
 int __init ftrace_dyn_arch_init(void *data)
 {
 	extern const unsigned char ftrace_test_p6nop[];
@@ -127,9 +124,6 @@ int __init ftrace_dyn_arch_init(void *data)
 	 * TODO: check the cpuid to determine the best nop.
 	 */
 	asm volatile (
-		"jmp ftrace_test_jmp\n"
-		/* This code needs to stay around */
-		".section .text, \"ax\"\n"
 		"ftrace_test_jmp:"
 		"jmp ftrace_test_p6nop\n"
 		"nop\n"
@@ -140,8 +134,6 @@ int __init ftrace_dyn_arch_init(void *data)
 		"jmp 1f\n"
 		"ftrace_test_nop5:"
 		".byte 0x66,0x66,0x66,0x66,0x90\n"
-		"jmp 1f\n"
-		".previous\n"
 		"1:"
 		".section .fixup, \"ax\"\n"
 		"2:	movl $1, %0\n"
@@ -156,15 +148,15 @@ int __init ftrace_dyn_arch_init(void *data)
 	switch (faulted) {
 	case 0:
 		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
-		ftrace_nop = (unsigned long *)ftrace_test_p6nop;
+		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
 		break;
 	case 1:
 		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
-		ftrace_nop = (unsigned long *)ftrace_test_nop5;
+		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
 		break;
 	case 2:
 		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
-		ftrace_nop = (unsigned long *)ftrace_test_jmp;
+		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
 		break;
 	}
 
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 680a06557c5e..2c7dbdb98278 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -15,7 +15,6 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
 #include <asm/smp.h>
@@ -398,16 +397,16 @@ void __init uv_system_init(void)
 	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-	uv_blade_info = alloc_bootmem_pages(bytes);
+	uv_blade_info = kmalloc(bytes, GFP_KERNEL);
 
 	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
 
 	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
-	uv_node_to_blade = alloc_bootmem_pages(bytes);
+	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
 	memset(uv_node_to_blade, 255, bytes);
 
 	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
-	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
+	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
 	memset(uv_cpu_to_blade, 255, bytes);
 
 	blade = 0;
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index dd7ebee446af..43cec6bdda63 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -5,7 +5,7 @@
 #include <asm/desc.h>
 #include <asm/ftrace.h>
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /* mcount is defined in assembly */
 EXPORT_SYMBOL(mcount);
 #endif
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1972266e8ba5..192624820217 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -9,6 +9,8 @@
 #include <asm/calgary.h>
 #include <asm/amd_iommu.h>
 
+static int forbid_dac __read_mostly;
+
 struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
@@ -291,3 +293,17 @@ void pci_iommu_shutdown(void)
 }
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
+
+#ifdef CONFIG_PCI
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+		printk(KERN_INFO "PCI: VIA PCI bridge detected."
+				 "Disabling DAC.\n");
+		forbid_dac = 1;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+#endif
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index c4ce0332759e..3c539d111abb 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,9 +18,21 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
+static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flags)
+{
+	void *vaddr;
+
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	if (vaddr)
+		return vaddr;
+
+	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
+}
+
 struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = swiotlb_alloc_coherent,
+	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
 	.map_single = swiotlb_map_single_phys,
 	.unmap_single = swiotlb_unmap_single,
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index b545f371b5f5..695e426aa354 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -12,7 +12,7 @@
 #include <asm/desc.h>
 #include <asm/ftrace.h>
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /* mcount is defined in assembly */
 EXPORT_SYMBOL(mcount);
 #endif
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 11c6725fb798..8772dc946823 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -545,6 +545,12 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 	if (!pit)
 		return NULL;
 
+	mutex_lock(&kvm->lock);
+	pit->irq_source_id = kvm_request_irq_source_id(kvm);
+	mutex_unlock(&kvm->lock);
+	if (pit->irq_source_id < 0)
+		return NULL;
+
 	mutex_init(&pit->pit_state.lock);
 	mutex_lock(&pit->pit_state.lock);
 	spin_lock_init(&pit->pit_state.inject_lock);
@@ -587,6 +593,7 @@ void kvm_free_pit(struct kvm *kvm)
 		mutex_lock(&kvm->arch.vpit->pit_state.lock);
 		timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
 		hrtimer_cancel(timer);
+		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
 		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
 		kfree(kvm->arch.vpit);
 	}
@@ -595,8 +602,8 @@ void kvm_free_pit(struct kvm *kvm)
 static void __inject_pit_timer_intr(struct kvm *kvm)
 {
 	mutex_lock(&kvm->lock);
-	kvm_set_irq(kvm, 0, 1);
-	kvm_set_irq(kvm, 0, 0);
+	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
+	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
 	mutex_unlock(&kvm->lock);
 }
 
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index e436d4983aa1..4178022b97aa 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -44,6 +44,7 @@ struct kvm_pit {
 	struct kvm_io_device speaker_dev;
 	struct kvm *kvm;
 	struct kvm_kpit_state pit_state;
+	int irq_source_id;
 };
 
 #define KVM_PIT_BASE_ADDRESS	0x40
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 99c239c5c0ac..2a5e64881d9b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2634,6 +2634,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	kvm_x86_ops->tlb_flush(vcpu);
+	set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
 	return 1;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4f0677d1eae8..f1f8ff2f1fa2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1742,7 +1742,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			mutex_lock(&kvm->lock);
-			kvm_set_irq(kvm, irq_event.irq, irq_event.level);
+			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+				    irq_event.irq, irq_event.level);
 			mutex_unlock(&kvm->lock);
 			r = 0;
 		}
@@ -4013,6 +4014,9 @@ struct kvm *kvm_arch_create_vm(void)
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
+	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
+	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
+
 	return kvm;
 }
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b8e461d49412..f79a02f64d10 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -350,8 +350,10 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		 * pagetable pages as RO. So assume someone who pre-setup
 		 * these mappings are more intelligent.
 		 */
-		if (pte_val(*pte))
+		if (pte_val(*pte)) {
+			pages++;
 			continue;
+		}
 
 		if (0)
 			printk(" pte=%p addr=%lx pte=%016lx\n",
@@ -418,8 +420,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_2M))
+			if (page_size_mask & (1 << PG_LEVEL_2M)) {
+				pages++;
 				continue;
+			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 		}
 
@@ -499,8 +503,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_1G))
+			if (page_size_mask & (1 << PG_LEVEL_1G)) {
+				pages++;
 				continue;
+			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 		}
 
@@ -831,7 +837,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	last_mapped_pfn = init_memory_mapping(start, start + size-1);
+	last_mapped_pfn = init_memory_mapping(start, start + size);
 	if (last_mapped_pfn > max_pfn_mapped)
 		max_pfn_mapped = last_mapped_pfn;
 
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 313947940a1a..6dcefba7836f 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,4 +1,4 @@
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_spinlock.o = -pg
 CFLAGS_REMOVE_time.o = -pg
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d4d52f5a1cf7..aba77b2b7d18 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -246,11 +246,21 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
 	unsigned long address = (unsigned long)vaddr;
 	unsigned int level;
-	pte_t *pte = lookup_address(address, &level);
-	unsigned offset = address & ~PAGE_MASK;
+	pte_t *pte;
+	unsigned offset;
 
-	BUG_ON(pte == NULL);
+	/*
+	 * if the PFN is in the linear mapped vaddr range, we can just use
+	 * the (quick) virt_to_machine() p2m lookup
+	 */
+	if (virt_addr_valid(vaddr))
+		return virt_to_machine(vaddr);
 
+	/* otherwise we have to do a (slower) full page-table walk */
+
+	pte = lookup_address(address, &level);
+	BUG_ON(pte == NULL);
+	offset = address & ~PAGE_MASK;
 	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
 }
 
@@ -410,7 +420,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
 	xen_mc_batch();
 
-	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
 	u.val = pte_val_ma(pte);
 	xen_extend_mmu_update(&u);
 