author     Ingo Molnar <mingo@elte.hu>  2008-07-28 15:14:43 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-07-28 15:14:43 -0400
commit     414f746d232d41ed6ae8632c4495ae795373c44b (patch)
tree       167f9bc8f139c6e82e6732b38c7a938b8a9d31cd /arch/x86
parent     5a7a201c51c324876d00a54e7208af6af12d1ca4 (diff)
parent     c9272c4f9fbe2087beb3392f526dc5b19efaa56b (diff)

Merge branch 'linus' into cpus4096
Diffstat (limited to 'arch/x86')
 -rw-r--r--  arch/x86/Kconfig                      |  35
 -rw-r--r--  arch/x86/Makefile                     |   5
 -rw-r--r--  arch/x86/ia32/ia32_aout.c             |   6
 -rw-r--r--  arch/x86/kernel/amd_iommu.c           |   4
 -rw-r--r--  arch/x86/kernel/amd_iommu_init.c      |   4
 -rw-r--r--  arch/x86/kernel/head_32.S             |   8
 -rw-r--r--  arch/x86/kernel/machine_kexec_32.c    |  39
 -rw-r--r--  arch/x86/kernel/machine_kexec_64.c    |   2
 -rw-r--r--  arch/x86/kernel/pci-calgary_64.c      |  73
 -rw-r--r--  arch/x86/kernel/pci-dma.c             |  27
 -rw-r--r--  arch/x86/kernel/pci-gart_64.c         |   3
 -rw-r--r--  arch/x86/kernel/pci-nommu.c           |  14
 -rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c      |   2
 -rw-r--r--  arch/x86/kernel/relocate_kernel_32.S  | 174
 -rw-r--r--  arch/x86/kvm/mmu.c                    |   7
 -rw-r--r--  arch/x86/kvm/svm.c                    |  10
 -rw-r--r--  arch/x86/kvm/vmx.c                    |  22
 -rw-r--r--  arch/x86/kvm/x86.c                    | 109
 -rw-r--r--  arch/x86/mm/Makefile                  |   1
 -rw-r--r--  arch/x86/mm/gup.c                     | 295
 -rw-r--r--  arch/x86/mm/init_64.c                 |  37
 -rw-r--r--  arch/x86/mm/pgtable_32.c              |  47
 22 files changed, 625 insertions, 299 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e3cba0b45600..b6fa2877b173 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,8 +22,9 @@ config X86
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_IOREMAP_PROT
+	select HAVE_GET_USER_PAGES_FAST
 	select HAVE_KPROBES
-	select ARCH_WANT_OPTIONAL_GPIOLIB if !X86_RDC321X
+	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_KRETPROBES
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE
@@ -332,20 +333,6 @@ config X86_BIGSMP
 
 endif
 
-config X86_RDC321X
-	bool "RDC R-321x SoC"
-	depends on X86_32
-	select M486
-	select X86_REBOOTFIXUPS
-	select GENERIC_GPIO
-	select LEDS_CLASS
-	select LEDS_GPIO
-	select NEW_LEDS
-	help
-	  This option is needed for RDC R-321x system-on-chip, also known
-	  as R-8610-(G).
-	  If you don't have one of these chips, you should say N here.
-
 config X86_VSMP
 	bool "Support for ScaleMP vSMP"
 	select PARAVIRT
@@ -369,6 +356,16 @@ config X86_VISWS
 	  A kernel compiled for the Visual Workstation will run on general
 	  PCs as well. See <file:Documentation/sgi-visws.txt> for details.
 
+config X86_RDC321X
+	bool "RDC R-321x SoC"
+	depends on X86_32
+	select M486
+	select X86_REBOOTFIXUPS
+	help
+	  This option is needed for RDC R-321x system-on-chip, also known
+	  as R-8610-(G).
+	  If you don't have one of these chips, you should say N here.
+
 config SCHED_NO_NO_OMIT_FRAME_POINTER
 	def_bool y
 	prompt "Single-depth WCHAN output"
@@ -1279,6 +1276,14 @@ config CRASH_DUMP
 	  (CONFIG_RELOCATABLE=y).
 	  For more details see Documentation/kdump/kdump.txt
 
+config KEXEC_JUMP
+	bool "kexec jump (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on KEXEC && HIBERNATION && X86_32
+	help
+	  Jump between original kernel and kexeced kernel and invoke
+	  code in physical address mode via KEXEC
+
 config PHYSICAL_START
 	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
 	default "0x1000000" if X86_NUMAQ
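[Note on the Kconfig hunks: the new HAVE_GET_USER_PAGES_FAST select wires up the x86 implementation added in arch/x86/mm/gup.c (see the mm/Makefile hunk below). On architectures that do not select it, callers are expected to take the slow path under mmap_sem. A minimal sketch of that fallback, mirroring the slow path at the bottom of the new gup.c (the function name here is illustrative, not from the patch):

	static int gup_slow_path(unsigned long start, int nr_pages, int write,
				 struct page **pages)
	{
		struct mm_struct *mm = current->mm;
		int ret;

		/* slow path: serialize against pagetable teardown */
		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start, nr_pages,
				     write, 0, pages, NULL);
		up_read(&mm->mmap_sem);
		return ret;
	}
]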
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 919ce21ea654..f5631da585b6 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -118,11 +118,6 @@ mflags-$(CONFIG_X86_GENERICARCH):= -Iinclude/asm-x86/mach-generic
 fcore-$(CONFIG_X86_GENERICARCH)	+= arch/x86/mach-generic/
 mcore-$(CONFIG_X86_GENERICARCH)	:= arch/x86/mach-default/
 
-# RDC R-321x subarch support
-mflags-$(CONFIG_X86_RDC321X)	:= -Iinclude/asm-x86/mach-rdc321x
-mcore-$(CONFIG_X86_RDC321X)	:= arch/x86/mach-default/
-core-$(CONFIG_X86_RDC321X)	+= arch/x86/mach-rdc321x/
-
 # default subarch .h files
 mflags-y += -Iinclude/asm-x86/mach-default
 
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 58cccb6483b0..a0e1dbe67dc1 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -441,12 +441,6 @@ beyond_if:
 	regs->r8 = regs->r9 = regs->r10 = regs->r11 =
 	regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
 	set_fs(USER_DS);
-	if (unlikely(current->ptrace & PT_PTRACED)) {
-		if (current->ptrace & PT_TRACE_EXEC)
-			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
-		else
-			send_sig(SIGTRAP, current, 0);
-	}
 	return 0;
 }
 
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index c25210e6ac88..74697408576f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -667,7 +667,7 @@ static int get_device_resources(struct device *dev,
 	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
 
 	/* device not translated by any IOMMU in the system? */
-	if (_bdf >= amd_iommu_last_bdf) {
+	if (_bdf > amd_iommu_last_bdf) {
 		*iommu = NULL;
 		*domain = NULL;
 		*bdf = 0xffff;
@@ -1085,7 +1085,7 @@ void prealloc_protection_domains(void)
 
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
 		devid = (dev->bus->number << 8) | dev->devfn;
-		if (devid >= amd_iommu_last_bdf)
+		if (devid > amd_iommu_last_bdf)
 			continue;
 		devid = amd_iommu_alias_table[devid];
 		if (domain_for_device(devid))
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index c9d8ff2eb130..d9a9da597e79 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -732,7 +732,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
 		set_device_exclusion_range(m->devid, m);
 		break;
 	case ACPI_IVMD_TYPE_ALL:
-		for (i = 0; i < amd_iommu_last_bdf; ++i)
+		for (i = 0; i <= amd_iommu_last_bdf; ++i)
 			set_device_exclusion_range(i, m);
 		break;
 	case ACPI_IVMD_TYPE_RANGE:
@@ -934,7 +934,7 @@ int __init amd_iommu_init(void)
 	/*
 	 * let all alias entries point to itself
 	 */
-	for (i = 0; i < amd_iommu_last_bdf; ++i)
+	for (i = 0; i <= amd_iommu_last_bdf; ++i)
 		amd_iommu_alias_table[i] = i;
 
 	/*
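[Note on the AMD IOMMU hunks: all four fix the same off-by-one. amd_iommu_last_bdf holds the *last valid* device id, i.e. an inclusive bound, so iteration must use <= and a validity test must reject only ids strictly greater than it. The corrected idiom, as a sketch (the helper is illustrative, not from the patch):

	/* amd_iommu_last_bdf is the last valid BDF: an inclusive bound */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)	/* was: i < ... */
		amd_iommu_alias_table[i] = i;

	/* matching validity check (illustrative helper): */
	static inline int bdf_is_managed(u16 devid)
	{
		return devid <= amd_iommu_last_bdf;	/* was: devid < ... */
	}
]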
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index f67e93441caf..a7010c3a377a 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -456,9 +456,6 @@ is386: movl $2,%ecx # set MP
 1:
 #endif /* CONFIG_SMP */
 	jmp *(initial_code)
-.align 4
-ENTRY(initial_code)
-	.long i386_start_kernel
 
 /*
  * We depend on ET to be correct. This checks for 287/387.
@@ -601,6 +598,11 @@ ignore_int:
 #endif
 	iret
 
+.section .cpuinit.data,"wa"
+.align 4
+ENTRY(initial_code)
+	.long i386_start_kernel
+
 .section .text
 /*
  * Real beginning of normal "text" segment
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 8864230d55af..9fe478d98406 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -22,6 +22,7 @@
 #include <asm/cpufeature.h>
 #include <asm/desc.h>
 #include <asm/system.h>
+#include <asm/cacheflush.h>
 
 #define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
 static u32 kexec_pgd[1024] PAGE_ALIGNED;
@@ -85,10 +86,12 @@ static void load_segments(void)
  * reboot code buffer to allow us to avoid allocations
  * later.
  *
- * Currently nothing.
+ * Make control page executable.
  */
 int machine_kexec_prepare(struct kimage *image)
 {
+	if (nx_enabled)
+		set_pages_x(image->control_code_page, 1);
 	return 0;
 }
 
@@ -98,27 +101,48 @@ int machine_kexec_prepare(struct kimage *image)
  */
 void machine_kexec_cleanup(struct kimage *image)
 {
+	if (nx_enabled)
+		set_pages_nx(image->control_code_page, 1);
 }
 
 /*
  * Do not allocate memory (or fail in any way) in machine_kexec().
  * We are past the point of no return, committed to rebooting now.
  */
-NORET_TYPE void machine_kexec(struct kimage *image)
+void machine_kexec(struct kimage *image)
 {
 	unsigned long page_list[PAGES_NR];
 	void *control_page;
+	asmlinkage unsigned long
+		 (*relocate_kernel_ptr)(unsigned long indirection_page,
+					unsigned long control_page,
+					unsigned long start_address,
+					unsigned int has_pae,
+					unsigned int preserve_context);
 
 	tracer_disable();
 
 	/* Interrupts aren't acceptable while we reboot */
 	local_irq_disable();
 
+	if (image->preserve_context) {
+#ifdef CONFIG_X86_IO_APIC
+		/* We need to put APICs in legacy mode so that we can
+		 * get timer interrupts in second kernel. kexec/kdump
+		 * paths already have calls to disable_IO_APIC() in
+		 * one form or other. kexec jump path also need
+		 * one.
+		 */
+		disable_IO_APIC();
+#endif
+	}
+
 	control_page = page_address(image->control_code_page);
-	memcpy(control_page, relocate_kernel, PAGE_SIZE);
+	memcpy(control_page, relocate_kernel, PAGE_SIZE/2);
 
+	relocate_kernel_ptr = control_page;
 	page_list[PA_CONTROL_PAGE] = __pa(control_page);
-	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
+	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
 	page_list[PA_PGD] = __pa(kexec_pgd);
 	page_list[VA_PGD] = (unsigned long)kexec_pgd;
 #ifdef CONFIG_X86_PAE
@@ -131,6 +155,7 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
 	page_list[PA_PTE_1] = __pa(kexec_pte1);
 	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
+	page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT);
 
 	/* The segment registers are funny things, they have both a
 	 * visible and an invisible part. Whenever the visible part is
@@ -149,8 +174,10 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	set_idt(phys_to_virt(0),0);
 
 	/* now call it */
-	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
-			image->start, cpu_has_pae);
+	image->start = relocate_kernel_ptr((unsigned long)image->head,
+					   (unsigned long)page_list,
+					   image->start, cpu_has_pae,
+					   image->preserve_context);
 }
 
 void arch_crash_save_vmcoreinfo(void)
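[Note on the machine_kexec_32.c hunks: two details fit together. The control page is now only half-copied (PAGE_SIZE/2) because its upper half is reserved for the jump-back state defined in relocate_kernel_32.S below, and relocate_kernel can now return, so machine_kexec() stores the peer kernel's re-entry point back into image->start. Restating the inline function-pointer declaration as a typedef, purely as a readability sketch (the typedef name is not in the patch):

	typedef asmlinkage unsigned long
	(*relocate_kernel_fn)(unsigned long indirection_page,
			      unsigned long control_page,
			      unsigned long start_address,
			      unsigned int has_pae,
			      unsigned int preserve_context);

	/* called as: */
	relocate_kernel_fn relocate_kernel_ptr = control_page;
	image->start = relocate_kernel_ptr((unsigned long)image->head,
					   (unsigned long)page_list,
					   image->start, cpu_has_pae,
					   image->preserve_context);
]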
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 9dd9262693a3..c43caa3a91f3 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -181,7 +181,7 @@ void machine_kexec_cleanup(struct kimage *image)
  * Do not allocate memory (or fail in any way) in machine_kexec().
  * We are past the point of no return, committed to rebooting now.
  */
-NORET_TYPE void machine_kexec(struct kimage *image)
+void machine_kexec(struct kimage *image)
 {
 	unsigned long page_list[PAGES_NR];
 	void *control_page;
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 19e7fc7c2c4f..b67a4b1d4eae 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -37,6 +37,7 @@
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
+
 #include <asm/iommu.h>
 #include <asm/calgary.h>
 #include <asm/tce.h>
@@ -413,22 +414,6 @@ static void calgary_unmap_sg(struct device *dev,
 	}
 }
 
-static int calgary_nontranslate_map_sg(struct device* dev,
-	struct scatterlist *sg, int nelems, int direction)
-{
-	struct scatterlist *s;
-	int i;
-
-	for_each_sg(sg, s, nelems, i) {
-		struct page *p = sg_page(s);
-
-		BUG_ON(!p);
-		s->dma_address = virt_to_bus(sg_virt(s));
-		s->dma_length = s->length;
-	}
-	return nelems;
-}
-
 static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 	int nelems, int direction)
 {
@@ -439,9 +424,6 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned long entry;
 	int i;
 
-	if (!translation_enabled(tbl))
-		return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
-
 	for_each_sg(sg, s, nelems, i) {
 		BUG_ON(!sg_page(s));
 
@@ -477,7 +459,6 @@ error:
 static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
 	size_t size, int direction)
 {
-	dma_addr_t dma_handle = bad_dma_address;
 	void *vaddr = phys_to_virt(paddr);
 	unsigned long uaddr;
 	unsigned int npages;
@@ -486,12 +467,7 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
 	uaddr = (unsigned long)vaddr;
 	npages = num_dma_pages(uaddr, size);
 
-	if (translation_enabled(tbl))
-		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction);
-	else
-		dma_handle = virt_to_bus(vaddr);
-
-	return dma_handle;
+	return iommu_alloc(dev, tbl, vaddr, npages, direction);
 }
 
 static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
@@ -500,9 +476,6 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
 	struct iommu_table *tbl = find_iommu_table(dev);
 	unsigned int npages;
 
-	if (!translation_enabled(tbl))
-		return;
-
 	npages = num_dma_pages(dma_handle, size);
 	iommu_free(tbl, dma_handle, npages);
 }
@@ -525,18 +498,12 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 		goto error;
 	memset(ret, 0, size);
 
-	if (translation_enabled(tbl)) {
-		/* set up tces to cover the allocated range */
-		mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
-		if (mapping == bad_dma_address)
-			goto free;
-
-		*dma_handle = mapping;
-	} else /* non translated slot */
-		*dma_handle = virt_to_bus(ret);
-
+	/* set up tces to cover the allocated range */
+	mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
+	if (mapping == bad_dma_address)
+		goto free;
+	*dma_handle = mapping;
 	return ret;
-
 free:
 	free_pages((unsigned long)ret, get_order(size));
 	ret = NULL;
@@ -544,7 +511,7 @@ error:
 	return ret;
 }
 
-static const struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_mapping_ops calgary_dma_ops = {
 	.alloc_coherent = calgary_alloc_coherent,
 	.map_single = calgary_map_single,
 	.unmap_single = calgary_unmap_single,
@@ -1241,6 +1208,16 @@ static int __init calgary_init(void)
 		goto error;
 	} while (1);
 
+	dev = NULL;
+	for_each_pci_dev(dev) {
+		struct iommu_table *tbl;
+
+		tbl = find_iommu_table(&dev->dev);
+
+		if (translation_enabled(tbl))
+			dev->dev.archdata.dma_ops = &calgary_dma_ops;
+	}
+
 	return ret;
 
 error:
@@ -1262,6 +1239,7 @@ error:
 		calgary_disable_translation(dev);
 		calgary_free_bus(dev);
 		pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
+		dev->dev.archdata.dma_ops = NULL;
 	} while (1);
 
 	return ret;
@@ -1503,6 +1481,10 @@ void __init detect_calgary(void)
 		printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d, "
 		       "CONFIG_IOMMU_DEBUG is %s.\n", specified_table_size,
 		       debugging ? "enabled" : "disabled");
+
+		/* swiotlb for devices that aren't behind the Calgary. */
+		if (max_pfn > MAX_DMA32_PFN)
+			swiotlb = 1;
 	}
 	return;
 
@@ -1519,7 +1501,7 @@ int __init calgary_iommu_init(void)
 {
 	int ret;
 
-	if (no_iommu || swiotlb)
+	if (no_iommu || (swiotlb && !calgary_detected))
 		return -ENODEV;
 
 	if (!calgary_detected)
@@ -1532,15 +1514,14 @@ int __init calgary_iommu_init(void)
 	if (ret) {
 		printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
 		       "falling back to no_iommu\n", ret);
-		if (max_pfn > MAX_DMA32_PFN)
-			printk(KERN_ERR "WARNING more than 4GB of memory, "
-			       "32bit PCI may malfunction.\n");
 		return ret;
 	}
 
 	force_iommu = 1;
 	bad_dma_address = 0x0;
-	dma_ops = &calgary_dma_ops;
+	/* dma_ops is set to swiotlb or nommu */
+	if (!dma_ops)
+		dma_ops = &nommu_dma_ops;
 
 	return 0;
 }
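[Note on the Calgary hunks: the translation_enabled() branches disappear because DMA ops become per-device. calgary_init() installs calgary_dma_ops only on devices whose slot actually has translation enabled; everything else keeps the global dma_ops (nommu or swiotlb). The lookup helper this relies on, get_dma_ops(), is introduced in this series' dma-mapping header; a sketch of it, reconstructed from how it is used here and in pci-dma.c below (treat the body as an assumption):

	static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
	{
	#ifdef CONFIG_X86_32
		return dma_ops;			/* no per-device ops on 32-bit */
	#else
		if (unlikely(!dev) || !dev->archdata.dma_ops)
			return dma_ops;		/* global fallback (swiotlb/nommu) */
		return dev->archdata.dma_ops;	/* e.g. calgary_dma_ops */
	#endif
	}
]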
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index cbecb05551bb..37544123896d 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,7 +11,7 @@
 
 static int forbid_dac __read_mostly;
 
-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
@@ -312,6 +312,8 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr)
 
 int dma_supported(struct device *dev, u64 mask)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
 		dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -319,8 +321,8 @@ int dma_supported(struct device *dev, u64 mask)
 	}
 #endif
 
-	if (dma_ops->dma_supported)
-		return dma_ops->dma_supported(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
@@ -367,6 +369,7 @@ void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t gfp)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	void *memory = NULL;
 	struct page *page;
 	unsigned long dma_mask = 0;
@@ -435,8 +438,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		/* Let low level make its own zone decisions */
 		gfp &= ~(GFP_DMA32|GFP_DMA);
 
-		if (dma_ops->alloc_coherent)
-			return dma_ops->alloc_coherent(dev, size,
+		if (ops->alloc_coherent)
+			return ops->alloc_coherent(dev, size,
 				dma_handle, gfp);
 		return NULL;
 	}
@@ -448,14 +451,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		}
 	}
 
-	if (dma_ops->alloc_coherent) {
+	if (ops->alloc_coherent) {
 		free_pages((unsigned long)memory, get_order(size));
 		gfp &= ~(GFP_DMA|GFP_DMA32);
-		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+		return ops->alloc_coherent(dev, size, dma_handle, gfp);
 	}
 
-	if (dma_ops->map_simple) {
-		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+	if (ops->map_simple) {
+		*dma_handle = ops->map_simple(dev, virt_to_phys(memory),
 					      size,
 					      PCI_DMA_BIDIRECTIONAL);
 		if (*dma_handle != bad_dma_address)
@@ -477,12 +480,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t bus)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	int order = get_order(size);
 	WARN_ON(irqs_disabled());	/* for portability */
 	if (dma_release_coherent(dev, order, vaddr))
 		return;
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, bus, size, 0);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, bus, size, 0);
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
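[Note on the pci-dma.c hunks: with get_dma_ops() in place, each generic entry point resolves its ops per call instead of dereferencing the global dma_ops directly, which is what lets Calgary coexist with swiotlb/nommu on the same machine. A hypothetical dispatch sketch in the style of the dma-mapping wrappers (not verbatim from the header):

	static inline dma_addr_t
	dma_map_single(struct device *hwdev, void *ptr, size_t size,
		       int direction)
	{
		struct dma_mapping_ops *ops = get_dma_ops(hwdev);

		/* per-device ops, falling back to the global dma_ops */
		return ops->map_single(hwdev, virt_to_phys(ptr), size,
				       direction);
	}
]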
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index df5f142657d2..744126e64950 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -692,8 +692,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 extern int agp_amd64_init(void);
 
-static const struct dma_mapping_ops gart_dma_ops = {
-	.mapping_error = NULL,
+static struct dma_mapping_ops gart_dma_ops = {
 	.map_single = gart_map_single,
 	.map_simple = gart_map_simple,
 	.unmap_single = gart_unmap_single,
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 792b9179eff3..3f91f71cdc3e 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -72,21 +72,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 	return nents;
 }
 
-/* Make sure we keep the same behaviour */
-static int nommu_mapping_error(dma_addr_t dma_addr)
-{
-#ifdef CONFIG_X86_32
-	return 0;
-#else
-	return (dma_addr == bad_dma_address);
-#endif
-}
-
-
-const struct dma_mapping_ops nommu_dma_ops = {
+struct dma_mapping_ops nommu_dma_ops = {
 	.map_single = nommu_map_single,
 	.map_sg = nommu_map_sg,
-	.mapping_error = nommu_mapping_error,
 	.is_phys = 1,
 };
 
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 20df839b9c20..c4ce0332759e 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,7 +18,7 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
-const struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index c30fe25d470d..703310a99023 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -20,11 +20,44 @@
 #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define PAE_PGD_ATTR (_PAGE_PRESENT)
 
+/* control_page + PAGE_SIZE/2 ~ control_page + PAGE_SIZE * 3/4 are
+ * used to save some data for jumping back
+ */
+#define DATA(offset)		(PAGE_SIZE/2+(offset))
+
+/* Minimal CPU state */
+#define ESP			DATA(0x0)
+#define CR0			DATA(0x4)
+#define CR3			DATA(0x8)
+#define CR4			DATA(0xc)
+
+/* other data */
+#define CP_VA_CONTROL_PAGE	DATA(0x10)
+#define CP_PA_PGD		DATA(0x14)
+#define CP_PA_SWAP_PAGE		DATA(0x18)
+#define CP_PA_BACKUP_PAGES_MAP	DATA(0x1c)
+
 	.text
 	.align PAGE_SIZE
 	.globl relocate_kernel
 relocate_kernel:
-	movl	8(%esp), %ebp /* list of pages */
+	/* Save the CPU context, used for jumping back */
+
+	pushl	%ebx
+	pushl	%esi
+	pushl	%edi
+	pushl	%ebp
+	pushf
+
+	movl	20+8(%esp), %ebp /* list of pages */
+	movl	PTR(VA_CONTROL_PAGE)(%ebp), %edi
+	movl	%esp, ESP(%edi)
+	movl	%cr0, %eax
+	movl	%eax, CR0(%edi)
+	movl	%cr3, %eax
+	movl	%eax, CR3(%edi)
+	movl	%cr4, %eax
+	movl	%eax, CR4(%edi)
 
 #ifdef CONFIG_X86_PAE
 	/* map the control page at its virtual address */
@@ -138,15 +171,25 @@ relocate_kernel:
 
 relocate_new_kernel:
 	/* read the arguments and say goodbye to the stack */
-	movl  4(%esp), %ebx /* page_list */
-	movl  8(%esp), %ebp /* list of pages */
-	movl  12(%esp), %edx /* start address */
-	movl  16(%esp), %ecx /* cpu_has_pae */
+	movl  20+4(%esp), %ebx /* page_list */
+	movl  20+8(%esp), %ebp /* list of pages */
+	movl  20+12(%esp), %edx /* start address */
+	movl  20+16(%esp), %ecx /* cpu_has_pae */
+	movl  20+20(%esp), %esi /* preserve_context */
 
 	/* zero out flags, and disable interrupts */
 	pushl $0
 	popfl
 
+	/* save some information for jumping back */
+	movl	PTR(VA_CONTROL_PAGE)(%ebp), %edi
+	movl	%edi, CP_VA_CONTROL_PAGE(%edi)
+	movl	PTR(PA_PGD)(%ebp), %eax
+	movl	%eax, CP_PA_PGD(%edi)
+	movl	PTR(PA_SWAP_PAGE)(%ebp), %eax
+	movl	%eax, CP_PA_SWAP_PAGE(%edi)
+	movl	%ebx, CP_PA_BACKUP_PAGES_MAP(%edi)
+
 	/* get physical address of control page now */
 	/* this is impossible after page table switch */
 	movl	PTR(PA_CONTROL_PAGE)(%ebp), %edi
@@ -197,8 +240,90 @@ identity_mapped:
 	xorl	%eax, %eax
 	movl	%eax, %cr3
 
+	movl	CP_PA_SWAP_PAGE(%edi), %eax
+	pushl	%eax
+	pushl	%ebx
+	call	swap_pages
+	addl	$8, %esp
+
+	/* To be certain of avoiding problems with self-modifying code
+	 * I need to execute a serializing instruction here.
+	 * So I flush the TLB, it's handy, and not processor dependent.
+	 */
+	xorl	%eax, %eax
+	movl	%eax, %cr3
+
+	/* set all of the registers to known values */
+	/* leave %esp alone */
+
+	testl	%esi, %esi
+	jnz 1f
+	xorl	%edi, %edi
+	xorl	%eax, %eax
+	xorl	%ebx, %ebx
+	xorl	%ecx, %ecx
+	xorl	%edx, %edx
+	xorl	%esi, %esi
+	xorl	%ebp, %ebp
+	ret
+1:
+	popl	%edx
+	movl	CP_PA_SWAP_PAGE(%edi), %esp
+	addl	$PAGE_SIZE, %esp
+2:
+	call	*%edx
+
+	/* get the re-entry point of the peer system */
+	movl	0(%esp), %ebp
+	call	1f
+1:
+	popl	%ebx
+	subl	$(1b - relocate_kernel), %ebx
+	movl	CP_VA_CONTROL_PAGE(%ebx), %edi
+	lea	PAGE_SIZE(%ebx), %esp
+	movl	CP_PA_SWAP_PAGE(%ebx), %eax
+	movl	CP_PA_BACKUP_PAGES_MAP(%ebx), %edx
+	pushl	%eax
+	pushl	%edx
+	call	swap_pages
+	addl	$8, %esp
+	movl	CP_PA_PGD(%ebx), %eax
+	movl	%eax, %cr3
+	movl	%cr0, %eax
+	orl	$(1<<31), %eax
+	movl	%eax, %cr0
+	lea	PAGE_SIZE(%edi), %esp
+	movl	%edi, %eax
+	addl	$(virtual_mapped - relocate_kernel), %eax
+	pushl	%eax
+	ret
+
+virtual_mapped:
+	movl	CR4(%edi), %eax
+	movl	%eax, %cr4
+	movl	CR3(%edi), %eax
+	movl	%eax, %cr3
+	movl	CR0(%edi), %eax
+	movl	%eax, %cr0
+	movl	ESP(%edi), %esp
+	movl	%ebp, %eax
+
+	popf
+	popl	%ebp
+	popl	%edi
+	popl	%esi
+	popl	%ebx
+	ret
+
 	/* Do the copies */
-	movl	%ebx, %ecx
+swap_pages:
+	movl	8(%esp), %edx
+	movl	4(%esp), %ecx
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	movl	%ecx, %ebx
 	jmp	1f
 
 0:	/* top, read another word from the indirection page */
@@ -226,27 +351,28 @@ identity_mapped:
 	movl	%ecx,	%esi /* For every source page do a copy */
 	andl	$0xfffff000, %esi
 
+	movl	%edi, %eax
+	movl	%esi, %ebp
+
+	movl	%edx, %edi
 	movl	$1024, %ecx
 	rep ; movsl
-	jmp	0b
 
-3:
-
-	/* To be certain of avoiding problems with self-modifying code
-	 * I need to execute a serializing instruction here.
-	 * So I flush the TLB, it's handy, and not processor dependent.
-	 */
-	xorl	%eax, %eax
-	movl	%eax, %cr3
+	movl	%ebp, %edi
+	movl	%eax, %esi
+	movl	$1024, %ecx
+	rep ; movsl
 
-	/* set all of the registers to known values */
-	/* leave %esp alone */
+	movl	%eax, %edi
+	movl	%edx, %esi
+	movl	$1024, %ecx
+	rep ; movsl
 
-	xorl	%eax, %eax
-	xorl	%ebx, %ebx
-	xorl	%ecx, %ecx
-	xorl	%edx, %edx
-	xorl	%esi, %esi
-	xorl	%edi, %edi
-	xorl	%ebp, %ebp
-	ret
+	lea	PAGE_SIZE(%ebp), %esi
+	jmp	0b
+3:
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	ret
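[Note on the relocate_kernel_32.S hunks: the DATA() offsets carve the jump-back state out of the second quarter of the control page (PAGE_SIZE/2 onward), which is exactly why machine_kexec_32.c above copies only PAGE_SIZE/2 of code. The same layout, restated as a C struct purely for reference (a sketch; the kernel keeps these as raw offsets, not a struct):

	/* Layout at control_page + PAGE_SIZE/2, per the DATA() defines. */
	struct kexec_jump_state {
		u32 esp;			/* DATA(0x00) */
		u32 cr0;			/* DATA(0x04) */
		u32 cr3;			/* DATA(0x08) */
		u32 cr4;			/* DATA(0x0c) */
		u32 va_control_page;		/* DATA(0x10) */
		u32 pa_pgd;			/* DATA(0x14) */
		u32 pa_swap_page;		/* DATA(0x18) */
		u32 pa_backup_pages_map;	/* DATA(0x1c) */
	};
]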
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b0e4ddca6c18..2fa231923cf7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1814,6 +1814,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
@@ -1870,6 +1871,12 @@ void kvm_enable_tdp(void)
 }
 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
 
+void kvm_disable_tdp(void)
+{
+	tdp_enabled = false;
+}
+EXPORT_SYMBOL_GPL(kvm_disable_tdp);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_page *sp;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b756e876dce3..e2ee264740c7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -453,7 +453,8 @@ static __init int svm_hardware_setup(void)
 	if (npt_enabled) {
 		printk(KERN_INFO "kvm: Nested Paging enabled\n");
 		kvm_enable_tdp();
-	}
+	} else
+		kvm_disable_tdp();
 
 	return 0;
 
@@ -1007,10 +1008,13 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	struct kvm *kvm = svm->vcpu.kvm;
 	u64 fault_address;
 	u32 error_code;
+	bool event_injection = false;
 
 	if (!irqchip_in_kernel(kvm) &&
-	    is_external_interrupt(exit_int_info))
+	    is_external_interrupt(exit_int_info)) {
+		event_injection = true;
 		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+	}
 
 	fault_address = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
@@ -1024,6 +1028,8 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 			    (u32)fault_address, (u32)(fault_address >> 32),
 			    handler);
 
+	if (event_injection)
+		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0cac63701719..2a69773e3b26 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2298,6 +2298,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
 		KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
 			    (u32)((u64)cr2 >> 32), handler);
+		if (vect_info & VECTORING_INFO_VALID_MASK)
+			kvm_mmu_unprotect_page_virt(vcpu, cr2);
 		return kvm_mmu_page_fault(vcpu, cr2, error_code);
 	}
 
@@ -3116,15 +3118,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 		return ERR_PTR(-ENOMEM);
 
 	allocate_vpid(vmx);
-	if (id == 0 && vm_need_ept()) {
-		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
-			VMX_EPT_WRITABLE_MASK |
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
-		kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
-			VMX_EPT_FAKE_DIRTY_MASK, 0ull,
-			VMX_EPT_EXECUTABLE_MASK);
-		kvm_enable_tdp();
-	}
 
 	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
 	if (err)
@@ -3303,8 +3296,17 @@ static int __init vmx_init(void)
 	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
 	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
 
-	if (cpu_has_vmx_ept())
+	if (vm_need_ept()) {
 		bypass_guest_pf = 0;
+		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
+			VMX_EPT_WRITABLE_MASK |
+			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+		kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
+			VMX_EPT_FAKE_DIRTY_MASK, 0ull,
+			VMX_EPT_EXECUTABLE_MASK);
+		kvm_enable_tdp();
+	} else
+		kvm_disable_tdp();
 
 	if (bypass_guest_pf)
 		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
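[Note on the svm.c and vmx.c page-fault hunks: both apply the same rule, namely that a page fault taken while an event was being delivered (vectoring) unprotects the faulting page's shadow before the fault is handled. My reading of the rationale (an inference, not stated in the diff): event delivery writes to guest memory, e.g. pushing an exception frame onto a stack that lands in a write-protected shadowed page, and without the unprotect the guest could re-fault on the same address indefinitely. The shared idiom, as a sketch:

	/* fault_during_injection: SVM derives it from exit_int_info,
	 * VMX from the VECTORING_INFO_VALID_MASK bit of vect_info. */
	if (fault_during_injection)
		kvm_mmu_unprotect_page_virt(vcpu, fault_address);
	return kvm_mmu_page_fault(vcpu, fault_address, error_code);
]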
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9f1cdb011cff..5916191420c7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3184,6 +3184,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
 	kvm_desct->base |= seg_desc->base2 << 24;
 	kvm_desct->limit = seg_desc->limit0;
 	kvm_desct->limit |= seg_desc->limit << 16;
+	if (seg_desc->g) {
+		kvm_desct->limit <<= 12;
+		kvm_desct->limit |= 0xfff;
+	}
 	kvm_desct->selector = selector;
 	kvm_desct->type = seg_desc->type;
 	kvm_desct->present = seg_desc->p;
@@ -3223,6 +3227,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
+	gpa_t gpa;
 	struct descriptor_table dtable;
 	u16 index = selector >> 3;
 
@@ -3232,13 +3237,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
 		return 1;
 	}
-	return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+	gpa += index * 8;
+	return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 /* allowed just for 8 bytes segments */
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
+	gpa_t gpa;
 	struct descriptor_table dtable;
 	u16 index = selector >> 3;
 
@@ -3246,7 +3254,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
 	if (dtable.limit < index * 8 + 7)
 		return 1;
-	return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+	gpa += index * 8;
+	return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
@@ -3258,55 +3268,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
 	base_addr |= (seg_desc->base1 << 16);
 	base_addr |= (seg_desc->base2 << 24);
 
-	return base_addr;
-}
-
-static int load_tss_segment32(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_32 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_read_guest(vcpu->kvm, base_addr, tss,
-			      sizeof(struct tss_segment_32));
-}
-
-static int save_tss_segment32(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_32 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_write_guest(vcpu->kvm, base_addr, tss,
-			       sizeof(struct tss_segment_32));
-}
-
-static int load_tss_segment16(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_16 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_read_guest(vcpu->kvm, base_addr, tss,
-			      sizeof(struct tss_segment_16));
-}
-
-static int save_tss_segment16(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_16 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_write_guest(vcpu->kvm, base_addr, tss,
-			       sizeof(struct tss_segment_16));
+	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
 }
 
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -3466,20 +3428,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 }
 
 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
-			      struct desc_struct *cseg_desc,
+			      u32 old_tss_base,
 			      struct desc_struct *nseg_desc)
 {
 	struct tss_segment_16 tss_segment_16;
 	int ret = 0;
 
-	if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+			   sizeof tss_segment_16))
 		goto out;
 
 	save_state_to_tss16(vcpu, &tss_segment_16);
-	save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
 
-	if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+			    sizeof tss_segment_16))
 		goto out;
+
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+			   &tss_segment_16, sizeof tss_segment_16))
+		goto out;
+
 	if (load_state_from_tss16(vcpu, &tss_segment_16))
 		goto out;
 
@@ -3489,20 +3457,26 @@ out:
 }
 
 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
-			      struct desc_struct *cseg_desc,
+			      u32 old_tss_base,
 			      struct desc_struct *nseg_desc)
 {
 	struct tss_segment_32 tss_segment_32;
 	int ret = 0;
 
-	if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+			   sizeof tss_segment_32))
 		goto out;
 
 	save_state_to_tss32(vcpu, &tss_segment_32);
-	save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
 
-	if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+			    sizeof tss_segment_32))
+		goto out;
+
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+			   &tss_segment_32, sizeof tss_segment_32))
 		goto out;
+
 	if (load_state_from_tss32(vcpu, &tss_segment_32))
 		goto out;
 
@@ -3517,16 +3491,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	struct desc_struct cseg_desc;
 	struct desc_struct nseg_desc;
 	int ret = 0;
+	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
 
-	kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
 
+	/* FIXME: Handle errors. Failure to read either TSS or their
+	 * descriptors should generate a pagefault.
+	 */
 	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
 		goto out;
 
-	if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
 		goto out;
 
-
 	if (reason != TASK_SWITCH_IRET) {
 		int cpl;
 
@@ -3544,8 +3522,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 
 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
 		cseg_desc.type &= ~(1 << 1); //clear the B flag
-		save_guest_segment_descriptor(vcpu, tr_seg.selector,
-					      &cseg_desc);
+		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
 	}
 
 	if (reason == TASK_SWITCH_IRET) {
@@ -3557,10 +3534,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	kvm_x86_ops->cache_regs(vcpu);
 
 	if (nseg_desc.type & 8)
-		ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
-					 &nseg_desc);
+		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
+					 &nseg_desc);
 	else
-		ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
-					 &nseg_desc);
+		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
+					 &nseg_desc);
 
 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
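[Note on the x86.c hunks: the seg_desct_to_kvm_desct() change fixes limit decoding. A descriptor's 20-bit limit counts 4 KiB pages when the granularity bit is set, so it must be scaled to bytes; the task-switch paths also now translate guest-virtual descriptor-table and TSS bases through vcpu->arch.mmu.gva_to_gpa() before kvm_read_guest()/kvm_write_guest(), which operate on guest-physical addresses. A worked example for the granularity scaling (my numbers, not from the patch):

	/* g=1, raw 20-bit limit 0xfffff -> byte limit 4 GiB - 1 */
	u32 limit = 0xfffff;
	limit = (limit << 12) | 0xfff;	/* == 0xffffffff */
]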
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 1fbb844c3d7a..2977ea37791f 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,6 +1,7 @@
 obj-y	:=  init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
 	    pat.o pgtable.o
 
+obj-$(CONFIG_HAVE_GET_USER_PAGES_FAST) += gup.o
 obj-$(CONFIG_X86_32)		+= pgtable_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
new file mode 100644
index 000000000000..3085f25b4355
--- /dev/null
+++ b/arch/x86/mm/gup.c
@@ -0,0 +1,295 @@
+/*
+ * Lockless get_user_pages_fast for x86
+ *
+ * Copyright (C) 2008 Nick Piggin
+ * Copyright (C) 2008 Novell Inc.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/highmem.h>
+
+#include <asm/pgtable.h>
+
+static inline pte_t gup_get_pte(pte_t *ptep)
+{
+#ifndef CONFIG_X86_PAE
+	return *ptep;
+#else
+	/*
+	 * With get_user_pages_fast, we walk down the pagetables without taking
+	 * any locks. For this we would like to load the pointers atomically,
+	 * but that is not possible (without expensive cmpxchg8b) on PAE. What
+	 * we do have is the guarantee that a pte will only either go from not
+	 * present to present, or present to not present or both -- it will not
+	 * switch to a completely different present page without a TLB flush in
+	 * between; something that we are blocking by holding interrupts off.
+	 *
+	 * Setting ptes from not present to present goes:
+	 * ptep->pte_high = h;
+	 * smp_wmb();
+	 * ptep->pte_low = l;
+	 *
+	 * And present to not present goes:
+	 * ptep->pte_low = 0;
+	 * smp_wmb();
+	 * ptep->pte_high = 0;
+	 *
+	 * We must ensure here that the load of pte_low sees l iff pte_high
+	 * sees h. We load pte_high *after* loading pte_low, which ensures we
+	 * don't see an older value of pte_high. *Then* we recheck pte_low,
+	 * which ensures that we haven't picked up a changed pte high. We might
+	 * have got rubbish values from pte_low and pte_high, but we are
+	 * guaranteed that pte_low will not have the present bit set *unless*
+	 * it is 'l'. And get_user_pages_fast only operates on present ptes, so
+	 * we're safe.
+	 *
+	 * gup_get_pte should not be used or copied outside gup.c without being
+	 * very careful -- it does not atomically load the pte or anything that
+	 * is likely to be useful for you.
+	 */
+	pte_t pte;
+
+retry:
+	pte.pte_low = ptep->pte_low;
+	smp_rmb();
+	pte.pte_high = ptep->pte_high;
+	smp_rmb();
+	if (unlikely(pte.pte_low != ptep->pte_low))
+		goto retry;
+
+	return pte;
+#endif
+}
+
+/*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+ * register pressure.
+ */
+static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+		unsigned long end, int write, struct page **pages, int *nr)
+{
+	unsigned long mask;
+	pte_t *ptep;
+
+	mask = _PAGE_PRESENT|_PAGE_USER;
+	if (write)
+		mask |= _PAGE_RW;
+
+	ptep = pte_offset_map(&pmd, addr);
+	do {
+		pte_t pte = gup_get_pte(ptep);
+		struct page *page;
+
+		if ((pte_val(pte) & (mask | _PAGE_SPECIAL)) != mask) {
+			pte_unmap(ptep);
+			return 0;
+		}
+		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+		page = pte_page(pte);
+		get_page(page);
+		pages[*nr] = page;
+		(*nr)++;
+
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
+	pte_unmap(ptep - 1);
+
+	return 1;
+}
+
+static inline void get_head_page_multiple(struct page *page, int nr)
+{
+	VM_BUG_ON(page != compound_head(page));
+	VM_BUG_ON(page_count(page) == 0);
+	atomic_add(nr, &page->_count);
+}
+
+static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
+		unsigned long end, int write, struct page **pages, int *nr)
+{
+	unsigned long mask;
+	pte_t pte = *(pte_t *)&pmd;
+	struct page *head, *page;
+	int refs;
+
+	mask = _PAGE_PRESENT|_PAGE_USER;
+	if (write)
+		mask |= _PAGE_RW;
+	if ((pte_val(pte) & mask) != mask)
+		return 0;
+	/* hugepages are never "special" */
+	VM_BUG_ON(pte_val(pte) & _PAGE_SPECIAL);
+	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+	refs = 0;
+	head = pte_page(pte);
+	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+	get_head_page_multiple(head, refs);
+
+	return 1;
+}
+
+static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+		int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pmd_t *pmdp;
+
+	pmdp = pmd_offset(&pud, addr);
+	do {
+		pmd_t pmd = *pmdp;
+
+		next = pmd_addr_end(addr, end);
+		if (pmd_none(pmd))
+			return 0;
+		if (unlikely(pmd_large(pmd))) {
+			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
+				return 0;
+		} else {
+			if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+				return 0;
+		}
+	} while (pmdp++, addr = next, addr != end);
+
+	return 1;
+}
+
+static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
+		unsigned long end, int write, struct page **pages, int *nr)
+{
+	unsigned long mask;
+	pte_t pte = *(pte_t *)&pud;
+	struct page *head, *page;
+	int refs;
+
+	mask = _PAGE_PRESENT|_PAGE_USER;
+	if (write)
+		mask |= _PAGE_RW;
+	if ((pte_val(pte) & mask) != mask)
+		return 0;
+	/* hugepages are never "special" */
+	VM_BUG_ON(pte_val(pte) & _PAGE_SPECIAL);
+	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+	refs = 0;
+	head = pte_page(pte);
+	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+	get_head_page_multiple(head, refs);
+
+	return 1;
+}
+
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+			int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pud_t *pudp;
+
+	pudp = pud_offset(&pgd, addr);
+	do {
+		pud_t pud = *pudp;
+
+		next = pud_addr_end(addr, end);
+		if (pud_none(pud))
+			return 0;
+		if (unlikely(pud_large(pud))) {
+			if (!gup_huge_pud(pud, addr, next, write, pages, nr))
+				return 0;
+		} else {
+			if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+				return 0;
+		}
+	} while (pudp++, addr = next, addr != end);
+
+	return 1;
+}
+
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long end = start + (nr_pages << PAGE_SHIFT);
+	unsigned long addr = start;
+	unsigned long next;
+	pgd_t *pgdp;
+	int nr = 0;
+
+	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+					start, nr_pages*PAGE_SIZE)))
+		goto slow_irqon;
+
+	/*
+	 * XXX: batch / limit 'nr', to avoid large irq off latency
+	 * needs some instrumenting to determine the common sizes used by
+	 * important workloads (eg. DB2), and whether limiting the batch size
+	 * will decrease performance.
+	 *
+	 * It seems like we're in the clear for the moment. Direct-IO is
+	 * the main guy that batches up lots of get_user_pages, and even
+	 * they are limited to 64-at-a-time which is not so many.
+	 */
+	/*
+	 * This doesn't prevent pagetable teardown, but does prevent
+	 * the pagetables and pages from being freed on x86.
+	 *
+	 * So long as we atomically load page table pointers versus teardown
+	 * (which we do on x86, with the above PAE exception), we can follow the
+	 * address down to the page and take a ref on it.
+	 */
+	local_irq_disable();
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd_t pgd = *pgdp;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			goto slow;
+		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+			goto slow;
+	} while (pgdp++, addr = next, addr != end);
+	local_irq_enable();
+
+	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+	return nr;
+
+	{
+		int ret;
+
+slow:
+		local_irq_enable();
+slow_irqon:
+		/* Try to get the remaining pages with get_user_pages */
+		start += nr << PAGE_SHIFT;
+		pages += nr;
+
+		down_read(&mm->mmap_sem);
+		ret = get_user_pages(current, mm, start,
+			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+		up_read(&mm->mmap_sem);
+
+		/* Have to be a bit careful with return values */
+		if (nr > 0) {
+			if (ret < 0)
+				ret = nr;
+			else
+				ret += nr;
+		}
+
+		return ret;
+	}
+}
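[Note on the new gup.c: a usage sketch for the API (hypothetical caller, not part of the patch). The fast path takes no locks, relying on disabled interrupts to block pagetable teardown, and falls back internally to get_user_pages() for anything it cannot pin:

	/* Pin a user buffer for direct I/O, then release the pages. */
	struct page *pages[64];
	int i, nr;

	nr = get_user_pages_fast(user_addr, 64, 1 /* write */, pages);
	/* nr may be < 64 if the slow path failed partway; release
	 * whatever was pinned once the I/O completes: */
	for (i = 0; i < nr; i++)
		put_page(pages[i]);
]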
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ec37121f6709..129618ca0ea2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -86,43 +86,6 @@ early_param("gbpages", parse_direct_gbpages_on);
  * around without checking the pgd every time.
  */
 
-void show_mem(void)
-{
-	long i, total = 0, reserved = 0;
-	long shared = 0, cached = 0;
-	struct page *page;
-	pg_data_t *pgdat;
-
-	printk(KERN_INFO "Mem-info:\n");
-	show_free_areas();
-	for_each_online_pgdat(pgdat) {
-		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-			/*
-			 * This loop can take a while with 256 GB and
-			 * 4k pages so defer the NMI watchdog:
-			 */
-			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
-				touch_nmi_watchdog();
-
-			if (!pfn_valid(pgdat->node_start_pfn + i))
-				continue;
-
-			page = pfn_to_page(pgdat->node_start_pfn + i);
-			total++;
-			if (PageReserved(page))
-				reserved++;
-			else if (PageSwapCache(page))
-				cached++;
-			else if (page_count(page))
-				shared += page_count(page) - 1;
-		}
-	}
-	printk(KERN_INFO "%lu pages of RAM\n", total);
-	printk(KERN_INFO "%lu reserved pages\n", reserved);
-	printk(KERN_INFO "%lu pages shared\n", shared);
-	printk(KERN_INFO "%lu pages swap cached\n", cached);
-}
-
 int after_bootmem;
 
 static __init void *spp_getpage(void)
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index b4becbf8c570..cab0abbd1ebe 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -20,53 +20,6 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-void show_mem(void)
-{
-	int total = 0, reserved = 0;
-	int shared = 0, cached = 0;
-	int highmem = 0;
-	struct page *page;
-	pg_data_t *pgdat;
-	unsigned long i;
-	unsigned long flags;
-
-	printk(KERN_INFO "Mem-info:\n");
-	show_free_areas();
-	for_each_online_pgdat(pgdat) {
-		pgdat_resize_lock(pgdat, &flags);
-		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
-				touch_nmi_watchdog();
-			page = pgdat_page_nr(pgdat, i);
-			total++;
-			if (PageHighMem(page))
-				highmem++;
-			if (PageReserved(page))
-				reserved++;
-			else if (PageSwapCache(page))
-				cached++;
-			else if (page_count(page))
-				shared += page_count(page) - 1;
-		}
-		pgdat_resize_unlock(pgdat, &flags);
-	}
-	printk(KERN_INFO "%d pages of RAM\n", total);
-	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
-	printk(KERN_INFO "%d reserved pages\n", reserved);
-	printk(KERN_INFO "%d pages shared\n", shared);
-	printk(KERN_INFO "%d pages swap cached\n", cached);
-
-	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
-	printk(KERN_INFO "%lu pages writeback\n",
-	       global_page_state(NR_WRITEBACK));
-	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
-	printk(KERN_INFO "%lu pages slab\n",
-	       global_page_state(NR_SLAB_RECLAIMABLE) +
-	       global_page_state(NR_SLAB_UNRECLAIMABLE));
-	printk(KERN_INFO "%lu pages pagetables\n",
-	       global_page_state(NR_PAGETABLE));
-}
-
 /*
  * Associate a virtual page frame with a given physical page frame
  * and protection flags for that frame.