Diffstat (limited to 'arch')
-rw-r--r--  arch/blackfin/kernel/dma-mapping.c  |  1
-rw-r--r--  arch/m68k/kernel/dma.c              |  2
-rw-r--r--  arch/sparc64/kernel/iommu_common.c  |  2
-rw-r--r--  arch/sparc64/kernel/ldc.c           |  2
-rw-r--r--  arch/sparc64/kernel/sys_sparc.c     |  2
-rw-r--r--  arch/um/drivers/ubd_kern.c          |  2
-rw-r--r--  arch/x86/kernel/crash.c             |  6
-rw-r--r--  arch/x86/kernel/pci-gart_64.c       |  3
-rw-r--r--  arch/x86/lguest/boot.c              | 54
-rw-r--r--  arch/x86/lguest/i386_head.S         |  8
-rw-r--r--  arch/x86/mm/fault_32.c              |  2
11 files changed, 45 insertions, 39 deletions
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index a16cb03c5291..d6b61d56b656 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -35,6 +35,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
+#include <linux/scatterlist.h>
 #include <asm/cacheflush.h>
 #include <asm/bfin-global.h>
 
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index ef490e1ce600..6f8c080dd9f9 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -9,10 +9,10 @@
 #include <linux/dma-mapping.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
+#include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
 
 #include <asm/pgalloc.h>
-#include <asm/scatterlist.h>
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
 			 dma_addr_t *handle, gfp_t flag)
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
index b70324e0d83d..efd5dff85f60 100644
--- a/arch/sparc64/kernel/iommu_common.c
+++ b/arch/sparc64/kernel/iommu_common.c
@@ -234,7 +234,7 @@ unsigned long prepare_sg(struct scatterlist *sg, int nents)
 			dma_sg->dma_length = dent_len;
 
 			if (dma_sg != sg) {
-				dma_sg = next_sg(dma_sg);
+				dma_sg = sg_next(dma_sg);
 				dma_sg->dma_length = 0;
 			}
 
diff --git a/arch/sparc64/kernel/ldc.c b/arch/sparc64/kernel/ldc.c
index c8313cb60f0a..217478a94128 100644
--- a/arch/sparc64/kernel/ldc.c
+++ b/arch/sparc64/kernel/ldc.c
@@ -2121,7 +2121,7 @@ int ldc_map_sg(struct ldc_channel *lp,
 	state.nc = 0;
 
 	for (i = 0; i < num_sg; i++)
-		fill_cookies(&state, page_to_pfn(sg[i].page) << PAGE_SHIFT,
+		fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
 			     sg[i].offset, sg[i].length);
 
 	return state.nc;
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 560cb1edb1d0..c56573a10eee 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -318,7 +318,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
 
 	if (flags & MAP_FIXED) {
 		/* Ok, don't mess with it. */
-		return get_unmapped_area(NULL, addr, len, pgoff, flags);
+		return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
 	}
 	flags &= ~MAP_SHARED;
 
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 3a8cd3dfb51c..e184b44b1011 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -35,6 +35,7 @@
35#include "linux/genhd.h" 35#include "linux/genhd.h"
36#include "linux/spinlock.h" 36#include "linux/spinlock.h"
37#include "linux/platform_device.h" 37#include "linux/platform_device.h"
38#include "linux/scatterlist.h"
38#include "asm/segment.h" 39#include "asm/segment.h"
39#include "asm/uaccess.h" 40#include "asm/uaccess.h"
40#include "asm/irq.h" 41#include "asm/irq.h"
@@ -704,6 +705,7 @@ static int ubd_add(int n, char **error_out)
 	ubd_dev->size = ROUND_BLOCK(ubd_dev->size);
 
 	INIT_LIST_HEAD(&ubd_dev->restart);
+	sg_init_table(&ubd_dev->sg, MAX_SG);
 
 	err = -ENOMEM;
 	ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock);
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index af0253f94a9a..8bb482ff091b 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -25,7 +25,7 @@
 #include <linux/kdebug.h>
 #include <asm/smp.h>
 
-#ifdef X86_32
+#ifdef CONFIG_X86_32
 #include <mach_ipi.h>
 #else
 #include <asm/mach_apic.h>
@@ -41,7 +41,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 				unsigned long val, void *data)
 {
 	struct pt_regs *regs;
-#ifdef X86_32
+#ifdef CONFIG_X86_32
 	struct pt_regs fixed_regs;
 #endif
 	int cpu;
@@ -60,7 +60,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 		return NOTIFY_STOP;
 	local_irq_disable();
 
-#ifdef X86_32
+#ifdef CONFIG_X86_32
 	if (!user_mode_vm(regs)) {
 		crash_fixup_ss_esp(&fixed_regs, regs);
 		regs = &fixed_regs;
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index c56e9ee64964..ae7e0161ce46 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -338,7 +338,6 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
 
 		BUG_ON(s != start && s->offset);
 		if (s == start) {
-			*sout = *s;
 			sout->dma_address = iommu_bus_base;
 			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
 			sout->dma_length = s->length;
@@ -365,7 +364,7 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems,
 {
 	if (!need) {
 		BUG_ON(nelems != 1);
-		*sout = *start;
+		sout->dma_address = start->dma_address;
 		sout->dma_length = start->length;
 		return 0;
 	}
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index d2235db4085f..a55b0902f9d3 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -56,6 +56,7 @@
 #include <linux/lguest.h>
 #include <linux/lguest_launcher.h>
 #include <linux/virtio_console.h>
+#include <linux/pm.h>
 #include <asm/paravirt.h>
 #include <asm/param.h>
 #include <asm/page.h>
@@ -98,7 +99,7 @@ static cycle_t clock_base;
  * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
  * them as a batch when lazy_mode is eventually turned off. Because hypercalls
  * are reasonably expensive, batching them up makes sense. For example, a
- * large mmap might update dozens of page table entries: that code calls
+ * large munmap might update dozens of page table entries: that code calls
  * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
  * lguest_leave_lazy_mode().
  *
@@ -163,8 +164,8 @@ void async_hcall(unsigned long call,
 /*:*/
 
 /*G:033
- * Here are our first native-instruction replacements: four functions for
- * interrupt control.
+ * After that diversion we return to our first native-instruction
+ * replacements: four functions for interrupt control.
  *
  * The simplest way of implementing these would be to have "turn interrupts
  * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow:
@@ -183,7 +184,7 @@ static unsigned long save_fl(void)
 	return lguest_data.irq_enabled;
 }
 
-/* "restore_flags" just sets the flags back to the value given. */
+/* restore_flags() just sets the flags back to the value given. */
 static void restore_fl(unsigned long flags)
 {
 	lguest_data.irq_enabled = flags;
@@ -356,7 +357,7 @@ static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
  * it. The Host needs to know when the Guest wants to change them, so we have
  * a whole series of functions like read_cr0() and write_cr0().
  *
- * We start with CR0. CR0 allows you to turn on and off all kinds of basic
+ * We start with cr0. cr0 allows you to turn on and off all kinds of basic
  * features, but Linux only really cares about one: the horrifically-named Task
  * Switched (TS) bit at bit 3 (ie. 8)
  *
@@ -371,8 +372,7 @@ static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
 static unsigned long current_cr0, current_cr3;
 static void lguest_write_cr0(unsigned long val)
 {
-	/* 8 == TS bit. */
-	lazy_hcall(LHCALL_TS, val & 8, 0, 0);
+	lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0);
 	current_cr0 = val;
 }
 
@@ -387,10 +387,10 @@ static unsigned long lguest_read_cr0(void)
 static void lguest_clts(void)
 {
 	lazy_hcall(LHCALL_TS, 0, 0, 0);
-	current_cr0 &= ~8U;
+	current_cr0 &= ~X86_CR0_TS;
 }
 
-/* CR2 is the virtual address of the last page fault, which the Guest only ever
+/* cr2 is the virtual address of the last page fault, which the Guest only ever
  * reads. The Host kindly writes this into our "struct lguest_data", so we
  * just read it out of there. */
 static unsigned long lguest_read_cr2(void)
@@ -398,7 +398,7 @@ static unsigned long lguest_read_cr2(void)
 	return lguest_data.cr2;
 }
 
-/* CR3 is the current toplevel pagetable page: the principle is the same as
+/* cr3 is the current toplevel pagetable page: the principle is the same as
  * cr0. Keep a local copy, and tell the Host when it changes. */
 static void lguest_write_cr3(unsigned long cr3)
 {
@@ -411,7 +411,7 @@ static unsigned long lguest_read_cr3(void)
 	return current_cr3;
 }
 
-/* CR4 is used to enable and disable PGE, but we don't care. */
+/* cr4 is used to enable and disable PGE, but we don't care. */
 static unsigned long lguest_read_cr4(void)
 {
 	return 0;
@@ -432,7 +432,7 @@ static void lguest_write_cr4(unsigned long val)
  * maps virtual addresses to physical addresses using "page tables". We could
  * use one huge index of 1 million entries: each address is 4 bytes, so that's
  * 1024 pages just to hold the page tables. But since most virtual addresses
- * are unused, we use a two level index which saves space. The CR3 register
+ * are unused, we use a two level index which saves space. The cr3 register
  * contains the physical address of the top level "page directory" page, which
  * contains physical addresses of up to 1024 second-level pages. Each of these
  * second level pages contains up to 1024 physical addresses of actual pages,
@@ -440,7 +440,7 @@ static void lguest_write_cr4(unsigned long val)
  *
  * Here's a diagram, where arrows indicate physical addresses:
  *
- * CR3 ---> +---------+
+ * cr3 ---> +---------+
  *          |         --------->+---------+
  *          |         |         | PADDR1  |
  *        Top-level   |         | PADDR2  |
@@ -498,8 +498,7 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
  *
  * ... except in early boot when the kernel sets up the initial pagetables,
  * which makes booting astonishingly slow. So we don't even tell the Host
- * anything changed until we've done the first page table switch.
- */
+ * anything changed until we've done the first page table switch. */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
 	*ptep = pteval;
@@ -720,10 +719,10 @@ static void lguest_time_init(void)
 	/* Set up the timer interrupt (0) to go to our simple timer routine */
 	set_irq_handler(0, lguest_time_irq);
 
-	/* Our clock structure look like arch/i386/kernel/tsc.c if we can use
-	 * the TSC, otherwise it's a dumb nanosecond-resolution clock. Either
-	 * way, the "rating" is initialized so high that it's always chosen
-	 * over any other clocksource. */
+	/* Our clock structure looks like arch/x86/kernel/tsc_32.c if we can
+	 * use the TSC, otherwise it's a dumb nanosecond-resolution clock.
+	 * Either way, the "rating" is set so high that it's always chosen over
+	 * any other clocksource. */
 	if (lguest_data.tsc_khz)
 		lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz,
 							 lguest_clock.shift);
@@ -749,7 +748,7 @@ static void lguest_time_init(void)
  * to work. They're pretty simple.
  */
 
-/* The Guest needs to tell the host what stack it expects traps to use. For
+/* The Guest needs to tell the Host what stack it expects traps to use. For
  * native hardware, this is part of the Task State Segment mentioned above in
  * lguest_load_tr_desc(), but to help hypervisors there's this special call.
  *
@@ -850,13 +849,16 @@ static __init char *lguest_memory_setup(void)
850 return "LGUEST"; 849 return "LGUEST";
851} 850}
852 851
853/* Before virtqueues are set up, we use LHCALL_NOTIFY on normal memory to 852/* We will eventually use the virtio console device to produce console output,
854 * produce console output. */ 853 * but before that is set up we use LHCALL_NOTIFY on normal memory to produce
854 * console output. */
855static __init int early_put_chars(u32 vtermno, const char *buf, int count) 855static __init int early_put_chars(u32 vtermno, const char *buf, int count)
856{ 856{
857 char scratch[17]; 857 char scratch[17];
858 unsigned int len = count; 858 unsigned int len = count;
859 859
860 /* We use a nul-terminated string, so we have to make a copy. Icky,
861 * huh? */
860 if (len > sizeof(scratch) - 1) 862 if (len > sizeof(scratch) - 1)
861 len = sizeof(scratch) - 1; 863 len = sizeof(scratch) - 1;
862 scratch[len] = '\0'; 864 scratch[len] = '\0';
@@ -883,7 +885,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
  * Our current solution is to allow the paravirt back end to optionally patch
  * over the indirect calls to replace them with something more efficient. We
  * patch the four most commonly called functions: disable interrupts, enable
- * interrupts, restore interrupts and save interrupts. We usually have 10
+ * interrupts, restore interrupts and save interrupts. We usually have 6 or 10
  * bytes to patch into: the Guest versions of these operations are small enough
  * that we can fit comfortably.
  *
@@ -1015,7 +1017,7 @@ __init void lguest_init(void)
 	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");
 
 	/* The Host uses the top of the Guest's virtual address space for the
-	 * Host<->Guest Switcher, and it tells us how much it needs in
+	 * Host<->Guest Switcher, and it tells us how big that is in
 	 * lguest_data.reserve_mem, set up on the LGUEST_INIT hypercall. */
 	reserve_top_address(lguest_data.reserve_mem);
 
@@ -1065,6 +1067,6 @@ __init void lguest_init(void)
 /*
  * This marks the end of stage II of our journey, The Guest.
  *
- * It is now time for us to explore the nooks and crannies of the three Guest
- * devices and complete our understanding of the Guest in "make Drivers".
+ * It is now time for us to explore the layer of virtual drivers and complete
+ * our understanding of the Guest in "make Drivers".
  */
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index ebc6ac733899..95b6fbcded63 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -6,7 +6,7 @@
 #include <asm/processor-flags.h>
 
 /*G:020 This is where we begin: head.S notes that the boot header's platform
- * type field is "1" (lguest), so calls us here. The boot header is in %esi.
+ * type field is "1" (lguest), so calls us here.
  *
  * WARNING: be very careful here! We're running at addresses equal to physical
  * addesses (around 0), not above PAGE_OFFSET as most code expectes
@@ -17,13 +17,15 @@
  * boot. */
 .section .init.text, "ax", @progbits
 ENTRY(lguest_entry)
-	/* Make initial hypercall now, so we can set up the pagetables. */
+	/* We make the "initialization" hypercall now to tell the Host about
+	 * us, and also find out where it put our page tables. */
 	movl $LHCALL_LGUEST_INIT, %eax
 	movl $lguest_data - __PAGE_OFFSET, %edx
 	int $LGUEST_TRAP_ENTRY
 
 	/* The Host put the toplevel pagetable in lguest_data.pgdir. The movsl
-	 * instruction uses %esi implicitly. */
+	 * instruction uses %esi implicitly as the source for the copy we're
+	 * about to do. */
 	movl lguest_data - __PAGE_OFFSET + LGUEST_DATA_pgdir, %esi
 
 	/* Copy first 32 entries of page directory to __PAGE_OFFSET entries.
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 503dfc05111b..33563ee8eb0f 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -550,7 +550,7 @@ no_context:
 		page &= PAGE_MASK;
 		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
 							 & (PTRS_PER_PMD - 1)];
-		printk(KERN_ALERT "*pde = %016Lx ", page);
+		printk(KERN_CONT "*pde = %016Lx ", page);
 		page &= ~_PAGE_NX;
 	}
 #else