author    Ingo Molnar <mingo@elte.hu>  2008-11-19 03:44:37 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-11-19 03:44:37 -0500
commit    3ac3ba0b396fd99550e08034b0e4c27fdf39c252 (patch)
tree      f9f69fac41d66540a37a33808714d055d702328f /arch/x86
parent    934352f214b3251eb0793c1209d346595a661d80 (diff)
parent    7f0f598a0069d1ab072375965a4b69137233169c (diff)
Merge branch 'linus' into sched/core
Conflicts:
	kernel/Makefile
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                   |  2
-rw-r--r--  arch/x86/include/asm/acpi.h        |  1
-rw-r--r--  arch/x86/include/asm/iommu.h       |  1
-rw-r--r--  arch/x86/include/asm/msr.h         |  2
-rw-r--r--  arch/x86/include/asm/tsc.h         |  8
-rw-r--r--  arch/x86/kernel/acpi/boot.c        |  1
-rw-r--r--  arch/x86/kernel/early-quirks.c     | 18
-rw-r--r--  arch/x86/kernel/hpet.c             |  4
-rw-r--r--  arch/x86/kernel/kvmclock.c         |  2
-rw-r--r--  arch/x86/kernel/tsc.c              |  2
-rw-r--r--  arch/x86/kvm/Kconfig               |  2
-rw-r--r--  arch/x86/kvm/i8254.c               |  4
-rw-r--r--  arch/x86/kvm/mmu.c                 |  2
-rw-r--r--  arch/x86/kvm/vmx.c                 |  3
-rw-r--r--  arch/x86/kvm/vmx.h                 |  1
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c  |  9
-rw-r--r--  arch/x86/xen/enlighten.c           |  5
-rw-r--r--  arch/x86/xen/mmu.c                 | 13
18 files changed, 36 insertions(+), 44 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 74db682ec1ce..f232d5a98491 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -957,7 +957,7 @@ config ARCH_PHYS_ADDR_T_64BIT
 config NUMA
 	bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
 	depends on SMP
-	depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
+	depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN)
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
 	help
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 8d676d8ecde9..9830681446ad 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -113,7 +113,6 @@ static inline void acpi_disable_pci(void)
 	acpi_pci_disabled = 1;
 	acpi_noirq_set();
 }
-extern int acpi_irq_balance_set(char *str);
 
 /* routines for saving/restoring kernel state */
 extern int acpi_save_state_mem(void);
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index e4a552d44465..0b500c5b6446 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,7 +6,6 @@ extern void no_iommu_init(void);
 extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
-extern int dmar_disabled;
 
 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
 
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 46be2fa7ac26..c2a812ebde89 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -108,9 +108,7 @@ static __always_inline unsigned long long __native_read_tsc(void)
 {
 	DECLARE_ARGS(val, low, high);
 
-	rdtsc_barrier();
 	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
-	rdtsc_barrier();
 
 	return EAX_EDX_VAL(val, low, high);
 }
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 38ae163cc91b..9cd83a8e40d5 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -34,6 +34,8 @@ static inline cycles_t get_cycles(void)
 
 static __always_inline cycles_t vget_cycles(void)
 {
+	cycles_t cycles;
+
 	/*
 	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
 	 * access boot_cpu_data (which is not VDSO-safe):
@@ -42,7 +44,11 @@ static __always_inline cycles_t vget_cycles(void)
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	return (cycles_t)__native_read_tsc();
+	rdtsc_barrier();
+	cycles = (cycles_t)__native_read_tsc();
+	rdtsc_barrier();
+
+	return cycles;
 }
 
 extern void tsc_init(void);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8c1f76abae9e..4c51a2f8fd31 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1343,7 +1343,6 @@ static void __init acpi_process_madt(void)
 		error = acpi_parse_madt_ioapic_entries();
 		if (!error) {
 			acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
-			acpi_irq_balance_set(NULL);
 			acpi_ioapic = 1;
 
 			smp_found_config = 1;
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 3ce029ffaa55..1b894b72c0f5 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -188,20 +188,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif
 
-#ifdef CONFIG_DMAR
-static void __init intel_g33_dmar(int num, int slot, int func)
-{
-	struct acpi_table_header *dmar_tbl;
-	acpi_status status;
-
-	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
-	if (ACPI_SUCCESS(status)) {
-		printk(KERN_INFO "BIOS BUG: DMAR advertised on Intel G31/G33 chipset -- ignoring\n");
-		dmar_disabled = 1;
-	}
-}
-#endif
-
 #define QFLAG_APPLY_ONCE	0x1
 #define QFLAG_APPLIED		0x2
 #define QFLAG_DONE		(QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -225,10 +211,6 @@ static struct chipset early_qrk[] __initdata = {
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
 	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
-#ifdef CONFIG_DMAR
-	{ PCI_VENDOR_ID_INTEL, 0x29c0,
-	  PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, intel_g33_dmar },
-#endif
 	{}
 };
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 77017e834cf7..067d8de913f6 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -322,7 +322,7 @@ static int hpet_next_event(unsigned long delta,
 	 * what we wrote hit the chip before we compare it to the
 	 * counter.
 	 */
-	WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
+	WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
 
 	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
@@ -445,7 +445,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 {
 
 	if (request_irq(dev->irq, hpet_interrupt_handler,
-			IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
+			IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
 		return -1;
 
 	disable_irq(dev->irq);
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 774ac4991568..1c9cc431ea4f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static void kvm_setup_secondary_clock(void)
+static void __devinit kvm_setup_secondary_clock(void)
 {
 	/*
 	 * Now that the first cpu already had this clocksource initialized,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 2ef80e301925..424093b157d3 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -55,7 +55,7 @@ u64 native_sched_clock(void)
 	rdtscll(this_offset);
 
 	/* return the value in ns */
-	return cycles_2_ns(this_offset);
+	return __cycles_2_ns(this_offset);
 }
 
 /* We need to define a real function for sched_clock, to override the
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ce3251ce5504..b81125f0bdee 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -20,6 +20,8 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM
+	# for device assignment:
+	depends on PCI
 	select PREEMPT_NOTIFIERS
 	select MMU_NOTIFIER
 	select ANON_INODES
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 8772dc946823..59ebd37ad79e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -548,8 +548,10 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 	mutex_lock(&kvm->lock);
 	pit->irq_source_id = kvm_request_irq_source_id(kvm);
 	mutex_unlock(&kvm->lock);
-	if (pit->irq_source_id < 0)
+	if (pit->irq_source_id < 0) {
+		kfree(pit);
 		return NULL;
+	}
 
 	mutex_init(&pit->pit_state.lock);
 	mutex_lock(&pit->pit_state.lock);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a5e64881d9b..f1983d9477cd 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -314,7 +314,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
-				   rmap_desc_cache, 1);
+				   rmap_desc_cache, 4);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2643b430d83a..d06b4dc0e2ea 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3564,7 +3564,8 @@ static int __init vmx_init(void)
 		bypass_guest_pf = 0;
 		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
 			VMX_EPT_WRITABLE_MASK |
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
+			VMX_EPT_IGMT_BIT);
 		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
 			VMX_EPT_EXECUTABLE_MASK);
 		kvm_enable_tdp();
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index 3e010d21fdd7..ec5edc339da6 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -352,6 +352,7 @@ enum vmcs_field {
 #define VMX_EPT_READABLE_MASK			0x1ull
 #define VMX_EPT_WRITABLE_MASK			0x2ull
 #define VMX_EPT_EXECUTABLE_MASK			0x4ull
+#define VMX_EPT_IGMT_BIT			(1ull << 6)
 
 #define VMX_EPT_IDENTITY_PAGETABLE_ADDR		0xfffbc000ul
 
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 0620d6d45f7d..3f1b81a83e2e 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -27,8 +27,7 @@ static int num_counters = 2;
 static int counter_width = 32;
 
 #define CTR_IS_RESERVED(msrs, c)	(msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1))))
+#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
 
 #define CTRL_IS_RESERVED(msrs, c)	(msrs->controls[(c)].addr ? 1 : 0)
 #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
@@ -124,14 +123,14 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 static int ppro_check_ctrs(struct pt_regs * const regs,
 			   struct op_msrs const * const msrs)
 {
-	unsigned int low, high;
+	u64 val;
 	int i;
 
 	for (i = 0 ; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
-		CTR_READ(low, high, msrs, i);
-		if (CTR_OVERFLOWED(low)) {
+		rdmsrl(msrs->counters[i].addr, val);
+		if (CTR_OVERFLOWED(val)) {
 			oprofile_add_sample(regs, i);
 			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 		}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b61534c7a4c4..5e4686d70f62 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -863,15 +863,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
 	if (PagePinned(virt_to_page(mm->pgd))) {
 		SetPagePinned(page);
 
+		vm_unmap_aliases();
 		if (!PageHighMem(page)) {
 			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
 				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-		} else
+		} else {
 			/* make sure there are no stray mappings of
 			   this page */
 			kmap_flush_unused();
-			vm_unmap_aliases();
+		}
 	}
 }
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index aba77b2b7d18..688936044dc9 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -850,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+	vm_unmap_aliases();
+
 	xen_mc_batch();
 
 	if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
-		/* re-enable interrupts for kmap_flush_unused */
+		/* re-enable interrupts for flushing */
 		xen_mc_issue(0);
+
 		kmap_flush_unused();
-		vm_unmap_aliases();
+
 		xen_mc_batch();
 	}
 
@@ -874,7 +877,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 #else /* CONFIG_X86_32 */
 #ifdef CONFIG_X86_PAE
 	/* Need to make sure unshared kernel PMD is pinnable */
-	xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
 		     PT_PMD);
 #endif
 	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
@@ -991,7 +994,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
 	/* Need to make sure unshared kernel PMD is unpinned */
-	xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
 		       PT_PMD);
 #endif
 