Diffstat (limited to 'include/asm-x86')
-rw-r--r--   include/asm-x86/device.h                  |   3
-rw-r--r--   include/asm-x86/dma-mapping.h             | 121
-rw-r--r--   include/asm-x86/efi.h                     |   2
-rw-r--r--   include/asm-x86/gpio.h                    |   6
-rw-r--r--   include/asm-x86/hw_irq.h                  |  12
-rw-r--r--   include/asm-x86/iommu.h                   |   3
-rw-r--r--   include/asm-x86/irq_vectors.h             |  10
-rw-r--r--   include/asm-x86/kexec.h                   |  18
-rw-r--r--   include/asm-x86/kvm_host.h                |   7
-rw-r--r--   include/asm-x86/mach-summit/mach_apic.h   |   2
-rw-r--r--   include/asm-x86/mmzone_32.h               |   6
-rw-r--r--   include/asm-x86/namei.h                   |  11
-rw-r--r--   include/asm-x86/pgtable.h                 |   9
-rw-r--r--   include/asm-x86/swiotlb.h                 |   2
-rw-r--r--   include/asm-x86/uaccess.h                 |   1
15 files changed, 127 insertions(+), 86 deletions(-)
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index 87a715367a1b..3c034f48fdb0 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -5,6 +5,9 @@ struct dev_archdata {
 #ifdef CONFIG_ACPI
         void            *acpi_handle;
 #endif
+#ifdef CONFIG_X86_64
+        struct dma_mapping_ops *dma_ops;
+#endif
 #ifdef CONFIG_DMAR
         void *iommu; /* hook for IOMMU specific extension */
 #endif
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index c2ddd3d1b883..ad9cd6d49bfc 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -17,7 +17,8 @@ extern int panic_on_overflow;
 extern int force_iommu;
 
 struct dma_mapping_ops {
-        int             (*mapping_error)(dma_addr_t dma_addr);
+        int             (*mapping_error)(struct device *dev,
+                                         dma_addr_t dma_addr);
         void*           (*alloc_coherent)(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp);
         void            (*free_coherent)(struct device *dev, size_t size,
@@ -56,14 +57,32 @@ struct dma_mapping_ops {
         int             is_phys;
 };
 
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-        if (dma_ops->mapping_error)
-                return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+        return dma_ops;
+#else
+        if (unlikely(!dev) || !dev->archdata.dma_ops)
+                return dma_ops;
+        else
+                return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+        return 0;
+#else
+        struct dma_mapping_ops *ops = get_dma_ops(dev);
+        if (ops->mapping_error)
+                return ops->mapping_error(dev, dma_addr);
 
         return (dma_addr == bad_dma_address);
+#endif
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -83,44 +102,53 @@ static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
                int direction)
 {
+        struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
         BUG_ON(!valid_dma_direction(direction));
-        return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+        return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                  int direction)
 {
+        struct dma_mapping_ops *ops = get_dma_ops(dev);
+
         BUG_ON(!valid_dma_direction(direction));
-        if (dma_ops->unmap_single)
-                dma_ops->unmap_single(dev, addr, size, direction);
+        if (ops->unmap_single)
+                ops->unmap_single(dev, addr, size, direction);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
            int nents, int direction)
 {
+        struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
         BUG_ON(!valid_dma_direction(direction));
-        return dma_ops->map_sg(hwdev, sg, nents, direction);
+        return ops->map_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
              int direction)
 {
+        struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
         BUG_ON(!valid_dma_direction(direction));
-        if (dma_ops->unmap_sg)
-                dma_ops->unmap_sg(hwdev, sg, nents, direction);
+        if (ops->unmap_sg)
+                ops->unmap_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                         size_t size, int direction)
 {
+        struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
         BUG_ON(!valid_dma_direction(direction));
-        if (dma_ops->sync_single_for_cpu)
-                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-                                             direction);
+        if (ops->sync_single_for_cpu)
+                ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
         flush_write_buffers();
 }
 
@@ -128,10 +156,11 @@ static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                            size_t size, int direction)
 {
+        struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
         BUG_ON(!valid_dma_direction(direction));
-        if (dma_ops->sync_single_for_device)
-                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-                                                direction);
+        if (ops->sync_single_for_device)
+                ops->sync_single_for_device(hwdev, dma_handle, size, direction);
         flush_write_buffers();
 }
 
@@ -139,11 +168,12 @@ static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                               unsigned long offset, size_t size, int direction)
 {
-        BUG_ON(!valid_dma_direction(direction));
-        if (dma_ops->sync_single_range_for_cpu)
-                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-                                                   size, direction);
+        struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+        BUG_ON(!valid_dma_direction(direction));
+        if (ops->sync_single_range_for_cpu)
+                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+                                               size, direction);
         flush_write_buffers();
 }
 
@@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                  unsigned long offset, size_t size,
                                  int direction)
 {
-        BUG_ON(!valid_dma_direction(direction));
-        if (dma_ops->sync_single_range_for_device)
-                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-                                                      offset, size, direction);
+        struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+        BUG_ON(!valid_dma_direction(direction));
+        if (ops->sync_single_range_for_device)
+                ops->sync_single_range_for_device(hwdev, dma_handle,
+                                                  offset, size, direction);
         flush_write_buffers();
 }
 
@@ -164,9 +195,11 @@ static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                     int nelems, int direction)
 {
+        struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
         BUG_ON(!valid_dma_direction(direction));
-        if (dma_ops->sync_sg_for_cpu)
-                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+        if (ops->sync_sg_for_cpu)
+                ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
         flush_write_buffers();
 }
 
@@ -174,9 +207,11 @@ static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                        int nelems, int direction)
 {
+        struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
         BUG_ON(!valid_dma_direction(direction));
-        if (dma_ops->sync_sg_for_device)
-                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+        if (ops->sync_sg_for_device)
+                ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 
         flush_write_buffers();
 }
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                       size_t offset, size_t size,
                                       int direction)
 {
+        struct dma_mapping_ops *ops = get_dma_ops(dev);
+
         BUG_ON(!valid_dma_direction(direction));
-        return dma_ops->map_single(dev, page_to_phys(page)+offset,
-                                   size, direction);
+        return ops->map_single(dev, page_to_phys(page) + offset,
+                               size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
@@ -212,25 +249,5 @@ static inline int dma_get_cache_alignment(void)
 
 #define dma_is_consistent(d, h) (1)
 
-#ifdef CONFIG_X86_32
-# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-struct dma_coherent_mem {
-        void            *virt_base;
-        u32             device_base;
-        int             size;
-        int             flags;
-        unsigned long   *bitmap;
-};
-
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-                            dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
-                                  dma_addr_t device_addr, size_t size);
-#endif /* CONFIG_X86_32 */
+#include <asm-generic/dma-coherent.h>
 #endif
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index 7ed2bd7a7f51..d4f2b0abe929 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
         efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                   (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern void *efi_ioremap(unsigned long addr, unsigned long size);
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
 
 #endif /* CONFIG_X86_32 */
 
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h
index 116e9147fe66..c4c91b37c104 100644
--- a/include/asm-x86/gpio.h
+++ b/include/asm-x86/gpio.h
@@ -16,10 +16,6 @@
 #ifndef _ASM_I386_GPIO_H
 #define _ASM_I386_GPIO_H
 
-#ifdef CONFIG_X86_RDC321X
-#include <gpio.h>
-#else /* CONFIG_X86_RDC321X */
-
 #include <asm-generic/gpio.h>
 
 #ifdef CONFIG_GPIOLIB
@@ -57,6 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
 
 #endif /* CONFIG_GPIOLIB */
 
-#endif /* CONFIG_X86_RDC321X */
-
 #endif /* _ASM_I386_GPIO_H */
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index 77ba51df5668..edd0b95f14d0 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -98,9 +98,17 @@ extern void (*const interrupt[NR_IRQS])(void);
 #else
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern spinlock_t vector_lock;
 #endif
-extern void setup_vector_irq(int cpu);
+
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64)
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
+extern void __setup_vector_irq(int cpu);
+#else
+static inline void lock_vector_lock(void) {}
+static inline void unlock_vector_lock(void) {}
+static inline void __setup_vector_irq(int cpu) {}
+#endif
 
 #endif /* !ASSEMBLY_ */
 
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index d63166fb3ab7..5f888cc5be49 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -3,9 +3,12 @@
 
 extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
+extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 
+extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
+
 #ifdef CONFIG_GART_IOMMU
 extern int gart_iommu_aperture;
 extern int gart_iommu_aperture_allowed;
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
index 90b1d1f12f08..b95d167b7fb2 100644
--- a/include/asm-x86/irq_vectors.h
+++ b/include/asm-x86/irq_vectors.h
@@ -109,7 +109,15 @@
 #define LAST_VM86_IRQ           15
 #define invalid_vm86_irq(irq)   ((irq) < 3 || (irq) > 15)
 
-#if !defined(CONFIG_X86_VOYAGER)
+#ifdef CONFIG_X86_64
+# if NR_CPUS < MAX_IO_APICS
+#  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
+# else
+#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
+# endif
+# define NR_IRQ_VECTORS NR_IRQS
+
+#elif !defined(CONFIG_X86_VOYAGER)
 
 # if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
 
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index 8f855a15f64d..c0e52a14fd4d 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -10,14 +10,15 @@
 # define VA_PTE_0               5
 # define PA_PTE_1               6
 # define VA_PTE_1               7
+# define PA_SWAP_PAGE           8
 # ifdef CONFIG_X86_PAE
-#  define PA_PMD_0              8
-#  define VA_PMD_0              9
-#  define PA_PMD_1              10
-#  define VA_PMD_1              11
-#  define PAGES_NR              12
+#  define PA_PMD_0              9
+#  define VA_PMD_0              10
+#  define PA_PMD_1              11
+#  define VA_PMD_1              12
+#  define PAGES_NR              13
 # else
-#  define PAGES_NR              8
+#  define PAGES_NR              9
 # endif
 #else
 # define PA_CONTROL_PAGE        0
@@ -152,11 +153,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage NORET_TYPE void
+asmlinkage unsigned long
 relocate_kernel(unsigned long indirection_page,
                 unsigned long control_page,
                 unsigned long start_address,
-                unsigned int has_pae) ATTRIB_NORET;
+                unsigned int has_pae,
+                unsigned int preserve_context);
 #else
 NORET_TYPE void
 relocate_kernel(unsigned long indirection_page,
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index fdde0bedaa90..0f3c53114614 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/mmu_notifier.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -251,6 +252,7 @@ struct kvm_vcpu_arch {
                 gfn_t gfn;      /* presumed gfn during guest pte update */
                 pfn_t pfn;      /* pfn corresponding to that gfn */
                 int largepage;
+                unsigned long mmu_seq;
         } update_pte;
 
         struct i387_fxsave_struct host_fx_image;
@@ -556,6 +558,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
 
 void kvm_enable_tdp(void);
+void kvm_disable_tdp(void);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
@@ -728,4 +731,8 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
         KVM_EX_ENTRY " 666b, 667b \n\t" \
         ".popsection"
 
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+
 #endif
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h
index 75d2c95005d7..c47e2ab5c5ca 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/mach-summit/mach_apic.h
@@ -122,7 +122,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
 
 static inline physid_mask_t apicid_to_cpu_present(int apicid)
 {
-        return physid_mask_of_physid(0);
+        return physid_mask_of_physid(apicid);
 }
 
 static inline void setup_portio_remap(void)
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index b2298a227567..5862e6460658 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -97,10 +97,16 @@ static inline int pfn_valid(int pfn)
         reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
 #define alloc_bootmem(x) \
         __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_nopanic(x) \
+        __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
+                                __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low(x) \
         __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
         __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_nopanic(x) \
+        __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
+                                __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages(x) \
         __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
diff --git a/include/asm-x86/namei.h b/include/asm-x86/namei.h
deleted file mode 100644
index 415ef5d9550e..000000000000
--- a/include/asm-x86/namei.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_X86_NAMEI_H
-#define _ASM_X86_NAMEI_H
-
-/* This dummy routine maybe changed to something useful
- * for /usr/gnemul/ emulation stuff.
- * Look at asm-sparc/namei.h for details.
- */
-
-#define __emul_prefix() NULL
-
-#endif /* _ASM_X86_NAMEI_H */
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 3e5dbc4195f4..04caa2f544df 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -18,6 +18,7 @@
 #define _PAGE_BIT_UNUSED2       10
 #define _PAGE_BIT_UNUSED3       11
 #define _PAGE_BIT_PAT_LARGE     12      /* On 2MB or 1GB pages */
+#define _PAGE_BIT_SPECIAL       _PAGE_BIT_UNUSED1
 #define _PAGE_BIT_NX            63      /* No execute: only valid after cpuid check */
 
 #define _PAGE_PRESENT   (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
@@ -34,6 +35,8 @@
 #define _PAGE_UNUSED3   (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
 #define _PAGE_PAT       (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_SPECIAL   (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define __HAVE_ARCH_PTE_SPECIAL
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX        (_AT(pteval_t, 1) << _PAGE_BIT_NX)
@@ -54,7 +57,7 @@
 
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK  (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |         \
-                         _PAGE_ACCESSED | _PAGE_DIRTY)
+                         _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
 
 #define _PAGE_CACHE_MASK        (_PAGE_PCD | _PAGE_PWT)
 #define _PAGE_CACHE_WB          (0)
@@ -180,7 +183,7 @@ static inline int pte_exec(pte_t pte)
 
 static inline int pte_special(pte_t pte)
 {
-        return 0;
+        return pte_val(pte) & _PAGE_SPECIAL;
 }
 
 static inline int pmd_large(pmd_t pte)
@@ -246,7 +249,7 @@ static inline pte_t pte_clrglobal(pte_t pte)
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-        return pte;
+        return __pte(pte_val(pte) | _PAGE_SPECIAL);
 }
 
 extern pteval_t __supported_pte_mask;
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index c706a7442633..2730b351afcf 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
                           int nents, int direction);
 extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
                              int nents, int direction);
-extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
                                   void *vaddr, dma_addr_t dma_handle);
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index f6fa4d841bbc..5f702d1d5218 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -451,3 +451,4 @@ extern struct movsl_mask {
 #endif
 
 #endif
+