Diffstat (limited to 'include/asm-x86')

-rw-r--r--  include/asm-x86/device.h                  3
-rw-r--r--  include/asm-x86/dma-mapping.h            99
-rw-r--r--  include/asm-x86/gpio.h                    6
-rw-r--r--  include/asm-x86/iommu.h                   1
-rw-r--r--  include/asm-x86/kexec.h                  18
-rw-r--r--  include/asm-x86/mach-summit/mach_apic.h   2
-rw-r--r--  include/asm-x86/pgtable.h                 9
-rw-r--r--  include/asm-x86/swiotlb.h                 2
-rw-r--r--  include/asm-x86/uaccess.h                 1

9 files changed, 91 insertions, 50 deletions
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index 87a715367a1b..3c034f48fdb0 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -5,6 +5,9 @@ struct dev_archdata {
 #ifdef CONFIG_ACPI
 	void		*acpi_handle;
 #endif
+#ifdef CONFIG_X86_64
+	struct dma_mapping_ops *dma_ops;
+#endif
 #ifdef CONFIG_DMAR
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
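
The new archdata member gives each x86-64 device its own dma_mapping_ops pointer instead of forcing everything through the global dma_ops table. A minimal sketch of how an IOMMU driver might use the hook; my_iommu_dma_ops, the my_iommu_* callbacks and my_iommu_add_device are illustrative names, not part of this patch:

/* Illustrative only: hang per-device DMA ops off dev_archdata (x86-64). */
static struct dma_mapping_ops my_iommu_dma_ops = {
	.map_single	= my_iommu_map_single,	/* hypothetical callbacks */
	.unmap_single	= my_iommu_unmap_single,
	.map_sg		= my_iommu_map_sg,
	.unmap_sg	= my_iommu_unmap_sg,
	.mapping_error	= my_iommu_mapping_error,
};

static void my_iommu_add_device(struct device *dev)
{
	/* Devices behind this IOMMU get its ops; everything else keeps
	 * falling back to the global dma_ops. */
	dev->archdata.dma_ops = &my_iommu_dma_ops;
}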
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index c2ddd3d1b883..0eaa9bf6011f 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -17,7 +17,8 @@ extern int panic_on_overflow;
 extern int force_iommu;
 
 struct dma_mapping_ops {
-	int	(*mapping_error)(dma_addr_t dma_addr);
+	int	(*mapping_error)(struct device *dev,
+				 dma_addr_t dma_addr);
 	void*	(*alloc_coherent)(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t gfp);
 	void	(*free_coherent)(struct device *dev, size_t size,
@@ -56,14 +57,32 @@ struct dma_mapping_ops {
 	int	is_phys;
 };
 
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-	if (dma_ops->mapping_error)
-		return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+	return dma_ops;
+#else
+	if (unlikely(!dev) || !dev->archdata.dma_ops)
+		return dma_ops;
+	else
+		return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+	return 0;
+#else
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
 
 	return (dma_addr == bad_dma_address);
+#endif
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -83,44 +102,53 @@ static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
 		 int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, addr, size, direction);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, addr, size, direction);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
 	   int nents, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_sg(hwdev, sg, nents, direction);
+	return ops->map_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 	     int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_sg)
-		dma_ops->unmap_sg(hwdev, sg, nents, direction);
+	if (ops->unmap_sg)
+		ops->unmap_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_cpu)
-		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-					     direction);
+	if (ops->sync_single_for_cpu)
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -128,10 +156,11 @@ static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
 			   size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-						direction);
+	if (ops->sync_single_for_device)
+		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -139,11 +168,12 @@ static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			      unsigned long offset, size_t size, int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-						   size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_cpu)
+		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+					       size, direction);
 	flush_write_buffers();
 }
 
@@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
 				 int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-						      offset, size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_device)
+		ops->sync_single_range_for_device(hwdev, dma_handle,
+						  offset, size, direction);
 	flush_write_buffers();
 }
 
@@ -164,9 +195,11 @@ static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 		    int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_cpu)
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
 	flush_write_buffers();
 }
 
@@ -174,9 +207,11 @@ static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 		       int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_device)
+		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 
 	flush_write_buffers();
 }
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
 				      int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(dev, page_to_phys(page)+offset,
-				   size, direction);
+	return ops->map_single(dev, page_to_phys(page) + offset,
+			       size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
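
For drivers, the visible part of this change is the extra device argument to dma_mapping_error(); the per-device ops lookup happens behind the inline wrappers. A short usage sketch under that assumption (my_drv_map_buffer is an illustrative helper, not something added by this diff):

/* Illustrative driver fragment: map a buffer and check the result with
 * the new two-argument dma_mapping_error(). */
static int my_drv_map_buffer(struct device *dev, void *buf, size_t len,
			     dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))	/* was: dma_mapping_error(*handle) */
		return -ENOMEM;
	return 0;
}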
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h
index 116e9147fe66..c4c91b37c104 100644
--- a/include/asm-x86/gpio.h
+++ b/include/asm-x86/gpio.h
@@ -16,10 +16,6 @@
 #ifndef _ASM_I386_GPIO_H
 #define _ASM_I386_GPIO_H
 
-#ifdef CONFIG_X86_RDC321X
-#include <gpio.h>
-#else /* CONFIG_X86_RDC321X */
-
 #include <asm-generic/gpio.h>
 
 #ifdef CONFIG_GPIOLIB
@@ -57,6 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
 
 #endif /* CONFIG_GPIOLIB */
 
-#endif /* CONFIG_X86_RDC321X */
-
 #endif /* _ASM_I386_GPIO_H */
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index d63166fb3ab7..ecc8061904a9 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -3,6 +3,7 @@
 
 extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
+extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index 8f855a15f64d..c0e52a14fd4d 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -10,14 +10,15 @@
 # define VA_PTE_0		5
 # define PA_PTE_1		6
 # define VA_PTE_1		7
+# define PA_SWAP_PAGE		8
 # ifdef CONFIG_X86_PAE
-# define PA_PMD_0		8
-# define VA_PMD_0		9
-# define PA_PMD_1		10
-# define VA_PMD_1		11
-# define PAGES_NR		12
+# define PA_PMD_0		9
+# define VA_PMD_0		10
+# define PA_PMD_1		11
+# define VA_PMD_1		12
+# define PAGES_NR		13
 # else
-# define PAGES_NR		8
+# define PAGES_NR		9
 # endif
 #else
 # define PA_CONTROL_PAGE	0
@@ -152,11 +153,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage NORET_TYPE void
+asmlinkage unsigned long
 relocate_kernel(unsigned long indirection_page,
 		unsigned long control_page,
 		unsigned long start_address,
-		unsigned int has_pae) ATTRIB_NORET;
+		unsigned int has_pae,
+		unsigned int preserve_context);
 #else
 NORET_TYPE void
 relocate_kernel(unsigned long indirection_page,
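
The renumbering matters because the PA_ and VA_ constants index a fixed-size table of pages set up for the relocation code, so inserting PA_SWAP_PAGE at slot 8 shifts the PAE entries up and bumps PAGES_NR. A rough sketch of that indexing, assuming a machine_kexec()-style page_list setup (control_page and swap_page are illustrative locals, not defined by this header):

/* Sketch only: the indices select slots in one fixed-size table. */
unsigned long page_list[PAGES_NR];

page_list[PA_CONTROL_PAGE] = __pa(control_page);
page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
page_list[PA_SWAP_PAGE]    = __pa(swap_page);	/* new slot added by this patch */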
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h
index 75d2c95005d7..c47e2ab5c5ca 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/mach-summit/mach_apic.h
@@ -122,7 +122,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
 
 static inline physid_mask_t apicid_to_cpu_present(int apicid)
 {
-	return physid_mask_of_physid(0);
+	return physid_mask_of_physid(apicid);
 }
 
 static inline void setup_portio_remap(void)
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 3e5dbc4195f4..04caa2f544df 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -18,6 +18,7 @@
 #define _PAGE_BIT_UNUSED2	10
 #define _PAGE_BIT_UNUSED3	11
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
+#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
 #define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
 
 #define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
@@ -34,6 +35,8 @@
 #define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
 #define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define __HAVE_ARCH_PTE_SPECIAL
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
@@ -54,7 +57,7 @@
 
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
-			 _PAGE_ACCESSED | _PAGE_DIRTY)
+			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
 
 #define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
 #define _PAGE_CACHE_WB		(0)
@@ -180,7 +183,7 @@ static inline int pte_exec(pte_t pte)
 
 static inline int pte_special(pte_t pte)
 {
-	return 0;
+	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
 static inline int pmd_large(pmd_t pte)
@@ -246,7 +249,7 @@ static inline pte_t pte_clrglobal(pte_t pte)
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	return pte;
+	return __pte(pte_val(pte) | _PAGE_SPECIAL);
 }
 
 extern pteval_t __supported_pte_mask;
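
With _PAGE_SPECIAL wired up (it reuses the UNUSED1 software bit), pte_mkspecial() and pte_special() stop being no-ops, so raw-PFN mappings can be marked and later recognized by page-table walkers that must not treat them as struct-page-backed memory. A hedged sketch of that consumer-side pattern; the two helpers below are illustrative, not code added by this diff:

/* Illustrative use of the now-functional special-PTE helpers. */
static pte_t mark_pfn_mapping(pte_t pte)
{
	return pte_mkspecial(pte);	/* sets _PAGE_SPECIAL */
}

static int walker_may_follow(pte_t pte)
{
	if (pte_special(pte))		/* non-zero now on x86 */
		return 0;		/* no struct page behind this PTE */
	return 1;
}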
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index c706a7442633..2730b351afcf 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
 			  int nents, int direction);
 extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
 			     int nents, int direction);
-extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
 				  void *vaddr, dma_addr_t dma_handle);
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index f6fa4d841bbc..5f702d1d5218 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -451,3 +451,4 @@ extern struct movsl_mask {
 #endif
 
 #endif
+