Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/boot.h             |   8
-rw-r--r--  include/asm-x86/dma-mapping.h      | 238
-rw-r--r--  include/asm-x86/dma-mapping_32.h   | 187
-rw-r--r--  include/asm-x86/dma-mapping_64.h   | 202
-rw-r--r--  include/asm-x86/e820_32.h          |   2
-rw-r--r--  include/asm-x86/genapic_32.h       |   1
-rw-r--r--  include/asm-x86/i387.h             |  37
-rw-r--r--  include/asm-x86/numa_64.h          |   3
-rw-r--r--  include/asm-x86/pci_64.h           |   1
-rw-r--r--  include/asm-x86/processor.h        |  16
-rw-r--r--  include/asm-x86/scatterlist.h      |   2
-rw-r--r--  include/asm-x86/thread_info.h      |   9
-rw-r--r--  include/asm-x86/thread_info_32.h   |   2
-rw-r--r--  include/asm-x86/thread_info_64.h   |   6
-rw-r--r--  include/asm-x86/tsc.h              |   1
15 files changed, 292 insertions(+), 423 deletions(-)
diff --git a/include/asm-x86/boot.h b/include/asm-x86/boot.h
index ed8affbf96cb..2faed7ecb092 100644
--- a/include/asm-x86/boot.h
+++ b/include/asm-x86/boot.h
@@ -17,4 +17,12 @@
 	 + (CONFIG_PHYSICAL_ALIGN - 1)) \
 	 & ~(CONFIG_PHYSICAL_ALIGN - 1))
 
+#ifdef CONFIG_X86_64
+#define BOOT_HEAP_SIZE	0x7000
+#define BOOT_STACK_SIZE	0x4000
+#else
+#define BOOT_HEAP_SIZE	0x4000
+#define BOOT_STACK_SIZE	0x1000
+#endif
+
 #endif /* _ASM_BOOT_H */
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index 58f790f4df52..a1a4dc7fe6ec 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -1,5 +1,237 @@
+#ifndef _ASM_DMA_MAPPING_H_
+#define _ASM_DMA_MAPPING_H_
+
+/*
+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
+ * documentation.
+ */
+
+#include <linux/scatterlist.h>
+#include <asm/io.h>
+#include <asm/swiotlb.h>
+
+extern dma_addr_t bad_dma_address;
+extern int iommu_merge;
+extern struct device fallback_dev;
+extern int panic_on_overflow;
+extern int forbid_dac;
+extern int force_iommu;
+
+struct dma_mapping_ops {
+	int (*mapping_error)(dma_addr_t dma_addr);
+	void* (*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp);
+	void (*free_coherent)(struct device *dev, size_t size,
+			      void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
+				 size_t size, int direction);
+	/* like map_single, but doesn't check the device mask */
+	dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
+				 size_t size, int direction);
+	void (*unmap_single)(struct device *dev, dma_addr_t addr,
+			     size_t size, int direction);
+	void (*sync_single_for_cpu)(struct device *hwdev,
+				    dma_addr_t dma_handle, size_t size,
+				    int direction);
+	void (*sync_single_for_device)(struct device *hwdev,
+				       dma_addr_t dma_handle, size_t size,
+				       int direction);
+	void (*sync_single_range_for_cpu)(struct device *hwdev,
+					  dma_addr_t dma_handle, unsigned long offset,
+					  size_t size, int direction);
+	void (*sync_single_range_for_device)(struct device *hwdev,
+					     dma_addr_t dma_handle, unsigned long offset,
+					     size_t size, int direction);
+	void (*sync_sg_for_cpu)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				int direction);
+	void (*sync_sg_for_device)(struct device *hwdev,
+				   struct scatterlist *sg, int nelems,
+				   int direction);
+	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
+		      int nents, int direction);
+	void (*unmap_sg)(struct device *hwdev,
+			 struct scatterlist *sg, int nents,
+			 int direction);
+	int (*dma_supported)(struct device *hwdev, u64 mask);
+	int is_phys;
+};
+
+extern const struct dma_mapping_ops *dma_ops;
+
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+	if (dma_ops->mapping_error)
+		return dma_ops->mapping_error(dma_addr);
+
+	return (dma_addr == bad_dma_address);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle);
+
+
+extern int dma_supported(struct device *hwdev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 mask);
+
+static inline dma_addr_t
+dma_map_single(struct device *hwdev, void *ptr, size_t size,
+	       int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+		 int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->unmap_single)
+		dma_ops->unmap_single(dev, addr, size, direction);
+}
+
+static inline int
+dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+	   int nents, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	return dma_ops->map_sg(hwdev, sg, nents, direction);
+}
+
+static inline void
+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+	     int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->unmap_sg)
+		dma_ops->unmap_sg(hwdev, sg, nents, direction);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+			size_t size, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_single_for_cpu)
+		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
+					     direction);
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
+			   size_t size, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_single_for_device)
+		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
+						direction);
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+						   size, direction);
+
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
+						      offset, size, direction);
+
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+		    int nelems, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_sg_for_cpu)
+		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+		       int nelems, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+
+	flush_write_buffers();
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	return dma_ops->map_single(dev, page_to_phys(page)+offset,
+				   size, direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, int direction)
+{
+	dma_unmap_single(dev, addr, size, direction);
+}
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	       enum dma_data_direction dir)
+{
+	flush_write_buffers();
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+	/* no easy way to get cache size on all x86, so return the
+	 * maximum possible, to be safe */
+	return boot_cpu_data.x86_clflush_size;
+}
+
+#define dma_is_consistent(d, h)	(1)
+
 #ifdef CONFIG_X86_32
-# include "dma-mapping_32.h"
-#else
-# include "dma-mapping_64.h"
+# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+struct dma_coherent_mem {
+	void *virt_base;
+	u32 device_base;
+	int size;
+	int flags;
+	unsigned long *bitmap;
+};
+
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+#endif /* CONFIG_X86_32 */
 #endif
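
The consolidated header above routes every streaming mapping through the dma_ops table, so 32-bit and 64-bit drivers now share one entry path. As a rough usage sketch (not part of this patch; the device pointer, buffer, and error path are illustrative only), a driver of this era would do:

/* Minimal sketch of streaming DMA against the unified header above.
 * "dev", "buf" and "len" are assumed driver context, not from this patch. */
static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))	/* pre-2.6.27 form: no dev argument */
		return -EIO;

	/* ... hand "handle" to the device and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}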
diff --git a/include/asm-x86/dma-mapping_32.h b/include/asm-x86/dma-mapping_32.h
deleted file mode 100644
index 55f01bd9e556..000000000000
--- a/include/asm-x86/dma-mapping_32.h
+++ /dev/null
@@ -1,187 +0,0 @@
-#ifndef _ASM_I386_DMA_MAPPING_H
-#define _ASM_I386_DMA_MAPPING_H
-
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/bug.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(size == 0);
-	flush_write_buffers();
-	return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	   enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(nents == 0 || sglist[0].length == 0);
-
-	for_each_sg(sglist, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-
-		sg->dma_address = sg_phys(sg);
-	}
-
-	flush_write_buffers();
-	return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-extern int forbid_dac;
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if (mask < 0x00ffffff)
-		return 0;
-
-	/* Work around chipset bugs */
-	if (forbid_dac > 0 && mask > 0xffffffffULL)
-		return 0;
-
-	return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all x86, so return the
-	 * maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-#define dma_is_consistent(d, h)	(1)
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-			    dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
-				  dma_addr_t device_addr, size_t size);
-
-#endif
diff --git a/include/asm-x86/dma-mapping_64.h b/include/asm-x86/dma-mapping_64.h
deleted file mode 100644
index ecd0f6125ba3..000000000000
--- a/include/asm-x86/dma-mapping_64.h
+++ /dev/null
@@ -1,202 +0,0 @@
-#ifndef _X8664_DMA_MAPPING_H
-#define _X8664_DMA_MAPPING_H 1
-
-/*
- * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
- * documentation.
- */
-
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-#include <asm/swiotlb.h>
-
-struct dma_mapping_ops {
-	int (*mapping_error)(dma_addr_t dma_addr);
-	void* (*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
-				 size_t size, int direction);
-	/* like map_single, but doesn't check the device mask */
-	dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
-				 size_t size, int direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t addr,
-			     size_t size, int direction);
-	void (*sync_single_for_cpu)(struct device *hwdev,
-				    dma_addr_t dma_handle, size_t size,
-				    int direction);
-	void (*sync_single_for_device)(struct device *hwdev,
-				       dma_addr_t dma_handle, size_t size,
-				       int direction);
-	void (*sync_single_range_for_cpu)(struct device *hwdev,
-					  dma_addr_t dma_handle, unsigned long offset,
-					  size_t size, int direction);
-	void (*sync_single_range_for_device)(struct device *hwdev,
-					     dma_addr_t dma_handle, unsigned long offset,
-					     size_t size, int direction);
-	void (*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void (*sync_sg_for_device)(struct device *hwdev,
-				   struct scatterlist *sg, int nelems,
-				   int direction);
-	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-		      int nents, int direction);
-	void (*unmap_sg)(struct device *hwdev,
-			 struct scatterlist *sg, int nents,
-			 int direction);
-	int (*dma_supported)(struct device *hwdev, u64 mask);
-	int is_phys;
-};
-
-extern dma_addr_t bad_dma_address;
-extern const struct dma_mapping_ops* dma_ops;
-extern int iommu_merge;
-
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	if (dma_ops->mapping_error)
-		return dma_ops->mapping_error(dma_addr);
-
-	return (dma_addr == bad_dma_address);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-			      dma_addr_t dma_handle);
-
-static inline dma_addr_t
-dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(hwdev, ptr, size, direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		 int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	dma_ops->unmap_single(dev, addr, size, direction);
-}
-
-#define dma_map_page(dev,page,offset,size,dir) \
-	dma_map_single((dev), page_address(page)+(offset), (size), (dir))
-
-#define dma_unmap_page dma_unmap_single
-
-static inline void
-dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_cpu)
-		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-					     direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-						direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_cpu) {
-		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
-	}
-
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-						      offset, size, direction);
-
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_device) {
-		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
-	}
-
-	flush_write_buffers();
-}
-
-static inline int
-dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_sg(hwdev, sg, nents, direction);
-}
-
-static inline void
-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	dma_ops->unmap_sg(hwdev, sg, nents, direction);
-}
-
-extern int dma_supported(struct device *hwdev, u64 mask);
-
-/* same for gart, swiotlb, and nommu */
-static inline int dma_get_cache_alignment(void)
-{
-	return boot_cpu_data.x86_clflush_size;
-}
-
-#define dma_is_consistent(d, h) 1
-
-extern int dma_set_mask(struct device *dev, u64 mask);
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction dir)
-{
-	flush_write_buffers();
-}
-
-extern struct device fallback_dev;
-extern int panic_on_overflow;
-
-#endif /* _X8664_DMA_MAPPING_H */
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h
index 43b1a8bd4b34..a9f7c6ec32bf 100644
--- a/include/asm-x86/e820_32.h
+++ b/include/asm-x86/e820_32.h
@@ -24,7 +24,7 @@ extern void update_e820(void);
 extern int e820_all_mapped(unsigned long start, unsigned long end,
 			   unsigned type);
 extern int e820_any_mapped(u64 start, u64 end, unsigned type);
-extern void find_max_pfn(void);
+extern void propagate_e820_map(void);
 extern void register_bootmem_low_pages(unsigned long max_low_pfn);
 extern void add_memory_region(unsigned long long start,
 			      unsigned long long size, int type);
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index f1b96932746b..b02ea6e17de8 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -117,6 +117,7 @@ extern struct genapic *genapic;
 enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 #define get_uv_system_type()		UV_NONE
 #define is_uv_system()			0
+#define uv_wakeup_secondary(a, b)	1
 
 
 #endif
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 54522b814f1c..da2adb45f6e3 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -21,8 +21,9 @@
 
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
-extern void init_fpu(struct task_struct *child);
+extern int init_fpu(struct task_struct *child);
 extern asmlinkage void math_state_restore(void);
+extern void init_thread_xstate(void);
 
 extern user_regset_active_fn fpregs_active, xfpregs_active;
 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
@@ -117,24 +118,22 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
 	   starting with gas 2.16. */
 	__asm__ __volatile__("fxsaveq %0"
-			     : "=m" (tsk->thread.i387.fxsave));
+			     : "=m" (tsk->thread.xstate->fxsave));
 #elif 0
 	/* Using, as a workaround, the properly prefixed form below isn't
 	   accepted by any binutils version so far released, complaining that
 	   the same type of prefix is used twice if an extended register is
 	   needed for addressing (fix submitted to mainline 2005-11-21). */
 	__asm__ __volatile__("rex64/fxsave %0"
-			     : "=m" (tsk->thread.i387.fxsave));
+			     : "=m" (tsk->thread.xstate->fxsave));
 #else
 	/* This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
-	__asm__ __volatile__("rex64/fxsave %P2(%1)"
-			     : "=m" (tsk->thread.i387.fxsave)
-			     : "cdaSDb" (tsk),
-			       "i" (offsetof(__typeof__(*tsk),
-					     thread.i387.fxsave)));
+	__asm__ __volatile__("rex64/fxsave (%1)"
+			     : "=m" (tsk->thread.xstate->fxsave)
+			     : "cdaSDb" (&tsk->thread.xstate->fxsave));
 #endif
-	clear_fpu_state(&tsk->thread.i387.fxsave);
+	clear_fpu_state(&tsk->thread.xstate->fxsave);
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
@@ -148,7 +147,7 @@ static inline int save_i387(struct _fpstate __user *buf)
 	int err = 0;
 
 	BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
-		     sizeof(tsk->thread.i387.fxsave));
+		     sizeof(tsk->thread.xstate->fxsave));
 
 	if ((unsigned long)buf % 16)
 		printk("save_i387: bad fpstate %p\n", buf);
@@ -164,7 +163,7 @@ static inline int save_i387(struct _fpstate __user *buf)
 		task_thread_info(tsk)->status &= ~TS_USEDFPU;
 		stts();
 	} else {
-		if (__copy_to_user(buf, &tsk->thread.i387.fxsave,
+		if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
 				   sizeof(struct i387_fxsave_struct)))
 			return -1;
 	}
@@ -201,7 +200,7 @@ static inline void restore_fpu(struct task_struct *tsk)
201 "nop ; frstor %1", 200 "nop ; frstor %1",
202 "fxrstor %1", 201 "fxrstor %1",
203 X86_FEATURE_FXSR, 202 X86_FEATURE_FXSR,
204 "m" ((tsk)->thread.i387.fxsave)); 203 "m" (tsk->thread.xstate->fxsave));
205} 204}
206 205
207/* We need a safe address that is cheap to find and that is already 206/* We need a safe address that is cheap to find and that is already
@@ -225,8 +224,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
225 "fxsave %[fx]\n" 224 "fxsave %[fx]\n"
226 "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", 225 "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
227 X86_FEATURE_FXSR, 226 X86_FEATURE_FXSR,
228 [fx] "m" (tsk->thread.i387.fxsave), 227 [fx] "m" (tsk->thread.xstate->fxsave),
229 [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory"); 228 [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
230 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception 229 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
231 is pending. Clear the x87 state here by setting it to fixed 230 is pending. Clear the x87 state here by setting it to fixed
232 values. safe_address is a random variable that should be in L1 */ 231 values. safe_address is a random variable that should be in L1 */
@@ -327,25 +326,25 @@ static inline void clear_fpu(struct task_struct *tsk)
 static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.i387.fxsave.cwd;
+		return tsk->thread.xstate->fxsave.cwd;
 	} else {
-		return (unsigned short)tsk->thread.i387.fsave.cwd;
+		return (unsigned short)tsk->thread.xstate->fsave.cwd;
 	}
 }
 
 static inline unsigned short get_fpu_swd(struct task_struct *tsk)
 {
 	if (cpu_has_fxsr) {
-		return tsk->thread.i387.fxsave.swd;
+		return tsk->thread.xstate->fxsave.swd;
 	} else {
-		return (unsigned short)tsk->thread.i387.fsave.swd;
+		return (unsigned short)tsk->thread.xstate->fsave.swd;
 	}
 }
 
 static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 {
 	if (cpu_has_xmm) {
-		return tsk->thread.i387.fxsave.mxcsr;
+		return tsk->thread.xstate->fxsave.mxcsr;
 	} else {
 		return MXCSR_DEFAULT;
 	}
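
Throughout i387.h the FPU image moves from the inline tsk->thread.i387 union to the dynamically allocated tsk->thread.xstate, which is why init_fpu() now returns int (allocation can fail) and init_thread_xstate() appears. A hedged sketch of what the new boot-time hook plausibly does (the real body lives in arch/x86/kernel/i387.c, outside this diff, and may differ in detail):

/* Assumed shape of init_thread_xstate(): pick the buffer size that the
 * per-task allocator will use.  Sketch only, not taken from this patch. */
void __init init_thread_xstate(void)
{
	if (cpu_has_fxsr)
		xstate_size = sizeof(struct i387_fxsave_struct);
	else
		xstate_size = sizeof(struct i387_fsave_struct);
}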
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
index 32c22ae0709f..22e87c9f6a80 100644
--- a/include/asm-x86/numa_64.h
+++ b/include/asm-x86/numa_64.h
@@ -9,7 +9,8 @@ struct bootnode {
 	u64 end;
 };
 
-extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
+extern int compute_hash_shift(struct bootnode *nodes, int numblks,
+			      int *nodeids);
 
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
 
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index df867e5d80b1..f330234ffa5c 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -22,6 +22,7 @@ extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
 extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
 			       int reg, int len, u32 value);
 
+extern void dma32_reserve_bootmem(void);
 extern void pci_iommu_alloc(void);
 
 /* The PCI address space does equal the physical memory
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 6e26c7c717a2..e6bf92ddeb21 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -354,7 +354,7 @@ struct i387_soft_struct {
 	u32			entry_eip;
 };
 
-union i387_union {
+union thread_xstate {
 	struct i387_fsave_struct	fsave;
 	struct i387_fxsave_struct	fxsave;
 	struct i387_soft_struct		soft;
@@ -365,6 +365,9 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
 #endif
 
 extern void print_cpu_info(struct cpuinfo_x86 *);
+extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
@@ -397,8 +400,8 @@ struct thread_struct {
 	unsigned long	cr2;
 	unsigned long	trap_no;
 	unsigned long	error_code;
-	/* Floating point info: */
-	union i387_union	i387 __attribute__((aligned(16)));;
+	/* floating point and extended processor state */
+	union thread_xstate	*xstate;
 #ifdef CONFIG_X86_32
 	/* Virtual 86 mode info */
 	struct vm86_struct __user *vm86_info;
@@ -918,4 +921,11 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
 
 #define KSTK_EIP(task)		(task_pt_regs(task)->ip)
 
+/* Get/set a process' ability to use the timestamp counter instruction */
+#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
+#define SET_TSC_CTL(val)	set_tsc_mode((val))
+
+extern int get_tsc_mode(unsigned long adr);
+extern int set_tsc_mode(unsigned int val);
+
 #endif
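
The new externs above (xstate_size, free_thread_xstate, task_xstate_cachep) pair with the arch_dup_task_struct() hook declared in thread_info.h further down. A sketch of how fork would plausibly copy the now out-of-line FPU state through the slab cache (assumed implementation; the actual code sits in arch/x86/kernel/process.c, outside this diff):

/* Sketch: duplicate a task's lazily allocated FPU/xstate buffer on fork.
 * Uses the task_xstate_cachep/xstate_size declared in processor.h above. */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;
	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}
	return 0;
}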
diff --git a/include/asm-x86/scatterlist.h b/include/asm-x86/scatterlist.h
index d13c197866d6..c0432061f81a 100644
--- a/include/asm-x86/scatterlist.h
+++ b/include/asm-x86/scatterlist.h
@@ -11,9 +11,7 @@ struct scatterlist {
 	unsigned int	offset;
 	unsigned int	length;
 	dma_addr_t	dma_address;
-#ifdef CONFIG_X86_64
 	unsigned int	dma_length;
-#endif
 };
 
 #define ARCH_HAS_SG_CHAIN
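
With dma_length now unconditional, sg_dma_len() behaves identically on 32- and 64-bit, so generic code can walk a mapped scatterlist the same way on both. An illustrative walk (the device callback is hypothetical, not from this patch):

/* Sketch: consuming a mapped scatterlist uniformly on 32- and 64-bit.
 * "hw_queue_chunk" is a hypothetical device-programming helper. */
static void example_queue_sg(struct device *dev, struct scatterlist *sglist,
			     int nents)
{
	struct scatterlist *s;
	int mapped, i;

	mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	for_each_sg(sglist, s, mapped, i)
		hw_queue_chunk(dev, sg_dma_address(s), sg_dma_len(s));
}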
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index d5fd12f2abdb..77244f17993f 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -1,5 +1,14 @@
+#ifndef _ASM_X86_THREAD_INFO_H
 #ifdef CONFIG_X86_32
 # include "thread_info_32.h"
 #else
 # include "thread_info_64.h"
 #endif
+
+#ifndef __ASSEMBLY__
+extern void arch_task_cache_init(void);
+extern void free_thread_info(struct thread_info *ti);
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+#define arch_task_cache_init arch_task_cache_init
+#endif
+#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
index 4e053fa561a9..531859962096 100644
--- a/include/asm-x86/thread_info_32.h
+++ b/include/asm-x86/thread_info_32.h
@@ -102,8 +102,6 @@ static inline struct thread_info *current_thread_info(void)
 	__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
 #endif
 
-#define free_thread_info(info)	free_pages((unsigned long)(info), get_order(THREAD_SIZE))
-
 #else /* !__ASSEMBLY__ */
 
 /* how to get the thread information struct from ASM */
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h
index 1e5c6f6152cd..ed664e874dec 100644
--- a/include/asm-x86/thread_info_64.h
+++ b/include/asm-x86/thread_info_64.h
@@ -85,8 +85,6 @@ static inline struct thread_info *stack_thread_info(void)
 #define alloc_thread_info(tsk) \
 	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
 
-#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-
 #else /* !__ASSEMBLY__ */
 
 /* how to get the thread information struct from ASM */
@@ -126,6 +124,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define TIF_DEBUGCTLMSR		25 /* uses thread_struct.debugctlmsr */
 #define TIF_DS_AREA_MSR		26 /* uses thread_struct.ds_area_msr */
 #define TIF_BTS_TRACE_TS	27 /* record scheduling event timestamps */
+#define TIF_NOTSC		28 /* TSC is not accessible in userland */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -147,6 +146,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
 #define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
 #define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)
+#define _TIF_NOTSC		(1 << TIF_NOTSC)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
@@ -160,7 +160,7 @@ static inline struct thread_info *stack_thread_info(void)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW \
-	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS)
+	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS|_TIF_NOTSC)
 #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
 
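
TIF_NOTSC is the 64-bit half of the per-process TSC control wired up through GET_TSC_CTL/SET_TSC_CTL in processor.h above; adding _TIF_NOTSC to _TIF_WORK_CTXSW makes __switch_to() honor it on every context switch. From userland the flag is driven via prctl(); a sketch, assuming the PR_GET_TSC/PR_SET_TSC constants added by the same patch series:

/* Userland sketch: forbid RDTSC in this task; later reads raise SIGSEGV.
 * Assumes PR_SET_TSC/PR_TSC_SIGSEGV from <linux/prctl.h> (same series). */
#include <sys/prctl.h>
#include <linux/prctl.h>

static int deny_tsc(void)
{
	return prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
}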
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index d2d8eb5b55f5..0434bd8349a7 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -18,6 +18,7 @@ extern unsigned int cpu_khz;
 extern unsigned int tsc_khz;
 
 extern void disable_TSC(void);
+extern void enable_TSC(void);
 
 static inline cycles_t get_cycles(void)
 {