author     Ingo Molnar <mingo@elte.hu>    2008-01-30 07:30:12 -0500
committer  Ingo Molnar <mingo@elte.hu>    2008-01-30 07:30:12 -0500
commit     05fccb0e3840248324a96b320562210410be73dc (patch)
tree       d5d7036f84dbf05adf6dbba45a88b1acb9748246 /arch/x86/kernel/pci-gart_64.c
parent     e8d591dc710158bae6b53c8b7a0172351025c6e2 (diff)
x86: code cleanups in arch/x86/kernel/pci-gart_64.c
code cleanups:
                                  errors   lines of code   errors/KLOC
  arch/x86/kernel/pci-gart_64.c      183             748         244.6
  arch/x86/kernel/pci-gart_64.c        0             790             0
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
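The errors/KLOC figures in the commit message are checkpatch-style counts: the number of reported coding-style errors divided by the file's length in thousands of lines (183 errors across 748 lines works out to roughly 245 errors per KLOC; after the cleanup it is 0). The sketch below shows how such a figure could be reproduced. It assumes a kernel tree containing scripts/checkpatch.pl, that script's -f/--file and --no-tree switches, and problem reports prefixed with "ERROR:"; none of those details come from this commit, so treat them as assumptions.

    #!/usr/bin/env python3
    # Sketch: compute a checkpatch errors/KLOC figure for one source file.
    # Assumes scripts/checkpatch.pl exists relative to the working directory
    # and prints one "ERROR:"-prefixed line per problem found.
    import re
    import subprocess
    import sys

    def errors_per_kloc(path):
        report = subprocess.run(
            ["perl", "scripts/checkpatch.pl", "--no-tree", "-f", path],
            capture_output=True, text=True).stdout
        errors = len(re.findall(r"^ERROR:", report, re.MULTILINE))
        with open(path) as src:
            loc = sum(1 for _ in src)   # total lines in the file
        return errors, loc, 1000.0 * errors / loc

    if __name__ == "__main__":
        errors, loc, rate = errors_per_kloc(sys.argv[1])
        print(f"{sys.argv[1]}: {errors} errors, {loc} lines, {rate:.1f} errors/KLOC")

Run it from the top of a kernel tree, for example "python3 kloc.py arch/x86/kernel/pci-gart_64.c" (the script name is hypothetical); the exact counts depend on the checkpatch version in use.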
Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 508
1 file changed, 276 insertions, 232 deletions
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 06bcba536045..d2b46b489412 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -1,12 +1,12 @@
1 | /* | 1 | /* |
2 | * Dynamic DMA mapping support for AMD Hammer. | 2 | * Dynamic DMA mapping support for AMD Hammer. |
3 | * | 3 | * |
4 | * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI. | 4 | * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI. |
5 | * This allows to use PCI devices that only support 32bit addresses on systems | 5 | * This allows to use PCI devices that only support 32bit addresses on systems |
6 | * with more than 4GB. | 6 | * with more than 4GB. |
7 | * | 7 | * |
8 | * See Documentation/DMA-mapping.txt for the interface specification. | 8 | * See Documentation/DMA-mapping.txt for the interface specification. |
9 | * | 9 | * |
10 | * Copyright 2002 Andi Kleen, SuSE Labs. | 10 | * Copyright 2002 Andi Kleen, SuSE Labs. |
11 | * Subject to the GNU General Public License v2 only. | 11 | * Subject to the GNU General Public License v2 only. |
12 | */ | 12 | */ |
@@ -37,23 +37,26 @@
37 | #include <asm/k8.h> | 37 | #include <asm/k8.h> |
38 | 38 | ||
39 | static unsigned long iommu_bus_base; /* GART remapping area (physical) */ | 39 | static unsigned long iommu_bus_base; /* GART remapping area (physical) */ |
40 | static unsigned long iommu_size; /* size of remapping area bytes */ | 40 | static unsigned long iommu_size; /* size of remapping area bytes */ |
41 | static unsigned long iommu_pages; /* .. and in pages */ | 41 | static unsigned long iommu_pages; /* .. and in pages */ |
42 | 42 | ||
43 | static u32 *iommu_gatt_base; /* Remapping table */ | 43 | static u32 *iommu_gatt_base; /* Remapping table */ |
44 | 44 | ||
45 | /* If this is disabled the IOMMU will use an optimized flushing strategy | 45 | /* |
46 | of only flushing when an mapping is reused. With it true the GART is flushed | 46 | * If this is disabled the IOMMU will use an optimized flushing strategy |
47 | for every mapping. Problem is that doing the lazy flush seems to trigger | 47 | * of only flushing when an mapping is reused. With it true the GART is |
48 | bugs with some popular PCI cards, in particular 3ware (but has been also | 48 | * flushed for every mapping. Problem is that doing the lazy flush seems |
49 | also seen with Qlogic at least). */ | 49 | * to trigger bugs with some popular PCI cards, in particular 3ware (but |
50 | * has been also also seen with Qlogic at least). | ||
51 | */ | ||
50 | int iommu_fullflush = 1; | 52 | int iommu_fullflush = 1; |
51 | 53 | ||
52 | /* Allocation bitmap for the remapping area */ | 54 | /* Allocation bitmap for the remapping area: */ |
53 | static DEFINE_SPINLOCK(iommu_bitmap_lock); | 55 | static DEFINE_SPINLOCK(iommu_bitmap_lock); |
54 | static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */ | 56 | /* Guarded by iommu_bitmap_lock: */ |
57 | static unsigned long *iommu_gart_bitmap; | ||
55 | 58 | ||
56 | static u32 gart_unmapped_entry; | 59 | static u32 gart_unmapped_entry; |
57 | 60 | ||
58 | #define GPTE_VALID 1 | 61 | #define GPTE_VALID 1 |
59 | #define GPTE_COHERENT 2 | 62 | #define GPTE_COHERENT 2 |
@@ -61,10 +64,10 @@ static u32 gart_unmapped_entry;
61 | (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) | 64 | (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) |
62 | #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) | 65 | #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) |
63 | 66 | ||
64 | #define to_pages(addr,size) \ | 67 | #define to_pages(addr, size) \ |
65 | (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) | 68 | (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) |
66 | 69 | ||
67 | #define EMERGENCY_PAGES 32 /* = 128KB */ | 70 | #define EMERGENCY_PAGES 32 /* = 128KB */ |
68 | 71 | ||
69 | #ifdef CONFIG_AGP | 72 | #ifdef CONFIG_AGP |
70 | #define AGPEXTERN extern | 73 | #define AGPEXTERN extern |
@@ -77,130 +80,152 @@ AGPEXTERN int agp_memory_reserved;
77 | AGPEXTERN __u32 *agp_gatt_table; | 80 | AGPEXTERN __u32 *agp_gatt_table; |
78 | 81 | ||
79 | static unsigned long next_bit; /* protected by iommu_bitmap_lock */ | 82 | static unsigned long next_bit; /* protected by iommu_bitmap_lock */ |
80 | static int need_flush; /* global flush state. set for each gart wrap */ | 83 | static int need_flush; /* global flush state. set for each gart wrap */ |
81 | 84 | ||
82 | static unsigned long alloc_iommu(int size) | 85 | static unsigned long alloc_iommu(int size) |
83 | { | 86 | { |
84 | unsigned long offset, flags; | 87 | unsigned long offset, flags; |
85 | 88 | ||
86 | spin_lock_irqsave(&iommu_bitmap_lock, flags); | 89 | spin_lock_irqsave(&iommu_bitmap_lock, flags); |
87 | offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size); | 90 | offset = find_next_zero_string(iommu_gart_bitmap, next_bit, |
91 | iommu_pages, size); | ||
88 | if (offset == -1) { | 92 | if (offset == -1) { |
89 | need_flush = 1; | 93 | need_flush = 1; |
90 | offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size); | 94 | offset = find_next_zero_string(iommu_gart_bitmap, 0, |
95 | iommu_pages, size); | ||
91 | } | 96 | } |
92 | if (offset != -1) { | 97 | if (offset != -1) { |
93 | set_bit_string(iommu_gart_bitmap, offset, size); | 98 | set_bit_string(iommu_gart_bitmap, offset, size); |
94 | next_bit = offset+size; | 99 | next_bit = offset+size; |
95 | if (next_bit >= iommu_pages) { | 100 | if (next_bit >= iommu_pages) { |
96 | next_bit = 0; | 101 | next_bit = 0; |
97 | need_flush = 1; | 102 | need_flush = 1; |
98 | } | 103 | } |
99 | } | 104 | } |
100 | if (iommu_fullflush) | 105 | if (iommu_fullflush) |
101 | need_flush = 1; | 106 | need_flush = 1; |
102 | spin_unlock_irqrestore(&iommu_bitmap_lock, flags); | 107 | spin_unlock_irqrestore(&iommu_bitmap_lock, flags); |
108 | |||
103 | return offset; | 109 | return offset; |
104 | } | 110 | } |
105 | 111 | ||
106 | static void free_iommu(unsigned long offset, int size) | 112 | static void free_iommu(unsigned long offset, int size) |
107 | { | 113 | { |
108 | unsigned long flags; | 114 | unsigned long flags; |
115 | |||
109 | spin_lock_irqsave(&iommu_bitmap_lock, flags); | 116 | spin_lock_irqsave(&iommu_bitmap_lock, flags); |
110 | __clear_bit_string(iommu_gart_bitmap, offset, size); | 117 | __clear_bit_string(iommu_gart_bitmap, offset, size); |
111 | spin_unlock_irqrestore(&iommu_bitmap_lock, flags); | 118 | spin_unlock_irqrestore(&iommu_bitmap_lock, flags); |
112 | } | 119 | } |
113 | 120 | ||
114 | /* | 121 | /* |
115 | * Use global flush state to avoid races with multiple flushers. | 122 | * Use global flush state to avoid races with multiple flushers. |
116 | */ | 123 | */ |
117 | static void flush_gart(void) | 124 | static void flush_gart(void) |
118 | { | 125 | { |
119 | unsigned long flags; | 126 | unsigned long flags; |
127 | |||
120 | spin_lock_irqsave(&iommu_bitmap_lock, flags); | 128 | spin_lock_irqsave(&iommu_bitmap_lock, flags); |
121 | if (need_flush) { | 129 | if (need_flush) { |
122 | k8_flush_garts(); | 130 | k8_flush_garts(); |
123 | need_flush = 0; | 131 | need_flush = 0; |
124 | } | 132 | } |
125 | spin_unlock_irqrestore(&iommu_bitmap_lock, flags); | 133 | spin_unlock_irqrestore(&iommu_bitmap_lock, flags); |
126 | } | 134 | } |
127 | 135 | ||
128 | #ifdef CONFIG_IOMMU_LEAK | 136 | #ifdef CONFIG_IOMMU_LEAK |
129 | 137 | ||
130 | #define SET_LEAK(x) if (iommu_leak_tab) \ | 138 | #define SET_LEAK(x) \ |
131 | iommu_leak_tab[x] = __builtin_return_address(0); | 139 | do { \ |
132 | #define CLEAR_LEAK(x) if (iommu_leak_tab) \ | 140 | if (iommu_leak_tab) \ |
133 | iommu_leak_tab[x] = NULL; | 141 | iommu_leak_tab[x] = __builtin_return_address(0);\ |
142 | } while (0) | ||
143 | |||
144 | #define CLEAR_LEAK(x) \ | ||
145 | do { \ | ||
146 | if (iommu_leak_tab) \ | ||
147 | iommu_leak_tab[x] = NULL; \ | ||
148 | } while (0) | ||
134 | 149 | ||
135 | /* Debugging aid for drivers that don't free their IOMMU tables */ | 150 | /* Debugging aid for drivers that don't free their IOMMU tables */ |
136 | static void **iommu_leak_tab; | 151 | static void **iommu_leak_tab; |
137 | static int leak_trace; | 152 | static int leak_trace; |
138 | static int iommu_leak_pages = 20; | 153 | static int iommu_leak_pages = 20; |
154 | |||
139 | static void dump_leak(void) | 155 | static void dump_leak(void) |
140 | { | 156 | { |
141 | int i; | 157 | int i; |
142 | static int dump; | 158 | static int dump; |
143 | if (dump || !iommu_leak_tab) return; | 159 | |
160 | if (dump || !iommu_leak_tab) | ||
161 | return; | ||
144 | dump = 1; | 162 | dump = 1; |
145 | show_stack(NULL,NULL); | 163 | show_stack(NULL, NULL); |
146 | /* Very crude. dump some from the end of the table too */ | 164 | |
147 | printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages); | 165 | /* Very crude. dump some from the end of the table too */ |
148 | for (i = 0; i < iommu_leak_pages; i+=2) { | 166 | printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n", |
149 | printk("%lu: ", iommu_pages-i); | 167 | iommu_leak_pages); |
168 | for (i = 0; i < iommu_leak_pages; i += 2) { | ||
169 | printk(KERN_DEBUG "%lu: ", iommu_pages-i); | ||
150 | printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]); | 170 | printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]); |
151 | printk("%c", (i+1)%2 == 0 ? '\n' : ' '); | 171 | printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' '); |
152 | } | 172 | } |
153 | printk("\n"); | 173 | printk(KERN_DEBUG "\n"); |
154 | } | 174 | } |
155 | #else | 175 | #else |
156 | #define SET_LEAK(x) | 176 | # define SET_LEAK(x) |
157 | #define CLEAR_LEAK(x) | 177 | # define CLEAR_LEAK(x) |
158 | #endif | 178 | #endif |
159 | 179 | ||
160 | static void iommu_full(struct device *dev, size_t size, int dir) | 180 | static void iommu_full(struct device *dev, size_t size, int dir) |
161 | { | 181 | { |
162 | /* | 182 | /* |
163 | * Ran out of IOMMU space for this operation. This is very bad. | 183 | * Ran out of IOMMU space for this operation. This is very bad. |
164 | * Unfortunately the drivers cannot handle this operation properly. | 184 | * Unfortunately the drivers cannot handle this operation properly. |
165 | * Return some non mapped prereserved space in the aperture and | 185 | * Return some non mapped prereserved space in the aperture and |
166 | * let the Northbridge deal with it. This will result in garbage | 186 | * let the Northbridge deal with it. This will result in garbage |
167 | * in the IO operation. When the size exceeds the prereserved space | 187 | * in the IO operation. When the size exceeds the prereserved space |
168 | * memory corruption will occur or random memory will be DMAed | 188 | * memory corruption will occur or random memory will be DMAed |
169 | * out. Hopefully no network devices use single mappings that big. | 189 | * out. Hopefully no network devices use single mappings that big. |
170 | */ | 190 | */ |
171 | 191 | ||
172 | printk(KERN_ERR | 192 | printk(KERN_ERR |
173 | "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n", | 193 | "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n", |
174 | size, dev->bus_id); | 194 | size, dev->bus_id); |
175 | 195 | ||
176 | if (size > PAGE_SIZE*EMERGENCY_PAGES) { | 196 | if (size > PAGE_SIZE*EMERGENCY_PAGES) { |
177 | if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL) | 197 | if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL) |
178 | panic("PCI-DMA: Memory would be corrupted\n"); | 198 | panic("PCI-DMA: Memory would be corrupted\n"); |
179 | if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL) | 199 | if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL) |
180 | panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n"); | 200 | panic(KERN_ERR |
181 | } | 201 | "PCI-DMA: Random memory would be DMAed\n"); |
182 | 202 | } | |
183 | #ifdef CONFIG_IOMMU_LEAK | 203 | #ifdef CONFIG_IOMMU_LEAK |
184 | dump_leak(); | 204 | dump_leak(); |
185 | #endif | 205 | #endif |
186 | } | 206 | } |
187 | 207 | ||
188 | static inline int need_iommu(struct device *dev, unsigned long addr, size_t size) | 208 | static inline int |
189 | { | 209 | need_iommu(struct device *dev, unsigned long addr, size_t size) |
210 | { | ||
190 | u64 mask = *dev->dma_mask; | 211 | u64 mask = *dev->dma_mask; |
191 | int high = addr + size > mask; | 212 | int high = addr + size > mask; |
192 | int mmu = high; | 213 | int mmu = high; |
193 | if (force_iommu) | 214 | |
194 | mmu = 1; | 215 | if (force_iommu) |
195 | return mmu; | 216 | mmu = 1; |
217 | |||
218 | return mmu; | ||
196 | } | 219 | } |
197 | 220 | ||
198 | static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size) | 221 | static inline int |
199 | { | 222 | nonforced_iommu(struct device *dev, unsigned long addr, size_t size) |
223 | { | ||
200 | u64 mask = *dev->dma_mask; | 224 | u64 mask = *dev->dma_mask; |
201 | int high = addr + size > mask; | 225 | int high = addr + size > mask; |
202 | int mmu = high; | 226 | int mmu = high; |
203 | return mmu; | 227 | |
228 | return mmu; | ||
204 | } | 229 | } |
205 | 230 | ||
206 | /* Map a single continuous physical area into the IOMMU. | 231 | /* Map a single continuous physical area into the IOMMU. |
@@ -208,13 +233,14 @@ static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t
208 | */ | 233 | */ |
209 | static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, | 234 | static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, |
210 | size_t size, int dir) | 235 | size_t size, int dir) |
211 | { | 236 | { |
212 | unsigned long npages = to_pages(phys_mem, size); | 237 | unsigned long npages = to_pages(phys_mem, size); |
213 | unsigned long iommu_page = alloc_iommu(npages); | 238 | unsigned long iommu_page = alloc_iommu(npages); |
214 | int i; | 239 | int i; |
240 | |||
215 | if (iommu_page == -1) { | 241 | if (iommu_page == -1) { |
216 | if (!nonforced_iommu(dev, phys_mem, size)) | 242 | if (!nonforced_iommu(dev, phys_mem, size)) |
217 | return phys_mem; | 243 | return phys_mem; |
218 | if (panic_on_overflow) | 244 | if (panic_on_overflow) |
219 | panic("dma_map_area overflow %lu bytes\n", size); | 245 | panic("dma_map_area overflow %lu bytes\n", size); |
220 | iommu_full(dev, size, dir); | 246 | iommu_full(dev, size, dir); |
@@ -229,35 +255,39 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
229 | return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK); | 255 | return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK); |
230 | } | 256 | } |
231 | 257 | ||
232 | static dma_addr_t gart_map_simple(struct device *dev, char *buf, | 258 | static dma_addr_t |
233 | size_t size, int dir) | 259 | gart_map_simple(struct device *dev, char *buf, size_t size, int dir) |
234 | { | 260 | { |
235 | dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir); | 261 | dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir); |
262 | |||
236 | flush_gart(); | 263 | flush_gart(); |
264 | |||
237 | return map; | 265 | return map; |
238 | } | 266 | } |
239 | 267 | ||
240 | /* Map a single area into the IOMMU */ | 268 | /* Map a single area into the IOMMU */ |
241 | static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir) | 269 | static dma_addr_t |
270 | gart_map_single(struct device *dev, void *addr, size_t size, int dir) | ||
242 | { | 271 | { |
243 | unsigned long phys_mem, bus; | 272 | unsigned long phys_mem, bus; |
244 | 273 | ||
245 | if (!dev) | 274 | if (!dev) |
246 | dev = &fallback_dev; | 275 | dev = &fallback_dev; |
247 | 276 | ||
248 | phys_mem = virt_to_phys(addr); | 277 | phys_mem = virt_to_phys(addr); |
249 | if (!need_iommu(dev, phys_mem, size)) | 278 | if (!need_iommu(dev, phys_mem, size)) |
250 | return phys_mem; | 279 | return phys_mem; |
251 | 280 | ||
252 | bus = gart_map_simple(dev, addr, size, dir); | 281 | bus = gart_map_simple(dev, addr, size, dir); |
253 | return bus; | 282 | |
283 | return bus; | ||
254 | } | 284 | } |
255 | 285 | ||
256 | /* | 286 | /* |
257 | * Free a DMA mapping. | 287 | * Free a DMA mapping. |
258 | */ | 288 | */ |
259 | static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, | 289 | static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, |
260 | size_t size, int direction) | 290 | size_t size, int direction) |
261 | { | 291 | { |
262 | unsigned long iommu_page; | 292 | unsigned long iommu_page; |
263 | int npages; | 293 | int npages; |
@@ -266,6 +296,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
266 | if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE || | 296 | if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE || |
267 | dma_addr >= iommu_bus_base + iommu_size) | 297 | dma_addr >= iommu_bus_base + iommu_size) |
268 | return; | 298 | return; |
299 | |||
269 | iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; | 300 | iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; |
270 | npages = to_pages(dma_addr, size); | 301 | npages = to_pages(dma_addr, size); |
271 | for (i = 0; i < npages; i++) { | 302 | for (i = 0; i < npages; i++) { |
@@ -278,7 +309,8 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
278 | /* | 309 | /* |
279 | * Wrapper for pci_unmap_single working with scatterlists. | 310 | * Wrapper for pci_unmap_single working with scatterlists. |
280 | */ | 311 | */ |
281 | static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | 312 | static void |
313 | gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | ||
282 | { | 314 | { |
283 | struct scatterlist *s; | 315 | struct scatterlist *s; |
284 | int i; | 316 | int i; |
@@ -303,12 +335,13 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
303 | 335 | ||
304 | for_each_sg(sg, s, nents, i) { | 336 | for_each_sg(sg, s, nents, i) { |
305 | unsigned long addr = sg_phys(s); | 337 | unsigned long addr = sg_phys(s); |
306 | if (nonforced_iommu(dev, addr, s->length)) { | 338 | |
339 | if (nonforced_iommu(dev, addr, s->length)) { | ||
307 | addr = dma_map_area(dev, addr, s->length, dir); | 340 | addr = dma_map_area(dev, addr, s->length, dir); |
308 | if (addr == bad_dma_address) { | 341 | if (addr == bad_dma_address) { |
309 | if (i > 0) | 342 | if (i > 0) |
310 | gart_unmap_sg(dev, sg, i, dir); | 343 | gart_unmap_sg(dev, sg, i, dir); |
311 | nents = 0; | 344 | nents = 0; |
312 | sg[0].dma_length = 0; | 345 | sg[0].dma_length = 0; |
313 | break; | 346 | break; |
314 | } | 347 | } |
@@ -317,15 +350,16 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
317 | s->dma_length = s->length; | 350 | s->dma_length = s->length; |
318 | } | 351 | } |
319 | flush_gart(); | 352 | flush_gart(); |
353 | |||
320 | return nents; | 354 | return nents; |
321 | } | 355 | } |
322 | 356 | ||
323 | /* Map multiple scatterlist entries continuous into the first. */ | 357 | /* Map multiple scatterlist entries continuous into the first. */ |
324 | static int __dma_map_cont(struct scatterlist *start, int nelems, | 358 | static int __dma_map_cont(struct scatterlist *start, int nelems, |
325 | struct scatterlist *sout, unsigned long pages) | 359 | struct scatterlist *sout, unsigned long pages) |
326 | { | 360 | { |
327 | unsigned long iommu_start = alloc_iommu(pages); | 361 | unsigned long iommu_start = alloc_iommu(pages); |
328 | unsigned long iommu_page = iommu_start; | 362 | unsigned long iommu_page = iommu_start; |
329 | struct scatterlist *s; | 363 | struct scatterlist *s; |
330 | int i; | 364 | int i; |
331 | 365 | ||
@@ -335,32 +369,33 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
335 | for_each_sg(start, s, nelems, i) { | 369 | for_each_sg(start, s, nelems, i) { |
336 | unsigned long pages, addr; | 370 | unsigned long pages, addr; |
337 | unsigned long phys_addr = s->dma_address; | 371 | unsigned long phys_addr = s->dma_address; |
338 | 372 | ||
339 | BUG_ON(s != start && s->offset); | 373 | BUG_ON(s != start && s->offset); |
340 | if (s == start) { | 374 | if (s == start) { |
341 | sout->dma_address = iommu_bus_base; | 375 | sout->dma_address = iommu_bus_base; |
342 | sout->dma_address += iommu_page*PAGE_SIZE + s->offset; | 376 | sout->dma_address += iommu_page*PAGE_SIZE + s->offset; |
343 | sout->dma_length = s->length; | 377 | sout->dma_length = s->length; |
344 | } else { | 378 | } else { |
345 | sout->dma_length += s->length; | 379 | sout->dma_length += s->length; |
346 | } | 380 | } |
347 | 381 | ||
348 | addr = phys_addr; | 382 | addr = phys_addr; |
349 | pages = to_pages(s->offset, s->length); | 383 | pages = to_pages(s->offset, s->length); |
350 | while (pages--) { | 384 | while (pages--) { |
351 | iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); | 385 | iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); |
352 | SET_LEAK(iommu_page); | 386 | SET_LEAK(iommu_page); |
353 | addr += PAGE_SIZE; | 387 | addr += PAGE_SIZE; |
354 | iommu_page++; | 388 | iommu_page++; |
355 | } | 389 | } |
356 | } | 390 | } |
357 | BUG_ON(iommu_page - iommu_start != pages); | 391 | BUG_ON(iommu_page - iommu_start != pages); |
392 | |||
358 | return 0; | 393 | return 0; |
359 | } | 394 | } |
360 | 395 | ||
361 | static inline int dma_map_cont(struct scatterlist *start, int nelems, | 396 | static inline int |
362 | struct scatterlist *sout, | 397 | dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout, |
363 | unsigned long pages, int need) | 398 | unsigned long pages, int need) |
364 | { | 399 | { |
365 | if (!need) { | 400 | if (!need) { |
366 | BUG_ON(nelems != 1); | 401 | BUG_ON(nelems != 1); |
@@ -370,22 +405,19 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems,
370 | } | 405 | } |
371 | return __dma_map_cont(start, nelems, sout, pages); | 406 | return __dma_map_cont(start, nelems, sout, pages); |
372 | } | 407 | } |
373 | 408 | ||
374 | /* | 409 | /* |
375 | * DMA map all entries in a scatterlist. | 410 | * DMA map all entries in a scatterlist. |
376 | * Merge chunks that have page aligned sizes into a continuous mapping. | 411 | * Merge chunks that have page aligned sizes into a continuous mapping. |
377 | */ | 412 | */ |
378 | static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 413 | static int |
379 | int dir) | 414 | gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) |
380 | { | 415 | { |
381 | int i; | ||
382 | int out; | ||
383 | int start; | ||
384 | unsigned long pages = 0; | ||
385 | int need = 0, nextneed; | ||
386 | struct scatterlist *s, *ps, *start_sg, *sgmap; | 416 | struct scatterlist *s, *ps, *start_sg, *sgmap; |
417 | int need = 0, nextneed, i, out, start; | ||
418 | unsigned long pages = 0; | ||
387 | 419 | ||
388 | if (nents == 0) | 420 | if (nents == 0) |
389 | return 0; | 421 | return 0; |
390 | 422 | ||
391 | if (!dev) | 423 | if (!dev) |
@@ -397,15 +429,19 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
397 | ps = NULL; /* shut up gcc */ | 429 | ps = NULL; /* shut up gcc */ |
398 | for_each_sg(sg, s, nents, i) { | 430 | for_each_sg(sg, s, nents, i) { |
399 | dma_addr_t addr = sg_phys(s); | 431 | dma_addr_t addr = sg_phys(s); |
432 | |||
400 | s->dma_address = addr; | 433 | s->dma_address = addr; |
401 | BUG_ON(s->length == 0); | 434 | BUG_ON(s->length == 0); |
402 | 435 | ||
403 | nextneed = need_iommu(dev, addr, s->length); | 436 | nextneed = need_iommu(dev, addr, s->length); |
404 | 437 | ||
405 | /* Handle the previous not yet processed entries */ | 438 | /* Handle the previous not yet processed entries */ |
406 | if (i > start) { | 439 | if (i > start) { |
407 | /* Can only merge when the last chunk ends on a page | 440 | /* |
408 | boundary and the new one doesn't have an offset. */ | 441 | * Can only merge when the last chunk ends on a |
442 | * page boundary and the new one doesn't have an | ||
443 | * offset. | ||
444 | */ | ||
409 | if (!iommu_merge || !nextneed || !need || s->offset || | 445 | if (!iommu_merge || !nextneed || !need || s->offset || |
410 | (ps->offset + ps->length) % PAGE_SIZE) { | 446 | (ps->offset + ps->length) % PAGE_SIZE) { |
411 | if (dma_map_cont(start_sg, i - start, sgmap, | 447 | if (dma_map_cont(start_sg, i - start, sgmap, |
@@ -436,6 +472,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
436 | error: | 472 | error: |
437 | flush_gart(); | 473 | flush_gart(); |
438 | gart_unmap_sg(dev, sg, out, dir); | 474 | gart_unmap_sg(dev, sg, out, dir); |
475 | |||
439 | /* When it was forced or merged try again in a dumb way */ | 476 | /* When it was forced or merged try again in a dumb way */ |
440 | if (force_iommu || iommu_merge) { | 477 | if (force_iommu || iommu_merge) { |
441 | out = dma_map_sg_nonforce(dev, sg, nents, dir); | 478 | out = dma_map_sg_nonforce(dev, sg, nents, dir); |
@@ -444,64 +481,68 @@ error:
444 | } | 481 | } |
445 | if (panic_on_overflow) | 482 | if (panic_on_overflow) |
446 | panic("dma_map_sg: overflow on %lu pages\n", pages); | 483 | panic("dma_map_sg: overflow on %lu pages\n", pages); |
484 | |||
447 | iommu_full(dev, pages << PAGE_SHIFT, dir); | 485 | iommu_full(dev, pages << PAGE_SHIFT, dir); |
448 | for_each_sg(sg, s, nents, i) | 486 | for_each_sg(sg, s, nents, i) |
449 | s->dma_address = bad_dma_address; | 487 | s->dma_address = bad_dma_address; |
450 | return 0; | 488 | return 0; |
451 | } | 489 | } |
452 | 490 | ||
453 | static int no_agp; | 491 | static int no_agp; |
454 | 492 | ||
455 | static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) | 493 | static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) |
456 | { | 494 | { |
457 | unsigned long a; | 495 | unsigned long a; |
458 | if (!iommu_size) { | 496 | |
459 | iommu_size = aper_size; | 497 | if (!iommu_size) { |
460 | if (!no_agp) | 498 | iommu_size = aper_size; |
461 | iommu_size /= 2; | 499 | if (!no_agp) |
462 | } | 500 | iommu_size /= 2; |
463 | 501 | } | |
464 | a = aper + iommu_size; | 502 | |
503 | a = aper + iommu_size; | ||
465 | iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a; | 504 | iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a; |
466 | 505 | ||
467 | if (iommu_size < 64*1024*1024) | 506 | if (iommu_size < 64*1024*1024) { |
468 | printk(KERN_WARNING | 507 | printk(KERN_WARNING |
469 | "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",iommu_size>>20); | 508 | "PCI-DMA: Warning: Small IOMMU %luMB." |
470 | 509 | " Consider increasing the AGP aperture in BIOS\n", | |
510 | iommu_size >> 20); | ||
511 | } | ||
512 | |||
471 | return iommu_size; | 513 | return iommu_size; |
472 | } | 514 | } |
473 | 515 | ||
474 | static __init unsigned read_aperture(struct pci_dev *dev, u32 *size) | 516 | static __init unsigned read_aperture(struct pci_dev *dev, u32 *size) |
475 | { | 517 | { |
476 | unsigned aper_size = 0, aper_base_32; | 518 | unsigned aper_size = 0, aper_base_32, aper_order; |
477 | u64 aper_base; | 519 | u64 aper_base; |
478 | unsigned aper_order; | ||
479 | 520 | ||
480 | pci_read_config_dword(dev, 0x94, &aper_base_32); | 521 | pci_read_config_dword(dev, 0x94, &aper_base_32); |
481 | pci_read_config_dword(dev, 0x90, &aper_order); | 522 | pci_read_config_dword(dev, 0x90, &aper_order); |
482 | aper_order = (aper_order >> 1) & 7; | 523 | aper_order = (aper_order >> 1) & 7; |
483 | 524 | ||
484 | aper_base = aper_base_32 & 0x7fff; | 525 | aper_base = aper_base_32 & 0x7fff; |
485 | aper_base <<= 25; | 526 | aper_base <<= 25; |
486 | 527 | ||
487 | aper_size = (32 * 1024 * 1024) << aper_order; | 528 | aper_size = (32 * 1024 * 1024) << aper_order; |
488 | if (aper_base + aper_size > 0x100000000UL || !aper_size) | 529 | if (aper_base + aper_size > 0x100000000UL || !aper_size) |
489 | aper_base = 0; | 530 | aper_base = 0; |
490 | 531 | ||
491 | *size = aper_size; | 532 | *size = aper_size; |
492 | return aper_base; | 533 | return aper_base; |
493 | } | 534 | } |
494 | 535 | ||
495 | /* | 536 | /* |
496 | * Private Northbridge GATT initialization in case we cannot use the | 537 | * Private Northbridge GATT initialization in case we cannot use the |
497 | * AGP driver for some reason. | 538 | * AGP driver for some reason. |
498 | */ | 539 | */ |
499 | static __init int init_k8_gatt(struct agp_kern_info *info) | 540 | static __init int init_k8_gatt(struct agp_kern_info *info) |
500 | { | 541 | { |
542 | unsigned aper_size, gatt_size, new_aper_size; | ||
543 | unsigned aper_base, new_aper_base; | ||
501 | struct pci_dev *dev; | 544 | struct pci_dev *dev; |
502 | void *gatt; | 545 | void *gatt; |
503 | unsigned aper_base, new_aper_base; | ||
504 | unsigned aper_size, gatt_size, new_aper_size; | ||
505 | int i; | 546 | int i; |
506 | 547 | ||
507 | printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); | 548 | printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); |
@@ -509,75 +550,77 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
509 | dev = NULL; | 550 | dev = NULL; |
510 | for (i = 0; i < num_k8_northbridges; i++) { | 551 | for (i = 0; i < num_k8_northbridges; i++) { |
511 | dev = k8_northbridges[i]; | 552 | dev = k8_northbridges[i]; |
512 | new_aper_base = read_aperture(dev, &new_aper_size); | 553 | new_aper_base = read_aperture(dev, &new_aper_size); |
513 | if (!new_aper_base) | 554 | if (!new_aper_base) |
514 | goto nommu; | 555 | goto nommu; |
515 | 556 | ||
516 | if (!aper_base) { | 557 | if (!aper_base) { |
517 | aper_size = new_aper_size; | 558 | aper_size = new_aper_size; |
518 | aper_base = new_aper_base; | 559 | aper_base = new_aper_base; |
519 | } | 560 | } |
520 | if (aper_size != new_aper_size || aper_base != new_aper_base) | 561 | if (aper_size != new_aper_size || aper_base != new_aper_base) |
521 | goto nommu; | 562 | goto nommu; |
522 | } | 563 | } |
523 | if (!aper_base) | 564 | if (!aper_base) |
524 | goto nommu; | 565 | goto nommu; |
525 | info->aper_base = aper_base; | 566 | info->aper_base = aper_base; |
526 | info->aper_size = aper_size>>20; | 567 | info->aper_size = aper_size >> 20; |
527 | 568 | ||
528 | gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32); | 569 | gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32); |
529 | gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size)); | 570 | gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size)); |
530 | if (!gatt) | 571 | if (!gatt) |
531 | panic("Cannot allocate GATT table"); | 572 | panic("Cannot allocate GATT table"); |
532 | if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE)) | 573 | if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, |
574 | PAGE_KERNEL_NOCACHE)) | ||
533 | panic("Could not set GART PTEs to uncacheable pages"); | 575 | panic("Could not set GART PTEs to uncacheable pages"); |
534 | global_flush_tlb(); | 576 | global_flush_tlb(); |
535 | 577 | ||
536 | memset(gatt, 0, gatt_size); | 578 | memset(gatt, 0, gatt_size); |
537 | agp_gatt_table = gatt; | 579 | agp_gatt_table = gatt; |
538 | 580 | ||
539 | for (i = 0; i < num_k8_northbridges; i++) { | 581 | for (i = 0; i < num_k8_northbridges; i++) { |
540 | u32 ctl; | 582 | u32 gatt_reg; |
541 | u32 gatt_reg; | 583 | u32 ctl; |
542 | 584 | ||
543 | dev = k8_northbridges[i]; | 585 | dev = k8_northbridges[i]; |
544 | gatt_reg = __pa(gatt) >> 12; | 586 | gatt_reg = __pa(gatt) >> 12; |
545 | gatt_reg <<= 4; | 587 | gatt_reg <<= 4; |
546 | pci_write_config_dword(dev, 0x98, gatt_reg); | 588 | pci_write_config_dword(dev, 0x98, gatt_reg); |
547 | pci_read_config_dword(dev, 0x90, &ctl); | 589 | pci_read_config_dword(dev, 0x90, &ctl); |
548 | 590 | ||
549 | ctl |= 1; | 591 | ctl |= 1; |
550 | ctl &= ~((1<<4) | (1<<5)); | 592 | ctl &= ~((1<<4) | (1<<5)); |
551 | 593 | ||
552 | pci_write_config_dword(dev, 0x90, ctl); | 594 | pci_write_config_dword(dev, 0x90, ctl); |
553 | } | 595 | } |
554 | flush_gart(); | 596 | flush_gart(); |
555 | 597 | ||
556 | printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10); | 598 | printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", |
599 | aper_base, aper_size>>10); | ||
557 | return 0; | 600 | return 0; |
558 | 601 | ||
559 | nommu: | 602 | nommu: |
560 | /* Should not happen anymore */ | 603 | /* Should not happen anymore */ |
561 | printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n" | 604 | printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n" |
562 | KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n"); | 605 | KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n"); |
563 | return -1; | 606 | return -1; |
564 | } | 607 | } |
565 | 608 | ||
566 | extern int agp_amd64_init(void); | 609 | extern int agp_amd64_init(void); |
567 | 610 | ||
568 | static const struct dma_mapping_ops gart_dma_ops = { | 611 | static const struct dma_mapping_ops gart_dma_ops = { |
569 | .mapping_error = NULL, | 612 | .mapping_error = NULL, |
570 | .map_single = gart_map_single, | 613 | .map_single = gart_map_single, |
571 | .map_simple = gart_map_simple, | 614 | .map_simple = gart_map_simple, |
572 | .unmap_single = gart_unmap_single, | 615 | .unmap_single = gart_unmap_single, |
573 | .sync_single_for_cpu = NULL, | 616 | .sync_single_for_cpu = NULL, |
574 | .sync_single_for_device = NULL, | 617 | .sync_single_for_device = NULL, |
575 | .sync_single_range_for_cpu = NULL, | 618 | .sync_single_range_for_cpu = NULL, |
576 | .sync_single_range_for_device = NULL, | 619 | .sync_single_range_for_device = NULL, |
577 | .sync_sg_for_cpu = NULL, | 620 | .sync_sg_for_cpu = NULL, |
578 | .sync_sg_for_device = NULL, | 621 | .sync_sg_for_device = NULL, |
579 | .map_sg = gart_map_sg, | 622 | .map_sg = gart_map_sg, |
580 | .unmap_sg = gart_unmap_sg, | 623 | .unmap_sg = gart_unmap_sg, |
581 | }; | 624 | }; |
582 | 625 | ||
583 | void gart_iommu_shutdown(void) | 626 | void gart_iommu_shutdown(void) |
@@ -588,23 +631,23 @@ void gart_iommu_shutdown(void)
588 | if (no_agp && (dma_ops != &gart_dma_ops)) | 631 | if (no_agp && (dma_ops != &gart_dma_ops)) |
589 | return; | 632 | return; |
590 | 633 | ||
591 | for (i = 0; i < num_k8_northbridges; i++) { | 634 | for (i = 0; i < num_k8_northbridges; i++) { |
592 | u32 ctl; | 635 | u32 ctl; |
593 | 636 | ||
594 | dev = k8_northbridges[i]; | 637 | dev = k8_northbridges[i]; |
595 | pci_read_config_dword(dev, 0x90, &ctl); | 638 | pci_read_config_dword(dev, 0x90, &ctl); |
596 | 639 | ||
597 | ctl &= ~1; | 640 | ctl &= ~1; |
598 | 641 | ||
599 | pci_write_config_dword(dev, 0x90, ctl); | 642 | pci_write_config_dword(dev, 0x90, ctl); |
600 | } | 643 | } |
601 | } | 644 | } |
602 | 645 | ||
603 | void __init gart_iommu_init(void) | 646 | void __init gart_iommu_init(void) |
604 | { | 647 | { |
605 | struct agp_kern_info info; | 648 | struct agp_kern_info info; |
606 | unsigned long aper_size; | ||
607 | unsigned long iommu_start; | 649 | unsigned long iommu_start; |
650 | unsigned long aper_size; | ||
608 | unsigned long scratch; | 651 | unsigned long scratch; |
609 | long i; | 652 | long i; |
610 | 653 | ||
@@ -614,14 +657,14 @@ void __init gart_iommu_init(void)
614 | } | 657 | } |
615 | 658 | ||
616 | #ifndef CONFIG_AGP_AMD64 | 659 | #ifndef CONFIG_AGP_AMD64 |
617 | no_agp = 1; | 660 | no_agp = 1; |
618 | #else | 661 | #else |
619 | /* Makefile puts PCI initialization via subsys_initcall first. */ | 662 | /* Makefile puts PCI initialization via subsys_initcall first. */ |
620 | /* Add other K8 AGP bridge drivers here */ | 663 | /* Add other K8 AGP bridge drivers here */ |
621 | no_agp = no_agp || | 664 | no_agp = no_agp || |
622 | (agp_amd64_init() < 0) || | 665 | (agp_amd64_init() < 0) || |
623 | (agp_copy_info(agp_bridge, &info) < 0); | 666 | (agp_copy_info(agp_bridge, &info) < 0); |
624 | #endif | 667 | #endif |
625 | 668 | ||
626 | if (swiotlb) | 669 | if (swiotlb) |
627 | return; | 670 | return; |
@@ -643,77 +686,78 @@ void __init gart_iommu_init(void)
643 | } | 686 | } |
644 | 687 | ||
645 | printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); | 688 | printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); |
646 | aper_size = info.aper_size * 1024 * 1024; | 689 | aper_size = info.aper_size * 1024 * 1024; |
647 | iommu_size = check_iommu_size(info.aper_base, aper_size); | 690 | iommu_size = check_iommu_size(info.aper_base, aper_size); |
648 | iommu_pages = iommu_size >> PAGE_SHIFT; | 691 | iommu_pages = iommu_size >> PAGE_SHIFT; |
649 | 692 | ||
650 | iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL, | 693 | iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL, |
651 | get_order(iommu_pages/8)); | 694 | get_order(iommu_pages/8)); |
652 | if (!iommu_gart_bitmap) | 695 | if (!iommu_gart_bitmap) |
653 | panic("Cannot allocate iommu bitmap\n"); | 696 | panic("Cannot allocate iommu bitmap\n"); |
654 | memset(iommu_gart_bitmap, 0, iommu_pages/8); | 697 | memset(iommu_gart_bitmap, 0, iommu_pages/8); |
655 | 698 | ||
656 | #ifdef CONFIG_IOMMU_LEAK | 699 | #ifdef CONFIG_IOMMU_LEAK |
657 | if (leak_trace) { | 700 | if (leak_trace) { |
658 | iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL, | 701 | iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL, |
659 | get_order(iommu_pages*sizeof(void *))); | 702 | get_order(iommu_pages*sizeof(void *))); |
660 | if (iommu_leak_tab) | 703 | if (iommu_leak_tab) |
661 | memset(iommu_leak_tab, 0, iommu_pages * 8); | 704 | memset(iommu_leak_tab, 0, iommu_pages * 8); |
662 | else | 705 | else |
663 | printk("PCI-DMA: Cannot allocate leak trace area\n"); | 706 | printk(KERN_DEBUG |
664 | } | 707 | "PCI-DMA: Cannot allocate leak trace area\n"); |
708 | } | ||
665 | #endif | 709 | #endif |
666 | 710 | ||
667 | /* | 711 | /* |
668 | * Out of IOMMU space handling. | 712 | * Out of IOMMU space handling. |
669 | * Reserve some invalid pages at the beginning of the GART. | 713 | * Reserve some invalid pages at the beginning of the GART. |
670 | */ | 714 | */ |
671 | set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES); | 715 | set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES); |
672 | 716 | ||
673 | agp_memory_reserved = iommu_size; | 717 | agp_memory_reserved = iommu_size; |
674 | printk(KERN_INFO | 718 | printk(KERN_INFO |
675 | "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", | 719 | "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", |
676 | iommu_size>>20); | 720 | iommu_size >> 20); |
677 | 721 | ||
678 | iommu_start = aper_size - iommu_size; | 722 | iommu_start = aper_size - iommu_size; |
679 | iommu_bus_base = info.aper_base + iommu_start; | 723 | iommu_bus_base = info.aper_base + iommu_start; |
680 | bad_dma_address = iommu_bus_base; | 724 | bad_dma_address = iommu_bus_base; |
681 | iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); | 725 | iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); |
682 | 726 | ||
683 | /* | 727 | /* |
684 | * Unmap the IOMMU part of the GART. The alias of the page is | 728 | * Unmap the IOMMU part of the GART. The alias of the page is |
685 | * always mapped with cache enabled and there is no full cache | 729 | * always mapped with cache enabled and there is no full cache |
686 | * coherency across the GART remapping. The unmapping avoids | 730 | * coherency across the GART remapping. The unmapping avoids |
687 | * automatic prefetches from the CPU allocating cache lines in | 731 | * automatic prefetches from the CPU allocating cache lines in |
688 | * there. All CPU accesses are done via the direct mapping to | 732 | * there. All CPU accesses are done via the direct mapping to |
689 | * the backing memory. The GART address is only used by PCI | 733 | * the backing memory. The GART address is only used by PCI |
690 | * devices. | 734 | * devices. |
691 | */ | 735 | */ |
692 | clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size); | 736 | clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size); |
693 | 737 | ||
694 | /* | 738 | /* |
695 | * Try to workaround a bug (thanks to BenH) | 739 | * Try to workaround a bug (thanks to BenH) |
696 | * Set unmapped entries to a scratch page instead of 0. | 740 | * Set unmapped entries to a scratch page instead of 0. |
697 | * Any prefetches that hit unmapped entries won't get an bus abort | 741 | * Any prefetches that hit unmapped entries won't get an bus abort |
698 | * then. | 742 | * then. |
699 | */ | 743 | */ |
700 | scratch = get_zeroed_page(GFP_KERNEL); | 744 | scratch = get_zeroed_page(GFP_KERNEL); |
701 | if (!scratch) | 745 | if (!scratch) |
702 | panic("Cannot allocate iommu scratch page"); | 746 | panic("Cannot allocate iommu scratch page"); |
703 | gart_unmapped_entry = GPTE_ENCODE(__pa(scratch)); | 747 | gart_unmapped_entry = GPTE_ENCODE(__pa(scratch)); |
704 | for (i = EMERGENCY_PAGES; i < iommu_pages; i++) | 748 | for (i = EMERGENCY_PAGES; i < iommu_pages; i++) |
705 | iommu_gatt_base[i] = gart_unmapped_entry; | 749 | iommu_gatt_base[i] = gart_unmapped_entry; |
706 | 750 | ||
707 | flush_gart(); | 751 | flush_gart(); |
708 | dma_ops = &gart_dma_ops; | 752 | dma_ops = &gart_dma_ops; |
709 | } | 753 | } |
710 | 754 | ||
711 | void __init gart_parse_options(char *p) | 755 | void __init gart_parse_options(char *p) |
712 | { | 756 | { |
713 | int arg; | 757 | int arg; |
714 | 758 | ||
715 | #ifdef CONFIG_IOMMU_LEAK | 759 | #ifdef CONFIG_IOMMU_LEAK |
716 | if (!strncmp(p,"leak",4)) { | 760 | if (!strncmp(p, "leak", 4)) { |
717 | leak_trace = 1; | 761 | leak_trace = 1; |
718 | p += 4; | 762 | p += 4; |
719 | if (*p == '=') ++p; | 763 | if (*p == '=') ++p; |
@@ -723,18 +767,18 @@ void __init gart_parse_options(char *p)
723 | #endif | 767 | #endif |
724 | if (isdigit(*p) && get_option(&p, &arg)) | 768 | if (isdigit(*p) && get_option(&p, &arg)) |
725 | iommu_size = arg; | 769 | iommu_size = arg; |
726 | if (!strncmp(p, "fullflush",8)) | 770 | if (!strncmp(p, "fullflush", 8)) |
727 | iommu_fullflush = 1; | 771 | iommu_fullflush = 1; |
728 | if (!strncmp(p, "nofullflush",11)) | 772 | if (!strncmp(p, "nofullflush", 11)) |
729 | iommu_fullflush = 0; | 773 | iommu_fullflush = 0; |
730 | if (!strncmp(p,"noagp",5)) | 774 | if (!strncmp(p, "noagp", 5)) |
731 | no_agp = 1; | 775 | no_agp = 1; |
732 | if (!strncmp(p, "noaperture",10)) | 776 | if (!strncmp(p, "noaperture", 10)) |
733 | fix_aperture = 0; | 777 | fix_aperture = 0; |
734 | /* duplicated from pci-dma.c */ | 778 | /* duplicated from pci-dma.c */ |
735 | if (!strncmp(p,"force",5)) | 779 | if (!strncmp(p, "force", 5)) |
736 | gart_iommu_aperture_allowed = 1; | 780 | gart_iommu_aperture_allowed = 1; |
737 | if (!strncmp(p,"allowed",7)) | 781 | if (!strncmp(p, "allowed", 7)) |
738 | gart_iommu_aperture_allowed = 1; | 782 | gart_iommu_aperture_allowed = 1; |
739 | if (!strncmp(p, "memaper", 7)) { | 783 | if (!strncmp(p, "memaper", 7)) { |
740 | fallback_aper_force = 1; | 784 | fallback_aper_force = 1; |