Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/Kconfig | 15 | ||||
-rw-r--r-- | arch/x86/kernel/Makefile | 1 | ||||
-rw-r--r-- | arch/x86/kernel/amd_iommu.c | 962 | ||||
-rw-r--r-- | arch/x86/kernel/amd_iommu_init.c | 875 | ||||
-rw-r--r-- | arch/x86/kernel/entry_64.S | 8 | ||||
-rw-r--r-- | arch/x86/kernel/i387.c | 4 | ||||
-rw-r--r-- | arch/x86/kernel/pci-dma.c | 5 | ||||
-rw-r--r-- | arch/x86/kernel/ptrace.c | 4 | ||||
-rw-r--r-- | arch/x86/kernel/time_64.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/tsc_32.c | 6 | ||||
-rw-r--r-- | arch/x86/lib/delay_32.c | 25 | ||||
-rw-r--r-- | arch/x86/mm/fault.c | 16 | ||||
-rw-r--r-- | arch/x86/oprofile/nmi_int.c | 3 |
13 files changed, 1896 insertions, 30 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5d2858119930..7dc46ba26fbf 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -533,6 +533,21 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
533 | Calgary anyway, pass 'iommu=calgary' on the kernel command line. | 533 | Calgary anyway, pass 'iommu=calgary' on the kernel command line. |
534 | If unsure, say Y. | 534 | If unsure, say Y. |
535 | 535 | ||
536 | config AMD_IOMMU | ||
537 | bool "AMD IOMMU support" | ||
538 | select SWIOTLB | ||
539 | depends on X86_64 && PCI && ACPI | ||
540 | help | ||
541 | With this option you can enable support for AMD IOMMU hardware in | ||
542 | your system. An IOMMU is a hardware component which provides | ||
543 | remapping of DMA memory accesses from devices. With an AMD IOMMU you | ||
544 | can isolate the DMA memory of different devices and protect the | ||
545 | system from misbehaving device drivers or hardware. | ||
546 | |||
547 | You can find out whether your system has an AMD IOMMU by looking for | ||
548 | a BIOS option to enable it, or by checking whether your system | ||
549 | provides an IVRS ACPI table. | ||
550 | |||
536 | # need this always selected by IOMMU for the VIA workaround | 551 | # need this always selected by IOMMU for the VIA workaround |
537 | config SWIOTLB | 552 | config SWIOTLB |
538 | bool | 553 | bool |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 8a42b797cd6b..53557cbe4bfa 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -99,6 +99,7 @@ ifeq ($(CONFIG_X86_64),y)
99 | 99 | ||
100 | obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o | 100 | obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o |
101 | obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o | 101 | obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o |
102 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o | ||
102 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o | 103 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o |
103 | 104 | ||
104 | obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o | 105 | obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
new file mode 100644
index 000000000000..f2766d84c7a0
--- /dev/null
+++ b/arch/x86/kernel/amd_iommu.c
@@ -0,0 +1,962 @@
1 | /* | ||
2 | * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. | ||
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | ||
4 | * Leo Duran <leo.duran@amd.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/pci.h> | ||
21 | #include <linux/gfp.h> | ||
22 | #include <linux/bitops.h> | ||
23 | #include <linux/scatterlist.h> | ||
24 | #include <linux/iommu-helper.h> | ||
25 | #include <asm/proto.h> | ||
26 | #include <asm/gart.h> | ||
27 | #include <asm/amd_iommu_types.h> | ||
28 | #include <asm/amd_iommu.h> | ||
29 | |||
30 | #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) | ||
31 | |||
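/*
 * Number of pages needed to map 'size' bytes starting at 'addr', taking
 * the offset into the first page into account (e.g. 0x1000 bytes starting
 * at an offset of 0x800 into a 4k page span two pages).
 */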
32 | #define to_pages(addr, size) \ | ||
33 | (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) | ||
34 | |||
35 | static DEFINE_RWLOCK(amd_iommu_devtable_lock); | ||
36 | |||
37 | struct command { | ||
38 | u32 data[4]; | ||
39 | }; | ||
40 | |||
41 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | ||
42 | struct unity_map_entry *e); | ||
43 | |||
44 | static int iommu_has_npcache(struct amd_iommu *iommu) | ||
45 | { | ||
46 | return iommu->cap & IOMMU_CAP_NPCACHE; | ||
47 | } | ||
48 | |||
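/*
 * Write a command into the ring buffer the IOMMU fetches its commands
 * from: place it at the current tail, advance the tail and return
 * -ENOMEM if the new tail would collide with the head (ring full).
 * The caller must hold iommu->lock.
 */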
49 | static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd) | ||
50 | { | ||
51 | u32 tail, head; | ||
52 | u8 *target; | ||
53 | |||
54 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
55 | target = (iommu->cmd_buf + tail); | ||
56 | memcpy_toio(target, cmd, sizeof(*cmd)); | ||
57 | tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; | ||
58 | head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | ||
59 | if (tail == head) | ||
60 | return -ENOMEM; | ||
61 | writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | static int iommu_queue_command(struct amd_iommu *iommu, struct command *cmd) | ||
67 | { | ||
68 | unsigned long flags; | ||
69 | int ret; | ||
70 | |||
71 | spin_lock_irqsave(&iommu->lock, flags); | ||
72 | ret = __iommu_queue_command(iommu, cmd); | ||
73 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
74 | |||
75 | return ret; | ||
76 | } | ||
77 | |||
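/*
 * Queue a COMPLETION_WAIT command that makes the IOMMU write the value 1
 * to 'ready' once all previously queued commands have finished, then spin
 * until that write becomes visible. Used to synchronize with the hardware.
 */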
78 | static int iommu_completion_wait(struct amd_iommu *iommu) | ||
79 | { | ||
80 | int ret; | ||
81 | struct command cmd; | ||
82 | volatile u64 ready = 0; | ||
83 | unsigned long ready_phys = virt_to_phys(&ready); | ||
84 | |||
85 | memset(&cmd, 0, sizeof(cmd)); | ||
86 | cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK; | ||
87 | cmd.data[1] = HIGH_U32(ready_phys); | ||
88 | cmd.data[2] = 1; /* value written to 'ready' */ | ||
89 | CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); | ||
90 | |||
91 | iommu->need_sync = 0; | ||
92 | |||
93 | ret = iommu_queue_command(iommu, &cmd); | ||
94 | |||
95 | if (ret) | ||
96 | return ret; | ||
97 | |||
98 | while (!ready) | ||
99 | cpu_relax(); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) | ||
105 | { | ||
106 | struct command cmd; | ||
107 | |||
108 | BUG_ON(iommu == NULL); | ||
109 | |||
110 | memset(&cmd, 0, sizeof(cmd)); | ||
111 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); | ||
112 | cmd.data[0] = devid; | ||
113 | |||
114 | iommu->need_sync = 1; | ||
115 | |||
116 | return iommu_queue_command(iommu, &cmd); | ||
117 | } | ||
118 | |||
119 | static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, | ||
120 | u64 address, u16 domid, int pde, int s) | ||
121 | { | ||
122 | struct command cmd; | ||
123 | |||
124 | memset(&cmd, 0, sizeof(cmd)); | ||
125 | address &= PAGE_MASK; | ||
126 | CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES); | ||
127 | cmd.data[1] |= domid; | ||
128 | cmd.data[2] = LOW_U32(address); | ||
129 | cmd.data[3] = HIGH_U32(address); | ||
130 | if (s) | ||
131 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; | ||
132 | if (pde) | ||
133 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; | ||
134 | |||
135 | iommu->need_sync = 1; | ||
136 | |||
137 | return iommu_queue_command(iommu, &cmd); | ||
138 | } | ||
139 | |||
140 | static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid, | ||
141 | u64 address, size_t size) | ||
142 | { | ||
143 | int s = 0; | ||
144 | unsigned pages = to_pages(address, size); | ||
145 | |||
146 | address &= PAGE_MASK; | ||
147 | |||
148 | if (pages > 1) { | ||
149 | /* | ||
150 | * If we have to flush more than one page, flush all | ||
151 | * TLB entries for this domain | ||
152 | */ | ||
153 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | ||
154 | s = 1; | ||
155 | } | ||
156 | |||
157 | iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s); | ||
158 | |||
159 | return 0; | ||
160 | } | ||
161 | |||
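/*
 * Generic mapping function: walk the three-level I/O page table of the
 * protection domain, allocate missing intermediate levels and install a
 * PTE mapping bus_addr to phys_addr with the requested protection bits.
 * Currently only used to set up the unity mappings from the ACPI table.
 */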
162 | static int iommu_map(struct protection_domain *dom, | ||
163 | unsigned long bus_addr, | ||
164 | unsigned long phys_addr, | ||
165 | int prot) | ||
166 | { | ||
167 | u64 __pte, *pte, *page; | ||
168 | |||
169 | bus_addr = PAGE_ALIGN(bus_addr); | ||
170 | phys_addr = PAGE_ALIGN(phys_addr); | ||
171 | |||
172 | /* only support 512GB address spaces for now */ | ||
173 | if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK)) | ||
174 | return -EINVAL; | ||
175 | |||
176 | pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)]; | ||
177 | |||
178 | if (!IOMMU_PTE_PRESENT(*pte)) { | ||
179 | page = (u64 *)get_zeroed_page(GFP_KERNEL); | ||
180 | if (!page) | ||
181 | return -ENOMEM; | ||
182 | *pte = IOMMU_L2_PDE(virt_to_phys(page)); | ||
183 | } | ||
184 | |||
185 | pte = IOMMU_PTE_PAGE(*pte); | ||
186 | pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; | ||
187 | |||
188 | if (!IOMMU_PTE_PRESENT(*pte)) { | ||
189 | page = (u64 *)get_zeroed_page(GFP_KERNEL); | ||
190 | if (!page) | ||
191 | return -ENOMEM; | ||
192 | *pte = IOMMU_L1_PDE(virt_to_phys(page)); | ||
193 | } | ||
194 | |||
195 | pte = IOMMU_PTE_PAGE(*pte); | ||
196 | pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)]; | ||
197 | |||
198 | if (IOMMU_PTE_PRESENT(*pte)) | ||
199 | return -EBUSY; | ||
200 | |||
201 | __pte = phys_addr | IOMMU_PTE_P; | ||
202 | if (prot & IOMMU_PROT_IR) | ||
203 | __pte |= IOMMU_PTE_IR; | ||
204 | if (prot & IOMMU_PROT_IW) | ||
205 | __pte |= IOMMU_PTE_IW; | ||
206 | |||
207 | *pte = __pte; | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static int iommu_for_unity_map(struct amd_iommu *iommu, | ||
213 | struct unity_map_entry *entry) | ||
214 | { | ||
215 | u16 bdf, i; | ||
216 | |||
217 | for (i = entry->devid_start; i <= entry->devid_end; ++i) { | ||
218 | bdf = amd_iommu_alias_table[i]; | ||
219 | if (amd_iommu_rlookup_table[bdf] == iommu) | ||
220 | return 1; | ||
221 | } | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static int iommu_init_unity_mappings(struct amd_iommu *iommu) | ||
227 | { | ||
228 | struct unity_map_entry *entry; | ||
229 | int ret; | ||
230 | |||
231 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { | ||
232 | if (!iommu_for_unity_map(iommu, entry)) | ||
233 | continue; | ||
234 | ret = dma_ops_unity_map(iommu->default_dom, entry); | ||
235 | if (ret) | ||
236 | return ret; | ||
237 | } | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | ||
243 | struct unity_map_entry *e) | ||
244 | { | ||
245 | u64 addr; | ||
246 | int ret; | ||
247 | |||
248 | for (addr = e->address_start; addr < e->address_end; | ||
249 | addr += PAGE_SIZE) { | ||
250 | ret = iommu_map(&dma_dom->domain, addr, addr, e->prot); | ||
251 | if (ret) | ||
252 | return ret; | ||
253 | /* | ||
254 | * if the unity mapping lies inside the aperture, mark the | ||
255 | * page as allocated in the aperture bitmap | ||
256 | */ | ||
257 | if (addr < dma_dom->aperture_size) | ||
258 | __set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap); | ||
259 | } | ||
260 | |||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, | ||
265 | u16 devid) | ||
266 | { | ||
267 | struct unity_map_entry *e; | ||
268 | int ret; | ||
269 | |||
270 | list_for_each_entry(e, &amd_iommu_unity_map, list) { | ||
271 | if (!(devid >= e->devid_start && devid <= e->devid_end)) | ||
272 | continue; | ||
273 | ret = dma_ops_unity_map(dma_dom, e); | ||
274 | if (ret) | ||
275 | return ret; | ||
276 | } | ||
277 | |||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | static unsigned long dma_mask_to_pages(unsigned long mask) | ||
282 | { | ||
283 | return (mask >> PAGE_SHIFT) + | ||
284 | (PAGE_ALIGN(mask & ~PAGE_MASK) >> PAGE_SHIFT); | ||
285 | } | ||
286 | |||
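/*
 * Allocate a contiguous range of 'pages' I/O page addresses from the
 * aperture bitmap. The search starts at dom->next_bit and wraps around
 * once, honouring the device's dma_mask and segment boundary; returns
 * bad_dma_address if no suitable range is found.
 */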
287 | static unsigned long dma_ops_alloc_addresses(struct device *dev, | ||
288 | struct dma_ops_domain *dom, | ||
289 | unsigned int pages) | ||
290 | { | ||
291 | unsigned long limit = dma_mask_to_pages(*dev->dma_mask); | ||
292 | unsigned long address; | ||
293 | unsigned long size = dom->aperture_size >> PAGE_SHIFT; | ||
294 | unsigned long boundary_size; | ||
295 | |||
296 | boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, | ||
297 | PAGE_SIZE) >> PAGE_SHIFT; | ||
298 | limit = limit < size ? limit : size; | ||
299 | |||
300 | if (dom->next_bit >= limit) | ||
301 | dom->next_bit = 0; | ||
302 | |||
303 | address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages, | ||
304 | 0, boundary_size, 0); | ||
305 | if (address == -1) | ||
306 | address = iommu_area_alloc(dom->bitmap, limit, 0, pages, | ||
307 | 0, boundary_size, 0); | ||
308 | |||
309 | if (likely(address != -1)) { | ||
310 | dom->next_bit = address + pages; | ||
311 | address <<= PAGE_SHIFT; | ||
312 | } else | ||
313 | address = bad_dma_address; | ||
314 | |||
315 | WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size); | ||
316 | |||
317 | return address; | ||
318 | } | ||
319 | |||
320 | static void dma_ops_free_addresses(struct dma_ops_domain *dom, | ||
321 | unsigned long address, | ||
322 | unsigned int pages) | ||
323 | { | ||
324 | address >>= PAGE_SHIFT; | ||
325 | iommu_area_free(dom->bitmap, address, pages); | ||
326 | } | ||
327 | |||
328 | static u16 domain_id_alloc(void) | ||
329 | { | ||
330 | unsigned long flags; | ||
331 | int id; | ||
332 | |||
333 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | ||
334 | id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID); | ||
335 | BUG_ON(id == 0); | ||
336 | if (id > 0 && id < MAX_DOMAIN_ID) | ||
337 | __set_bit(id, amd_iommu_pd_alloc_bitmap); | ||
338 | else | ||
339 | id = 0; | ||
340 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | ||
341 | |||
342 | return id; | ||
343 | } | ||
344 | |||
345 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, | ||
346 | unsigned long start_page, | ||
347 | unsigned int pages) | ||
348 | { | ||
349 | unsigned int last_page = dom->aperture_size >> PAGE_SHIFT; | ||
350 | |||
351 | if (start_page + pages > last_page) | ||
352 | pages = last_page - start_page; | ||
353 | |||
354 | set_bit_string(dom->bitmap, start_page, pages); | ||
355 | } | ||
356 | |||
357 | static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom) | ||
358 | { | ||
359 | int i, j; | ||
360 | u64 *p1, *p2, *p3; | ||
361 | |||
362 | p1 = dma_dom->domain.pt_root; | ||
363 | |||
364 | if (!p1) | ||
365 | return; | ||
366 | |||
367 | for (i = 0; i < 512; ++i) { | ||
368 | if (!IOMMU_PTE_PRESENT(p1[i])) | ||
369 | continue; | ||
370 | |||
371 | p2 = IOMMU_PTE_PAGE(p1[i]); | ||
372 | for (j = 0; j < 512; ++j) { | ||
373 | if (!IOMMU_PTE_PRESENT(p2[j])) | ||
374 | continue; | ||
375 | p3 = IOMMU_PTE_PAGE(p2[j]); | ||
376 | free_page((unsigned long)p3); | ||
377 | } | ||
378 | |||
379 | free_page((unsigned long)p2); | ||
380 | } | ||
381 | |||
382 | free_page((unsigned long)p1); | ||
383 | } | ||
384 | |||
385 | static void dma_ops_domain_free(struct dma_ops_domain *dom) | ||
386 | { | ||
387 | if (!dom) | ||
388 | return; | ||
389 | |||
390 | dma_ops_free_pagetable(dom); | ||
391 | |||
392 | kfree(dom->pte_pages); | ||
393 | |||
394 | kfree(dom->bitmap); | ||
395 | |||
396 | kfree(dom); | ||
397 | } | ||
398 | |||
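/*
 * Allocate a dma_ops domain with an aperture of 2^order bytes (order 25
 * to 30, i.e. 32 MB to 1 GB). The address allocation bitmap and all leaf
 * page-table pages are allocated up front so that mapping a page later
 * never needs additional memory; the IOMMU exclusion range, if any, is
 * reserved in the aperture.
 */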
399 | static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu, | ||
400 | unsigned order) | ||
401 | { | ||
402 | struct dma_ops_domain *dma_dom; | ||
403 | unsigned i, num_pte_pages; | ||
404 | u64 *l2_pde; | ||
405 | u64 address; | ||
406 | |||
407 | /* | ||
408 | * Currently the DMA aperture must be between 32 MB and 1GB in size | ||
409 | */ | ||
410 | if ((order < 25) || (order > 30)) | ||
411 | return NULL; | ||
412 | |||
413 | dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); | ||
414 | if (!dma_dom) | ||
415 | return NULL; | ||
416 | |||
417 | spin_lock_init(&dma_dom->domain.lock); | ||
418 | |||
419 | dma_dom->domain.id = domain_id_alloc(); | ||
420 | if (dma_dom->domain.id == 0) | ||
421 | goto free_dma_dom; | ||
422 | dma_dom->domain.mode = PAGE_MODE_3_LEVEL; | ||
423 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); | ||
424 | dma_dom->domain.priv = dma_dom; | ||
425 | if (!dma_dom->domain.pt_root) | ||
426 | goto free_dma_dom; | ||
427 | dma_dom->aperture_size = (1ULL << order); | ||
428 | dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8), | ||
429 | GFP_KERNEL); | ||
430 | if (!dma_dom->bitmap) | ||
431 | goto free_dma_dom; | ||
432 | /* | ||
433 | * mark the first page as allocated so we never return 0 as | ||
434 | * a valid dma-address; this allows us to use 0 as the error value | ||
435 | */ | ||
436 | dma_dom->bitmap[0] = 1; | ||
437 | dma_dom->next_bit = 0; | ||
438 | |||
439 | if (iommu->exclusion_start && | ||
440 | iommu->exclusion_start < dma_dom->aperture_size) { | ||
441 | unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT; | ||
442 | int pages = to_pages(iommu->exclusion_start, | ||
443 | iommu->exclusion_length); | ||
444 | dma_ops_reserve_addresses(dma_dom, startpage, pages); | ||
445 | } | ||
446 | |||
447 | num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512); | ||
448 | dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *), | ||
449 | GFP_KERNEL); | ||
450 | if (!dma_dom->pte_pages) | ||
451 | goto free_dma_dom; | ||
452 | |||
453 | l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL); | ||
454 | if (l2_pde == NULL) | ||
455 | goto free_dma_dom; | ||
456 | |||
457 | dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde)); | ||
458 | |||
459 | for (i = 0; i < num_pte_pages; ++i) { | ||
460 | dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL); | ||
461 | if (!dma_dom->pte_pages[i]) | ||
462 | goto free_dma_dom; | ||
463 | address = virt_to_phys(dma_dom->pte_pages[i]); | ||
464 | l2_pde[i] = IOMMU_L1_PDE(address); | ||
465 | } | ||
466 | |||
467 | return dma_dom; | ||
468 | |||
469 | free_dma_dom: | ||
470 | dma_ops_domain_free(dma_dom); | ||
471 | |||
472 | return NULL; | ||
473 | } | ||
474 | |||
475 | static struct protection_domain *domain_for_device(u16 devid) | ||
476 | { | ||
477 | struct protection_domain *dom; | ||
478 | unsigned long flags; | ||
479 | |||
480 | read_lock_irqsave(&amd_iommu_devtable_lock, flags); | ||
481 | dom = amd_iommu_pd_table[devid]; | ||
482 | read_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | ||
483 | |||
484 | return dom; | ||
485 | } | ||
486 | |||
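/*
 * Bind a device to a protection domain: write the page-table root, the
 * paging mode and the domain id into the device table entry and
 * invalidate the device's old device table entry in the IOMMU.
 */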
487 | static void set_device_domain(struct amd_iommu *iommu, | ||
488 | struct protection_domain *domain, | ||
489 | u16 devid) | ||
490 | { | ||
491 | unsigned long flags; | ||
492 | |||
493 | u64 pte_root = virt_to_phys(domain->pt_root); | ||
494 | |||
495 | pte_root |= (domain->mode & 0x07) << 9; | ||
496 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | 2; | ||
497 | |||
498 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | ||
499 | amd_iommu_dev_table[devid].data[0] = pte_root; | ||
500 | amd_iommu_dev_table[devid].data[1] = pte_root >> 32; | ||
501 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
502 | |||
503 | amd_iommu_pd_table[devid] = domain; | ||
504 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | ||
505 | |||
506 | iommu_queue_inv_dev_entry(iommu, devid); | ||
507 | |||
508 | iommu->need_sync = 1; | ||
509 | } | ||
510 | |||
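/*
 * Look up the IOMMU and protection domain responsible for a device.
 * Returns 0 if the device is not translated by any IOMMU (it is then
 * used with its physical addresses); if the device is translated but
 * has no domain yet, it is attached to the default dma_ops domain of
 * its IOMMU.
 */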
511 | static int get_device_resources(struct device *dev, | ||
512 | struct amd_iommu **iommu, | ||
513 | struct protection_domain **domain, | ||
514 | u16 *bdf) | ||
515 | { | ||
516 | struct dma_ops_domain *dma_dom; | ||
517 | struct pci_dev *pcidev; | ||
518 | u16 _bdf; | ||
519 | |||
520 | BUG_ON(!dev || dev->bus != &pci_bus_type || !dev->dma_mask); | ||
521 | |||
522 | pcidev = to_pci_dev(dev); | ||
523 | _bdf = (pcidev->bus->number << 8) | pcidev->devfn; | ||
524 | |||
525 | if (_bdf >= amd_iommu_last_bdf) { | ||
526 | *iommu = NULL; | ||
527 | *domain = NULL; | ||
528 | *bdf = 0xffff; | ||
529 | return 0; | ||
530 | } | ||
531 | |||
532 | *bdf = amd_iommu_alias_table[_bdf]; | ||
533 | |||
534 | *iommu = amd_iommu_rlookup_table[*bdf]; | ||
535 | if (*iommu == NULL) | ||
536 | return 0; | ||
537 | dma_dom = (*iommu)->default_dom; | ||
538 | *domain = domain_for_device(*bdf); | ||
539 | if (*domain == NULL) { | ||
540 | *domain = &dma_dom->domain; | ||
541 | set_device_domain(*iommu, *domain, *bdf); | ||
542 | printk(KERN_INFO "AMD IOMMU: Using protection domain %d for " | ||
543 | "device ", (*domain)->id); | ||
544 | print_devid(_bdf, 1); | ||
545 | } | ||
546 | |||
547 | return 1; | ||
548 | } | ||
549 | |||
550 | static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu, | ||
551 | struct dma_ops_domain *dom, | ||
552 | unsigned long address, | ||
553 | phys_addr_t paddr, | ||
554 | int direction) | ||
555 | { | ||
556 | u64 *pte, __pte; | ||
557 | |||
558 | WARN_ON(address > dom->aperture_size); | ||
559 | |||
560 | paddr &= PAGE_MASK; | ||
561 | |||
562 | pte = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)]; | ||
563 | pte += IOMMU_PTE_L0_INDEX(address); | ||
564 | |||
565 | __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC; | ||
566 | |||
567 | if (direction == DMA_TO_DEVICE) | ||
568 | __pte |= IOMMU_PTE_IR; | ||
569 | else if (direction == DMA_FROM_DEVICE) | ||
570 | __pte |= IOMMU_PTE_IW; | ||
571 | else if (direction == DMA_BIDIRECTIONAL) | ||
572 | __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW; | ||
573 | |||
574 | WARN_ON(*pte); | ||
575 | |||
576 | *pte = __pte; | ||
577 | |||
578 | return (dma_addr_t)address; | ||
579 | } | ||
580 | |||
581 | static void dma_ops_domain_unmap(struct amd_iommu *iommu, | ||
582 | struct dma_ops_domain *dom, | ||
583 | unsigned long address) | ||
584 | { | ||
585 | u64 *pte; | ||
586 | |||
587 | if (address >= dom->aperture_size) | ||
588 | return; | ||
589 | |||
590 | WARN_ON(address & 0xfffULL || address > dom->aperture_size); | ||
591 | |||
592 | pte = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)]; | ||
593 | pte += IOMMU_PTE_L0_INDEX(address); | ||
594 | |||
595 | WARN_ON(!*pte); | ||
596 | |||
597 | *pte = 0ULL; | ||
598 | } | ||
599 | |||
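/*
 * Map a physically contiguous buffer into the dma_ops aperture of a
 * domain: allocate enough I/O pages, map them one by one and return the
 * DMA address including the offset of the buffer into its first page.
 * The domain lock must be held by the caller.
 */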
600 | static dma_addr_t __map_single(struct device *dev, | ||
601 | struct amd_iommu *iommu, | ||
602 | struct dma_ops_domain *dma_dom, | ||
603 | phys_addr_t paddr, | ||
604 | size_t size, | ||
605 | int dir) | ||
606 | { | ||
607 | dma_addr_t offset = paddr & ~PAGE_MASK; | ||
608 | dma_addr_t address, start; | ||
609 | unsigned int pages; | ||
610 | int i; | ||
611 | |||
612 | pages = to_pages(paddr, size); | ||
613 | paddr &= PAGE_MASK; | ||
614 | |||
615 | address = dma_ops_alloc_addresses(dev, dma_dom, pages); | ||
616 | if (unlikely(address == bad_dma_address)) | ||
617 | goto out; | ||
618 | |||
619 | start = address; | ||
620 | for (i = 0; i < pages; ++i) { | ||
621 | dma_ops_domain_map(iommu, dma_dom, start, paddr, dir); | ||
622 | paddr += PAGE_SIZE; | ||
623 | start += PAGE_SIZE; | ||
624 | } | ||
625 | address += offset; | ||
626 | |||
627 | out: | ||
628 | return address; | ||
629 | } | ||
630 | |||
631 | static void __unmap_single(struct amd_iommu *iommu, | ||
632 | struct dma_ops_domain *dma_dom, | ||
633 | dma_addr_t dma_addr, | ||
634 | size_t size, | ||
635 | int dir) | ||
636 | { | ||
637 | dma_addr_t i, start; | ||
638 | unsigned int pages; | ||
639 | |||
640 | if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size)) | ||
641 | return; | ||
642 | |||
643 | pages = to_pages(dma_addr, size); | ||
644 | dma_addr &= PAGE_MASK; | ||
645 | start = dma_addr; | ||
646 | |||
647 | for (i = 0; i < pages; ++i) { | ||
648 | dma_ops_domain_unmap(iommu, dma_dom, start); | ||
649 | start += PAGE_SIZE; | ||
650 | } | ||
651 | |||
652 | dma_ops_free_addresses(dma_dom, dma_addr, pages); | ||
653 | } | ||
654 | |||
655 | static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, | ||
656 | size_t size, int dir) | ||
657 | { | ||
658 | unsigned long flags; | ||
659 | struct amd_iommu *iommu; | ||
660 | struct protection_domain *domain; | ||
661 | u16 devid; | ||
662 | dma_addr_t addr; | ||
663 | |||
664 | get_device_resources(dev, &iommu, &domain, &devid); | ||
665 | |||
666 | if (iommu == NULL || domain == NULL) | ||
667 | return (dma_addr_t)paddr; | ||
668 | |||
669 | spin_lock_irqsave(&domain->lock, flags); | ||
670 | addr = __map_single(dev, iommu, domain->priv, paddr, size, dir); | ||
671 | if (addr == bad_dma_address) | ||
672 | goto out; | ||
673 | |||
674 | if (iommu_has_npcache(iommu)) | ||
675 | iommu_flush_pages(iommu, domain->id, addr, size); | ||
676 | |||
677 | if (iommu->need_sync) | ||
678 | iommu_completion_wait(iommu); | ||
679 | |||
680 | out: | ||
681 | spin_unlock_irqrestore(&domain->lock, flags); | ||
682 | |||
683 | return addr; | ||
684 | } | ||
685 | |||
686 | static void unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
687 | size_t size, int dir) | ||
688 | { | ||
689 | unsigned long flags; | ||
690 | struct amd_iommu *iommu; | ||
691 | struct protection_domain *domain; | ||
692 | u16 devid; | ||
693 | |||
694 | if (!get_device_resources(dev, &iommu, &domain, &devid)) | ||
695 | return; | ||
696 | |||
697 | spin_lock_irqsave(&domain->lock, flags); | ||
698 | |||
699 | __unmap_single(iommu, domain->priv, dma_addr, size, dir); | ||
700 | |||
701 | iommu_flush_pages(iommu, domain->id, dma_addr, size); | ||
702 | |||
703 | if (iommu->need_sync) | ||
704 | iommu_completion_wait(iommu); | ||
705 | |||
706 | spin_unlock_irqrestore(&domain->lock, flags); | ||
707 | } | ||
708 | |||
709 | static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist, | ||
710 | int nelems, int dir) | ||
711 | { | ||
712 | struct scatterlist *s; | ||
713 | int i; | ||
714 | |||
715 | for_each_sg(sglist, s, nelems, i) { | ||
716 | s->dma_address = (dma_addr_t)sg_phys(s); | ||
717 | s->dma_length = s->length; | ||
718 | } | ||
719 | |||
720 | return nelems; | ||
721 | } | ||
722 | |||
723 | static int map_sg(struct device *dev, struct scatterlist *sglist, | ||
724 | int nelems, int dir) | ||
725 | { | ||
726 | unsigned long flags; | ||
727 | struct amd_iommu *iommu; | ||
728 | struct protection_domain *domain; | ||
729 | u16 devid; | ||
730 | int i; | ||
731 | struct scatterlist *s; | ||
732 | phys_addr_t paddr; | ||
733 | int mapped_elems = 0; | ||
734 | |||
735 | get_device_resources(dev, &iommu, &domain, &devid); | ||
736 | |||
737 | if (!iommu || !domain) | ||
738 | return map_sg_no_iommu(dev, sglist, nelems, dir); | ||
739 | |||
740 | spin_lock_irqsave(&domain->lock, flags); | ||
741 | |||
742 | for_each_sg(sglist, s, nelems, i) { | ||
743 | paddr = sg_phys(s); | ||
744 | |||
745 | s->dma_address = __map_single(dev, iommu, domain->priv, | ||
746 | paddr, s->length, dir); | ||
747 | |||
748 | if (s->dma_address) { | ||
749 | s->dma_length = s->length; | ||
750 | mapped_elems++; | ||
751 | } else | ||
752 | goto unmap; | ||
753 | if (iommu_has_npcache(iommu)) | ||
754 | iommu_flush_pages(iommu, domain->id, s->dma_address, | ||
755 | s->dma_length); | ||
756 | } | ||
757 | |||
758 | if (iommu->need_sync) | ||
759 | iommu_completion_wait(iommu); | ||
760 | |||
761 | out: | ||
762 | spin_unlock_irqrestore(&domain->lock, flags); | ||
763 | |||
764 | return mapped_elems; | ||
765 | unmap: | ||
766 | for_each_sg(sglist, s, mapped_elems, i) { | ||
767 | if (s->dma_address) | ||
768 | __unmap_single(iommu, domain->priv, s->dma_address, | ||
769 | s->dma_length, dir); | ||
770 | s->dma_address = s->dma_length = 0; | ||
771 | } | ||
772 | |||
773 | mapped_elems = 0; | ||
774 | |||
775 | goto out; | ||
776 | } | ||
777 | |||
778 | static void unmap_sg(struct device *dev, struct scatterlist *sglist, | ||
779 | int nelems, int dir) | ||
780 | { | ||
781 | unsigned long flags; | ||
782 | struct amd_iommu *iommu; | ||
783 | struct protection_domain *domain; | ||
784 | struct scatterlist *s; | ||
785 | u16 devid; | ||
786 | int i; | ||
787 | |||
788 | if (!get_device_resources(dev, &iommu, &domain, &devid)) | ||
789 | return; | ||
790 | |||
791 | spin_lock_irqsave(&domain->lock, flags); | ||
792 | |||
793 | for_each_sg(sglist, s, nelems, i) { | ||
794 | __unmap_single(iommu, domain->priv, s->dma_address, | ||
795 | s->dma_length, dir); | ||
796 | iommu_flush_pages(iommu, domain->id, s->dma_address, | ||
797 | s->dma_length); | ||
798 | s->dma_address = s->dma_length = 0; | ||
799 | } | ||
800 | |||
801 | if (iommu->need_sync) | ||
802 | iommu_completion_wait(iommu); | ||
803 | |||
804 | spin_unlock_irqrestore(&domain->lock, flags); | ||
805 | } | ||
806 | |||
807 | static void *alloc_coherent(struct device *dev, size_t size, | ||
808 | dma_addr_t *dma_addr, gfp_t flag) | ||
809 | { | ||
810 | unsigned long flags; | ||
811 | void *virt_addr; | ||
812 | struct amd_iommu *iommu; | ||
813 | struct protection_domain *domain; | ||
814 | u16 devid; | ||
815 | phys_addr_t paddr; | ||
816 | |||
817 | virt_addr = (void *)__get_free_pages(flag, get_order(size)); | ||
818 | if (!virt_addr) | ||
819 | return NULL; | ||
820 | |||
821 | memset(virt_addr, 0, size); | ||
822 | paddr = virt_to_phys(virt_addr); | ||
823 | |||
824 | get_device_resources(dev, &iommu, &domain, &devid); | ||
825 | |||
826 | if (!iommu || !domain) { | ||
827 | *dma_addr = (dma_addr_t)paddr; | ||
828 | return virt_addr; | ||
829 | } | ||
830 | |||
831 | spin_lock_irqsave(&domain->lock, flags); | ||
832 | |||
833 | *dma_addr = __map_single(dev, iommu, domain->priv, paddr, | ||
834 | size, DMA_BIDIRECTIONAL); | ||
835 | |||
836 | if (*dma_addr == bad_dma_address) { | ||
837 | free_pages((unsigned long)virt_addr, get_order(size)); | ||
838 | virt_addr = NULL; | ||
839 | goto out; | ||
840 | } | ||
841 | |||
842 | if (iommu_has_npcache(iommu)) | ||
843 | iommu_flush_pages(iommu, domain->id, *dma_addr, size); | ||
844 | |||
845 | if (iommu->need_sync) | ||
846 | iommu_completion_wait(iommu); | ||
847 | |||
848 | out: | ||
849 | spin_unlock_irqrestore(&domain->lock, flags); | ||
850 | |||
851 | return virt_addr; | ||
852 | } | ||
853 | |||
854 | static void free_coherent(struct device *dev, size_t size, | ||
855 | void *virt_addr, dma_addr_t dma_addr) | ||
856 | { | ||
857 | unsigned long flags; | ||
858 | struct amd_iommu *iommu; | ||
859 | struct protection_domain *domain; | ||
860 | u16 devid; | ||
861 | |||
862 | get_device_resources(dev, &iommu, &domain, &devid); | ||
863 | |||
864 | if (!iommu || !domain) | ||
865 | goto free_mem; | ||
866 | |||
867 | spin_lock_irqsave(&domain->lock, flags); | ||
868 | |||
869 | __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); | ||
870 | iommu_flush_pages(iommu, domain->id, dma_addr, size); | ||
871 | |||
872 | if (iommu->need_sync) | ||
873 | iommu_completion_wait(iommu); | ||
874 | |||
875 | spin_unlock_irqrestore(&domain->lock, flags); | ||
876 | |||
877 | free_mem: | ||
878 | free_pages((unsigned long)virt_addr, get_order(size)); | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * Once the driver core informs the DMA layer when a driver grabs a | ||
883 | * device, we won't need to preallocate the protection domains anymore. | ||
884 | * For now we have to. | ||
885 | */ | ||
886 | void prealloc_protection_domains(void) | ||
887 | { | ||
888 | struct pci_dev *dev = NULL; | ||
889 | struct dma_ops_domain *dma_dom; | ||
890 | struct amd_iommu *iommu; | ||
891 | int order = amd_iommu_aperture_order; | ||
892 | u16 devid; | ||
893 | |||
894 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
895 | devid = (dev->bus->number << 8) | dev->devfn; | ||
896 | if (devid >= amd_iommu_last_bdf) | ||
897 | continue; | ||
898 | devid = amd_iommu_alias_table[devid]; | ||
899 | if (domain_for_device(devid)) | ||
900 | continue; | ||
901 | iommu = amd_iommu_rlookup_table[devid]; | ||
902 | if (!iommu) | ||
903 | continue; | ||
904 | dma_dom = dma_ops_domain_alloc(iommu, order); | ||
905 | if (!dma_dom) | ||
906 | continue; | ||
907 | init_unity_mappings_for_device(dma_dom, devid); | ||
908 | set_device_domain(iommu, &dma_dom->domain, devid); | ||
909 | printk(KERN_INFO "AMD IOMMU: Allocated domain %d for device ", | ||
910 | dma_dom->domain.id); | ||
911 | print_devid(devid, 1); | ||
912 | } | ||
913 | } | ||
914 | |||
915 | static struct dma_mapping_ops amd_iommu_dma_ops = { | ||
916 | .alloc_coherent = alloc_coherent, | ||
917 | .free_coherent = free_coherent, | ||
918 | .map_single = map_single, | ||
919 | .unmap_single = unmap_single, | ||
920 | .map_sg = map_sg, | ||
921 | .unmap_sg = unmap_sg, | ||
922 | }; | ||
923 | |||
924 | int __init amd_iommu_init_dma_ops(void) | ||
925 | { | ||
926 | struct amd_iommu *iommu; | ||
927 | int order = amd_iommu_aperture_order; | ||
928 | int ret; | ||
929 | |||
930 | list_for_each_entry(iommu, &amd_iommu_list, list) { | ||
931 | iommu->default_dom = dma_ops_domain_alloc(iommu, order); | ||
932 | if (iommu->default_dom == NULL) | ||
933 | return -ENOMEM; | ||
934 | ret = iommu_init_unity_mappings(iommu); | ||
935 | if (ret) | ||
936 | goto free_domains; | ||
937 | } | ||
938 | |||
939 | if (amd_iommu_isolate) | ||
940 | prealloc_protection_domains(); | ||
941 | |||
942 | iommu_detected = 1; | ||
943 | force_iommu = 1; | ||
944 | bad_dma_address = 0; | ||
945 | #ifdef CONFIG_GART_IOMMU | ||
946 | gart_iommu_aperture_disabled = 1; | ||
947 | gart_iommu_aperture = 0; | ||
948 | #endif | ||
949 | |||
950 | dma_ops = &amd_iommu_dma_ops; | ||
951 | |||
952 | return 0; | ||
953 | |||
954 | free_domains: | ||
955 | |||
956 | list_for_each_entry(iommu, &amd_iommu_list, list) { | ||
957 | if (iommu->default_dom) | ||
958 | dma_ops_domain_free(iommu->default_dom); | ||
959 | } | ||
960 | |||
961 | return ret; | ||
962 | } | ||
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
new file mode 100644
index 000000000000..2a13e430437d
--- /dev/null
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -0,0 +1,875 @@
1 | /* | ||
2 | * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. | ||
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | ||
4 | * Leo Duran <leo.duran@amd.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/pci.h> | ||
21 | #include <linux/acpi.h> | ||
22 | #include <linux/gfp.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/sysdev.h> | ||
25 | #include <asm/pci-direct.h> | ||
26 | #include <asm/amd_iommu_types.h> | ||
27 | #include <asm/amd_iommu.h> | ||
28 | #include <asm/gart.h> | ||
29 | |||
30 | /* | ||
31 | * definitions for the ACPI scanning code | ||
32 | */ | ||
33 | #define UPDATE_LAST_BDF(x) do {\ | ||
34 | if ((x) > amd_iommu_last_bdf) \ | ||
35 | amd_iommu_last_bdf = (x); \ | ||
36 | } while (0) | ||
37 | |||
38 | #define DEVID(bus, devfn) (((bus) << 8) | (devfn)) | ||
39 | #define PCI_BUS(x) (((x) >> 8) & 0xff) | ||
40 | #define IVRS_HEADER_LENGTH 48 | ||
41 | #define TBL_SIZE(x) (1 << (PAGE_SHIFT + get_order(amd_iommu_last_bdf * (x)))) | ||
42 | |||
43 | #define ACPI_IVHD_TYPE 0x10 | ||
44 | #define ACPI_IVMD_TYPE_ALL 0x20 | ||
45 | #define ACPI_IVMD_TYPE 0x21 | ||
46 | #define ACPI_IVMD_TYPE_RANGE 0x22 | ||
47 | |||
48 | #define IVHD_DEV_ALL 0x01 | ||
49 | #define IVHD_DEV_SELECT 0x02 | ||
50 | #define IVHD_DEV_SELECT_RANGE_START 0x03 | ||
51 | #define IVHD_DEV_RANGE_END 0x04 | ||
52 | #define IVHD_DEV_ALIAS 0x42 | ||
53 | #define IVHD_DEV_ALIAS_RANGE 0x43 | ||
54 | #define IVHD_DEV_EXT_SELECT 0x46 | ||
55 | #define IVHD_DEV_EXT_SELECT_RANGE 0x47 | ||
56 | |||
57 | #define IVHD_FLAG_HT_TUN_EN 0x00 | ||
58 | #define IVHD_FLAG_PASSPW_EN 0x01 | ||
59 | #define IVHD_FLAG_RESPASSPW_EN 0x02 | ||
60 | #define IVHD_FLAG_ISOC_EN 0x03 | ||
61 | |||
62 | #define IVMD_FLAG_EXCL_RANGE 0x08 | ||
63 | #define IVMD_FLAG_UNITY_MAP 0x01 | ||
64 | |||
65 | #define ACPI_DEVFLAG_INITPASS 0x01 | ||
66 | #define ACPI_DEVFLAG_EXTINT 0x02 | ||
67 | #define ACPI_DEVFLAG_NMI 0x04 | ||
68 | #define ACPI_DEVFLAG_SYSMGT1 0x10 | ||
69 | #define ACPI_DEVFLAG_SYSMGT2 0x20 | ||
70 | #define ACPI_DEVFLAG_LINT0 0x40 | ||
71 | #define ACPI_DEVFLAG_LINT1 0x80 | ||
72 | #define ACPI_DEVFLAG_ATSDIS 0x10000000 | ||
73 | |||
74 | struct ivhd_header { | ||
75 | u8 type; | ||
76 | u8 flags; | ||
77 | u16 length; | ||
78 | u16 devid; | ||
79 | u16 cap_ptr; | ||
80 | u64 mmio_phys; | ||
81 | u16 pci_seg; | ||
82 | u16 info; | ||
83 | u32 reserved; | ||
84 | } __attribute__((packed)); | ||
85 | |||
86 | struct ivhd_entry { | ||
87 | u8 type; | ||
88 | u16 devid; | ||
89 | u8 flags; | ||
90 | u32 ext; | ||
91 | } __attribute__((packed)); | ||
92 | |||
93 | struct ivmd_header { | ||
94 | u8 type; | ||
95 | u8 flags; | ||
96 | u16 length; | ||
97 | u16 devid; | ||
98 | u16 aux; | ||
99 | u64 resv; | ||
100 | u64 range_start; | ||
101 | u64 range_length; | ||
102 | } __attribute__((packed)); | ||
103 | |||
104 | static int __initdata amd_iommu_detected; | ||
105 | |||
106 | u16 amd_iommu_last_bdf; | ||
107 | struct list_head amd_iommu_unity_map; | ||
108 | unsigned amd_iommu_aperture_order = 26; | ||
109 | int amd_iommu_isolate; | ||
110 | |||
111 | struct list_head amd_iommu_list; | ||
112 | struct dev_table_entry *amd_iommu_dev_table; | ||
113 | u16 *amd_iommu_alias_table; | ||
114 | struct amd_iommu **amd_iommu_rlookup_table; | ||
115 | struct protection_domain **amd_iommu_pd_table; | ||
116 | unsigned long *amd_iommu_pd_alloc_bitmap; | ||
117 | |||
118 | static u32 dev_table_size; | ||
119 | static u32 alias_table_size; | ||
120 | static u32 rlookup_table_size; | ||
121 | |||
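/*
 * Program the exclusion range of an IOMMU: write the base address (with
 * the enable bit set) and the limit taken from the iommu struct into the
 * corresponding MMIO registers. Nothing is done if no exclusion range
 * was found for this IOMMU.
 */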
122 | static void __init iommu_set_exclusion_range(struct amd_iommu *iommu) | ||
123 | { | ||
124 | u64 start = iommu->exclusion_start & PAGE_MASK; | ||
125 | u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; | ||
126 | u64 entry; | ||
127 | |||
128 | if (!iommu->exclusion_start) | ||
129 | return; | ||
130 | |||
131 | entry = start | MMIO_EXCL_ENABLE_MASK; | ||
132 | memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, | ||
133 | &entry, sizeof(entry)); | ||
134 | |||
135 | entry = limit; | ||
136 | memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, | ||
137 | &entry, sizeof(entry)); | ||
138 | } | ||
139 | |||
140 | static void __init iommu_set_device_table(struct amd_iommu *iommu) | ||
141 | { | ||
142 | u32 entry; | ||
143 | |||
144 | BUG_ON(iommu->mmio_base == NULL); | ||
145 | |||
146 | entry = virt_to_phys(amd_iommu_dev_table); | ||
147 | entry |= (dev_table_size >> 12) - 1; | ||
148 | memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, | ||
149 | &entry, sizeof(entry)); | ||
150 | } | ||
151 | |||
152 | static void __init iommu_feature_enable(struct amd_iommu *iommu, u8 bit) | ||
153 | { | ||
154 | u32 ctrl; | ||
155 | |||
156 | ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); | ||
157 | ctrl |= (1 << bit); | ||
158 | writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); | ||
159 | } | ||
160 | |||
161 | static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit) | ||
162 | { | ||
163 | u32 ctrl; | ||
164 | |||
165 | ctrl = (u64)readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); | ||
166 | ctrl &= ~(1 << bit); | ||
167 | writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); | ||
168 | } | ||
169 | |||
170 | void __init iommu_enable(struct amd_iommu *iommu) | ||
171 | { | ||
172 | printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at "); | ||
173 | print_devid(iommu->devid, 0); | ||
174 | printk(" cap 0x%hx\n", iommu->cap_ptr); | ||
175 | |||
176 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); | ||
177 | } | ||
178 | |||
179 | static u8 * __init iommu_map_mmio_space(u64 address) | ||
180 | { | ||
181 | u8 *ret; | ||
182 | |||
183 | if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) | ||
184 | return NULL; | ||
185 | |||
186 | ret = ioremap_nocache(address, MMIO_REGION_LENGTH); | ||
187 | if (ret != NULL) | ||
188 | return ret; | ||
189 | |||
190 | release_mem_region(address, MMIO_REGION_LENGTH); | ||
191 | |||
192 | return NULL; | ||
193 | } | ||
194 | |||
195 | static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) | ||
196 | { | ||
197 | if (iommu->mmio_base) | ||
198 | iounmap(iommu->mmio_base); | ||
199 | release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH); | ||
200 | } | ||
201 | |||
202 | static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr) | ||
203 | { | ||
204 | u32 cap; | ||
205 | |||
206 | cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET); | ||
207 | UPDATE_LAST_BDF(DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap))); | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | |||
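/*
 * Walk all device entries of an IVHD block and record the highest device
 * id referenced. The size of each entry is encoded in the two topmost
 * bits of its type byte, hence the '0x04 << (*p >> 6)' stride.
 */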
212 | static int __init find_last_devid_from_ivhd(struct ivhd_header *h) | ||
213 | { | ||
214 | u8 *p = (void *)h, *end = (void *)h; | ||
215 | struct ivhd_entry *dev; | ||
216 | |||
217 | p += sizeof(*h); | ||
218 | end += h->length; | ||
219 | |||
220 | find_last_devid_on_pci(PCI_BUS(h->devid), | ||
221 | PCI_SLOT(h->devid), | ||
222 | PCI_FUNC(h->devid), | ||
223 | h->cap_ptr); | ||
224 | |||
225 | while (p < end) { | ||
226 | dev = (struct ivhd_entry *)p; | ||
227 | switch (dev->type) { | ||
228 | case IVHD_DEV_SELECT: | ||
229 | case IVHD_DEV_RANGE_END: | ||
230 | case IVHD_DEV_ALIAS: | ||
231 | case IVHD_DEV_EXT_SELECT: | ||
232 | UPDATE_LAST_BDF(dev->devid); | ||
233 | break; | ||
234 | default: | ||
235 | break; | ||
236 | } | ||
237 | p += 0x04 << (*p >> 6); | ||
238 | } | ||
239 | |||
240 | WARN_ON(p != end); | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | static int __init find_last_devid_acpi(struct acpi_table_header *table) | ||
246 | { | ||
247 | int i; | ||
248 | u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table; | ||
249 | struct ivhd_header *h; | ||
250 | |||
251 | /* | ||
252 | * Validate checksum here so we don't need to do it when | ||
253 | * we actually parse the table | ||
254 | */ | ||
255 | for (i = 0; i < table->length; ++i) | ||
256 | checksum += p[i]; | ||
257 | if (checksum != 0) | ||
258 | /* ACPI table corrupt */ | ||
259 | return -ENODEV; | ||
260 | |||
261 | p += IVRS_HEADER_LENGTH; | ||
262 | |||
263 | end += table->length; | ||
264 | while (p < end) { | ||
265 | h = (struct ivhd_header *)p; | ||
266 | switch (h->type) { | ||
267 | case ACPI_IVHD_TYPE: | ||
268 | find_last_devid_from_ivhd(h); | ||
269 | break; | ||
270 | default: | ||
271 | break; | ||
272 | } | ||
273 | p += h->length; | ||
274 | } | ||
275 | WARN_ON(p != end); | ||
276 | |||
277 | return 0; | ||
278 | } | ||
279 | |||
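/*
 * Allocate the command buffer the IOMMU fetches its commands from,
 * program its base address and size into the MMIO command buffer
 * register and enable command processing.
 */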
280 | static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) | ||
281 | { | ||
282 | u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL, | ||
283 | get_order(CMD_BUFFER_SIZE)); | ||
284 | u64 entry = 0; | ||
285 | |||
286 | if (cmd_buf == NULL) | ||
287 | return NULL; | ||
288 | |||
289 | iommu->cmd_buf_size = CMD_BUFFER_SIZE; | ||
290 | |||
291 | memset(cmd_buf, 0, CMD_BUFFER_SIZE); | ||
292 | |||
293 | entry = (u64)virt_to_phys(cmd_buf); | ||
294 | entry |= MMIO_CMD_SIZE_512; | ||
295 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, | ||
296 | &entry, sizeof(entry)); | ||
297 | |||
298 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); | ||
299 | |||
300 | return cmd_buf; | ||
301 | } | ||
302 | |||
303 | static void __init free_command_buffer(struct amd_iommu *iommu) | ||
304 | { | ||
305 | if (iommu->cmd_buf) | ||
306 | free_pages((unsigned long)iommu->cmd_buf, | ||
307 | get_order(CMD_BUFFER_SIZE)); | ||
308 | } | ||
309 | |||
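/*
 * Set a single bit in a device table entry; bit >> 5 selects one of the
 * 32-bit words of the entry, bit & 0x1f the position inside that word.
 */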
310 | static void set_dev_entry_bit(u16 devid, u8 bit) | ||
311 | { | ||
312 | int i = (bit >> 5) & 0x07; | ||
313 | int _bit = bit & 0x1f; | ||
314 | |||
315 | amd_iommu_dev_table[devid].data[i] |= (1 << _bit); | ||
316 | } | ||
317 | |||
318 | static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags) | ||
319 | { | ||
320 | if (flags & ACPI_DEVFLAG_INITPASS) | ||
321 | set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); | ||
322 | if (flags & ACPI_DEVFLAG_EXTINT) | ||
323 | set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); | ||
324 | if (flags & ACPI_DEVFLAG_NMI) | ||
325 | set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); | ||
326 | if (flags & ACPI_DEVFLAG_SYSMGT1) | ||
327 | set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); | ||
328 | if (flags & ACPI_DEVFLAG_SYSMGT2) | ||
329 | set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); | ||
330 | if (flags & ACPI_DEVFLAG_LINT0) | ||
331 | set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); | ||
332 | if (flags & ACPI_DEVFLAG_LINT1) | ||
333 | set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); | ||
334 | } | ||
335 | |||
336 | static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) | ||
337 | { | ||
338 | amd_iommu_rlookup_table[devid] = iommu; | ||
339 | } | ||
340 | |||
341 | static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) | ||
342 | { | ||
343 | struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; | ||
344 | |||
345 | if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) | ||
346 | return; | ||
347 | |||
348 | if (iommu) { | ||
349 | set_dev_entry_bit(m->devid, DEV_ENTRY_EX); | ||
350 | iommu->exclusion_start = m->range_start; | ||
351 | iommu->exclusion_length = m->range_length; | ||
352 | } | ||
353 | } | ||
354 | |||
355 | static void __init init_iommu_from_pci(struct amd_iommu *iommu) | ||
356 | { | ||
357 | int bus = PCI_BUS(iommu->devid); | ||
358 | int dev = PCI_SLOT(iommu->devid); | ||
359 | int fn = PCI_FUNC(iommu->devid); | ||
360 | int cap_ptr = iommu->cap_ptr; | ||
361 | u32 range; | ||
362 | |||
363 | iommu->cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_CAP_HDR_OFFSET); | ||
364 | |||
365 | range = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET); | ||
366 | iommu->first_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_FD(range)); | ||
367 | iommu->last_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_LD(range)); | ||
368 | } | ||
369 | |||
370 | static void __init init_iommu_from_acpi(struct amd_iommu *iommu, | ||
371 | struct ivhd_header *h) | ||
372 | { | ||
373 | u8 *p = (u8 *)h; | ||
374 | u8 *end = p, flags = 0; | ||
375 | u16 dev_i, devid = 0, devid_start = 0, devid_to = 0; | ||
376 | u32 ext_flags = 0; | ||
377 | bool alias = 0; | ||
378 | struct ivhd_entry *e; | ||
379 | |||
380 | /* | ||
381 | * First set the recommended feature enable bits from ACPI | ||
382 | * into the IOMMU control registers | ||
383 | */ | ||
384 | h->flags & IVHD_FLAG_HT_TUN_EN ? | ||
385 | iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : | ||
386 | iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); | ||
387 | |||
388 | h->flags & IVHD_FLAG_PASSPW_EN ? | ||
389 | iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : | ||
390 | iommu_feature_disable(iommu, CONTROL_PASSPW_EN); | ||
391 | |||
392 | h->flags & IVHD_FLAG_RESPASSPW_EN ? | ||
393 | iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : | ||
394 | iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); | ||
395 | |||
396 | h->flags & IVHD_FLAG_ISOC_EN ? | ||
397 | iommu_feature_enable(iommu, CONTROL_ISOC_EN) : | ||
398 | iommu_feature_disable(iommu, CONTROL_ISOC_EN); | ||
399 | |||
400 | /* | ||
401 | * make IOMMU memory accesses cache coherent | ||
402 | */ | ||
403 | iommu_feature_enable(iommu, CONTROL_COHERENT_EN); | ||
404 | |||
405 | /* | ||
406 | * Done. Now parse the device entries | ||
407 | */ | ||
408 | p += sizeof(struct ivhd_header); | ||
409 | end += h->length; | ||
410 | |||
411 | while (p < end) { | ||
412 | e = (struct ivhd_entry *)p; | ||
413 | switch (e->type) { | ||
414 | case IVHD_DEV_ALL: | ||
415 | for (dev_i = iommu->first_device; | ||
416 | dev_i <= iommu->last_device; ++dev_i) | ||
417 | set_dev_entry_from_acpi(dev_i, e->flags, 0); | ||
418 | break; | ||
419 | case IVHD_DEV_SELECT: | ||
420 | devid = e->devid; | ||
421 | set_dev_entry_from_acpi(devid, e->flags, 0); | ||
422 | break; | ||
423 | case IVHD_DEV_SELECT_RANGE_START: | ||
424 | devid_start = e->devid; | ||
425 | flags = e->flags; | ||
426 | ext_flags = 0; | ||
427 | alias = 0; | ||
428 | break; | ||
429 | case IVHD_DEV_ALIAS: | ||
430 | devid = e->devid; | ||
431 | devid_to = e->ext >> 8; | ||
432 | set_dev_entry_from_acpi(devid, e->flags, 0); | ||
433 | amd_iommu_alias_table[devid] = devid_to; | ||
434 | break; | ||
435 | case IVHD_DEV_ALIAS_RANGE: | ||
436 | devid_start = e->devid; | ||
437 | flags = e->flags; | ||
438 | devid_to = e->ext >> 8; | ||
439 | ext_flags = 0; | ||
440 | alias = 1; | ||
441 | break; | ||
442 | case IVHD_DEV_EXT_SELECT: | ||
443 | devid = e->devid; | ||
444 | set_dev_entry_from_acpi(devid, e->flags, e->ext); | ||
445 | break; | ||
446 | case IVHD_DEV_EXT_SELECT_RANGE: | ||
447 | devid_start = e->devid; | ||
448 | flags = e->flags; | ||
449 | ext_flags = e->ext; | ||
450 | alias = 0; | ||
451 | break; | ||
452 | case IVHD_DEV_RANGE_END: | ||
453 | devid = e->devid; | ||
454 | for (dev_i = devid_start; dev_i <= devid; ++dev_i) { | ||
455 | if (alias) | ||
456 | amd_iommu_alias_table[dev_i] = devid_to; | ||
457 | set_dev_entry_from_acpi( | ||
458 | amd_iommu_alias_table[dev_i], | ||
459 | flags, ext_flags); | ||
460 | } | ||
461 | break; | ||
462 | default: | ||
463 | break; | ||
464 | } | ||
465 | |||
466 | p += 0x04 << (e->type >> 6); | ||
467 | } | ||
468 | } | ||
469 | |||
470 | static int __init init_iommu_devices(struct amd_iommu *iommu) | ||
471 | { | ||
472 | u16 i; | ||
473 | |||
474 | for (i = iommu->first_device; i <= iommu->last_device; ++i) | ||
475 | set_iommu_for_device(iommu, i); | ||
476 | |||
477 | return 0; | ||
478 | } | ||
479 | |||
480 | static void __init free_iommu_one(struct amd_iommu *iommu) | ||
481 | { | ||
482 | free_command_buffer(iommu); | ||
483 | iommu_unmap_mmio_space(iommu); | ||
484 | } | ||
485 | |||
486 | static void __init free_iommu_all(void) | ||
487 | { | ||
488 | struct amd_iommu *iommu, *next; | ||
489 | |||
490 | list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) { | ||
491 | list_del(&iommu->list); | ||
492 | free_iommu_one(iommu); | ||
493 | kfree(iommu); | ||
494 | } | ||
495 | } | ||
496 | |||
497 | static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) | ||
498 | { | ||
499 | spin_lock_init(&iommu->lock); | ||
500 | list_add_tail(&iommu->list, &amd_iommu_list); | ||
501 | |||
502 | /* | ||
503 | * Copy data from ACPI table entry to the iommu struct | ||
504 | */ | ||
505 | iommu->devid = h->devid; | ||
506 | iommu->cap_ptr = h->cap_ptr; | ||
507 | iommu->mmio_phys = h->mmio_phys; | ||
508 | iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys); | ||
509 | if (!iommu->mmio_base) | ||
510 | return -ENOMEM; | ||
511 | |||
512 | iommu_set_device_table(iommu); | ||
513 | iommu->cmd_buf = alloc_command_buffer(iommu); | ||
514 | if (!iommu->cmd_buf) | ||
515 | return -ENOMEM; | ||
516 | |||
517 | init_iommu_from_pci(iommu); | ||
518 | init_iommu_from_acpi(iommu, h); | ||
519 | init_iommu_devices(iommu); | ||
520 | |||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | static int __init init_iommu_all(struct acpi_table_header *table) | ||
525 | { | ||
526 | u8 *p = (u8 *)table, *end = (u8 *)table; | ||
527 | struct ivhd_header *h; | ||
528 | struct amd_iommu *iommu; | ||
529 | int ret; | ||
530 | |||
531 | INIT_LIST_HEAD(&amd_iommu_list); | ||
532 | |||
533 | end += table->length; | ||
534 | p += IVRS_HEADER_LENGTH; | ||
535 | |||
536 | while (p < end) { | ||
537 | h = (struct ivhd_header *)p; | ||
538 | switch (*p) { | ||
539 | case ACPI_IVHD_TYPE: | ||
540 | iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); | ||
541 | if (iommu == NULL) | ||
542 | return -ENOMEM; | ||
543 | ret = init_iommu_one(iommu, h); | ||
544 | if (ret) | ||
545 | return ret; | ||
546 | break; | ||
547 | default: | ||
548 | break; | ||
549 | } | ||
550 | p += h->length; | ||
551 | |||
552 | } | ||
553 | WARN_ON(p != end); | ||
554 | |||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | static void __init free_unity_maps(void) | ||
559 | { | ||
560 | struct unity_map_entry *entry, *next; | ||
561 | |||
562 | list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { | ||
563 | list_del(&entry->list); | ||
564 | kfree(entry); | ||
565 | } | ||
566 | } | ||
567 | |||
568 | static int __init init_exclusion_range(struct ivmd_header *m) | ||
569 | { | ||
570 | int i; | ||
571 | |||
572 | switch (m->type) { | ||
573 | case ACPI_IVMD_TYPE: | ||
574 | set_device_exclusion_range(m->devid, m); | ||
575 | break; | ||
576 | case ACPI_IVMD_TYPE_ALL: | ||
577 | for (i = 0; i < amd_iommu_last_bdf; ++i) | ||
578 | set_device_exclusion_range(i, m); | ||
579 | break; | ||
580 | case ACPI_IVMD_TYPE_RANGE: | ||
581 | for (i = m->devid; i <= m->aux; ++i) | ||
582 | set_device_exclusion_range(i, m); | ||
583 | break; | ||
584 | default: | ||
585 | break; | ||
586 | } | ||
587 | |||
588 | return 0; | ||
589 | } | ||
590 | |||
591 | static int __init init_unity_map_range(struct ivmd_header *m) | ||
592 | { | ||
593 | struct unity_map_entry *e = NULL; | ||
594 | |||
595 | e = kzalloc(sizeof(*e), GFP_KERNEL); | ||
596 | if (e == NULL) | ||
597 | return -ENOMEM; | ||
598 | |||
599 | switch (m->type) { | ||
600 | default: | ||
601 | case ACPI_IVMD_TYPE: | ||
602 | e->devid_start = e->devid_end = m->devid; | ||
603 | break; | ||
604 | case ACPI_IVMD_TYPE_ALL: | ||
605 | e->devid_start = 0; | ||
606 | e->devid_end = amd_iommu_last_bdf; | ||
607 | break; | ||
608 | case ACPI_IVMD_TYPE_RANGE: | ||
609 | e->devid_start = m->devid; | ||
610 | e->devid_end = m->aux; | ||
611 | break; | ||
612 | } | ||
613 | e->address_start = PAGE_ALIGN(m->range_start); | ||
614 | e->address_end = e->address_start + PAGE_ALIGN(m->range_length); | ||
615 | e->prot = m->flags >> 1; | ||
616 | |||
617 | list_add_tail(&e->list, &amd_iommu_unity_map); | ||
618 | |||
619 | return 0; | ||
620 | } | ||
621 | |||
622 | static int __init init_memory_definitions(struct acpi_table_header *table) | ||
623 | { | ||
624 | u8 *p = (u8 *)table, *end = (u8 *)table; | ||
625 | struct ivmd_header *m; | ||
626 | |||
627 | INIT_LIST_HEAD(&amd_iommu_unity_map); | ||
628 | |||
629 | end += table->length; | ||
630 | p += IVRS_HEADER_LENGTH; | ||
631 | |||
632 | while (p < end) { | ||
633 | m = (struct ivmd_header *)p; | ||
634 | if (m->flags & IVMD_FLAG_EXCL_RANGE) | ||
635 | init_exclusion_range(m); | ||
636 | else if (m->flags & IVMD_FLAG_UNITY_MAP) | ||
637 | init_unity_map_range(m); | ||
638 | |||
639 | p += m->length; | ||
640 | } | ||
641 | |||
642 | return 0; | ||
643 | } | ||
644 | |||
645 | static void __init enable_iommus(void) | ||
646 | { | ||
647 | struct amd_iommu *iommu; | ||
648 | |||
649 | list_for_each_entry(iommu, &amd_iommu_list, list) { | ||
650 | iommu_set_exclusion_range(iommu); | ||
651 | iommu_enable(iommu); | ||
652 | } | ||
653 | } | ||
654 | |||
655 | /* | ||
656 | * Suspend/Resume support | ||
657 | * disable suspend until real resume implemented | ||
658 | */ | ||
659 | |||
660 | static int amd_iommu_resume(struct sys_device *dev) | ||
661 | { | ||
662 | return 0; | ||
663 | } | ||
664 | |||
665 | static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state) | ||
666 | { | ||
667 | return -EINVAL; | ||
668 | } | ||
669 | |||
670 | static struct sysdev_class amd_iommu_sysdev_class = { | ||
671 | .name = "amd_iommu", | ||
672 | .suspend = amd_iommu_suspend, | ||
673 | .resume = amd_iommu_resume, | ||
674 | }; | ||
675 | |||
676 | static struct sys_device device_amd_iommu = { | ||
677 | .id = 0, | ||
678 | .cls = &amd_iommu_sysdev_class, | ||
679 | }; | ||
680 | |||
681 | int __init amd_iommu_init(void) | ||
682 | { | ||
683 | int i, ret = 0; | ||
684 | |||
685 | |||
686 | if (no_iommu) { | ||
687 | printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n"); | ||
688 | return 0; | ||
689 | } | ||
690 | |||
691 | if (!amd_iommu_detected) | ||
692 | return -ENODEV; | ||
693 | |||
694 | /* | ||
695 | * First parse ACPI tables to find the largest Bus/Dev/Func | ||
696 | * we need to handle. Based on this information the shared data | ||
697 | * structures for the IOMMUs in the system will be allocated | ||
698 | */ | ||
699 | if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) | ||
700 | return -ENODEV; | ||
701 | |||
702 | dev_table_size = TBL_SIZE(DEV_TABLE_ENTRY_SIZE); | ||
703 | alias_table_size = TBL_SIZE(ALIAS_TABLE_ENTRY_SIZE); | ||
704 | rlookup_table_size = TBL_SIZE(RLOOKUP_TABLE_ENTRY_SIZE); | ||
705 | |||
706 | ret = -ENOMEM; | ||
707 | |||
708 | /* Device table - directly used by all IOMMUs */ | ||
709 | amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL, | ||
710 | get_order(dev_table_size)); | ||
711 | if (amd_iommu_dev_table == NULL) | ||
712 | goto out; | ||
713 | |||
714 | /* | ||
715 | * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the | ||
716 | * IOMMU sees for that device | ||
717 | */ | ||
718 | amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, | ||
719 | get_order(alias_table_size)); | ||
720 | if (amd_iommu_alias_table == NULL) | ||
721 | goto free; | ||
722 | |||
723 | /* IOMMU rlookup table - find the IOMMU for a specific device */ | ||
724 | amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL, | ||
725 | get_order(rlookup_table_size)); | ||
726 | if (amd_iommu_rlookup_table == NULL) | ||
727 | goto free; | ||
728 | |||
729 | /* | ||
730 | * Protection Domain table - maps devices to protection domains | ||
731 | * This table has the same size as the rlookup_table | ||
732 | */ | ||
733 | amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL, | ||
734 | get_order(rlookup_table_size)); | ||
735 | if (amd_iommu_pd_table == NULL) | ||
736 | goto free; | ||
737 | |||
738 | amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(GFP_KERNEL, | ||
739 | get_order(MAX_DOMAIN_ID/8)); | ||
740 | if (amd_iommu_pd_alloc_bitmap == NULL) | ||
741 | goto free; | ||
742 | |||
743 | /* | ||
744 | * memory is allocated now; initialize the device table with all zeroes | ||
745 | * and let every alias entry point to itself | ||
746 | */ | ||
747 | memset(amd_iommu_dev_table, 0, dev_table_size); | ||
748 | for (i = 0; i < amd_iommu_last_bdf; ++i) | ||
749 | amd_iommu_alias_table[i] = i; | ||
750 | |||
751 | memset(amd_iommu_pd_table, 0, rlookup_table_size); | ||
752 | memset(amd_iommu_pd_alloc_bitmap, 0, MAX_DOMAIN_ID / 8); | ||
753 | |||
754 | /* | ||
755 | * never allocate domain 0 because it's used as the non-allocated and | ||
756 | * error value placeholder | ||
757 | */ | ||
758 | amd_iommu_pd_alloc_bitmap[0] = 1; | ||
759 | |||
760 | /* | ||
761 | * now the data structures are allocated and basically initialized | ||
762 | * start the real acpi table scan | ||
763 | */ | ||
764 | ret = -ENODEV; | ||
765 | if (acpi_table_parse("IVRS", init_iommu_all) != 0) | ||
766 | goto free; | ||
767 | |||
768 | if (acpi_table_parse("IVRS", init_memory_definitions) != 0) | ||
769 | goto free; | ||
770 | |||
771 | ret = amd_iommu_init_dma_ops(); | ||
772 | if (ret) | ||
773 | goto free; | ||
774 | |||
775 | ret = sysdev_class_register(&amd_iommu_sysdev_class); | ||
776 | if (ret) | ||
777 | goto free; | ||
778 | |||
779 | ret = sysdev_register(&device_amd_iommu); | ||
780 | if (ret) | ||
781 | goto free; | ||
782 | |||
783 | enable_iommus(); | ||
784 | |||
785 | printk(KERN_INFO "AMD IOMMU: aperture size is %d MB\n", | ||
786 | (1 << (amd_iommu_aperture_order-20))); | ||
787 | |||
788 | printk(KERN_INFO "AMD IOMMU: device isolation "); | ||
789 | if (amd_iommu_isolate) | ||
790 | printk("enabled\n"); | ||
791 | else | ||
792 | printk("disabled\n"); | ||
793 | |||
794 | out: | ||
795 | return ret; | ||
796 | |||
797 | free: | ||
798 | if (amd_iommu_pd_alloc_bitmap) | ||
799 | free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 1); | ||
800 | |||
801 | if (amd_iommu_pd_table) | ||
802 | free_pages((unsigned long)amd_iommu_pd_table, | ||
803 | get_order(rlookup_table_size)); | ||
804 | |||
805 | if (amd_iommu_rlookup_table) | ||
806 | free_pages((unsigned long)amd_iommu_rlookup_table, | ||
807 | get_order(rlookup_table_size)); | ||
808 | |||
809 | if (amd_iommu_alias_table) | ||
810 | free_pages((unsigned long)amd_iommu_alias_table, | ||
811 | get_order(alias_table_size)); | ||
812 | |||
813 | if (amd_iommu_dev_table) | ||
814 | free_pages((unsigned long)amd_iommu_dev_table, | ||
815 | get_order(dev_table_size)); | ||
816 | |||
817 | free_iommu_all(); | ||
818 | |||
819 | free_unity_maps(); | ||
820 | |||
821 | goto out; | ||
822 | } | ||
823 | |||
824 | static int __init early_amd_iommu_detect(struct acpi_table_header *table) | ||
825 | { | ||
826 | return 0; | ||
827 | } | ||
828 | |||
829 | void __init amd_iommu_detect(void) | ||
830 | { | ||
831 | if (swiotlb || no_iommu || iommu_detected) | ||
832 | return; | ||
833 | |||
834 | if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { | ||
835 | iommu_detected = 1; | ||
836 | amd_iommu_detected = 1; | ||
837 | #ifdef CONFIG_GART_IOMMU | ||
838 | gart_iommu_aperture_disabled = 1; | ||
839 | gart_iommu_aperture = 0; | ||
840 | #endif | ||
841 | } | ||
842 | } | ||
843 | |||
844 | static int __init parse_amd_iommu_options(char *str) | ||
845 | { | ||
846 | for (; *str; ++str) { | ||
847 | if (strcmp(str, "isolate") == 0) | ||
848 | amd_iommu_isolate = 1; | ||
849 | } | ||
850 | |||
851 | return 1; | ||
852 | } | ||
853 | |||
854 | static int __init parse_amd_iommu_size_options(char *str) | ||
855 | { | ||
856 | for (; *str; ++str) { | ||
857 | if (strcmp(str, "32M") == 0) | ||
858 | amd_iommu_aperture_order = 25; | ||
859 | if (strcmp(str, "64M") == 0) | ||
860 | amd_iommu_aperture_order = 26; | ||
861 | if (strcmp(str, "128M") == 0) | ||
862 | amd_iommu_aperture_order = 27; | ||
863 | if (strcmp(str, "256M") == 0) | ||
864 | amd_iommu_aperture_order = 28; | ||
865 | if (strcmp(str, "512M") == 0) | ||
866 | amd_iommu_aperture_order = 29; | ||
867 | if (strcmp(str, "1G") == 0) | ||
868 | amd_iommu_aperture_order = 30; | ||
869 | } | ||
870 | |||
871 | return 1; | ||
872 | } | ||
873 | |||
874 | __setup("amd_iommu=", parse_amd_iommu_options); | ||
875 | __setup("amd_iommu_size=", parse_amd_iommu_size_options); | ||
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 5cf0aa993f4f..e4c5f951e68d 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -925,11 +925,11 @@ error_kernelspace: | |||
925 | iret run with kernel gs again, so don't set the user space flag. | 925 | iret run with kernel gs again, so don't set the user space flag. |
926 | B stepping K8s sometimes report a truncated RIP for IRET | 926 | B stepping K8s sometimes report a truncated RIP for IRET |
927 | exceptions returning to compat mode. Check for these here too. */ | 927 | exceptions returning to compat mode. Check for these here too. */ |
928 | leaq irq_return(%rip),%rbp | 928 | leaq irq_return(%rip),%rcx |
929 | cmpq %rbp,RIP(%rsp) | 929 | cmpq %rcx,RIP(%rsp) |
930 | je error_swapgs | 930 | je error_swapgs |
931 | movl %ebp,%ebp /* zero extend */ | 931 | movl %ecx,%ecx /* zero extend */ |
932 | cmpq %rbp,RIP(%rsp) | 932 | cmpq %rcx,RIP(%rsp) |
933 | je error_swapgs | 933 | je error_swapgs |
934 | cmpq $gs_change,RIP(%rsp) | 934 | cmpq $gs_change,RIP(%rsp) |
935 | je error_swapgs | 935 | je error_swapgs |
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 95e80e5033c3..eb9ddd8efb82 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -162,7 +162,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset, | |||
162 | int ret; | 162 | int ret; |
163 | 163 | ||
164 | if (!cpu_has_fxsr) | 164 | if (!cpu_has_fxsr) |
165 | return -EIO; | 165 | return -ENODEV; |
166 | 166 | ||
167 | ret = init_fpu(target); | 167 | ret = init_fpu(target); |
168 | if (ret) | 168 | if (ret) |
@@ -179,7 +179,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
179 | int ret; | 179 | int ret; |
180 | 180 | ||
181 | if (!cpu_has_fxsr) | 181 | if (!cpu_has_fxsr) |
182 | return -EIO; | 182 | return -ENODEV; |
183 | 183 | ||
184 | ret = init_fpu(target); | 184 | ret = init_fpu(target); |
185 | if (ret) | 185 | if (ret) |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 3c43109ba054..cb0bdf440715 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <asm/dma.h> | 7 | #include <asm/dma.h> |
8 | #include <asm/gart.h> | 8 | #include <asm/gart.h> |
9 | #include <asm/calgary.h> | 9 | #include <asm/calgary.h> |
10 | #include <asm/amd_iommu.h> | ||
10 | 11 | ||
11 | int forbid_dac __read_mostly; | 12 | int forbid_dac __read_mostly; |
12 | EXPORT_SYMBOL(forbid_dac); | 13 | EXPORT_SYMBOL(forbid_dac); |
@@ -123,6 +124,8 @@ void __init pci_iommu_alloc(void) | |||
123 | 124 | ||
124 | detect_intel_iommu(); | 125 | detect_intel_iommu(); |
125 | 126 | ||
127 | amd_iommu_detect(); | ||
128 | |||
126 | #ifdef CONFIG_SWIOTLB | 129 | #ifdef CONFIG_SWIOTLB |
127 | pci_swiotlb_init(); | 130 | pci_swiotlb_init(); |
128 | #endif | 131 | #endif |
@@ -503,6 +506,8 @@ static int __init pci_iommu_init(void) | |||
503 | 506 | ||
504 | intel_iommu_init(); | 507 | intel_iommu_init(); |
505 | 508 | ||
509 | amd_iommu_init(); | ||
510 | |||
506 | #ifdef CONFIG_GART_IOMMU | 511 | #ifdef CONFIG_GART_IOMMU |
507 | gart_iommu_init(); | 512 | gart_iommu_init(); |
508 | #endif | 513 | #endif |
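pci-dma.c calls amd_iommu_detect() and amd_iommu_init() unconditionally, so <asm/amd_iommu.h> presumably supplies no-op stubs when CONFIG_AMD_IOMMU is not set; the header itself is outside the hunks shown here, but the usual pattern looks roughly like this:

        /* sketch only - the real asm/amd_iommu.h is not part of this hunk */
        #ifdef CONFIG_AMD_IOMMU
        extern int  amd_iommu_init(void);
        extern void amd_iommu_detect(void);
        #else
        static inline int  amd_iommu_init(void)   { return -ENODEV; }
        static inline void amd_iommu_detect(void) { }
        #endif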
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index a7835f282936..77040b6070e1 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -943,13 +943,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
943 | return copy_regset_to_user(child, &user_x86_32_view, | 943 | return copy_regset_to_user(child, &user_x86_32_view, |
944 | REGSET_XFP, | 944 | REGSET_XFP, |
945 | 0, sizeof(struct user_fxsr_struct), | 945 | 0, sizeof(struct user_fxsr_struct), |
946 | datap); | 946 | datap) ? -EIO : 0; |
947 | 947 | ||
948 | case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ | 948 | case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ |
949 | return copy_regset_from_user(child, &user_x86_32_view, | 949 | return copy_regset_from_user(child, &user_x86_32_view, |
950 | REGSET_XFP, | 950 | REGSET_XFP, |
951 | 0, sizeof(struct user_fxsr_struct), | 951 | 0, sizeof(struct user_fxsr_struct), |
952 | datap); | 952 | datap) ? -EIO : 0; |
953 | #endif | 953 | #endif |
954 | 954 | ||
955 | #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION | 955 | #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION |
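Together with the i387.c hunk above, this splits the error handling into two layers: the regset methods return the generic -ENODEV when FXSR is unavailable, and arch_ptrace() maps any regset failure back to the -EIO that the ptrace ABI has historically returned for these requests. Condensed sketch (not literal kernel code):

        /* regset method (xfpregs_get / xfpregs_set) */
        if (!cpu_has_fxsr)
                return -ENODEV;         /* "this register set does not exist here" */

        /* arch_ptrace(), PTRACE_GETFPXREGS: preserve the old user-visible error */
        return copy_regset_to_user(child, &user_x86_32_view, REGSET_XFP, 0,
                                   sizeof(struct user_fxsr_struct), datap) ? -EIO : 0;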
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c index c737849e2ef7..39ae8511a137 100644 --- a/arch/x86/kernel/time_64.c +++ b/arch/x86/kernel/time_64.c | |||
@@ -123,6 +123,8 @@ void __init time_init(void) | |||
123 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) | 123 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) |
124 | cpu_khz = calculate_cpu_khz(); | 124 | cpu_khz = calculate_cpu_khz(); |
125 | 125 | ||
126 | lpj_fine = ((unsigned long)tsc_khz * 1000)/HZ; | ||
127 | |||
126 | if (unsynchronized_tsc()) | 128 | if (unsynchronized_tsc()) |
127 | mark_tsc_unstable("TSCs unsynchronized"); | 129 | mark_tsc_unstable("TSCs unsynchronized"); |
128 | 130 | ||
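lpj_fine is the number of TSC cycles per jiffy, which delay-loop calibration can then pick up as a precomputed value instead of measuring it. A worked example with made-up numbers:

        /* illustrative: tsc_khz = 2400000 (a 2.4 GHz TSC), HZ = 1000 */
        lpj_fine = ((unsigned long)2400000 * 1000) / 1000;   /* = 2,400,000 cycles per jiffy */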
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index 774a5a83c296..6240922e497c 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/sched.h> | 1 | #include <linux/sched.h> |
2 | #include <linux/clocksource.h> | 2 | #include <linux/clocksource.h> |
3 | #include <linux/workqueue.h> | 3 | #include <linux/workqueue.h> |
4 | #include <linux/delay.h> | ||
4 | #include <linux/cpufreq.h> | 5 | #include <linux/cpufreq.h> |
5 | #include <linux/jiffies.h> | 6 | #include <linux/jiffies.h> |
6 | #include <linux/init.h> | 7 | #include <linux/init.h> |
@@ -403,6 +404,7 @@ static inline void check_geode_tsc_reliable(void) { } | |||
403 | void __init tsc_init(void) | 404 | void __init tsc_init(void) |
404 | { | 405 | { |
405 | int cpu; | 406 | int cpu; |
407 | u64 lpj; | ||
406 | 408 | ||
407 | if (!cpu_has_tsc || tsc_disabled > 0) | 409 | if (!cpu_has_tsc || tsc_disabled > 0) |
408 | return; | 410 | return; |
@@ -415,6 +417,10 @@ void __init tsc_init(void) | |||
415 | return; | 417 | return; |
416 | } | 418 | } |
417 | 419 | ||
420 | lpj = ((u64)tsc_khz * 1000); | ||
421 | do_div(lpj, HZ); | ||
422 | lpj_fine = lpj; | ||
423 | |||
418 | /* now allow native_sched_clock() to use rdtsc */ | 424 | /* now allow native_sched_clock() to use rdtsc */ |
419 | tsc_disabled = 0; | 425 | tsc_disabled = 0; |
420 | 426 | ||
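The 32-bit path computes the same cycles-per-jiffy value, but a plain "/" on a u64 would make gcc emit a call to a 64-bit division helper from libgcc that the kernel does not provide; do_div() instead divides its first argument in place by a 32-bit divisor and returns the remainder:

        u64 lpj = (u64)tsc_khz * 1000;   /* cycles per second; may not fit in 32 bits          */
        do_div(lpj, HZ);                 /* lpj now holds cycles per jiffy (remainder dropped)  */
        lpj_fine = lpj;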
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c index d710f2d167bb..ef691316f8b6 100644 --- a/arch/x86/lib/delay_32.c +++ b/arch/x86/lib/delay_32.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 1993 Linus Torvalds | 4 | * Copyright (C) 1993 Linus Torvalds |
5 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | 5 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> |
6 | * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com> | ||
6 | * | 7 | * |
7 | * The __delay function must _NOT_ be inlined as its execution time | 8 | * The __delay function must _NOT_ be inlined as its execution time |
8 | * depends wildly on alignment on many x86 processors. The additional | 9 | * depends wildly on alignment on many x86 processors. The additional |
@@ -28,16 +29,22 @@ | |||
28 | /* simple loop based delay: */ | 29 | /* simple loop based delay: */ |
29 | static void delay_loop(unsigned long loops) | 30 | static void delay_loop(unsigned long loops) |
30 | { | 31 | { |
31 | int d0; | ||
32 | |||
33 | __asm__ __volatile__( | 32 | __asm__ __volatile__( |
34 | "\tjmp 1f\n" | 33 | " test %0,%0 \n" |
35 | ".align 16\n" | 34 | " jz 3f \n" |
36 | "1:\tjmp 2f\n" | 35 | " jmp 1f \n" |
37 | ".align 16\n" | 36 | |
38 | "2:\tdecl %0\n\tjns 2b" | 37 | ".align 16 \n" |
39 | :"=&a" (d0) | 38 | "1: jmp 2f \n" |
40 | :"0" (loops)); | 39 | |
40 | ".align 16 \n" | ||
41 | "2: decl %0 \n" | ||
42 | " jnz 2b \n" | ||
43 | "3: decl %0 \n" | ||
44 | |||
45 | : /* we don't need output */ | ||
46 | :"a" (loops) | ||
47 | ); | ||
41 | } | 48 | } |
42 | 49 | ||
43 | /* TSC based delay: */ | 50 | /* TSC based delay: */ |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 1e64795714c8..578b76819551 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -392,11 +392,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, | |||
392 | printk(KERN_CONT "NULL pointer dereference"); | 392 | printk(KERN_CONT "NULL pointer dereference"); |
393 | else | 393 | else |
394 | printk(KERN_CONT "paging request"); | 394 | printk(KERN_CONT "paging request"); |
395 | #ifdef CONFIG_X86_32 | 395 | printk(KERN_CONT " at %p\n", (void *) address); |
396 | printk(KERN_CONT " at %08lx\n", address); | ||
397 | #else | ||
398 | printk(KERN_CONT " at %016lx\n", address); | ||
399 | #endif | ||
400 | printk(KERN_ALERT "IP:"); | 396 | printk(KERN_ALERT "IP:"); |
401 | printk_address(regs->ip, 1); | 397 | printk_address(regs->ip, 1); |
402 | dump_pagetable(address); | 398 | dump_pagetable(address); |
@@ -796,14 +792,10 @@ bad_area_nosemaphore: | |||
796 | if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && | 792 | if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && |
797 | printk_ratelimit()) { | 793 | printk_ratelimit()) { |
798 | printk( | 794 | printk( |
799 | #ifdef CONFIG_X86_32 | 795 | "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", |
800 | "%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx", | ||
801 | #else | ||
802 | "%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx", | ||
803 | #endif | ||
804 | task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, | 796 | task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, |
805 | tsk->comm, task_pid_nr(tsk), address, regs->ip, | 797 | tsk->comm, task_pid_nr(tsk), address, |
806 | regs->sp, error_code); | 798 | (void *) regs->ip, (void *) regs->sp, error_code); |
807 | print_vma_addr(" in ", regs->ip); | 799 | print_vma_addr(" in ", regs->ip); |
808 | printk("\n"); | 800 | printk("\n"); |
809 | } | 801 | } |
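The %p conversion pads to the pointer width of the running kernel, which is what makes the 32/64-bit ifdefs unnecessary: the same format string prints 8 hex digits on i386 and 16 on x86_64. For example (addresses invented for illustration):

        printk(KERN_CONT " at %p\n", (void *)address);
        /* 32-bit build:  " at c1234567"
           64-bit build:  " at ffffffff81234567" */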
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index cc48d3fde545..2b6ad5b9f9d5 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -269,12 +269,13 @@ static void nmi_cpu_shutdown(void *dummy) | |||
269 | 269 | ||
270 | static void nmi_shutdown(void) | 270 | static void nmi_shutdown(void) |
271 | { | 271 | { |
272 | struct op_msrs *msrs = &__get_cpu_var(cpu_msrs); | 272 | struct op_msrs *msrs = &get_cpu_var(cpu_msrs); |
273 | nmi_enabled = 0; | 273 | nmi_enabled = 0; |
274 | on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); | 274 | on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); |
275 | unregister_die_notifier(&profile_exceptions_nb); | 275 | unregister_die_notifier(&profile_exceptions_nb); |
276 | model->shutdown(msrs); | 276 | model->shutdown(msrs); |
277 | free_msrs(); | 277 | free_msrs(); |
278 | put_cpu_var(cpu_msrs); | ||
278 | } | 279 | } |
279 | 280 | ||
280 | static void nmi_cpu_start(void *dummy) | 281 | static void nmi_cpu_start(void *dummy) |
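The switch from __get_cpu_var() to the get_cpu_var()/put_cpu_var() pair is not cosmetic: get_cpu_var() disables preemption, so nmi_shutdown() cannot migrate to another CPU while it still holds a pointer into this CPU's cpu_msrs, and put_cpu_var() re-enables preemption afterwards. The general shape of the pattern:

        struct op_msrs *msrs = &get_cpu_var(cpu_msrs);  /* implies preempt_disable() */
        /* ... msrs stays valid: the task is pinned to this CPU ... */
        put_cpu_var(cpu_msrs);                          /* implies preempt_enable()  */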