author		Joerg Roedel <joerg.roedel@amd.com>	2008-06-26 15:27:58 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 04:12:17 -0400
commit		ec487d1a110abfffebdf116560db5c1d71b94cab (patch)
tree		2514fe1a103a5f9e300962d1447f3f11c48234ba /arch/x86/kernel/amd_iommu.c
parent		d3086444b2988659a50af09462261c78d3012cb4 (diff)
x86, AMD IOMMU: add domain allocation and deallocation functions
This patch adds the functions to allocate and free protection domains for
the IOMMU.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
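[Editor's note] For orientation, a minimal sketch of how the two new entry points pair up. The calling context is an assumption (the wiring into the dma_ops initialization path comes later in the series), and example_setup_domain() is a hypothetical helper, not part of this patch; sketches of the individual techniques follow the diff below.

	/* Hypothetical caller, for illustration only. */
	static int example_setup_domain(struct amd_iommu *iommu)
	{
		/* order 28 -> a 256 MB aperture, inside the allowed 32 MB..1 GB */
		struct dma_ops_domain *dom = dma_ops_domain_alloc(iommu, 28);

		if (!dom)
			return -ENOMEM;

		/* ... use dom->domain for DMA mappings ... */

		dma_ops_domain_free(dom);	/* teardown undoes everything */
		return 0;
	}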
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	147
1 file changed, 147 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 69d8d024387e..c43d15dbc8fa 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -314,3 +314,150 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 	iommu_area_free(dom->bitmap, address, pages);
 }
 
+static u16 domain_id_alloc(void)
+{
+	unsigned long flags;
+	int id;
+
+	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
+	BUG_ON(id == 0);
+	if (id > 0 && id < MAX_DOMAIN_ID)
+		__set_bit(id, amd_iommu_pd_alloc_bitmap);
+	else
+		id = 0;
+	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+
+	return id;
+}
+
+static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
+				      unsigned long start_page,
+				      unsigned int pages)
+{
+	unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;
+
+	if (start_page + pages > last_page)
+		pages = last_page - start_page;
+
+	set_bit_string(dom->bitmap, start_page, pages);
+}
+
+static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
+{
+	int i, j;
+	u64 *p1, *p2, *p3;
+
+	p1 = dma_dom->domain.pt_root;
+
+	if (!p1)
+		return;
+
+	for (i = 0; i < 512; ++i) {
+		if (!IOMMU_PTE_PRESENT(p1[i]))
+			continue;
+
+		p2 = IOMMU_PTE_PAGE(p1[i]);
+		for (j = 0; j < 512; ++j) {
+			if (!IOMMU_PTE_PRESENT(p2[j]))
+				continue;
+			p3 = IOMMU_PTE_PAGE(p2[j]);
+			free_page((unsigned long)p3);
+		}
+
+		free_page((unsigned long)p2);
+	}
+
+	free_page((unsigned long)p1);
+}
+
+static void dma_ops_domain_free(struct dma_ops_domain *dom)
+{
+	if (!dom)
+		return;
+
+	dma_ops_free_pagetable(dom);
+
+	kfree(dom->pte_pages);
+
+	kfree(dom->bitmap);
+
+	kfree(dom);
+}
+
+static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
+						   unsigned order)
+{
+	struct dma_ops_domain *dma_dom;
+	unsigned i, num_pte_pages;
+	u64 *l2_pde;
+	u64 address;
+
+	/*
+	 * Currently the DMA aperture must be between 32 MB and 1 GB in size
+	 */
+	if ((order < 25) || (order > 30))
+		return NULL;
+
+	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
+	if (!dma_dom)
+		return NULL;
+
+	spin_lock_init(&dma_dom->domain.lock);
+
+	dma_dom->domain.id = domain_id_alloc();
+	if (dma_dom->domain.id == 0)
+		goto free_dma_dom;
+	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
+	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+	dma_dom->domain.priv = dma_dom;
+	if (!dma_dom->domain.pt_root)
+		goto free_dma_dom;
+	dma_dom->aperture_size = (1ULL << order);
+	dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
+				  GFP_KERNEL);
+	if (!dma_dom->bitmap)
+		goto free_dma_dom;
+	/*
+	 * Mark the first page as allocated so we never return 0 as
+	 * a valid dma-address; 0 can then be used as an error value.
+	 */
+	dma_dom->bitmap[0] = 1;
+	dma_dom->next_bit = 0;
+
+	if (iommu->exclusion_start &&
+	    iommu->exclusion_start < dma_dom->aperture_size) {
+		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
+		int pages = to_pages(iommu->exclusion_start,
+				     iommu->exclusion_length);
+		dma_ops_reserve_addresses(dma_dom, startpage, pages);
+	}
+
+	num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
+	dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
+				     GFP_KERNEL);
+	if (!dma_dom->pte_pages)
+		goto free_dma_dom;
+
+	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
+	if (l2_pde == NULL)
+		goto free_dma_dom;
+
+	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));
+
+	for (i = 0; i < num_pte_pages; ++i) {
+		dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
+		if (!dma_dom->pte_pages[i])
+			goto free_dma_dom;
+		address = virt_to_phys(dma_dom->pte_pages[i]);
+		l2_pde[i] = IOMMU_L1_PDE(address);
+	}
+
+	return dma_dom;
+
+free_dma_dom:
+	dma_ops_domain_free(dma_dom);
+
+	return NULL;
+}
+
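[Editor's note] Both domain_id_alloc() and the aperture handling above lean on the same find-first-zero bitmap idiom. A minimal, userspace-runnable C sketch of that idiom follows; the names, the 65536-slot size, and the absence of the devtable lock are simplifications for illustration, not kernel API.

	#include <stdint.h>
	#include <stdio.h>

	#define NBITS 65536			/* stands in for MAX_DOMAIN_ID */

	static uint64_t bitmap[NBITS / 64] = { 1 };	/* bit 0 pre-set: id 0 means "none" */

	/* find_first_zero_bit() + __set_bit() rolled into one; 0 means exhausted */
	static unsigned id_alloc(void)
	{
		for (unsigned i = 0; i < NBITS; ++i)
			if (!(bitmap[i / 64] & (1ULL << (i % 64)))) {
				bitmap[i / 64] |= 1ULL << (i % 64);
				return i;
			}
		return 0;
	}

	/* same idea as dma_ops_reserve_addresses(): pre-mark a clamped range */
	static void reserve_range(unsigned start, unsigned count)
	{
		if (start + count > NBITS)
			count = NBITS - start;
		for (; count; --count, ++start)
			bitmap[start / 64] |= 1ULL << (start % 64);
	}

	int main(void)
	{
		printf("%u %u\n", id_alloc(), id_alloc());	/* prints "1 2" */
		reserve_range(3, 4);
		printf("%u\n", id_alloc());			/* prints "7": 3..6 reserved */
		return 0;
	}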
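[Editor's note] The nested walk in dma_ops_free_pagetable() (with the inner loop correctly advancing j, not i) is easier to see outside the kernel. A userspace sketch under obviously simplified assumptions: calloc() stands in for get_zeroed_page(), and a "PTE" here is just a host pointer with bit 0 as the present flag.

	#include <stdint.h>
	#include <stdlib.h>

	#define ENTRIES		512
	#define PTE_PRESENT	1ULL
	/* Illustration: child pointer in the upper bits, present flag in bit 0.
	 * Real IOMMU PTEs hold physical addresses plus more control bits. */
	#define PTE_PAGE(pte)	((uint64_t *)(uintptr_t)((pte) & ~PTE_PRESENT))

	static uint64_t *alloc_table(void)	/* get_zeroed_page() stand-in */
	{
		return calloc(ENTRIES, sizeof(uint64_t));
	}

	static void free_pagetable(uint64_t *p1)
	{
		int i, j;

		if (!p1)
			return;

		for (i = 0; i < ENTRIES; ++i) {
			if (!(p1[i] & PTE_PRESENT))
				continue;
			uint64_t *p2 = PTE_PAGE(p1[i]);
			/* ++j here: incrementing i instead would skip sibling
			 * tables and run the outer index past the array */
			for (j = 0; j < ENTRIES; ++j) {
				if (p2[j] & PTE_PRESENT)
					free(PTE_PAGE(p2[j]));
			}
			free(p2);
		}
		free(p1);
	}

	int main(void)
	{
		uint64_t *root = alloc_table(), *l2 = alloc_table(), *l1 = alloc_table();

		l2[0] = (uint64_t)(uintptr_t)l1 | PTE_PRESENT;
		root[0] = (uint64_t)(uintptr_t)l2 | PTE_PRESENT;
		free_pagetable(root);	/* releases l1, l2, then root */
		return 0;
	}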
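[Editor's note] As a worked example of the sizing arithmetic in dma_ops_domain_alloc(), assume 4 KB pages and order = 28, i.e. a 256 MB aperture (2^25 = 32 MB and 2^30 = 1 GB are the enforced bounds). The aperture then covers 2^28 / 2^12 = 65536 pages, so the address bitmap needs 65536 bits = 8 KB, which is exactly aperture_size / (PAGE_SIZE * 8). Each leaf page table holds 512 eight-byte PTEs, so num_pte_pages = 65536 / 512 = 128 leaf tables, filling 128 of the 512 slots in l2_pde.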