path: root/arch/x86/kernel/amd_iommu.c
author		Joerg Roedel <joerg.roedel@amd.com>	2011-04-06 05:49:28 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-04-06 05:49:28 -0400
commit		11b6402c6673b530fac9920c5640c75e99fee956
tree		1b1f914ae87c35ce0bb847879326eb8859c4d6db /arch/x86/kernel/amd_iommu.c
parent		94fe79e2f100bfcd8e7689cbf8838634779b80a2
x86/amd-iommu: Cleanup inv_pages command handling
This patch reworks the processing of invalidate-pages commands to the IOMMU. The function building the command is extended so we can get rid of another function. It was also renamed to match the other function names.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
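In effect, the separate per-IOMMU build-and-queue helper disappears: the command is now built once, and the same command structure is queued to every IOMMU that serves the domain. A minimal sketch of the resulting call pattern, condensed from the diff below (not the literal kernel code):

	struct iommu_cmd cmd;
	int ret = 0, i;

	/* Build the invalidate-pages command once ... */
	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);

	/* ... then queue the very same command to each IOMMU that has
	 * devices of this domain behind it. */
	for (i = 0; i < amd_iommus_present; ++i) {
		if (domain->dev_iommu[i])
			ret |= iommu_queue_command(amd_iommus[i], &cmd);
	}

	WARN_ON(ret);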
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	83
1 file changed, 36 insertions, 47 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 4e5631a433aa..f8ec28ea3314 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -397,6 +397,37 @@ static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
 	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
 }
 
+static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
+				  size_t size, u16 domid, int pde)
+{
+	u64 pages;
+	int s;
+
+	pages = iommu_num_pages(address, size, PAGE_SIZE);
+	s     = 0;
+
+	if (pages > 1) {
+		/*
+		 * If we have to flush more than one page, flush all
+		 * TLB entries for this domain
+		 */
+		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+		s = 1;
+	}
+
+	address &= PAGE_MASK;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->data[1] |= domid;
+	cmd->data[2]  = lower_32_bits(address);
+	cmd->data[3]  = upper_32_bits(address);
+	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+	if (s) /* size bit - we flush more than one 4kb page */
+		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
+		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+}
+
 /*
  * Writes the command to the IOMMUs command buffer and informs the
  * hardware about the new command. Must be called with iommu->lock held.
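The central decision in build_inv_iommu_pages() is whether the range fits in a single page: if it spans more than one, the builder gives up on per-page precision and requests a full-domain flush (CMD_INV_IOMMU_ALL_PAGES_ADDRESS plus the size bit). A small standalone sketch of that decision, assuming 4 KiB pages; num_pages() here is a hypothetical stand-in mirroring what the kernel's iommu_num_pages() computes:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	/* Number of pages touched by the range [addr, addr + size). */
	static unsigned long num_pages(uint64_t addr, uint64_t size)
	{
		return ((addr & ~PAGE_MASK) + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}

	int main(void)
	{
		/* 0x100 bytes inside one page: flushed by exact address,
		 * the size bit stays clear. */
		printf("%lu\n", num_pages(0x12345678, 0x100)); /* prints 1 */

		/* Same length crossing a page boundary: more than one page,
		 * so the builder switches to a full-domain flush. */
		printf("%lu\n", num_pages(0x12345ff0, 0x100)); /* prints 2 */

		return 0;
	}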
@@ -545,37 +576,6 @@ static int iommu_flush_device(struct device *dev)
 	return iommu_queue_command(iommu, &cmd);
 }
 
-static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
-					  u16 domid, int pde, int s)
-{
-	memset(cmd, 0, sizeof(*cmd));
-	address &= PAGE_MASK;
-	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-	cmd->data[1] |= domid;
-	cmd->data[2]  = lower_32_bits(address);
-	cmd->data[3]  = upper_32_bits(address);
-	if (s) /* size bit - we flush more than one 4kb page */
-		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-	if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
-		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
-}
-
-/*
- * Generic command send function for invalidaing TLB entries
- */
-static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
-				       u64 address, u16 domid, int pde, int s)
-{
-	struct iommu_cmd cmd;
-	int ret;
-
-	__iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);
-
-	ret = iommu_queue_command(iommu, &cmd);
-
-	return ret;
-}
-
 /*
  * TLB invalidation function which is called from the mapping functions.
  * It invalidates a single PTE if the range to flush is within a single
@@ -584,20 +584,10 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 static void __iommu_flush_pages(struct protection_domain *domain,
 				u64 address, size_t size, int pde)
 {
-	int s = 0, i;
-	unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);
-
-	address &= PAGE_MASK;
-
-	if (pages > 1) {
-		/*
-		 * If we have to flush more than one page, flush all
-		 * TLB entries for this domain
-		 */
-		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-		s = 1;
-	}
+	struct iommu_cmd cmd;
+	int ret = 0, i;
 
+	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
 
 	for (i = 0; i < amd_iommus_present; ++i) {
 		if (!domain->dev_iommu[i])
@@ -607,11 +597,10 @@ static void __iommu_flush_pages(struct protection_domain *domain,
 		 * Devices of this domain are behind this IOMMU
 		 * We need a TLB flush
 		 */
-		iommu_queue_inv_iommu_pages(amd_iommus[i], address,
-					    domain->id, pde, s);
+		ret |= iommu_queue_command(amd_iommus[i], &cmd);
 	}
 
-	return;
+	WARN_ON(ret);
 }
 
 static void iommu_flush_pages(struct protection_domain *domain,
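One detail worth noting in the reworked __iommu_flush_pages(): the result of each iommu_queue_command() call is OR-ed into ret, and the single WARN_ON(ret) at the end fires if queueing failed on any IOMMU. The pattern in isolation (do_one() is a placeholder, not a kernel function):

	int ret = 0, i;

	for (i = 0; i < n; ++i)
		ret |= do_one(i); /* 0 on success, nonzero on failure */

	/* OR-ing collapses distinct error codes into a single
	 * "something failed" value, which is all one warning needs. */
	WARN_ON(ret);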